source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
process.py | import atexit
import logging
import os
import shlex
import subprocess
import sys
import threading
import time
import yaml
from future.standard_library import install_aliases
from pyngrok import conf
from pyngrok.exception import PyngrokNgrokError, PyngrokSecurityError
from pyngrok.installer import validate_config
install_aliases()
from urllib.request import urlopen, Request
try:
from http import HTTPStatus as StatusCodes
except ImportError: # pragma: no cover
try:
from http import client as StatusCodes
except ImportError:
import httplib as StatusCodes
__author__ = "Alex Laird"
__copyright__ = "Copyright 2020, Alex Laird"
__version__ = "4.1.9"
# Module-level logger for pyngrok process-management events.
logger = logging.getLogger(__name__)

# Registry of running ngrok processes, keyed by the path to the ngrok binary,
# so one interpreter can manage at most one process per binary.
_current_processes = {}
class NgrokProcess:
    """
    An object containing information about the ``ngrok`` process.

    :var proc: The child process that is running ``ngrok``.
    :vartype proc: subprocess.Popen
    :var pyngrok_config: The ``pyngrok`` configuration to use with ``ngrok``.
    :vartype pyngrok_config: PyngrokConfig
    :var api_url: The API URL for the ``ngrok`` web interface.
    :vartype api_url: str
    :var logs: A list of the most recent logs from ``ngrok``, limited in size to ``max_logs``.
    :vartype logs: list[NgrokLog]
    :var startup_error: If ``ngrok`` startup fails, this will be the log of the failure.
    :vartype startup_error: str
    """

    def __init__(self, proc, pyngrok_config):
        self.proc = proc
        self.pyngrok_config = pyngrok_config

        self.api_url = None
        self.logs = []
        self.startup_error = None

        # Startup-state flags, flipped by _log_startup_line() as the
        # corresponding ngrok startup log messages arrive.
        self._tunnel_started = False
        self._client_connected = False
        # Reference to the monitor thread, if one is running (see
        # start_monitor_thread()); None otherwise.
        self._monitor_thread = None

    def __repr__(self):
        return "<NgrokProcess: \"{}\">".format(self.api_url)

    def __str__(self):  # pragma: no cover
        return "NgrokProcess: \"{}\"".format(self.api_url)

    @staticmethod
    def _line_has_error(log):
        # Only ERROR and CRITICAL levels are treated as startup failures.
        return log.lvl in ["ERROR", "CRITICAL"]

    def _log_startup_line(self, line):
        """
        Parse the given startup log line and use it to manage the startup state
        of the ``ngrok`` process.

        :param line: The line to be parsed and logged.
        :type line: str
        :return: The parsed log, or ``None`` if the line was blank.
        :rtype: NgrokLog
        """
        log = self._log_line(line)

        if log is None:
            return
        elif self._line_has_error(log):
            self.startup_error = log.err
        else:
            # Log `ngrok` startup states as they come in
            if "starting web service" in log.msg and log.addr is not None:
                self.api_url = "http://{}".format(log.addr)
            elif "tunnel session started" in log.msg:
                self._tunnel_started = True
            elif "client session established" in log.msg:
                self._client_connected = True

        return log

    def _log_line(self, line):
        """
        Parse, log, and emit (if ``log_event_callback`` in :class:`~pyngrok.conf.PyngrokConfig` is registered) the
        given log line.

        :param line: The line to be processed.
        :type line: str
        :return: The parsed log, or ``None`` if the line was blank.
        :rtype: NgrokLog
        """
        log = NgrokLog(line)

        # Blank lines are not logged, stored, or emitted.
        if log.line == "":
            return None

        # Re-log at the level ngrok itself reported (NgrokLog maps levels
        # onto names the logging module understands).
        logger.log(getattr(logging, log.lvl), line)

        self.logs.append(log)
        # Cap the in-memory buffer at max_logs, dropping the oldest entry.
        if len(self.logs) > self.pyngrok_config.max_logs:
            self.logs.pop(0)

        if self.pyngrok_config.log_event_callback is not None:
            self.pyngrok_config.log_event_callback(log)

        return log

    def healthy(self):
        """
        Check whether the ``ngrok`` process has finished starting up and is in a running, healthy state.

        :return: ``True`` if the ``ngrok`` process is started, running, and healthy.
        :rtype: bool
        :raises PyngrokSecurityError: When ``api_url`` does not start with ``http``.
        """
        if self.api_url is None or \
                not self._tunnel_started or not self._client_connected:
            return False

        if not self.api_url.lower().startswith("http"):
            raise PyngrokSecurityError("URL must start with \"http\": {}".format(self.api_url))

        # Ensure the process is available for requests before registering it as healthy
        request = Request("{}/api/tunnels".format(self.api_url))
        response = urlopen(request)
        if response.getcode() != StatusCodes.OK:
            return False

        return self.proc.poll() is None and \
               self.startup_error is None

    def _monitor_process(self):
        # Runs on the monitor thread; `alive` is a cooperative shutdown flag
        # that stop_monitor_thread() flips to False.
        thread = threading.current_thread()

        thread.alive = True
        while thread.alive and self.proc.poll() is None:
            # readline() blocks until ngrok emits its next line of output.
            self._log_line(self.proc.stdout.readline())

        self._monitor_thread = None

    def start_monitor_thread(self):
        """
        Start a thread that will monitor the ``ngrok`` process and its logs until it completes.

        If a monitor thread is already running, nothing will be done.
        """
        if self._monitor_thread is None:
            self._monitor_thread = threading.Thread(target=self._monitor_process)
            # Daemonize so the monitor never blocks interpreter shutdown.
            self._monitor_thread.daemon = True
            self._monitor_thread.start()

    def stop_monitor_thread(self):
        """
        Set the monitor thread to stop monitoring the ``ngrok`` process after the next log event. This will not
        necessarily terminate the thread immediately, as the thread may currently be idle, rather it sets a flag
        on the thread telling it to terminate the next time it wakes up.

        This has no impact on the ``ngrok`` process itself, only ``pyngrok``'s monitor of the process and
        its logs.
        """
        if self._monitor_thread is not None:
            self._monitor_thread.alive = False
class NgrokLog:
    """
    An object containing a parsed log from the ``ngrok`` process.

    :var line: The raw, unparsed log line.
    :vartype line: str
    :var t: The log's ISO 8601 timestamp.
    :vartype t: str
    :var lvl: The log's level.
    :vartype lvl: str
    :var msg: The log's message.
    :vartype msg: str
    :var err: The log's error, if applicable.
    :vartype err: str
    :var addr: The URL, if ``obj`` is "web".
    :vartype addr: str
    """

    def __init__(self, line):
        self.line = line.strip()

        # Defaults for the attributes a well-formed ngrok log line carries.
        self.t = None
        self.lvl = "NOTSET"
        self.msg = None
        self.err = None
        self.addr = None

        # ngrok emits logfmt-style lines: space-separated key=value tokens,
        # with quoted values handled by shlex. Tokens without "=" are noise.
        for token in shlex.split(self.line):
            if "=" not in token:
                continue

            key, _, value = token.partition("=")

            if key == "lvl":
                value = self._normalize_level(value)

            # Any key ngrok emits (t, lvl, msg, err, addr, obj, ...) becomes
            # an attribute on this object.
            setattr(self, key, value)

    def _normalize_level(self, value):
        # Map ngrok's level names onto names the logging module understands,
        # falling back to the current level when blank or unrecognized.
        if not value:
            return self.lvl

        value = value.upper()
        aliases = {"CRIT": "CRITICAL", "ERR": "ERROR", "EROR": "ERROR", "WARN": "WARNING"}
        value = aliases.get(value, value)

        return value if hasattr(logging, value) else self.lvl

    def __repr__(self):
        return '<NgrokLog: t={} lvl={} msg="{}">'.format(self.t, self.lvl, self.msg)

    def __str__(self):  # pragma: no cover
        # Render every public, non-None attribute except the raw line itself.
        printable = [attr for attr in dir(self)
                     if not attr.startswith("_") and attr != "line" and getattr(self, attr) is not None]
        return " ".join('{}="{}"'.format(attr, getattr(self, attr)) for attr in printable)
def set_auth_token(pyngrok_config, token):
    """
    Set the ``ngrok`` auth token in the config file, enabling authenticated features (for instance,
    more concurrent tunnels, custom subdomains, etc.).

    :param pyngrok_config: The ``pyngrok`` configuration to use when interacting with the ``ngrok`` binary.
    :type pyngrok_config: PyngrokConfig
    :param token: The auth token to set.
    :type token: str
    :raises PyngrokNgrokError: When ``ngrok`` does not confirm the token was saved.
    """
    args = [pyngrok_config.ngrok_path, "authtoken", token, "--log=stdout"]
    if pyngrok_config.config_path:
        args.append("--config={}".format(pyngrok_config.config_path))

    # ngrok prints a confirmation line when the token is persisted; anything
    # else means the operation failed.
    output = subprocess.check_output(args)
    if "Authtoken saved" not in str(output):
        raise PyngrokNgrokError("An error occurred when saving the auth token: {}".format(output))
def get_process(pyngrok_config):
    """
    Retrieve the current ``ngrok`` process for the given config's ``ngrok_path``.

    If ``ngrok`` is not running, calling this method will first start a process with
    :class:`~pyngrok.conf.PyngrokConfig`.

    :param pyngrok_config: The ``pyngrok`` configuration to use when interacting with the ``ngrok`` binary.
    :type pyngrok_config: PyngrokConfig
    :return: The ``ngrok`` process.
    :rtype: NgrokProcess
    """
    cached = _current_processes.get(pyngrok_config.ngrok_path)
    if cached is not None:
        # Hand the cached process back only if it was not killed externally.
        if cached.proc.poll() is None:
            return cached

        # Stale entry; drop it and fall through to start a fresh process.
        _current_processes.pop(pyngrok_config.ngrok_path, None)

    return _start_process(pyngrok_config)
def kill_process(ngrok_path):
    """
    Terminate the ``ngrok`` processes, if running, for the given path. This method will not block, it will just
    issue a kill request.

    :param ngrok_path: The path to the ``ngrok`` binary.
    :type ngrok_path: str
    """
    # Local import keeps this block self-contained within the module.
    import errno

    if ngrok_path in _current_processes:
        ngrok_process = _current_processes[ngrok_path]

        logger.info("Killing ngrok process: {}".format(ngrok_process.proc.pid))

        try:
            ngrok_process.proc.kill()
            # Reap the child so it does not linger as a zombie.
            ngrok_process.proc.wait()
        except OSError as e:
            # If the process was already killed, nothing to do but cleanup state.
            # ESRCH ("no such process") replaces the previous magic number 3.
            if e.errno != errno.ESRCH:
                raise e

        _current_processes.pop(ngrok_path, None)
    else:
        logger.debug("\"ngrok_path\" {} is not running a process".format(ngrok_path))
def run_process(ngrok_path, args):
    """
    Start a blocking ``ngrok`` process with the binary at the given path and the passed args.

    This method is meant for invoking ``ngrok`` directly (for instance, from the command line) and is not
    necessarily compatible with non-blocking API methods. For that, use :func:`~pyngrok.process.get_process`.

    :param ngrok_path: The path to the ``ngrok`` binary.
    :type ngrok_path: str
    :param args: The args to pass to ``ngrok``.
    :type args: list[str]
    """
    _ensure_path_ready(ngrok_path)

    # Block until the ngrok process exits.
    subprocess.call([ngrok_path] + args)
def _ensure_path_ready(ngrok_path):
    """
    Ensure the binary for ``ngrok`` at the given path is ready to be started, raise a relevant
    exception if not.

    :param ngrok_path: The path to the ``ngrok`` binary.
    :raises PyngrokNgrokError: When the binary is missing or already has a running process.
    """
    if not os.path.exists(ngrok_path):
        message = ("ngrok binary was not found. Be sure to call \"ngrok.ensure_ngrok_installed()\" first for "
                   "\"ngrok_path\": {}".format(ngrok_path))
        raise PyngrokNgrokError(message)

    # Only one managed process per binary path is allowed at a time.
    if ngrok_path in _current_processes:
        raise PyngrokNgrokError("ngrok is already running for the \"ngrok_path\": {}".format(ngrok_path))
def _validate_config(config_path):
    """Load the YAML config at *config_path* and validate its contents."""
    with open(config_path, "r") as config_file:
        config = yaml.safe_load(config_file)

    # An empty config file parses to None; nothing to validate in that case.
    if config is not None:
        validate_config(config)
def _terminate_process(process):
if process is None:
return
try:
process.terminate()
except OSError:
logger.debug("ngrok process already terminated: {}".format(process.pid))
def _start_process(pyngrok_config):
    """
    Start a ``ngrok`` process with no tunnels. This will start the ``ngrok`` web interface, against
    which HTTP requests can be made to create, interact with, and destroy tunnels.

    :param pyngrok_config: The ``pyngrok`` configuration to use when interacting with the ``ngrok`` binary.
    :type pyngrok_config: PyngrokConfig
    :return: The ``ngrok`` process.
    :rtype: NgrokProcess
    :raises PyngrokNgrokError: When the process errors on start or fails to become healthy in time.
    """
    _ensure_path_ready(pyngrok_config.ngrok_path)

    # Validate whichever config file ngrok will actually use.
    if pyngrok_config.config_path is not None:
        _validate_config(pyngrok_config.config_path)
    else:
        _validate_config(conf.DEFAULT_NGROK_CONFIG_PATH)

    # Build the command line: no tunnels at start, logging to stdout so the
    # startup state can be parsed from the process's output.
    start = [pyngrok_config.ngrok_path, "start", "--none", "--log=stdout"]
    if pyngrok_config.config_path:
        logger.info("Starting ngrok with config file: {}".format(pyngrok_config.config_path))
        start.append("--config={}".format(pyngrok_config.config_path))
    if pyngrok_config.auth_token:
        logger.info("Overriding default auth token")
        start.append("--authtoken={}".format(pyngrok_config.auth_token))
    if pyngrok_config.region:
        logger.info("Starting ngrok in region: {}".format(pyngrok_config.region))
        start.append("--region={}".format(pyngrok_config.region))

    popen_kwargs = {"stdout": subprocess.PIPE, "universal_newlines": True}

    # start_new_session is only honored on POSIX under Python 3.
    if sys.version_info.major >= 3 and os.name == "posix":
        popen_kwargs.update(start_new_session=pyngrok_config.start_new_session)
    elif pyngrok_config.start_new_session:
        logger.warning("Ignoring start_new_session=True, which requires Python 3 and POSIX")

    proc = subprocess.Popen(start, **popen_kwargs)
    # Ensure the child is terminated when the interpreter exits.
    atexit.register(_terminate_process, proc)

    logger.info("ngrok process starting: {}".format(proc.pid))

    ngrok_process = NgrokProcess(proc, pyngrok_config)
    _current_processes[pyngrok_config.ngrok_path] = ngrok_process

    # Consume startup log lines until the process is healthy, errors out,
    # exits, or the configured timeout elapses.
    timeout = time.time() + pyngrok_config.startup_timeout
    while time.time() < timeout:
        line = proc.stdout.readline()
        ngrok_process._log_startup_line(line)

        if ngrok_process.healthy():
            logger.info("ngrok process has started: {}".format(ngrok_process.api_url))

            if pyngrok_config.monitor_thread:
                ngrok_process.start_monitor_thread()

            break
        elif ngrok_process.startup_error is not None or \
                ngrok_process.proc.poll() is not None:
            break

    if not ngrok_process.healthy():
        # If the process did not come up in a healthy state, clean up the state
        kill_process(pyngrok_config.ngrok_path)

        if ngrok_process.startup_error is not None:
            raise PyngrokNgrokError("The ngrok process errored on start: {}.".format(ngrok_process.startup_error),
                                    ngrok_process.logs,
                                    ngrok_process.startup_error)
        else:
            raise PyngrokNgrokError("The ngrok process was unable to start.", ngrok_process.logs)

    return ngrok_process
|
operationsframe.py | import tkinter as tk
import tkinter.filedialog
from tkinter import ttk
import os
import webbrowser
import threading
from http.server import HTTPServer, SimpleHTTPRequestHandler
import eosim.gui.helpwindow as helpwindow
from eosim import config
from eosim.gui.visualizeframe.visglobeframe import VisGlobeFrame
from eosim.gui.mapprojections import Mercator, EquidistantConic, LambertConformal, Robinson, LambertAzimuthalEqualArea, Gnomonic
from orbitpy.util import EnumEntity
from orbitpy.sensorfovprojection import SensorFOVProjection, PixelShapelyPolygon
from instrupy.public_library import Instrument
import pandas as pd
import json
import datetime
import numpy as np
import pandas as pd
import time
import copy
import uuid
import shutil
import pickle
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
# Implement the default Matplotlib key bindings.
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import cartopy
import cartopy.crs as ccrs
import logging
logger = logging.getLogger(__name__)
class CommandType(EnumEntity):
    """Enumeration of recognized command types."""
    # NOTE(review): the trailing commas make TAKEIMAGE and TRANSMITDATA
    # one-element tuples ("TAKEIMAGE",), not strings like SETTINGS — confirm
    # EnumEntity.get() tolerates tuple values, or whether this is a bug.
    TAKEIMAGE = "TAKEIMAGE",
    TRANSMITDATA = "TRANSMITDATA",
    SETTINGS = "SETTINGS"
class MissionEntityType(EnumEntity):
    """Enumeration of recognized mission entity types."""
    # NOTE(review): trailing comma makes SATELLITE a one-element tuple,
    # unlike GROUNDSTATION — confirm intentional (see CommandType).
    SATELLITE = "SATELLITE",
    GROUNDSTATION = "GROUNDSTATION"
class OperationsFrame(ttk.Frame):
    """Tkinter frame for the mission-operations view.

    Provides upload of a user command file, synthesis of observations for
    TAKEIMAGE commands, and tabs for operations visualization.
    """

    def __init__(self, parent, controller):
        ttk.Frame.__init__(self, parent)
        self.controller = controller
        self.rowconfigure(0,weight=1)
        self.columnconfigure(0,weight=1)
        self.columnconfigure(1,weight=1)

        self.commands = list() # initialize list of dictionaries to store user supplied commands
        self.operations = list() # initialize list of dictionaries to store mission-operations (processed user-supplied commands)

        # operations schedule frame
        opschedule_frame = ttk.LabelFrame(self, text="Command", labelanchor='n')
        opschedule_frame.grid(row=0, column=0, ipadx=20, ipady=20, sticky='nsew')
        opschedule_frame.rowconfigure(0,weight=1)
        opschedule_frame.rowconfigure(1,weight=6)
        opschedule_frame.rowconfigure(2,weight=1)
        opschedule_frame.columnconfigure(0,weight=1)

        obssyn_frame = ttk.LabelFrame(opschedule_frame, text="Synthesize Observations", labelanchor='n')
        obssyn_frame.grid(row=1, column=0, padx=20, pady=20)

        # Indeterminate progress bar driven by the synthesis worker thread.
        progress_bar = ttk.Progressbar(opschedule_frame, orient='horizontal', length=300, mode='indeterminate')
        progress_bar.grid(row=2, column=0, padx=20, pady=20, sticky='s')

        # operations visualization frame
        opvisz_frame = ttk.LabelFrame(self, text="Operations Visualization", labelanchor='n')
        opvisz_frame.grid(row=0, column=1, rowspan=2, ipadx=20, ipady=20, sticky='nsew')
        opvisz_frame.rowconfigure(0,weight=1)
        opvisz_frame.columnconfigure(0,weight=1)

        # define the widgets in the opschedule_frame
        tk.Button(opschedule_frame, text="Upload Command File", wraplength=100, width=15, command=self.click_select_command_file).grid(row=0, column=0, pady=20)

        # define the widgets in the opexec_frame
        ttk.Button(obssyn_frame, text="Select", state='disabled').grid(row=0, column=0, padx=10, pady=10)
        ttk.Button(obssyn_frame, text="Execute", command=lambda:self.click_synobsexec_btn(progress_bar)).grid(row=0, column=1, padx=10, pady=10)

        # define widgets for the opvisz_frame
        tabControl = ttk.Notebook(opvisz_frame)
        tab1 = ttk.Frame(tabControl)
        tab2 = ttk.Frame(tabControl)
        tabControl.add(tab1, text='3D Globe')
        tabControl.add(tab2, text='Synthetic Observations')
        tabControl.pack(expand = True, fill ="both")
        CesiumGlobeOperationsVisualizationFrame(opvisz_frame, tab1)
        SyntheticObservationsVisualizationFrame(opvisz_frame, tab2)

    def click_synobsexec_btn(self, progress_bar):
        """ Synthesize the observations indicated in the operation file."""
        def real_click_sat2satconexec_btn():
            # Worker executed on a background thread so the GUI stays responsive.
            user_dir = config.out_config.get_user_dir()
            with open(user_dir+"operations.json", 'r') as f:
                operations = json.load(f)

            # prepare synthetic data result dir (wiped and recreated each run)
            syn_data_dir = user_dir+'synthetic_data/'
            config.out_config.update_syndatadir(syn_data_dir)
            if os.path.exists(syn_data_dir):
                shutil.rmtree(syn_data_dir)
            os.makedirs(syn_data_dir)

            progress_bar.start(10)
            for oper in operations:
                if(CommandType.get(oper['@type']) == CommandType.TAKEIMAGE):
                    cmd_id = oper["@id"]
                    logger.info('Observation corresponding to command id ' + str(cmd_id))

                    # get the satellite state file and state corresponding to the middle to the imaging interval
                    sat_id = oper["satelliteId"]
                    sat_state_fp = config.out_config.get_satellite_state_fp()[config.out_config.get_satellite_ids().index(sat_id)]

                    epoch_JDUT1 = pd.read_csv(sat_state_fp, skiprows = [0], nrows=1, header=None).astype(str) # 2nd row contains the epoch
                    epoch_JDUT1 = float(epoch_JDUT1[0][0].split()[2])

                    step_size = pd.read_csv(sat_state_fp, skiprows = [0,1], nrows=1, header=None).astype(str) # 3rd row contains the stepsize
                    step_size = float(step_size[0][0].split()[4])

                    # Imaging time is taken at the midpoint of the imaging interval.
                    imaging_time_index = int(oper["timeIndexStart"] + np.floor(0.5*(oper["timeIndexEnd"] - oper["timeIndexStart"])))
                    date_JDUt1 = epoch_JDUT1 + imaging_time_index*step_size/(24*3600)

                    sat_state_df = pd.read_csv(sat_state_fp,skiprows = [0,1,2,3])
                    sat_state_df.set_index('TimeIndex', inplace=True)
                    sat_state = sat_state_df.iloc[imaging_time_index]
                    # ECI state as the comma-separated string the projection utility expects.
                    state_eci = str(sat_state['X[km]']) + "," + str(sat_state['Y[km]']) + "," + str(sat_state['Z[km]']) + "," + str(sat_state['VX[km/s]']) + "," + str(sat_state['VY[km/s]']) + "," + str(sat_state['VZ[km/s]'])

                    # get the satellite and instrument orientations
                    sat_orien = "1,2,3," + oper['satelliteOrientation'] # 1,2,3 indicate the euler sequence
                    sen_orien = "1,2,3," + oper['instrumentOrientation']

                    # get the instrument fov,pixel configuration
                    instru = config.miss_specs.get_sensor_specs(oper["instrumentId"])
                    #print(sensor_specs)
                    #instru = Instrument.from_dict(sensor_specs)
                    [angleHeight, angleWidth] = instru.get_fov().get_ATCT_fov() #TODO: check that angleHeight==ATFOV
                    [heightDetectors, widthDetectors] = instru.get_pixel_config()

                    # compute the pixel position data (center, edges, poles)
                    logger.info("...start computation of pixel positions...")
                    pixel_pos_data = SensorFOVProjection.get_pixel_position_data(user_dir, date_JDUt1, state_eci, sat_orien, sen_orien, angleWidth, angleHeight, heightDetectors, widthDetectors)
                    logger.info("...stop computation of pixel positions...")

                    # compute the pixel polygons
                    pixels = PixelShapelyPolygon(pixel_pos_data)
                    logger.info("...start computation of pixel polygons...")
                    [pixel_poly, pixel_center_pos] = pixels.make_all_pixel_polygon()
                    logger.info("...stop computation of pixel polygons...")

                    # compute the synthetic data from the instrument
                    logger.info("...start interpolation ...")
                    [pixel_center_pos, interpl_var_data, env_var] = instru.synthesize_observation(time_JDUT1=date_JDUt1, pixel_center_pos=pixel_center_pos)
                    logger.info("...stop interpolation...")

                    # store the results (pixel position data, shapely polygons, interpolated data) inside a pickle object with the name same as the cmd_id
                    syn_data_dir = config.out_config.get_syndatadir()
                    syndata_fp = syn_data_dir+"syndata_cmd" + str(cmd_id)+".p"
                    with open(syndata_fp, "wb") as f:
                        # Pickle order matters: readers must load env_var, pixel_poly,
                        # pixel_center_pos, interpl_var_data in this same order.
                        pickle.dump(env_var, f)
                        pickle.dump(pixel_poly, f)
                        pickle.dump(pixel_center_pos, f)
                        pickle.dump(interpl_var_data, f)
                    config.out_config.update_syndata(cmd_id, syndata_fp)

            # update the output.json file inside user-dir
            ocf = user_dir + 'output.json'
            with open(ocf, 'w', encoding='utf-8') as f:
                json.dump(config.out_config.to_dict(), f, ensure_ascii=False, indent=4)

            progress_bar.stop()

        logger.info('Start synthesize observations.')
        threading.Thread(target=real_click_sat2satconexec_btn).start()
        # NOTE(review): this logs immediately after the worker thread starts,
        # not when it actually finishes — the message is misleading.
        logger.info('Finished synthesizing observations.')

    def click_select_command_file(self):
        # reinitialize operations
        self.operations = list()

        # read in command data
        cmdfile_fp = None
        cmdfile_fp = tkinter.filedialog.askopenfilename(initialdir=os.getcwd(), title="Please select the command file:", filetypes=(("All files","*.*"),("json files","*.json")))
        if cmdfile_fp != '':
            with open(cmdfile_fp) as f:
                self.commands = json.load(f)
            logger.info("Command file successfully is read.")

        # build the operations.json file
        #
        # iterate through the command packets
        for cmd in self.commands:
            cmd_typ = CommandType.get(cmd['@type'])
            if(cmd_typ==CommandType.TRANSMITDATA):
                # TODO: validate that contact is indeed possible
                self.operations.append(cmd)
            elif(cmd_typ==CommandType.TAKEIMAGE):
                # TODO: validate that observation is indeed possible
                self.operations.append(cmd)

        # write the operations.json file
        user_dir = config.out_config.get_user_dir()
        with open(user_dir+'operations.json', 'w', encoding='utf-8') as f:
            json.dump(self.operations, f, ensure_ascii=False, indent=4)

        logger.info("Operations updated.")
class CesiumGlobeOperationsVisualizationFrame:
    """"3D Globe" tab: builds CZML packets from the operations file and serves
    the bundled Cesium app over a local HTTP server."""

    def __init__(self, win, tab):
        globe_visz_frame = ttk.Frame(tab)
        globe_visz_frame.pack( expand=True)
        # Guard so only one local HTTP server is ever started per frame.
        self.http_server_started = False
        tk.Button(globe_visz_frame, text="Launch \n (Cesium powered Globe visualization)", wraplength=150, width=20, command=self.click_launch_globe_visz).pack(padx=10, pady=10, ipadx=10, ipady=10, expand=True)

    def click_launch_globe_visz(self):
        # Assemble the mission-background CZML packets, append the
        # operations-specific packets, and hand everything to Cesium.
        user_dir = config.out_config.get_user_dir()
        czml_template_dir = os.path.dirname(__file__) + "/../visualizeframe/"
        [epoch, step_size, num_time_indices, czml_pkts] = VisGlobeFrame.build_czmlpkts_for_mission_background(user_dir, czml_template_dir)
        _czml_pkts = CesiumGlobeOperationsVisualizationFrame.build_czmlpkts_for_operational_contacts(user_dir, czml_template_dir, epoch, step_size, num_time_indices)
        czml_pkts.extend(_czml_pkts)
        cesium_data_dir = os.path.dirname(__file__) + "/../../../cesium_app/Source/SampleData/"
        self.execute_cesium_engine(cesium_data_dir, czml_pkts)

    @staticmethod
    def build_czmlpkts_for_operational_contacts(user_dir, czml_template_dir, epoch, step_size, num_time_indices):
        """Build CZML packets for TRANSMITDATA contacts and TAKEIMAGE observed
        ground points from the operations file.

        :param user_dir: Directory containing ``operations.json``.
        :param czml_template_dir: Directory containing the CZML template files.
        :param epoch: Mission epoch as a datetime.
        :param step_size: Propagation step size in seconds.
        :param num_time_indices: Number of time steps in the mission.
        :return: List of CZML packet dicts.
        """
        with open(user_dir+"operations.json", 'r') as f:
            operations = json.load(f)

        czml_pkts = []

        # observed position packet
        with open(czml_template_dir+"observed_gp_template.json", 'r') as f:
            oberv_pkt = json.load(f)

        # ground-station, inter-satellite comm packets
        with open(czml_template_dir+"contacts_template.json", 'r') as f:
            contacts_pkt = json.load(f)

        contacts_pkt[0]["id"] = str(uuid.uuid4()) # TODO: Not sure if this parent packet is required
        czml_pkts.append(contacts_pkt[0])

        # initialize communication between every pair of entities to "no-contact". Note that these packets have to appear strictly before the packets showing the contacts.
        # between satellite and ground-station
        sat_out = config.out_config.get_satout()

        miss_time_from = epoch.isoformat() + 'Z' #TODO: check Z
        miss_time_to = (epoch + datetime.timedelta(0,int(num_time_indices* step_size))).isoformat() + 'Z' #TODO: check Z
        mission_interval = miss_time_from + "/" + miss_time_to

        # satellite with ground-station
        sat_with_gs_comm_ids = []
        for _sat in sat_out:
            sat_id = _sat["@id"]
            if _sat.get("GroundStationComm", None) is not None:
                for _gs in _sat["GroundStationComm"]:
                    groundstn_id = _gs["@id"]
                    _pkt = copy.deepcopy(contacts_pkt[1])
                    _pkt["id"] = str(sat_id) + "-with-" + str(groundstn_id)
                    sat_with_gs_comm_ids.append(_pkt["id"])
                    _pkt["name"] = _pkt["id"]
                    _pkt["polyline"]["show"] = {"interval":mission_interval, "boolean":False} # initialization of no contacts throughout the mission case
                    _pkt["polyline"]["positions"]["references"] = [str(sat_id)+"#position",str(groundstn_id)+"#position"]
                    czml_pkts.append(_pkt)

        # between satellite and satellite
        intersatcomm_ids = []
        for j in range(0,len(sat_out)):
            sat1_id = sat_out[j]["@id"]
            for k in range(j+1,len(sat_out)):
                sat2_id = sat_out[k]["@id"]
                _pkt = copy.deepcopy(contacts_pkt[1])
                _pkt["id"] = str(sat1_id) + "-with-" + str(sat2_id)
                intersatcomm_ids.append(_pkt["id"]) # record the ids stored. the order is important to reference the subsequent packets
                _pkt["name"] = _pkt["id"]
                _pkt["polyline"]["show"] = {"interval":mission_interval, "boolean":False} # initialization of no contacts throughout the mission case
                _pkt["polyline"]["positions"]["references"] = [str(sat1_id)+"#position",str(sat2_id)+"#position"]
                czml_pkts.append(_pkt)

        # iterate over each operation in the list of operations. If they correspond to ground-station communications or intersatellite communications,
        # make the corresponding czml packet
        for oper in operations:
            if(CommandType.get(oper['@type']) == CommandType.TRANSMITDATA):
                tx_entity_id = oper["txEntityId"]
                rx_entity_id = oper["rxEntityId"]
                time_from = (epoch + datetime.timedelta(0,int(oper['timeIndexStart'] * step_size))).isoformat() + 'Z' #TODO: check Z
                time_to = (epoch + datetime.timedelta(0,int(oper['timeIndexEnd'] * step_size))).isoformat() + 'Z' #TODO: check Z
                interval = time_from + "/" + time_to
                contact = {"interval":interval, "boolean":True}

                _pkt = copy.deepcopy(contacts_pkt[1])
                # The contact id matches the initialization packet above; try the
                # tx-rx order first, else fall back to the reversed order.
                # NOTE(review): these branches assign a plain list to
                # _pkt["polyline"]["positions"], while the initialization packets
                # set _pkt["polyline"]["positions"]["references"] — confirm the
                # bare-list form is valid CZML for reference positions.
                if(MissionEntityType.get(oper["txEntityType"])==MissionEntityType.SATELLITE and MissionEntityType.get(oper["rxEntityType"])==MissionEntityType.SATELLITE):
                    if(str(tx_entity_id) + "-with-" + str(rx_entity_id) in intersatcomm_ids):
                        _pkt["id"] = str(tx_entity_id) + "-with-" + str(rx_entity_id)
                        _pkt["polyline"]["positions"]= [str(tx_entity_id)+"#position",str(rx_entity_id)+"#position"]
                    else:
                        _pkt["id"] = str(rx_entity_id) + "-with-" + str(tx_entity_id) # note the change in order
                        _pkt["polyline"]["positions"]= [str(rx_entity_id)+"#position",str(tx_entity_id)+"#position"]
                else: # satellite with ground-station communications
                    if(str(tx_entity_id) + "-with-" + str(rx_entity_id) in sat_with_gs_comm_ids):
                        _pkt["id"] = str(tx_entity_id) + "-with-" + str(rx_entity_id)
                        _pkt["polyline"]["positions"]= [str(tx_entity_id)+"#position",str(rx_entity_id)+"#position"]
                    else:
                        _pkt["id"] = str(rx_entity_id) + "-with-" + str(tx_entity_id) # note the change in order
                        _pkt["polyline"]["positions"]= [str(rx_entity_id)+"#position",str(tx_entity_id)+"#position"]
                _pkt["name"] = _pkt["id"]
                # NOTE(review): `contact` is always a non-empty dict here, so
                # bool(contact) is always True and the False branch is dead.
                _pkt["polyline"]["show"] = contact if bool(contact) else False # no (valid) contacts throughout the mission case
                czml_pkts.append(_pkt)

            elif(CommandType.get(oper['@type']) == CommandType.TAKEIMAGE):
                offset = 0 # TODO: Need to Revise
                time_from = (epoch + datetime.timedelta(0,offset+int(oper['timeIndexStart'] * step_size))).isoformat() + 'Z' #TODO: check Z
                #time_to = (epoch + datetime.timedelta(0,offset+int(oper['timeIndexEnd'] * step_size))).isoformat() + 'Z' #TODO: check Z
                time_to = miss_time_to# TODO: Undo this
                interval = time_from + "/" + time_to
                initialize_interval = {"interval":mission_interval, "boolean":False} # this is necessary, else the point is shown over entire mission interval
                obs_interval = {"interval":interval, "boolean":True}

                # Normalize a single observed position into a list of positions.
                if(not isinstance(oper["observedPosition"]["cartographicDegrees"][0],list)):
                    oper["observedPosition"]["cartographicDegrees"] = [oper["observedPosition"]["cartographicDegrees"]]

                k = 0
                for obs_pos in oper["observedPosition"]["cartographicDegrees"]: # iterate over possibly multiple points seen over the same time-interval
                    _pkt = copy.deepcopy(oberv_pkt)
                    _pkt["id"] = "ObservedGroundPointSat" + str(oper["satelliteId"]) + "Instru" + str(oper["instrumentId"]) + str(time_from) + "_"+str(k) # only one czml packet per (sat, instru, time-start)
                    _pkt["point"]["show"] = [initialize_interval, obs_interval]
                    _pkt["position"]["cartographicDegrees"]= obs_pos
                    # Color-code the point by observation value: red / yellow / green.
                    # NOTE(review): the first branch uses 0.3333 while the second
                    # uses 0.333 — values in (0.333, 0.3333] hit the first branch
                    # only; confirm the intended threshold.
                    if(oper["observationValue"] <= 0.3333):
                        _pkt["point"]["color"] = {"rgba": [255,0,0,255]}
                    elif(oper["observationValue"] > 0.333 and oper["observationValue"] <= 0.66666):
                        _pkt["point"]["color"] = {"rgba": [255,255,0,255]}
                    elif(oper["observationValue"] > 0.66666):
                        _pkt["point"]["color"] = {"rgba": [0,255,17,255]}
                    czml_pkts.append(_pkt)
                    k = k + 1

        return czml_pkts

    def execute_cesium_engine(self, cesium_data_dir, czml_pkts):
        # write the CZML data file
        with open(cesium_data_dir+"eosim_data.json", 'w') as f: # TODO change the directory where the CZML file is stored
            json.dump(czml_pkts, f, indent=4)

        # rename file to czml extension
        os.rename(cesium_data_dir+'eosim_data.json', cesium_data_dir+'eosim_data.czml')

        # Execute the cesium app
        def start_webserver():
            # Serve the cesium_app directory on localhost:8080; started at most
            # once per frame instance (guarded by http_server_started).
            if(self.http_server_started is False):
                web_dir = os.path.join(os.path.dirname(__file__), '../../../cesium_app/')
                os.chdir(web_dir)
                self.httpd = HTTPServer(('localhost', 8080), SimpleHTTPRequestHandler)
                self.http_server_started = True
                self.httpd.serve_forever()
            else:
                pass

        # start webserver
        threading.Thread(target=start_webserver).start()
        time.sleep(1) # allow enough time for the server to start
        webbrowser.open('http://localhost:8080/', new=2) # open webbrowser
class SyntheticObservationsVisualizationFrame:
def __init__(self, win, tab):
    """Build the "Synthetic Observations" tab: image selector, map-projection
    chooser, and plot controls."""
    synobsvis_frame = ttk.Frame(tab)
    synobsvis_frame.pack(expand=True) # if grid was used and the child frames directly defined on 'tab',
                                      # then the widgets within the child-frames cannot be aligned to the center for some unknown reason.

    synobsvis_choose_image_frame = ttk.Frame(synobsvis_frame)
    synobsvis_choose_image_frame.grid(row=0, column=0, sticky='nswe')
    synobsvis_choose_image_frame.columnconfigure(0,weight=1)
    synobsvis_choose_image_frame.rowconfigure(0,weight=1)

    synobsvis_map_proj_frame = ttk.LabelFrame(synobsvis_frame, text='Set Map Projection', labelanchor='n')
    synobsvis_map_proj_frame.grid(row=1, column=0, sticky='nswe', padx=10)
    synobsvis_map_proj_frame.columnconfigure(0,weight=1)
    synobsvis_map_proj_frame.rowconfigure(0,weight=1)
    synobsvis_map_proj_frame.rowconfigure(1,weight=1)

    synobsvis_map_proj_type_frame = ttk.Frame(synobsvis_map_proj_frame)
    synobsvis_map_proj_type_frame.grid(row=0, column=0)

    proj_specs_container = ttk.Frame(synobsvis_map_proj_frame)
    proj_specs_container.grid(row=1, column=0, sticky='nswe')
    proj_specs_container.columnconfigure(0,weight=1)
    proj_specs_container.rowconfigure(0,weight=1)

    # One stacked frame per projection type; the selected one is raised.
    proj_specs_container_frames = {}
    for F in (Mercator, EquidistantConic, LambertConformal,Robinson,LambertAzimuthalEqualArea,Gnomonic):
        page_name = F.__name__
        self._prj_typ_frame = F(parent=proj_specs_container, controller=self)
        proj_specs_container_frames[page_name] = self._prj_typ_frame
        self._prj_typ_frame.grid(row=0, column=0, sticky="nsew")

    self._prj_typ_frame = proj_specs_container_frames['Mercator'] # default projection type
    self._prj_typ_frame.tkraise()

    synobsvis_map_plot_frame = ttk.Frame(synobsvis_frame)
    synobsvis_map_plot_frame.grid(row=2, column=0, sticky='nswe')
    synobsvis_map_plot_frame.columnconfigure(0,weight=1)
    synobsvis_map_plot_frame.columnconfigure(1,weight=1)
    synobsvis_map_plot_frame.columnconfigure(2,weight=1)
    synobsvis_map_plot_frame.rowconfigure(0,weight=1)

    def updtcblist():
        # Refresh the combobox options from the currently available synthetic images.
        available_images= config.out_config.get_syndata() # get all available sats for which outputs are available
        self.available_images_ids = [x['@id'] for x in available_images] if available_images is not None else None
        self.select_img_combo_box['values'] = self.available_images_ids
        self.select_img_combo_box.current(0)

    self.select_img_combo_box = ttk.Combobox(synobsvis_choose_image_frame, postcommand = updtcblist)
    self.select_img_combo_box.grid(row=0, column=0)

    # projection
    PROJ_TYPES = ['Mercator', 'EquidistantConic', 'LambertConformal', 'Robinson', 'LambertAzimuthalEqualArea', 'Gnomonic']

    self._proj_type = tk.StringVar() # using self so that the variable is retained even after exit from the function
    self._proj_type.set("Mercator") # initialize

    def proj_type_combobox_change(event=None):
        # Raise the spec frame matching the newly selected projection type.
        if self._proj_type.get() == "Mercator":
            self._prj_typ_frame = proj_specs_container_frames['Mercator']
        elif self._proj_type.get() == "EquidistantConic":
            self._prj_typ_frame = proj_specs_container_frames['EquidistantConic']
        elif self._proj_type.get() == "LambertConformal":
            self._prj_typ_frame = proj_specs_container_frames['LambertConformal']
        elif self._proj_type.get() == "Robinson":
            self._prj_typ_frame = proj_specs_container_frames['Robinson']
        elif self._proj_type.get() == "LambertAzimuthalEqualArea":
            self._prj_typ_frame = proj_specs_container_frames['LambertAzimuthalEqualArea']
        elif self._proj_type.get() == "Gnomonic":
            self._prj_typ_frame = proj_specs_container_frames['Gnomonic']
        self._prj_typ_frame.tkraise()

    projtype_combo_box = ttk.Combobox(synobsvis_map_proj_type_frame,
                                      values=PROJ_TYPES, textvariable = self._proj_type, width=25)
    projtype_combo_box.current(0)
    projtype_combo_box.grid(row=0, column=0)
    projtype_combo_box.bind("<<ComboboxSelected>>", proj_type_combobox_change)

    # plot frame
    self.autocrop_var = tk.IntVar(value=1)
    # NOTE(review): .grid() returns None, so autocrop_chkbtn is None — keep a
    # reference to the widget before gridding if it is needed later.
    self.autocrop_chkbtn = ttk.Checkbutton(synobsvis_map_plot_frame, text="Auto crop", onvalue=1, offvalue=0, variable=self.autocrop_var).grid(row=0, column=0)
    ttk.Button(synobsvis_map_plot_frame, text="Plot", command=self.click_plot_btn).grid(row=0, column=1, columnspan=2, padx=20)
def click_plot_btn(self):
    """Render the selected synthetic-observation image on a map.

    Loads the pickled synthetic-data file for the image chosen in the
    combo box, draws each pixel polygon colored by its interpolated
    variable value on the currently raised projection frame, and shows
    the figure (with colorbar and navigation toolbar) in a new window.
    """
    # get the relevant data
    syn_img_id = self.select_img_combo_box.get()
    syn_img_fp = config.out_config.get_syndata_filepath(syn_img_id)
    # The file holds four sequentially pickled objects, read in this order.
    with open(syn_img_fp, 'rb') as f:
        env_var = pickle.load(f)            # environment-variable label (used for the colorbar)
        pixel_poly = pickle.load(f)         # one polygon geometry per pixel
        pixel_center_pos = pickle.load(f)   # dicts with 'lon[deg]' / 'lat[deg]' keys
        interpl_var_data = pickle.load(f)   # one scalar per pixel
    # make the plot in a fresh top-level window
    fig_win = tk.Toplevel()
    fig = Figure(figsize=(6,6), dpi=100)
    proj = self._prj_typ_frame.get_specs()  # cartopy projection from the active projection frame
    ax = fig.add_subplot(1,1,1,projection=proj)
    ax.stock_img()
    cmap=plt.cm.get_cmap("jet")
    # Normalize colors over the full data range.
    norm=plt.Normalize(min(interpl_var_data),max(interpl_var_data))
    # One filled polygon per pixel, colored by its interpolated value.
    # NOTE(review): both `cartopy.crs` and `ccrs` aliases are used below —
    # confirm both imports exist at module level.
    for k in range(0,len(pixel_poly)):
        color=cmap(norm(interpl_var_data[k]))
        ax.add_geometries((pixel_poly[k],), crs=cartopy.crs.PlateCarree(), facecolor=color)
    ax.coastlines()
    if(self.autocrop_var.get() == 1):
        lon = []
        lat = []
        for _pix_p in pixel_center_pos:
            lon.append(_pix_p['lon[deg]'])
            lat.append(_pix_p['lat[deg]'])
        # limit the plotted geographical area to the pixels plus a 5-degree margin
        max_lon = max(lon) + 5
        min_lon = min(lon) - 5
        max_lat = max(lat) + 5
        min_lat = min(lat) - 5
        ax.set_extent([min_lon, max_lon, min_lat, max_lat], crs=ccrs.PlateCarree())
    # colorbar labeled with the plotted environment variable
    sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
    sm.set_array(interpl_var_data)
    cb = fig.colorbar(sm, ax=ax)
    cb.set_label(str(env_var))
    # embed the figure in the Tk window
    # NOTE(review): `tk` and `tkinter` aliases are mixed here — confirm both
    # names are imported at module level.
    canvas = FigureCanvasTkAgg(fig, master=fig_win)  # A tk.DrawingArea.
    canvas.draw()
    canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
    toolbar = NavigationToolbar2Tk(canvas, fig_win)
    toolbar.update()
    canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
|
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
##########################################################################
# object_storage_bulk_delete.py
#
# @author: Adi Zohar
#
# Supports Python 3
#
# DISCLAIMER - This is not an official Oracle application. It is not supported by Oracle Support, and it should NOT be used for utilization calculation purposes.
##########################################################################
# Info:
# Bulk delete with parallel threads
#
##########################################################################
# Application Command line parameters
#
# -c config - Config file section to use (tenancy profile)
# -t profile - Profile in config file, DEFAULT as default
# -p proxy - Set Proxy (i.e. www-proxy-server.com:80)
# -ip - Use Instance Principals for Authentication
# -dt - Use Instance Principals with delegation token for cloud shell
# -sb source_bucket
# -sp source_prefix
# -sr source_region
# -sn source_namespace
##########################################################################
import threading
import time
import queue
import oci
import argparse
import datetime
import sys
import click
import os

##########################################################################
# Pre Main
##########################################################################
# Command line parser: see the header block for the parameter summary.
parser = argparse.ArgumentParser()
parser.add_argument('-t', default="", dest='config_profile', help='Config file section to use (tenancy profile)')
parser.add_argument('-p', default="", dest='proxy', help='Set Proxy (i.e. www-proxy-server.com:80) ')
parser.add_argument('-ip', action='store_true', default=False, dest='is_instance_principals', help='Use Instance Principals for Authentication')
parser.add_argument('-dt', action='store_true', default=False, dest='is_delegation_token', help='Use Delegation Token for Authentication')
parser.add_argument('-c', default="", dest='config_file', help="Config File (default=~/.oci/config)")
parser.add_argument('-sb', default="", dest='source_bucket', help='Source Bucket Name')
parser.add_argument('-sp', default="", dest='source_prefix', help='Source Prefix Include')
parser.add_argument('-se', default="", dest='source_prefix_exclude', help='Source Prefix Exclude')
parser.add_argument('-exclude_dirs', action='store_true', default=False, dest='source_exclude_dirs', help='Exclude Directories')
parser.add_argument('-sn', default="", dest='source_namespace', help='Source Namespace (Default current connection)')
parser.add_argument('-sr', default="", dest='source_region', help='Source Region')
cmd = parser.parse_args()

# BUG FIX: the original test was `len(sys.argv) < 1`, which can never be true
# (argv[0] is always present), so running with no arguments never showed the
# usage screen. `< 2` means "no arguments were given".
if len(sys.argv) < 2:
    parser.print_help()
    raise SystemExit

if not cmd.source_bucket:
    print("Source bucket parameter is required !!!\n")
    parser.print_help()
    raise SystemExit

# Tuning parameters
worker_count = 40              # number of parallel delete threads
status_interval = 60           # seconds between progress reports
base_retry_timeout = 2         # initial retry back-off (seconds)
max_retry_timeout = 16 ** 2    # give up once the back-off exceeds this

# global work queue shared by the worker threads
q = queue.Queue()

# Global Variables (object_storage_client is populated by connect_to_object_storage)
object_storage_client = None
source_bucket = cmd.source_bucket
source_prefix = cmd.source_prefix
source_namespace = cmd.source_namespace
source_prefix_exclude = cmd.source_prefix_exclude
source_region = cmd.source_region
source_exclude_dirs = cmd.source_exclude_dirs

# Config file location/profile with OCI defaults applied
config_file = (cmd.config_file if cmd.config_file else oci.config.DEFAULT_LOCATION)
config_profile = (cmd.config_profile if cmd.config_profile else oci.config.DEFAULT_PROFILE)
##########################################################################
# Create signer for Authentication
# Input - config_file, config_profile and is_instance_principals and is_delegation_token
# Output - config and signer objects
##########################################################################
def create_signer(config_file, config_profile, is_instance_principals, is_delegation_token):
    """Build a (config, signer) pair for OCI authentication.

    Three mutually exclusive modes, checked in order: instance
    principals, delegation token (cloud shell), then the classic
    key-file flow from ~/.oci/config. Aborts with SystemExit on
    unrecoverable configuration problems.
    """
    if is_instance_principals:
        # Identity comes from the compute instance itself.
        try:
            signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
            return {'region': signer.region, 'tenancy': signer.tenancy_id}, signer
        except Exception:
            print_header("Error obtaining instance principals certificate, aborting")
            raise SystemExit

    if is_delegation_token:
        # Cloud-shell style: config location comes from environment variables.
        try:
            env_config_file = os.environ.get('OCI_CONFIG_FILE')
            env_config_section = os.environ.get('OCI_CONFIG_PROFILE')
            if env_config_file is None or env_config_section is None:
                print("*** OCI_CONFIG_FILE and OCI_CONFIG_PROFILE env variables not found, abort. ***")
                print("")
                raise SystemExit
            config = oci.config.from_file(env_config_file, env_config_section)
            # Read the delegation token referenced by the config.
            with open(config["delegation_token_file"], 'r') as token_file:
                delegation_token = token_file.read().strip()
            signer = oci.auth.signers.InstancePrincipalsDelegationTokenSigner(
                delegation_token=delegation_token)
            return config, signer
        except KeyError:
            print("* Key Error obtaining delegation_token_file")
            raise SystemExit

    # Default: API-key authentication from the config file.
    config = oci.config.from_file(
        config_file if config_file else oci.config.DEFAULT_LOCATION,
        config_profile if config_profile else oci.config.DEFAULT_PROFILE,
    )
    signer = oci.signer.Signer(
        tenancy=config["tenancy"],
        user=config["user"],
        fingerprint=config["fingerprint"],
        private_key_file_location=config.get("key_file"),
        pass_phrase=oci.config.get_config_value_or_default(config, "pass_phrase"),
        private_key_content=config.get("key_content"),
    )
    return config, signer
##############################################################################
# get time
##############################################################################
def get_time(full=False):
    """Current local time as text: 'HH:MM:SS', or 'YYYY-MM-DD HH:MM:SS' when *full*."""
    fmt = "%Y-%m-%d %H:%M:%S" if full else "%H:%M:%S"
    return datetime.datetime.now().strftime(fmt)
##########################################################################
# Print header centered
##########################################################################
def print_header(name):
    """Print *name* centered inside a 90-character '#' banner."""
    width = 90
    border = '#' * width
    print("")
    print(border)
    print("#" + name.center(width - 2, " ") + "#")
    print(border)
##########################################################################
# Print Info
##########################################################################
def print_command_info():
    """Echo the run configuration: banner, start time, CLI, and source details."""
    print_header("Running Object Storage Bulk Delete")
    info_lines = [
        "Written by Adi Zohar, July 2020",
        "Starts at            : " + get_time(full=True),
        "Command Line         : " + ' '.join(x for x in sys.argv[1:]),
        "Source Namespace     : " + source_namespace,
        "Source Bucket        : " + source_bucket,
        "Source Prefix Include: " + source_prefix,
        "Source Prefix Exclude: " + source_prefix_exclude,
        "Source Region        : " + source_region,
    ]
    if source_exclude_dirs:
        info_lines.append("Source Exclude Dirs  : True")
    for line in info_lines:
        print(line)
##############################################################################
# Worker
##############################################################################
def worker():
    """Thread target: pull object names from the global queue and delete them.

    Runs forever as a daemon thread. Transient failures are retried with an
    aggressive back-off (the interval is *squared* each attempt: 2, 4, 16,
    256 seconds — matching max_retry_timeout = 16**2); once the interval
    exceeds max_retry_timeout the exception is re-raised. An HTTP 400
    response is treated as permanent and the object is skipped.
    """
    while True:
        object_name = q.get()
        interval_exp = base_retry_timeout
        while True:
            try:
                object_storage_client.delete_object(source_namespace, source_bucket, object_name)
                break
            except Exception as e:
                # BUG FIX: the original read e.status unconditionally, which
                # raised AttributeError (killing the worker thread) for any
                # exception without a status attribute, e.g. connection errors.
                # HTTP 400 (bad request) will never succeed on retry; skip it.
                if getattr(e, 'status', None) == 400:
                    break
                if interval_exp > max_retry_timeout:
                    print(" ERROR: Failed to request delete of %s" % (object_name))
                    raise
                # NOTE: the original also had an unreachable "Received %s from
                # API" branch guarded by a response that was always None here;
                # that dead branch has been removed.
                print(" Received error from API for object %s, will wait %s seconds before retrying." % (object_name, interval_exp))
                time.sleep(interval_exp)
                interval_exp **= 2  # intentional squaring, not doubling
        q.task_done()
##############################################################################
# Add object to Q
##############################################################################
def add_objects_to_queue(ns, source_bucket):
    """List every matching object in the bucket and enqueue its name.

    Pages through list_objects, applying the prefix include/exclude and
    directory-exclusion filters, and returns the number of names enqueued.
    """
    global q
    total = 0
    page_start = None
    while True:
        response = object_storage_client.list_objects(
            ns, source_bucket,
            start=page_start,
            prefix=source_prefix,
            retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
        page_start = response.data.next_start_with
        for obj in response.data.objects:
            # Skip excluded prefixes and (optionally) anything inside a "directory".
            if source_prefix_exclude and obj.name.startswith(source_prefix_exclude):
                continue
            if source_exclude_dirs and "/" in obj.name:
                continue
            q.put(obj.name)
            total += 1
            if total % 100000 == 0:
                print(get_time() + " - Added " + str(total) + " files to queue...")
        if not page_start:
            break
    return total
##############################################################################
# connect to object storage
##############################################################################
def connect_to_object_storage():
    """Authenticate and initialize the global Object Storage client.

    Honors the -sr region override, applies the optional proxy, and
    resolves the namespace when one was not supplied on the command line.
    Aborts with SystemExit if the connection fails.
    """
    global source_namespace
    global object_storage_client
    global source_region

    config, signer = create_signer(cmd.config_file, cmd.config_profile,
                                   cmd.is_instance_principals, cmd.is_delegation_token)

    # -sr overrides the config's region; otherwise remember the config's region.
    if source_region:
        config['region'] = source_region
    else:
        source_region = config['region']

    try:
        print("\nConnecting to Object Storage Service...")
        object_storage_client = oci.object_storage.ObjectStorageClient(config, signer=signer)
        if cmd.proxy:
            object_storage_client.base_client.session.proxies = {'https': cmd.proxy}
        # Resolve the namespace from the service when not given via -sn.
        if not source_namespace:
            source_namespace = object_storage_client.get_namespace(
                retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
        print("Succeed.")
    except Exception as e:
        print("\nError connecting to Object Storage - " + str(e))
        raise SystemExit
##############################################################################
# Main
##############################################################################
def main():
    """Entry point: connect, confirm with the user, then bulk delete."""
    connect_to_object_storage()
    print_command_info()

    if not click.confirm('\nAre you sure you want to continue deleting ?'):
        raise SystemExit

    print_header("Start Processing")

    # Spin up the daemon worker pool.
    print(get_time() + " - Creating %s workers." % (worker_count))
    for _ in range(worker_count):
        thread = threading.Thread(target=worker)
        thread.daemon = True
        thread.start()

    # Enqueue everything; workers start deleting as soon as names appear.
    print(get_time() + " - Getting list of objects from source source_bucket (%s). delete will start immediately." % (source_bucket))
    count = add_objects_to_queue(source_namespace, source_bucket)
    print(get_time() + " - Enqueued %s objects to be deleted" % (count))

    # Poll until the queue drains, reporting progress periodically.
    while count > 0:
        print(get_time() + " - Waiting %s seconds before checking status." % (status_interval))
        time.sleep(status_interval)
        if q.qsize() == 0:
            print(get_time() + " - deletion of all objects has been requested.")
            break
        else:
            print(get_time() + " - %s object deletes remaining to requested." % (q.qsize()))

    q.join()
    print_header("Completed")
    print("Completed at : " + get_time(True))


##############################################################################
# Execute
##############################################################################
if __name__ == '__main__':
    main()
|
# BUG FIX: stray "test_sys.py | " dataset residue was fused onto the first
# import line, making the module a syntax error. Removed.
import unittest, test.support
from test.support.script_helper import assert_python_ok, assert_python_failure
import sys, io, os
import struct
import subprocess
import textwrap
import warnings
import operator
import codecs
import gc
import sysconfig
import platform
import locale

# count the number of test runs, used to create unique
# strings to intern in test_intern()
numruns = 0

# threading is optional; tests that need it are skipped when it is absent.
try:
    import threading
except ImportError:
    threading = None
class SysModuleTest(unittest.TestCase):
def setUp(self):
    # Save the std streams and displayhook so individual tests can
    # replace or delete them freely.
    self.orig_stdout = sys.stdout
    self.orig_stderr = sys.stderr
    self.orig_displayhook = sys.displayhook

def tearDown(self):
    # Restore whatever the test replaced and reap any child processes
    # spawned via assert_python_ok/failure or subprocess.
    sys.stdout = self.orig_stdout
    sys.stderr = self.orig_stderr
    sys.displayhook = self.orig_displayhook
    test.support.reap_children()
def test_original_displayhook(self):
    """sys.__displayhook__ prints reprs, sets builtins._, and skips None."""
    import builtins
    out = io.StringIO()
    sys.stdout = out

    dh = sys.__displayhook__

    self.assertRaises(TypeError, dh)
    if hasattr(builtins, "_"):
        del builtins._

    # None is special-cased: nothing is printed and builtins._ stays unset.
    dh(None)
    self.assertEqual(out.getvalue(), "")
    self.assertTrue(not hasattr(builtins, "_"))
    dh(42)
    self.assertEqual(out.getvalue(), "42\n")
    self.assertEqual(builtins._, 42)

    # With sys.stdout missing the hook must raise RuntimeError.
    del sys.stdout
    self.assertRaises(RuntimeError, dh, 42)

def test_lost_displayhook(self):
    # Evaluating an expression statement with no sys.displayhook fails.
    del sys.displayhook
    code = compile("42", "<string>", "single")
    self.assertRaises(RuntimeError, eval, code)

def test_custom_displayhook(self):
    # A user-installed displayhook is invoked and its exception propagates.
    def baddisplayhook(obj):
        raise ValueError
    sys.displayhook = baddisplayhook
    code = compile("42", "<string>", "single")
    self.assertRaises(ValueError, eval, code)

def test_original_excepthook(self):
    # sys.__excepthook__ writes the formatted traceback to sys.stderr.
    err = io.StringIO()
    sys.stderr = err

    eh = sys.__excepthook__

    self.assertRaises(TypeError, eh)
    try:
        raise ValueError(42)
    except ValueError as exc:
        eh(*sys.exc_info())

    self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))

def test_excepthook(self):
    # Passing a non-exception "value" produces a diagnostic on stderr.
    with test.support.captured_output("stderr") as stderr:
        sys.excepthook(1, '1', 1)
    self.assertTrue("TypeError: print_exception(): Exception expected for " \
                    "value, str found" in stderr.getvalue())

# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exit(self):
    """Exercise sys.exit(): argument handling and interpreter exit codes."""
    # call with two arguments
    self.assertRaises(TypeError, sys.exit, 42, 42)

    # call without argument
    with self.assertRaises(SystemExit) as cm:
        sys.exit()
    self.assertIsNone(cm.exception.code)

    rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
    self.assertEqual(rc, 0)
    self.assertEqual(out, b'')
    self.assertEqual(err, b'')

    # call with integer argument
    with self.assertRaises(SystemExit) as cm:
        sys.exit(42)
    self.assertEqual(cm.exception.code, 42)

    # call with tuple argument with one entry
    # entry will be unpacked
    with self.assertRaises(SystemExit) as cm:
        sys.exit((42,))
    self.assertEqual(cm.exception.code, 42)

    # call with string argument
    with self.assertRaises(SystemExit) as cm:
        sys.exit("exit")
    self.assertEqual(cm.exception.code, "exit")

    # call with tuple argument with two entries
    with self.assertRaises(SystemExit) as cm:
        sys.exit((17, 23))
    self.assertEqual(cm.exception.code, (17, 23))

    # test that the exit machinery handles SystemExits properly
    rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
    self.assertEqual(rc, 47)
    self.assertEqual(out, b'')
    self.assertEqual(err, b'')

    def check_exit_message(code, expected, **env_vars):
        # Run *code* in a child interpreter and verify its stderr message.
        rc, out, err = assert_python_failure('-c', code, **env_vars)
        self.assertEqual(rc, 1)
        self.assertEqual(out, b'')
        self.assertTrue(err.startswith(expected),
            "%s doesn't start with %s" % (ascii(err), ascii(expected)))

    # test that stderr buffer is flushed before the exit message is written
    # into stderr
    check_exit_message(
        r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
        b"unflushed,message")

    # test that the exit message is written with backslashreplace error
    # handler to stderr
    check_exit_message(
        r'import sys; sys.exit("surrogates:\uDCFF")',
        b"surrogates:\\udcff")

    # test that the unicode message is encoded to the stderr encoding
    # instead of the default encoding (utf8)
    check_exit_message(
        r'import sys; sys.exit("h\xe9")',
        b"h\xe9", PYTHONIOENCODING='latin-1')

def test_getdefaultencoding(self):
    # Only the type can be asserted; the value may have been changed.
    self.assertRaises(TypeError, sys.getdefaultencoding, 42)
    # can't check more than the type, as the user might have changed it
    self.assertIsInstance(sys.getdefaultencoding(), str)

# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
    # Deprecated API: verify set/get round-trips with warnings suppressed.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        self.assertRaises(TypeError, sys.setcheckinterval)
        orig = sys.getcheckinterval()
        for n in 0, 100, 120, orig: # orig last to restore starting state
            sys.setcheckinterval(n)
            self.assertEqual(sys.getcheckinterval(), n)

@unittest.skipUnless(threading, 'Threading required for this test.')
def test_switchinterval(self):
    # Argument validation and set/get round-trip of the GIL switch interval.
    self.assertRaises(TypeError, sys.setswitchinterval)
    self.assertRaises(TypeError, sys.setswitchinterval, "a")
    self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
    self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
    orig = sys.getswitchinterval()
    # sanity check
    self.assertTrue(orig < 0.5, orig)
    try:
        for n in 0.00001, 0.05, 3.0, orig:
            sys.setswitchinterval(n)
            self.assertAlmostEqual(sys.getswitchinterval(), n)
    finally:
        sys.setswitchinterval(orig)

def test_recursionlimit(self):
    # set/get round-trip plus argument validation; restores the old limit.
    self.assertRaises(TypeError, sys.getrecursionlimit, 42)
    oldlimit = sys.getrecursionlimit()
    self.assertRaises(TypeError, sys.setrecursionlimit)
    self.assertRaises(ValueError, sys.setrecursionlimit, -42)
    sys.setrecursionlimit(10000)
    self.assertEqual(sys.getrecursionlimit(), 10000)
    sys.setrecursionlimit(oldlimit)

def test_recursionlimit_recovery(self):
    # Hitting the recursion limit twice must raise RecursionError both times.
    if hasattr(sys, 'gettrace') and sys.gettrace():
        self.skipTest('fatal error if run with a trace function')
    oldlimit = sys.getrecursionlimit()
    def f():
        f()
    try:
        for depth in (10, 25, 50, 75, 100, 250, 1000):
            try:
                sys.setrecursionlimit(depth)
            except RecursionError:
                # Issue #25274: The recursion limit is too low at the
                # current recursion depth
                continue
            # Issue #5392: test stack overflow after hitting recursion
            # limit twice
            self.assertRaises(RecursionError, f)
            self.assertRaises(RecursionError, f)
    finally:
        sys.setrecursionlimit(oldlimit)

@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
    # Issue #25274: Setting a low recursion limit must be blocked if the
    # current recursion depth is already higher than the "lower-water
    # mark". Otherwise, it may not be possible anymore to
    # reset the overflowed flag to 0.
    from _testcapi import get_recursion_depth
    def set_recursion_limit_at_depth(depth, limit):
        # Recurse until *depth* is reached, then verify the limit is refused.
        recursion_depth = get_recursion_depth()
        if recursion_depth >= depth:
            with self.assertRaises(RecursionError) as cm:
                sys.setrecursionlimit(limit)
            self.assertRegex(str(cm.exception),
                "cannot set the recursion limit to [0-9]+ "
                "at the recursion depth [0-9]+: "
                "the limit is too low")
        else:
            set_recursion_limit_at_depth(depth, limit)
    oldlimit = sys.getrecursionlimit()
    try:
        sys.setrecursionlimit(1000)
        for limit in (10, 25, 50, 75, 100, 150, 200):
            # formula extracted from _Py_RecursionLimitLowerWaterMark()
            if limit > 200:
                depth = limit - 50
            else:
                depth = limit * 3 // 4
            set_recursion_limit_at_depth(depth, limit)
    finally:
        sys.setrecursionlimit(oldlimit)
def test_recursionlimit_fatalerror(self):
    # A fatal error occurs if a second recursion limit is hit when recovering
    # from a first one.
    code = textwrap.dedent("""
        import sys

        def f():
            try:
                f()
            except RecursionError:
                f()

        sys.setrecursionlimit(%d)
        f()""")
    with test.support.SuppressCrashReport():
        for i in (50, 1000):
            # Run the crashing code in a child so the fatal error does not
            # take down the test process.
            sub = subprocess.Popen([sys.executable, '-c', code % i],
                stderr=subprocess.PIPE)
            err = sub.communicate()[1]
            self.assertTrue(sub.returncode, sub.returncode)
            self.assertIn(
                b"Fatal Python error: Cannot recover from stack overflow",
                err)

def test_getwindowsversion(self):
    # Raise SkipTest if sys doesn't have getwindowsversion attribute
    test.support.get_attribute(sys, "getwindowsversion")
    v = sys.getwindowsversion()
    self.assertEqual(len(v), 5)
    self.assertIsInstance(v[0], int)
    self.assertIsInstance(v[1], int)
    self.assertIsInstance(v[2], int)
    self.assertIsInstance(v[3], int)
    self.assertIsInstance(v[4], str)
    self.assertRaises(IndexError, operator.getitem, v, 5)
    self.assertIsInstance(v.major, int)
    self.assertIsInstance(v.minor, int)
    self.assertIsInstance(v.build, int)
    self.assertIsInstance(v.platform, int)
    self.assertIsInstance(v.service_pack, str)
    self.assertIsInstance(v.service_pack_minor, int)
    self.assertIsInstance(v.service_pack_major, int)
    self.assertIsInstance(v.suite_mask, int)
    self.assertIsInstance(v.product_type, int)
    self.assertEqual(v[0], v.major)
    self.assertEqual(v[1], v.minor)
    self.assertEqual(v[2], v.build)
    self.assertEqual(v[3], v.platform)
    self.assertEqual(v[4], v.service_pack)

    # This is how platform.py calls it. Make sure tuple
    # still has 5 elements
    maj, min, buildno, plat, csd = sys.getwindowsversion()

def test_call_tracing(self):
    # sys.call_tracing requires a callable and an argument *tuple*.
    self.assertRaises(TypeError, sys.call_tracing, type, 2)

@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
                     'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
    # set/get round-trip of the dlopen flags (POSIX only); restores on exit.
    self.assertTrue(hasattr(sys, "getdlopenflags"))
    self.assertRaises(TypeError, sys.getdlopenflags, 42)
    oldflags = sys.getdlopenflags()
    self.assertRaises(TypeError, sys.setdlopenflags)
    sys.setdlopenflags(oldflags+1)
    self.assertEqual(sys.getdlopenflags(), oldflags+1)
    sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
    # n here must be a global in order for this test to pass while
    # tracing with a python function. Tracing calls PyFrame_FastToLocals
    # which will add a copy of any locals to the frame object, causing
    # the reference count to increase by 2 instead of 1.
    global n
    self.assertRaises(TypeError, sys.getrefcount)
    c = sys.getrefcount(None)
    n = None
    self.assertEqual(sys.getrefcount(None), c+1)
    del n
    self.assertEqual(sys.getrefcount(None), c)
    if hasattr(sys, "gettotalrefcount"):
        self.assertIsInstance(sys.gettotalrefcount(), int)

def test_getframe(self):
    # Argument validation; frame 0 must be this very function's frame.
    self.assertRaises(TypeError, sys._getframe, 42, 42)
    self.assertRaises(ValueError, sys._getframe, 2000000000)
    self.assertTrue(
        SysModuleTest.test_getframe.__code__ \
        is sys._getframe().f_code
    )

# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
    # Dispatch to the threaded or unthreaded variant of the check.
    have_threads = True
    try:
        import _thread
    except ImportError:
        have_threads = False

    if have_threads:
        self.current_frames_with_threads()
    else:
        self.current_frames_without_threads()

# Test sys._current_frames() in a WITH_THREADS build.
@test.support.reap_threads
def current_frames_with_threads(self):
    import threading
    import traceback

    # Spawn a thread that blocks at a known place. Then the main
    # thread does sys._current_frames(), and verifies that the frames
    # returned make sense.
    entered_g = threading.Event()
    leave_g = threading.Event()
    thread_info = [] # the thread's id

    def f123():
        g456()

    def g456():
        thread_info.append(threading.get_ident())
        entered_g.set()
        leave_g.wait()

    t = threading.Thread(target=f123)
    t.start()
    entered_g.wait()

    # At this point, t has finished its entered_g.set(), although it's
    # impossible to guess whether it's still on that line or has moved on
    # to its leave_g.wait().
    self.assertEqual(len(thread_info), 1)
    thread_id = thread_info[0]

    d = sys._current_frames()

    main_id = threading.get_ident()
    self.assertIn(main_id, d)
    self.assertIn(thread_id, d)

    # Verify that the captured main-thread frame is _this_ frame.
    frame = d.pop(main_id)
    self.assertTrue(frame is sys._getframe())

    # Verify that the captured thread frame is blocked in g456, called
    # from f123. This is a little tricky, since various bits of
    # threading.py are also in the thread's call stack.
    frame = d.pop(thread_id)
    stack = traceback.extract_stack(frame)
    for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
        if funcname == "f123":
            break
    else:
        self.fail("didn't find f123() on thread's call stack")
    self.assertEqual(sourceline, "g456()")

    # And the next record must be for g456().
    filename, lineno, funcname, sourceline = stack[i+1]
    self.assertEqual(funcname, "g456")
    self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])

    # Reap the spawned thread.
    leave_g.set()
    t.join()

# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
    # Not much happens here: there is only one thread, with artificial
    # "thread id" 0.
    d = sys._current_frames()
    self.assertEqual(len(d), 1)
    self.assertIn(0, d)
    self.assertTrue(d[0] is sys._getframe())
def test_attributes(self):
    """Spot-check types and values of sys's documented data attributes."""
    self.assertIsInstance(sys.api_version, int)
    self.assertIsInstance(sys.argv, list)
    self.assertIn(sys.byteorder, ("little", "big"))
    self.assertIsInstance(sys.builtin_module_names, tuple)
    self.assertIsInstance(sys.copyright, str)
    self.assertIsInstance(sys.exec_prefix, str)
    self.assertIsInstance(sys.base_exec_prefix, str)
    self.assertIsInstance(sys.executable, str)
    self.assertEqual(len(sys.float_info), 11)
    self.assertEqual(sys.float_info.radix, 2)
    self.assertEqual(len(sys.int_info), 2)
    self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
    self.assertTrue(sys.int_info.sizeof_digit >= 1)
    self.assertEqual(type(sys.int_info.bits_per_digit), int)
    self.assertEqual(type(sys.int_info.sizeof_digit), int)
    self.assertIsInstance(sys.hexversion, int)

    self.assertEqual(len(sys.hash_info), 9)
    self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
    # sys.hash_info.modulus should be a prime; we do a quick
    # probable primality test (doesn't exclude the possibility of
    # a Carmichael number)
    for x in range(1, 100):
        self.assertEqual(
            pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
            1,
            "sys.hash_info.modulus {} is a non-prime".format(
                sys.hash_info.modulus)
            )
    self.assertIsInstance(sys.hash_info.inf, int)
    self.assertIsInstance(sys.hash_info.nan, int)
    self.assertIsInstance(sys.hash_info.imag, int)
    algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
    if sys.hash_info.algorithm in {"fnv", "siphash24"}:
        self.assertIn(sys.hash_info.hash_bits, {32, 64})
        self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})

        if algo == 1:
            self.assertEqual(sys.hash_info.algorithm, "siphash24")
        elif algo == 2:
            self.assertEqual(sys.hash_info.algorithm, "fnv")
        else:
            self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
    else:
        # PY_HASH_EXTERNAL
        self.assertEqual(algo, 0)
    self.assertGreaterEqual(sys.hash_info.cutoff, 0)
    self.assertLess(sys.hash_info.cutoff, 8)

    self.assertIsInstance(sys.maxsize, int)
    self.assertIsInstance(sys.maxunicode, int)
    self.assertEqual(sys.maxunicode, 0x10FFFF)
    self.assertIsInstance(sys.platform, str)
    self.assertIsInstance(sys.prefix, str)
    self.assertIsInstance(sys.base_prefix, str)
    self.assertIsInstance(sys.version, str)
    vi = sys.version_info
    self.assertIsInstance(vi[:], tuple)
    self.assertEqual(len(vi), 5)
    self.assertIsInstance(vi[0], int)
    self.assertIsInstance(vi[1], int)
    self.assertIsInstance(vi[2], int)
    self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
    self.assertIsInstance(vi[4], int)
    self.assertIsInstance(vi.major, int)
    self.assertIsInstance(vi.minor, int)
    self.assertIsInstance(vi.micro, int)
    self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
    self.assertIsInstance(vi.serial, int)
    self.assertEqual(vi[0], vi.major)
    self.assertEqual(vi[1], vi.minor)
    self.assertEqual(vi[2], vi.micro)
    self.assertEqual(vi[3], vi.releaselevel)
    self.assertEqual(vi[4], vi.serial)
    self.assertTrue(vi > (1,0,0))
    self.assertIsInstance(sys.float_repr_style, str)
    self.assertIn(sys.float_repr_style, ('short', 'legacy'))
    if not sys.platform.startswith('win'):
        self.assertIsInstance(sys.abiflags, str)

@unittest.skipUnless(hasattr(sys, 'thread_info'),
                     'Threading required for this test.')
def test_thread_info(self):
    # sys.thread_info is a (name, lock, version) triple describing the
    # threading implementation.
    info = sys.thread_info
    self.assertEqual(len(info), 3)
    self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
    self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))

def test_43581(self):
    # Can't use sys.stdout, as this is a StringIO object when
    # the test runs under regrtest.
    self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global numruns
numruns += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(numruns)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning", "quiet", "hash_randomization", "isolated")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type()
with self.assertRaises(TypeError):
attr_type.__new__(attr_type)
def test_sys_flags_no_instantiation(self):
    """type(sys.flags) must not be constructible."""
    self.assert_raise_on_new_sys_type(sys.flags)

def test_sys_version_info_no_instantiation(self):
    """type(sys.version_info) must not be constructible."""
    self.assert_raise_on_new_sys_type(sys.version_info)

def test_sys_getwindowsversion_no_instantiation(self):
    """type(sys.getwindowsversion()) must not be constructible."""
    # Skip if not being run on Windows.
    test.support.get_attribute(sys, "getwindowsversion")
    self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
    """sys._clear_type_cache() is callable without error (CPython only)."""
    sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(test.support.FS_NONASCII,
                     'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
                     'requires FS encoding to match locale')
def test_ioencoding_nonascii(self):
    """An empty PYTHONIOENCODING leaves the locale encoding in effect."""
    env = dict(os.environ)
    env["PYTHONIOENCODING"] = ""
    # Print a repr of a non-ASCII filesystem character in the child.
    p = subprocess.Popen([sys.executable, "-c",
                          'print(%a)' % test.support.FS_NONASCII],
                         stdout=subprocess.PIPE, env=env)
    out = p.communicate()[0].strip()
    self.assertEqual(out, os.fsencode(test.support.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
                 'Test is not venv-compatible')
def test_executable(self):
    """sys.executable is absolute, and becomes empty for a bogus argv[0]."""
    # sys.executable should be absolute
    self.assertEqual(os.path.abspath(sys.executable), sys.executable)
    # Issue #7774: Ensure that sys.executable is an empty string if argv[0]
    # has been set to a non existent program name and Python is unable to
    # retrieve the real program name
    # For a normal installation, it should work without 'cwd'
    # argument. For test runs in the build directory, see #7774.
    python_dir = os.path.dirname(os.path.realpath(sys.executable))
    p = subprocess.Popen(
        ["nonexistent", "-c",
         'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
        executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
    stdout = p.communicate()[0]
    executable = stdout.strip().decode("ASCII")
    p.wait()
    # Either the child could not resolve itself (b'') or it resolved to
    # the same interpreter path.
    self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
    """Assert *fs_encoding* names a real codec and, if given, equals *expected*."""
    self.assertIsNotNone(fs_encoding)
    # Raises LookupError if the name is not a registered codec.
    codecs.lookup(fs_encoding)
    if expected:
        self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
    """The filesystem encoding is a valid codec (always UTF-8 on macOS)."""
    expected = 'utf-8' if sys.platform == 'darwin' else None
    self.check_fsencoding(sys.getfilesystemencoding(), expected)
def c_locale_get_error_handler(self, isolated=False, encoding=None):
    """Run a child interpreter under the POSIX locale and report the error
    handlers of its standard streams.

    Returns the child's stdout: three lines of the form
    ``"<stream>: <errors>"`` for stdin, stdout and stderr.

    :param isolated: run the child with ``-I`` (isolated mode).
    :param encoding: value for PYTHONIOENCODING; ``None`` removes the
        variable from the child environment entirely.
    """
    # Force the POSIX locale so the child picks its default handlers.
    env = os.environ.copy()
    env["LC_ALL"] = "C"
    code = '\n'.join((
        'import sys',
        'def dump(name):',
        '    std = getattr(sys, name)',
        '    print("%s: %s" % (name, std.errors))',
        'dump("stdin")',
        'dump("stdout")',
        'dump("stderr")',
    ))
    args = [sys.executable, "-c", code]
    if isolated:
        args.append("-I")
    if encoding is not None:
        env['PYTHONIOENCODING'] = encoding
    else:
        env.pop('PYTHONIOENCODING', None)
    # stderr is merged into stdout via STDOUT, so communicate() returns
    # (stdout, None); the old unused ``stderr`` binding was misleading.
    p = subprocess.Popen(args,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         env=env,
                         universal_newlines=True)
    return p.communicate()[0]
def test_c_locale_surrogateescape(self):
    """Default stream error handlers under the POSIX locale, with and
    without PYTHONIOENCODING overrides."""
    out = self.c_locale_get_error_handler(isolated=True)
    self.assertEqual(out,
                     'stdin: surrogateescape\n'
                     'stdout: surrogateescape\n'
                     'stderr: backslashreplace\n')
    # replace the default error handler
    out = self.c_locale_get_error_handler(encoding=':ignore')
    self.assertEqual(out,
                     'stdin: ignore\n'
                     'stdout: ignore\n'
                     'stderr: backslashreplace\n')
    # force the encoding (an explicit encoding implies the strict handler)
    out = self.c_locale_get_error_handler(encoding='iso8859-1')
    self.assertEqual(out,
                     'stdin: strict\n'
                     'stdout: strict\n'
                     'stderr: backslashreplace\n')
    out = self.c_locale_get_error_handler(encoding='iso8859-1:')
    self.assertEqual(out,
                     'stdin: strict\n'
                     'stdout: strict\n'
                     'stderr: backslashreplace\n')
    # an empty encoding/handler spec has no effect
    out = self.c_locale_get_error_handler(encoding=':')
    self.assertEqual(out,
                     'stdin: surrogateescape\n'
                     'stdout: surrogateescape\n'
                     'stderr: backslashreplace\n')
    out = self.c_locale_get_error_handler(encoding='')
    self.assertEqual(out,
                     'stdin: surrogateescape\n'
                     'stdout: surrogateescape\n'
                     'stderr: backslashreplace\n')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
    """sys._debugmallocstats() writes allocator statistics to stderr."""
    # Test sys._debugmallocstats()
    from test.support.script_helper import assert_python_ok
    args = ['-c', 'import sys; sys._debugmallocstats()']
    ret, out, err = assert_python_ok(*args)
    self.assertIn(b"free PyDictObjects", err)
    # The function has no parameter
    self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
                     "sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
    """Sanity-check sys.getallocatedblocks() against gc activity."""
    # Some sanity checks
    with_pymalloc = sysconfig.get_config_var('WITH_PYMALLOC')
    a = sys.getallocatedblocks()
    self.assertIs(type(a), int)
    if with_pymalloc:
        self.assertGreater(a, 0)
    else:
        # When WITH_PYMALLOC isn't available, we don't know anything
        # about the underlying implementation: the function might
        # return 0 or something greater.
        self.assertGreaterEqual(a, 0)
    try:
        # While we could imagine a Python session where the number of
        # multiple buffer objects would exceed the sharing of references,
        # it is unlikely to happen in a normal test run.
        self.assertLess(a, sys.gettotalrefcount())
    except AttributeError:
        # gettotalrefcount() not available
        pass
    gc.collect()
    b = sys.getallocatedblocks()
    self.assertLessEqual(b, a)
    gc.collect()
    c = sys.getallocatedblocks()
    # Allow some slack for allocations made by the test machinery itself.
    self.assertIn(c, range(b - 50, b + 50))
@test.support.requires_type_collecting
def test_is_finalizing(self):
    """sys.is_finalizing() is False normally and True during shutdown."""
    self.assertIs(sys.is_finalizing(), False)
    # Don't use the atexit module because _Py_Finalizing is only set
    # after calling atexit callbacks
    code = """if 1:
        import sys
        class AtExit:
            is_finalizing = sys.is_finalizing
            print = print
            def __del__(self):
                self.print(self.is_finalizing(), flush=True)
        # Keep a reference in the __main__ module namespace, so the
        # AtExit destructor will be called at Python exit
        ref = AtExit()
        """
    rc, stdout, stderr = assert_python_ok('-c', code)
    self.assertEqual(stdout.rstrip(), b'True')
@unittest.skipUnless(hasattr(sys, 'getandroidapilevel'),
                     'need sys.getandroidapilevel()')
def test_getandroidapilevel(self):
    """sys.getandroidapilevel() returns a positive int (Android only)."""
    level = sys.getandroidapilevel()
    self.assertIsInstance(level, int)
    self.assertGreater(level, 0)
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
    """Cache the platform sizes used by the sizeof checks below."""
    self.P = struct.calcsize('P')  # size of a pointer
    self.longdigit = sys.int_info.sizeof_digit  # size of one int digit
    import _testcapi
    self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD  # per-object gc header

# Shorthand for the shared sizeof-assertion helper.
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
    """Gc-tracked objects include the gc header in their reported size."""
    # Check that the gc header size is added to objects tracked by the gc.
    vsize = test.support.calcvobjsize
    gc_header_size = self.gc_headsize
    # bool objects are not gc tracked
    self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
    # but lists are
    self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
    """sys.getsizeof() error paths for broken __sizeof__ implementations."""
    class BadSizeof:
        def __sizeof__(self):
            raise ValueError
    # Exceptions from __sizeof__ propagate.
    self.assertRaises(ValueError, sys.getsizeof, BadSizeof())

    class InvalidSizeof:
        def __sizeof__(self):
            return None
    self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
    # The explicit default is returned even when __sizeof__ is invalid.
    sentinel = ["sentinel"]
    self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)

    class FloatSizeof:
        def __sizeof__(self):
            return 4.5
    self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
    self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)

    class OverflowSizeof(int):
        def __sizeof__(self):
            return int(self)
    self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
                     sys.maxsize + self.gc_headsize)
    with self.assertRaises(OverflowError):
        sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
    with self.assertRaises(ValueError):
        sys.getsizeof(OverflowSizeof(-1))
    with self.assertRaises((ValueError, OverflowError)):
        sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
    """The *default* argument is ignored when sizeof succeeds."""
    size = test.support.calcvobjsize
    self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
    self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
    """Spot-check sys.getsizeof() for every object type defined in Objects/.

    Each expected size is computed from a struct format string describing
    the C-level layout of the object (via calcobjsize/calcvobjsize).
    """
    # check all types defined in Objects/
    calcsize = struct.calcsize
    size = test.support.calcobjsize
    vsize = test.support.calcvobjsize
    check = self.check_sizeof
    # bool
    check(True, vsize('') + self.longdigit)
    # buffer
    # XXX
    # builtin_function_or_method
    check(len, size('4P'))  # XXX check layout
    # bytearray
    samples = [b'', b'u'*100000]
    for sample in samples:
        x = bytearray(sample)
        check(x, vsize('n2Pi') + x.__alloc__())
    # bytearray_iterator
    check(iter(bytearray()), size('nP'))
    # bytes
    check(b'', vsize('n') + 1)
    check(b'x' * 10, vsize('n') + 11)
    # cell
    def get_cell():
        x = 42
        def inner():
            return x
        return inner
    check(get_cell().__closure__[0], size('P'))
    # code
    check(get_cell().__code__, size('6i13P'))
    check(get_cell.__code__, size('6i13P'))
    def get_cell2(x):
        def inner():
            return x
        return inner
    check(get_cell2.__code__, size('6i13P') + 1)
    # complex
    check(complex(0,1), size('2d'))
    # method_descriptor (descriptor object)
    check(str.lower, size('3PP'))
    # classmethod_descriptor (descriptor object)
    # XXX
    # member_descriptor (descriptor object)
    import datetime
    check(datetime.timedelta.days, size('3PP'))
    # getset_descriptor (descriptor object)
    import collections
    check(collections.defaultdict.default_factory, size('3PP'))
    # wrapper_descriptor (descriptor object)
    check(int.__add__, size('3P2P'))
    # method-wrapper (descriptor object)
    check({}.__iter__, size('2P'))
    # dict
    check({}, size('nQ2P') + calcsize('2nP2n') + 8 + (8*2//3)*calcsize('n2P'))
    longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
    check(longdict, size('nQ2P') + calcsize('2nP2n') + 16 + (16*2//3)*calcsize('n2P'))
    # dictionary-keyview
    check({}.keys(), size('P'))
    # dictionary-valueview
    check({}.values(), size('P'))
    # dictionary-itemview
    check({}.items(), size('P'))
    # dictionary iterator
    check(iter({}), size('P2nPn'))
    # dictionary-keyiterator
    check(iter({}.keys()), size('P2nPn'))
    # dictionary-valueiterator
    check(iter({}.values()), size('P2nPn'))
    # dictionary-itemiterator
    check(iter({}.items()), size('P2nPn'))
    # dictproxy
    class C(object): pass
    check(C.__dict__, size('P'))
    # BaseException
    check(BaseException(), size('5Pb'))
    # UnicodeEncodeError
    check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
    # UnicodeDecodeError
    check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
    # UnicodeTranslateError
    check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
    # ellipses
    check(Ellipsis, size(''))
    # EncodingMap
    import codecs, encodings.iso8859_3
    x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
    check(x, size('32B2iB'))
    # enumerate
    check(enumerate([]), size('n3P'))
    # reverse
    check(reversed(''), size('nP'))
    # float
    check(float(0), size('d'))
    # sys.floatinfo
    check(sys.float_info, vsize('') + self.P * len(sys.float_info))
    # frame
    import inspect
    CO_MAXBLOCKS = 20
    x = inspect.currentframe()
    ncells = len(x.f_code.co_cellvars)
    nfrees = len(x.f_code.co_freevars)
    extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
             ncells + nfrees - 1
    check(x, vsize('12P3ic' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
    # function
    def func(): pass
    check(func, size('12P'))
    class c():
        @staticmethod
        def foo():
            pass
        @classmethod
        def bar(cls):
            pass
        # staticmethod
        check(foo, size('PP'))
        # classmethod
        check(bar, size('PP'))
    # generator
    def get_gen(): yield 1
    check(get_gen(), size('Pb2PPP'))
    # iterator
    check(iter('abc'), size('lP'))
    # callable-iterator
    import re
    check(re.finditer('',''), size('2P'))
    # list
    samples = [[], [1,2,3], ['1', '2', '3']]
    for sample in samples:
        check(sample, vsize('Pn') + len(sample)*self.P)
    # sortwrapper (list)
    # XXX
    # cmpwrapper (list)
    # XXX
    # listiterator (list)
    check(iter([]), size('lP'))
    # listreverseiterator (list)
    check(reversed([]), size('nP'))
    # int
    check(0, vsize(''))
    check(1, vsize('') + self.longdigit)
    check(-1, vsize('') + self.longdigit)
    PyLong_BASE = 2**sys.int_info.bits_per_digit
    check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
    check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
    check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
    # module
    check(unittest, size('PnPPP'))
    # None
    check(None, size(''))
    # NotImplementedType
    check(NotImplemented, size(''))
    # object
    check(object(), size(''))
    # property (descriptor object)
    class C(object):
        def getx(self): return self.__x
        def setx(self, value): self.__x = value
        def delx(self): del self.__x
        x = property(getx, setx, delx, "")
        check(x, size('4Pi'))
    # PyCapsule
    # XXX
    # rangeiterator
    check(iter(range(1)), size('4l'))
    # reverse
    check(reversed(''), size('nP'))
    # range
    check(range(1), size('4P'))
    check(range(66000), size('4P'))
    # set
    # frozenset
    PySet_MINSIZE = 8
    samples = [[], range(10), range(50)]
    s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
    for sample in samples:
        minused = len(sample)
        if minused == 0: tmp = 1
        # the computation of minused is actually a bit more complicated
        # but this suffices for the sizeof test
        minused = minused*2
        newsize = PySet_MINSIZE
        while newsize <= minused:
            newsize = newsize << 1
        if newsize <= 8:
            check(set(sample), s)
            check(frozenset(sample), s)
        else:
            check(set(sample), s + newsize*calcsize('nP'))
            check(frozenset(sample), s + newsize*calcsize('nP'))
    # setiterator
    check(iter(set()), size('P3n'))
    # slice
    check(slice(0), size('3P'))
    # super
    check(super(int), size('3P'))
    # tuple
    check((), vsize(''))
    check((1,2,3), vsize('') + 3*self.P)
    # type
    # static type: PyTypeObject
    fmt = 'P2n15Pl4Pn9Pn11PIP'
    if hasattr(sys, 'getcounts'):
        fmt += '3n2P'
    s = vsize(fmt)
    check(int, s)
    s = vsize(fmt +  # PyTypeObject
              '3P'   # PyAsyncMethods
              '36P'  # PyNumberMethods
              '3P'   # PyMappingMethods
              '10P'  # PySequenceMethods
              '2P'   # PyBufferProcs
              '4P')
    # Separate block for PyDictKeysObject with 8 keys and 5 entries
    s += calcsize("2nP2n") + 8 + 5*calcsize("n2P")
    # class
    class newstyleclass(object): pass
    check(newstyleclass, s)
    # dict with shared keys
    check(newstyleclass().__dict__, size('nQ2P' + '2nP2n'))
    # unicode
    # each tuple contains a string and its expected character size
    # don't put any static strings here, as they may contain
    # wchar_t or UTF-8 representations
    samples = ['1'*100, '\xff'*50,
               '\u0100'*40, '\uffff'*100,
               '\U00010000'*30, '\U0010ffff'*100]
    asciifields = "nnbP"
    compactfields = asciifields + "nPn"
    unicodefields = compactfields + "P"
    for s in samples:
        maxchar = ord(max(s))
        if maxchar < 128:
            L = size(asciifields) + len(s) + 1
        elif maxchar < 256:
            L = size(compactfields) + len(s) + 1
        elif maxchar < 65536:
            L = size(compactfields) + 2*(len(s) + 1)
        else:
            L = size(compactfields) + 4*(len(s) + 1)
        check(s, L)
    # verify that the UTF-8 size is accounted for
    s = chr(0x4000)   # 4 bytes canonical representation
    check(s, size(compactfields) + 4)
    # compile() will trigger the generation of the UTF-8
    # representation as a side effect
    compile(s, "<stdin>", "eval")
    check(s, size(compactfields) + 4 + 4)
    # TODO: add check that forces the presence of wchar_t representation
    # TODO: add check that forces layout of unicodefields
    # weakref
    import weakref
    check(weakref.ref(int), size('2Pn2P'))
    # weakproxy
    # XXX
    # weakcallableproxy
    check(weakref.proxy(int), size('2Pn2P'))
def check_slots(self, obj, base, extra):
    """Assert sizeof(obj) == sizeof(base) plus struct format *extra*."""
    expected = sys.getsizeof(base) + struct.calcsize(extra)
    # Subclassing can make an instance gc-tracked when the base instance
    # is not; that adds the gc header to the total.
    if gc.is_tracked(obj) and not gc.is_tracked(base):
        expected += self.gc_headsize
    self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
    """Subclasses with __slots__ grow by one pointer per slot."""
    # check all subclassable types defined in Objects/ that allow
    # non-empty __slots__
    check = self.check_slots
    class BA(bytearray):
        __slots__ = 'a', 'b', 'c'
    check(BA(), bytearray(), '3P')
    class D(dict):
        __slots__ = 'a', 'b', 'c'
    check(D(x=[]), {'x': []}, '3P')
    class L(list):
        __slots__ = 'a', 'b', 'c'
    check(L(), [], '3P')
    class S(set):
        __slots__ = 'a', 'b', 'c'
    check(S(), set(), '3P')
    class FS(frozenset):
        __slots__ = 'a', 'b', 'c'
    check(FS(), frozenset(), '3P')
    from collections import OrderedDict
    class OD(OrderedDict):
        __slots__ = 'a', 'b', 'c'
    check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
    """Spot-check sizes of the types defined in Python/."""
    # check all types defined in Python/
    size = test.support.calcobjsize
    vsize = test.support.calcvobjsize
    check = self.check_sizeof
    # _ast.AST
    import _ast
    check(_ast.AST(), size('P'))
    # Raise and catch to obtain a live traceback object to measure.
    try:
        raise TypeError
    except TypeError:
        tb = sys.exc_info()[2]
        # traceback
        if tb is not None:
            check(tb, size('2P2i'))
    # symtable entry
    # XXX
    # sys.flags
    check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
    """get/set_asyncgen_hooks round-trips firstiter and finalizer, by both
    attribute and index access."""
    old = sys.get_asyncgen_hooks()
    self.assertIsNone(old.firstiter)
    self.assertIsNone(old.finalizer)
    firstiter = lambda *a: None
    sys.set_asyncgen_hooks(firstiter=firstiter)
    hooks = sys.get_asyncgen_hooks()
    self.assertIs(hooks.firstiter, firstiter)
    self.assertIs(hooks[0], firstiter)
    self.assertIs(hooks.finalizer, None)
    self.assertIs(hooks[1], None)
    finalizer = lambda *a: None
    sys.set_asyncgen_hooks(finalizer=finalizer)
    hooks = sys.get_asyncgen_hooks()
    self.assertIs(hooks.firstiter, firstiter)
    self.assertIs(hooks[0], firstiter)
    self.assertIs(hooks.finalizer, finalizer)
    self.assertIs(hooks[1], finalizer)
    # Restore the original (empty) hooks so other tests are unaffected.
    sys.set_asyncgen_hooks(*old)
    cur = sys.get_asyncgen_hooks()
    self.assertIsNone(cur.firstiter)
    self.assertIsNone(cur.finalizer)
def test_main():
    """Entry point for regrtest: run both test classes."""
    test.support.run_unittest(SysModuleTest, SizeofTest)

if __name__ == "__main__":
    test_main()
|
wallet_multiwallet.py | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a bitcoind node can load multiple wallet files
"""
from decimal import Decimal
from threading import Thread
import os
import shutil
import stat
import time
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
# Shared flag set by whichever thread first observes the loading race.
got_loading_error = False

def test_load_unload(node, name):
    """Hammer loadwallet/unloadwallet of *name* on *node* until some thread
    observes the concurrent "Wallet already loading" error, then stop."""
    global got_loading_error
    while True:
        if got_loading_error:
            return
        try:
            node.loadwallet(name)
            node.unloadwallet(name)
        except JSONRPCException as e:
            if e.error['code'] == -4 and 'Wallet already loading' in e.error['message']:
                got_loading_error = True
                return
            # NOTE(review): other RPC errors (e.g. load/unload races between
            # threads) fall through and the loop retries — presumably
            # intentional, but worth confirming.
class MultiWalletTest(BitcoinTestFramework):
def set_test_params(self):
    """Two nodes on a fresh chain; node0 starts without a default wallet."""
    self.setup_clean_chain = True
    self.num_nodes = 2
    # Generous timeout: this test starts/stops nodes many times.
    self.rpc_timeout = 120
    self.extra_args = [["-nowallet"], []]
def skip_test_if_missing_module(self):
    """Multiwallet tests require wallet support compiled into bitcoind."""
    self.skip_if_no_wallet()
def add_options(self, parser):
    """Add --data_wallets_dir, pointing at the bundled wallet fixtures."""
    parser.add_argument(
        '--data_wallets_dir',
        default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
        help='Test data with wallet directories (default: %(default)s)',
    )
def run_test(self):
node = self.nodes[0]
data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
wallet_dir = lambda *p: data_dir('wallets', *p)
wallet = lambda name: node.get_wallet_rpc(name)
def wallet_file(name):
if name == self.default_wallet_name:
return wallet_dir(self.default_wallet_name, self.wallet_data_filename)
if os.path.isdir(wallet_dir(name)):
return wallet_dir(name, "wallet.dat")
return wallet_dir(name)
assert_equal(self.nodes[0].listwalletdir(), {'wallets': [{'name': self.default_wallet_name}]})
# check wallet.dat is created
self.stop_nodes()
assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True)
# create symlink to verify wallet directory path can be referenced
# through symlink
os.mkdir(wallet_dir('w7'))
os.symlink('w7', wallet_dir('w7_symlink'))
os.symlink('..', wallet_dir('recursive_dir_symlink'))
os.mkdir(wallet_dir('self_walletdat_symlink'))
os.symlink('wallet.dat', wallet_dir('self_walletdat_symlink/wallet.dat'))
# rename wallet.dat to make sure plain wallet file paths (as opposed to
# directory paths) can be loaded
# create another dummy wallet for use in testing backups later
self.start_node(0)
node.createwallet("empty")
node.createwallet("plain")
node.createwallet("created")
self.stop_nodes()
empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
os.rename(wallet_file("empty"), empty_wallet)
shutil.rmtree(wallet_dir("empty"))
empty_created_wallet = os.path.join(self.options.tmpdir, 'empty.created.dat')
os.rename(wallet_dir("created", self.wallet_data_filename), empty_created_wallet)
shutil.rmtree(wallet_dir("created"))
os.rename(wallet_file("plain"), wallet_dir("w8"))
shutil.rmtree(wallet_dir("plain"))
# restart node with a mix of wallet names:
# w1, w2, w3 - to verify new wallets created when non-existing paths specified
# w - to verify wallet name matching works when one wallet path is prefix of another
# sub/w5 - to verify relative wallet path is created correctly
# extern/w6 - to verify absolute wallet path is created correctly
# w7_symlink - to verify symlinked wallet path is initialized correctly
# w8 - to verify existing wallet file is loaded correctly. Not tested for SQLite wallets as this is a deprecated BDB behavior.
# '' - to verify default wallet file is created correctly
to_create = ['w1', 'w2', 'w3', 'w', 'sub/w5', 'w7_symlink']
in_wallet_dir = [w.replace('/', os.path.sep) for w in to_create] # Wallets in the wallet dir
in_wallet_dir.append('w7') # w7 is not loaded or created, but will be listed by listwalletdir because w7_symlink
to_create.append(os.path.join(self.options.tmpdir, 'extern/w6')) # External, not in the wallet dir, so we need to avoid adding it to in_wallet_dir
to_load = [self.default_wallet_name]
if not self.options.descriptors:
to_load.append('w8')
wallet_names = to_create + to_load # Wallet names loaded in the wallet
in_wallet_dir += to_load # The loaded wallets are also in the wallet dir
self.start_node(0)
for wallet_name in to_create:
self.nodes[0].createwallet(wallet_name)
for wallet_name in to_load:
self.nodes[0].loadwallet(wallet_name)
os.mkdir(wallet_dir('no_access'))
os.chmod(wallet_dir('no_access'), 0)
try:
with self.nodes[0].assert_debug_log(expected_msgs=['Error scanning']):
walletlist = self.nodes[0].listwalletdir()['wallets']
finally:
# Need to ensure access is restored for cleanup
os.chmod(wallet_dir('no_access'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
assert_equal(sorted(map(lambda w: w['name'], walletlist)), sorted(in_wallet_dir))
assert_equal(set(node.listwallets()), set(wallet_names))
# should raise rpc error if wallet path can't be created
err_code = -4 if self.options.descriptors else -1
assert_raises_rpc_error(err_code, "boost::filesystem::create_directory:", self.nodes[0].createwallet, "w8/bad")
# check that all requested wallets were created
self.stop_node(0)
for wallet_name in wallet_names:
assert_equal(os.path.isfile(wallet_file(wallet_name)), True)
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())
self.start_node(0, ['-wallet=w1', '-wallet=w1'])
self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.')
if not self.options.descriptors:
# Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
# should not initialize if one wallet is a copy of another
shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
in_wallet_dir.append('w8_copy')
exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
# should not initialize if wallet file is a symlink
os.symlink('w8', wallet_dir('w8_symlink'))
self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)
# should not initialize if the specified walletdir does not exist
self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
# should not initialize if the specified walletdir is not a directory
not_a_dir = wallet_dir('notadir')
open(not_a_dir, 'a', encoding="utf8").close()
self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
self.log.info("Do not allow -upgradewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet'], "Error: Error parsing command line arguments: Invalid parameter -upgradewallet")
# if wallets/ doesn't exist, datadir should be the default wallet dir
wallet_dir2 = data_dir('walletdir')
os.rename(wallet_dir(), wallet_dir2)
self.start_node(0)
self.nodes[0].createwallet("w4")
self.nodes[0].createwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
node.generatetoaddress(nblocks=1, address=w5.getnewaddress())
# now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
os.rename(wallet_dir2, wallet_dir())
self.restart_node(0, ['-nowallet', '-walletdir=' + data_dir()])
self.nodes[0].loadwallet("w4")
self.nodes[0].loadwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
w5_info = w5.getwalletinfo()
assert_equal(w5_info['immature_balance'], 50)
competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
os.mkdir(competing_wallet_dir)
self.restart_node(0, ['-nowallet', '-walletdir=' + competing_wallet_dir])
self.nodes[0].createwallet(self.default_wallet_name)
if self.options.descriptors:
exp_stderr = r"Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another bitcoind?"
else:
exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\S*\"!"
self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.restart_node(0)
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
wallets = [wallet(w) for w in wallet_names]
wallet_bad = wallet("bad")
# check wallet names and balances
node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
for wallet_name, wallet in zip(wallet_names, wallets):
info = wallet.getwalletinfo()
assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
assert_equal(info['walletname'], wallet_name)
# accessing invalid wallet fails
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w1, w2, w3, w4, *_ = wallets
node.generatetoaddress(nblocks=101, address=w1.getnewaddress())
assert_equal(w1.getbalance(), 100)
assert_equal(w2.getbalance(), 0)
assert_equal(w3.getbalance(), 0)
assert_equal(w4.getbalance(), 0)
w1.sendtoaddress(w2.getnewaddress(), 1)
w1.sendtoaddress(w3.getnewaddress(), 2)
w1.sendtoaddress(w4.getnewaddress(), 3)
node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
assert_equal(w2.getbalance(), 1)
assert_equal(w3.getbalance(), 2)
assert_equal(w4.getbalance(), 3)
batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
assert_equal(batch[0]["result"]["chain"], self.chain)
assert_equal(batch[1]["result"]["walletname"], "w1")
self.log.info('Check for per-wallet settxfee call')
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 0)
w2.settxfee(0.001)
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))
self.log.info("Test dynamic wallet loading")
self.restart_node(0, ['-nowallet'])
assert_equal(node.listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", node.getwalletinfo)
self.log.info("Load first wallet")
loadwallet_name = node.loadwallet(wallet_names[0])
assert_equal(loadwallet_name['name'], wallet_names[0])
assert_equal(node.listwallets(), wallet_names[0:1])
node.getwalletinfo()
w1 = node.get_wallet_rpc(wallet_names[0])
w1.getwalletinfo()
self.log.info("Load second wallet")
loadwallet_name = node.loadwallet(wallet_names[1])
assert_equal(loadwallet_name['name'], wallet_names[1])
assert_equal(node.listwallets(), wallet_names[0:2])
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w2 = node.get_wallet_rpc(wallet_names[1])
w2.getwalletinfo()
self.log.info("Concurrent wallet loading")
threads = []
for _ in range(3):
n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
t = Thread(target=test_load_unload, args=(n, wallet_names[2]))
t.start()
threads.append(t)
for t in threads:
t.join()
global got_loading_error
assert_equal(got_loading_error, True)
self.log.info("Load remaining wallets")
for wallet_name in wallet_names[2:]:
loadwallet_name = self.nodes[0].loadwallet(wallet_name)
assert_equal(loadwallet_name['name'], wallet_name)
assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))
# Fail to load if wallet doesn't exist
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallets")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path), self.nodes[0].loadwallet, 'wallets')
# Fail to load duplicate wallets
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", "wallet.dat")
if self.options.descriptors:
assert_raises_rpc_error(-4, "Wallet file verification failed. SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another bitcoind?", self.nodes[0].loadwallet, wallet_names[0])
else:
assert_raises_rpc_error(-35, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, wallet_names[0])
# This tests the default wallet that BDB makes, so SQLite wallet doesn't need to test this
# Fail to load duplicate wallets by different ways (directory and filepath)
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallet.dat")
assert_raises_rpc_error(-35, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, 'wallet.dat')
# Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
# Fail to load if one wallet is a copy of another
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if wallet file is a symlink
assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')
# Fail to load if a directory is specified that doesn't contain a wallet
os.mkdir(wallet_dir('empty_wallet_dir'))
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "empty_wallet_dir")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Data is not in recognized format.".format(path), self.nodes[0].loadwallet, 'empty_wallet_dir')
self.log.info("Test dynamic wallet creation.")
# Fail to create a wallet if it already exists.
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w2")
assert_raises_rpc_error(-4, "Failed to create database path '{}'. Database already exists.".format(path), self.nodes[0].createwallet, 'w2')
# Successfully create a wallet with a new name
loadwallet_name = self.nodes[0].createwallet('w9')
in_wallet_dir.append('w9')
assert_equal(loadwallet_name['name'], 'w9')
w9 = node.get_wallet_rpc('w9')
assert_equal(w9.getwalletinfo()['walletname'], 'w9')
assert 'w9' in self.nodes[0].listwallets()
# Successfully create a wallet using a full path
new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
new_wallet_name = os.path.join(new_wallet_dir, 'w10')
loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
assert_equal(loadwallet_name['name'], new_wallet_name)
w10 = node.get_wallet_rpc(new_wallet_name)
assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)
assert new_wallet_name in self.nodes[0].listwallets()
self.log.info("Test dynamic wallet unloading")
# Test `unloadwallet` errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
assert_raises_rpc_error(-8, "RPC endpoint wallet and wallet_name parameter specify different wallets", w1.unloadwallet, "w2"),
# Successfully unload the specified wallet name
self.nodes[0].unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Unload w1 again, this time providing the wallet name twice
self.nodes[0].loadwallet("w1")
assert 'w1' in self.nodes[0].listwallets()
w1.unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Successfully unload the wallet referenced by the request endpoint
# Also ensure unload works during walletpassphrase timeout
w2.encryptwallet('test')
w2.walletpassphrase('test', 1)
w2.unloadwallet()
time.sleep(1.1)
assert 'w2' not in self.nodes[0].listwallets()
# Successfully unload all wallets
for wallet_name in self.nodes[0].listwallets():
self.nodes[0].unloadwallet(wallet_name)
assert_equal(self.nodes[0].listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", self.nodes[0].getwalletinfo)
# Successfully load a previously unloaded wallet
self.nodes[0].loadwallet('w1')
assert_equal(self.nodes[0].listwallets(), ['w1'])
assert_equal(w1.getwalletinfo()['walletname'], 'w1')
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
# Test backing up and restoring wallets
self.log.info("Test wallet backup")
self.restart_node(0, ['-nowallet'])
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
for wallet_name in wallet_names:
rpc = self.nodes[0].get_wallet_rpc(wallet_name)
addr = rpc.getnewaddress()
backup = os.path.join(self.options.tmpdir, 'backup.dat')
if os.path.exists(backup):
os.unlink(backup)
rpc.backupwallet(backup)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(empty_created_wallet if wallet_name == self.default_wallet_name else empty_wallet, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(backup, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], True)
# Test .walletlock file is closed
self.start_node(1)
wallet = os.path.join(self.options.tmpdir, 'my_wallet')
self.nodes[0].createwallet(wallet)
if self.options.descriptors:
assert_raises_rpc_error(-4, "Unable to obtain an exclusive lock", self.nodes[1].loadwallet, wallet)
else:
assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
self.nodes[0].unloadwallet(wallet)
self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
MultiWalletTest().main()
|
caching_test.py | # Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""st.caching unit tests."""
from unittest.mock import patch
import threading
import unittest
import pytest
import types
from streamlit import caching
from streamlit import hashing
from streamlit.hashing import UserHashError
from streamlit.elements import exception_proto
from streamlit.proto.Exception_pb2 import Exception as ExceptionProto
from tests import testutil
import streamlit as st
class CacheTest(testutil.DeltaGeneratorTestCase):
    """Unit tests for the ``@st.cache`` decorator: memoization, mutation
    detection, st-call warnings, eviction (max_entries), and ttl expiry."""

    def tearDown(self):
        # Some of these tests reach directly into _cache_info and twiddle it.
        # Reset default values on teardown.
        st.caching._cache_info.cached_func_stack = []
        st.caching._cache_info.suppress_st_function_warning = 0

    def test_simple(self):
        """A cached function returns the same value on repeated calls."""

        @st.cache
        def foo():
            return 42

        self.assertEqual(foo(), 42)
        self.assertEqual(foo(), 42)

    def test_multiple_int_like_floats(self):
        """Int-like floats (1.0, 3.0) must hash to distinct cache keys."""

        @st.cache
        def foo(x):
            return x

        self.assertEqual(foo(1.0), 1.0)
        self.assertEqual(foo(3.0), 3.0)

    @patch.object(st, "exception")
    def test_args(self, exception):
        """The wrapped function body runs only on a cache miss for an arg."""
        called = [False]

        @st.cache
        def f(x):
            called[0] = True
            return x

        self.assertFalse(called[0])
        f(0)
        self.assertTrue(called[0])

        called = [False]  # Reset called
        f(0)
        # Same arg again: served from cache, so the body did not run.
        self.assertFalse(called[0])

        f(1)
        # New arg: cache miss, body runs.
        self.assertTrue(called[0])

        exception.assert_not_called()

    @patch.object(st, "exception")
    def test_mutate_return(self, exception):
        """Mutating a cached return value triggers st.exception on next call."""

        @st.cache
        def f():
            return [0, 1]

        r = f()
        r[0] = 1
        exception.assert_not_called()

        r2 = f()
        # The cache detected that the stored value changed between runs.
        exception.assert_called()
        # The (mutated) cached object is still what gets returned.
        self.assertEqual(r, r2)

    @patch.object(st, "exception")
    def test_mutate_args(self, exception):
        """Mutating an argument changes the cache key, so the body re-runs
        (and no exception is reported for argument mutation)."""

        @st.cache
        def foo(d):
            d["answer"] += 1
            return d["answer"]

        d = {"answer": 0}
        self.assertNotEqual(foo(d), foo(d))
        exception.assert_not_called()

    @patch("streamlit.caching._show_cached_st_function_warning")
    def test_cached_st_function_warning(self, warning):
        """Calling st.* element functions inside cached code warns, unless
        suppressed via suppress_st_warning=True."""
        st.text("foo")
        warning.assert_not_called()

        @st.cache
        def cached_func():
            st.text("Inside cached func")

        cached_func()
        warning.assert_called_once()
        warning.reset_mock()

        # Make sure everything got reset properly
        st.text("foo")
        warning.assert_not_called()

        # Test warning suppression
        @st.cache(suppress_st_warning=True)
        def suppressed_cached_func():
            st.text("No warnings here!")

        suppressed_cached_func()
        warning.assert_not_called()

        # Test nested st.cache functions
        @st.cache
        def outer():
            @st.cache
            def inner():
                st.text("Inside nested cached func")

            return inner()

        outer()
        # Only one warning even though two cached frames are on the stack.
        warning.assert_called_once()
        warning.reset_mock()

        # Test st.cache functions that raise errors
        with self.assertRaises(RuntimeError):

            @st.cache
            def cached_raise_error():
                st.text("About to throw")
                raise RuntimeError("avast!")

            cached_raise_error()

        warning.assert_called_once()
        warning.reset_mock()

        # Make sure everything got reset properly
        st.text("foo")
        warning.assert_not_called()

        # Test st.cache functions with widgets
        @st.cache
        def cached_widget():
            st.button("Press me!")

        cached_widget()
        warning.assert_called_once()
        warning.reset_mock()

        # Make sure everything got reset properly
        st.text("foo")
        warning.assert_not_called()

    def test_multithread_stack(self):
        """Test that cached_func_stack behaves properly in multiple threads."""

        def get_counter():
            return len(caching._cache_info.cached_func_stack)

        def set_counter(val):
            caching._cache_info.cached_func_stack = ["foo"] * val

        self.assertEqual(0, get_counter())
        set_counter(1)
        self.assertEqual(1, get_counter())

        values_in_thread = []

        def thread_test():
            values_in_thread.append(get_counter())
            set_counter(55)
            values_in_thread.append(get_counter())

        thread = threading.Thread(target=thread_test)
        thread.start()
        thread.join()

        # The new thread started with its own (empty) stack.
        self.assertEqual([0, 55], values_in_thread)

        # The other thread should not have modified the main thread
        self.assertEqual(1, get_counter())

    def test_max_size(self):
        """The oldest object should be evicted when maxsize is reached."""
        # Create 2 cached functions to test that they don't interfere
        # with each other.
        foo_vals = []

        @st.cache(max_entries=2)
        def foo(x):
            foo_vals.append(x)
            return x

        bar_vals = []

        @st.cache(max_entries=3)
        def bar(x):
            bar_vals.append(x)
            return x

        self.assertEqual([], foo_vals)
        self.assertEqual([], bar_vals)

        # Stick two items in both caches. foo will be filled.
        foo(0), foo(1)
        bar(0), bar(1)
        self.assertEqual([0, 1], foo_vals)
        self.assertEqual([0, 1], bar_vals)

        # 0, 1 are already cached, so called_values shouldn't change.
        foo(0), foo(1)
        bar(0), bar(1)
        self.assertEqual([0, 1], foo_vals)
        self.assertEqual([0, 1], bar_vals)

        # Add a new item to the cache.
        # foo: 0 should be evicted; 1 and 2 should be present.
        # bar: 0, 1, 2 present.
        foo(2)
        bar(2)

        # foo(0) again should cause 0 to be added again, since it was
        # previously evicted. Nothing will have been evicted from bar.
        foo(1), foo(0)
        bar(1), bar(0)
        self.assertEqual([0, 1, 2, 0], foo_vals)
        self.assertEqual([0, 1, 2], bar_vals)

    # Reduce the huge amount of logspam we get from hashing/caching
    @patch("streamlit.hashing._LOGGER.debug")
    @patch("streamlit.caching._LOGGER.debug")
    def test_no_max_size(self, _1, _2):
        """If max_size is None, the cache is unbounded."""
        called_values = []

        @st.cache(max_entries=None)
        def f(x):
            called_values.append(x)
            return x

        # Stick a bunch of items in the cache.
        for ii in range(256):
            f(ii)

        # Clear called_values, and test that accessing the same bunch of
        # items doesn't result in f() being called.
        called_values = []
        for ii in range(256):
            f(ii)
        self.assertEqual([], called_values)

    @patch("streamlit.caching._TTLCACHE_TIMER")
    def test_ttl(self, timer_patch):
        """Entries should expire after the given ttl."""
        # Create 2 cached functions to test that they don't interfere
        # with each other.
        foo_vals = []

        @st.cache(ttl=1)
        def foo(x):
            foo_vals.append(x)
            return x

        bar_vals = []

        @st.cache(ttl=5)
        def bar(x):
            bar_vals.append(x)
            return x

        # Store a value at time 0
        timer_patch.return_value = 0
        foo(0)
        bar(0)
        self.assertEqual([0], foo_vals)
        self.assertEqual([0], bar_vals)

        # Advance our timer, but not enough to expire our value.
        timer_patch.return_value = 0.5
        foo(0)
        bar(0)
        self.assertEqual([0], foo_vals)
        self.assertEqual([0], bar_vals)

        # Advance our timer enough to expire foo, but not bar.
        timer_patch.return_value = 1.5
        foo(0)
        bar(0)
        self.assertEqual([0, 0], foo_vals)
        self.assertEqual([0], bar_vals)

    def test_clear_cache(self):
        """Clear cache should do its thing."""
        foo_vals = []

        @st.cache
        def foo(x):
            foo_vals.append(x)
            return x

        bar_vals = []

        @st.cache
        def bar(x):
            bar_vals.append(x)
            return x

        foo(0), foo(1), foo(2)
        bar(0), bar(1), bar(2)
        self.assertEqual([0, 1, 2], foo_vals)
        self.assertEqual([0, 1, 2], bar_vals)

        # Clear the cache and access our original values again. They
        # should be recomputed.
        caching.clear_cache()

        foo(0), foo(1), foo(2)
        bar(0), bar(1), bar(2)
        self.assertEqual([0, 1, 2, 0, 1, 2], foo_vals)
        self.assertEqual([0, 1, 2, 0, 1, 2], bar_vals)
# Temporarily turn off these tests since there's no Cache object in __init__
# right now.
class CachingObjectTest(unittest.TestCase):
    """Tests for the (currently removed) st.Cache object API.

    Methods are prefixed "off_" instead of "test_" so the runner skips them
    until st.Cache is reintroduced (see comment above).
    """

    def off_test_simple(self):
        val = 42

        for _ in range(2):
            c = st.Cache()
            if c:
                c.value = val

            self.assertEqual(c.value, val)

    def off_test_allow_output_mutation(self):
        val = 42

        for _ in range(2):
            c = st.Cache(allow_output_mutation=True)
            if c:
                c.value = val

            self.assertEqual(c.value, val)

    def off_test_has_changes(self):
        val = 42

        for _ in range(2):
            c = st.Cache()
            if c.has_changes():
                c.value = val

            self.assertEqual(c.value, val)

    @patch.object(st, "exception")
    def off_test_mutate(self, exception):
        for _ in range(2):
            c = st.Cache()
            if c:
                c.value = [0, 1]

            c.value[0] = 1
            exception.assert_called()
class CacheErrorsTest(testutil.DeltaGeneratorTestCase):
    """Make sure user-visible error messages look correct.

    These errors are a little annoying to test, but they're important! So we
    are testing them word-for-word as much as possible. Even though this
    *feels* like an antipattern, it isn't: we're making sure the codepaths
    that pull useful debug info from the code are working.
    """

    def test_st_warning_text(self):
        @st.cache
        def st_warning_text_func():
            st.markdown("hi")

        st_warning_text_func()

        # The warning element is enqueued just before the markdown element,
        # hence the -2 index.
        el = self.get_delta_from_queue(-2).new_element
        self.assertEqual(el.exception.type, "CachedStFunctionWarning")
        self.assertEqual(
            normalize_md(el.exception.message),
            normalize_md(
                """
Your script uses `st.markdown()` or `st.write()` to write to your Streamlit app
from within some cached code at `st_warning_text_func()`. This code will only be
called when we detect a cache "miss", which can lead to unexpected results.
How to fix this:
* Move the `st.markdown()` or `st.write()` call outside `st_warning_text_func()`.
* Or, if you know what you're doing, use `@st.cache(suppress_st_warning=True)`
to suppress the warning.
"""
            ),
        )
        self.assertNotEqual(len(el.exception.stack_trace), 0)
        self.assertEqual(el.exception.message_is_markdown, True)
        self.assertEqual(el.exception.is_warning, True)

        # The st.markdown call inside the cached function still went through.
        el = self.get_delta_from_queue(-1).new_element
        self.assertEqual(el.markdown.body, "hi")

    def test_mutation_warning_text(self):
        @st.cache
        def mutation_warning_func():
            return []

        a = mutation_warning_func()
        a.append("mutated!")
        mutation_warning_func()

        el = self.get_delta_from_queue(-1).new_element
        self.assertEqual(el.exception.type, "CachedObjectMutationWarning")
        self.assertEqual(
            normalize_md(el.exception.message),
            normalize_md(
                """
Return value of `mutation_warning_func()` was mutated between runs.
By default, Streamlit\'s cache should be treated as immutable, or it may behave
in unexpected ways. You received this warning because Streamlit detected that
an object returned by `mutation_warning_func()` was mutated outside of
`mutation_warning_func()`.
How to fix this:
* If you did not mean to mutate that return value:
- If possible, inspect your code to find and remove that mutation.
- Otherwise, you could also clone the returned value so you can freely
mutate it.
* If you actually meant to mutate the return value and know the consequences of
doing so, annotate the function with `@st.cache(allow_output_mutation=True)`.
For more information and detailed solutions check out [our
documentation.](https://docs.streamlit.io/en/latest/caching.html)
"""
            ),
        )
        self.assertNotEqual(len(el.exception.stack_trace), 0)
        self.assertEqual(el.exception.message_is_markdown, True)
        self.assertEqual(el.exception.is_warning, True)

    def test_unhashable_type(self):
        @st.cache
        def unhashable_type_func():
            # threading.Lock is not hashable by streamlit's hasher.
            return threading.Lock()

        with self.assertRaises(hashing.UnhashableTypeError) as cm:
            unhashable_type_func()

        ep = ExceptionProto()
        exception_proto.marshall(ep, cm.exception)

        self.assertEqual(ep.type, "UnhashableTypeError")
        # Only the message prefix is checked; the tail contains a
        # non-deterministic object repr.
        self.assertTrue(
            normalize_md(ep.message).startswith(
                normalize_md(
                    """
Cannot hash object of type `_thread.lock`, found in the return value of
`unhashable_type_func()`.
While caching the return value of `unhashable_type_func()`, Streamlit
encountered an object of type `_thread.lock`, which it does not know how to
hash.
To address this, please try helping Streamlit understand how to hash that type
by passing the `hash_funcs` argument into `@st.cache`. For example:
```
@st.cache(hash_funcs={_thread.lock: my_hash_func})
def my_func(...):
...
```
If you don't know where the object of type `_thread.lock` is coming
from, try looking at the hash chain below for an object that you do recognize,
then pass that to `hash_funcs` instead:
```
Object of type _thread.lock:
"""
                )
            )
        )

        # Stack trace doesn't show in test :(
        # self.assertNotEqual(len(ep.stack_trace), 0)

        self.assertEqual(ep.message_is_markdown, True)
        self.assertEqual(ep.is_warning, False)

    def test_hash_funcs_acceptable_keys(self):
        """hash_funcs keys may be given as a type or as its dotted name."""

        @st.cache
        def unhashable_type_func():
            return (x for x in range(1))

        @st.cache(hash_funcs={types.GeneratorType: id})
        def hf_key_as_type():
            return (x for x in range(1))

        @st.cache(hash_funcs={"builtins.generator": id})
        def hf_key_as_str():
            return (x for x in range(1))

        # Without a hash_func, generators are unhashable.
        with self.assertRaises(hashing.UnhashableTypeError) as cm:
            unhashable_type_func()

        self.assertEqual(list(hf_key_as_type()), list(hf_key_as_str()))

    def test_user_hash_error(self):
        class MyObj(object):
            pass

        def bad_hash_func(x):
            x += 10  # Throws a TypeError since x has type MyObj.
            return x

        @st.cache(hash_funcs={MyObj: bad_hash_func})
        def user_hash_error_func(x):
            pass

        with self.assertRaises(hashing.UserHashError) as cm:
            my_obj = MyObj()
            user_hash_error_func(my_obj)

        ep = ExceptionProto()
        exception_proto.marshall(ep, cm.exception)
        # The user's TypeError is surfaced, not an internal streamlit error.
        self.assertEqual(ep.type, "TypeError")
        self.assertTrue(
            normalize_md(ep.message).startswith(
                normalize_md(
                    """
unsupported operand type(s) for +=: 'MyObj' and 'int'
This error is likely due to a bug in `bad_hash_func()`, which is a
user-defined hash function that was passed into the `@st.cache` decorator of
`user_hash_error_func()`.
`bad_hash_func()` failed when hashing an object of type
`caching_test.CacheErrorsTest.test_user_hash_error.<locals>.MyObj`. If you
don't know where that object is coming from, try looking at the hash chain
below for an object that you do recognize, then pass that to `hash_funcs` instead:
```
Object of type caching_test.CacheErrorsTest.test_user_hash_error.<locals>.MyObj:
<caching_test.CacheErrorsTest.test_user_hash_error.<locals>.MyObj object at
"""
                )
            )
        )

        # Stack trace doesn't show in test :(
        # self.assertNotEqual(len(ep.stack_trace), 0)

        self.assertEqual(ep.message_is_markdown, True)
        self.assertEqual(ep.is_warning, False)
def normalize_md(txt):
    """Collapse single newlines *inside paragraphs* of *txt* into spaces.

    Markdown treats consecutive non-blank lines as one paragraph, so joining
    them makes string comparisons robust to changes in text wrapping.
    Paragraph breaks, list bullets, and links wrapped across two lines are
    preserved.

    NOTE: This doesn't attempt to be 100% grammatically correct Markdown
    handling. It only needs to be "correct enough" for the tests here to
    pass (e.g. we guard the literal "\n\n" rather than a run of blank lines).
    """
    # Each triple is (pattern to shield, placeholder, text to restore).
    # Shielded spans survive the newline->space pass below; note that a
    # link wrapped over two lines is restored *joined* ("](") on purpose.
    shields = [
        ("\n\n", "OMG_NEWLINE", "\n\n"),  # paragraph breaks
        ("\n*", "OMG_STAR", "\n*"),  # star bullets
        ("\n-", "OMG_HYPHEN", "\n-"),  # hyphen bullets
        ("]\n(", "OMG_LINK", "]("),  # links broken over two lines
    ]

    for pattern, placeholder, _ in shields:
        txt = txt.replace(pattern, placeholder)

    # Convert every remaining newline into a space.
    txt = txt.replace("\n", " ")

    for _, placeholder, restored in shields:
        txt = txt.replace(placeholder, restored)

    return txt.strip()
|
chrome_test_server_spawner.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A "Test Server Spawner" that handles killing/stopping per-test test servers.
It's used to accept requests from the device to spawn and kill instances of the
chrome test server on the host.
"""
# pylint: disable=W0702
import BaseHTTPServer
import json
import logging
import os
import select
import struct
import subprocess
import sys
import threading
import time
import urlparse
from devil.android import forwarder
from devil.android import ports
from pylib import constants
from pylib.constants import host_paths
# Path that are needed to import necessary modules when launching a testserver.
os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '') + (':%s:%s:%s:%s:%s'
    % (os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party'),
       os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party', 'tlslite'),
       os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party', 'pyftpdlib',
                    'src'),
       os.path.join(host_paths.DIR_SOURCE_ROOT, 'net', 'tools', 'testserver'),
       os.path.join(host_paths.DIR_SOURCE_ROOT, 'components', 'sync', 'tools',
                    'testserver')))


# Maps a server type name (from the spawn request's 'server-type' argument)
# to the corresponding testserver.py command-line flag ('' means no flag).
SERVER_TYPES = {
    'http': '',
    'ftp': '-f',
    'sync': '',  # Sync uses its own script, and doesn't take a server type arg.
    'tcpecho': '--tcp-echo',
    'udpecho': '--udp-echo',
}


# The timeout (in seconds) of starting up the Python test server.
TEST_SERVER_STARTUP_TIMEOUT = 10
def _WaitUntil(predicate, max_attempts=5):
"""Blocks until the provided predicate (function) is true.
Returns:
Whether the provided predicate was satisfied once (before the timeout).
"""
sleep_time_sec = 0.025
for _ in xrange(1, max_attempts):
if predicate():
return True
time.sleep(sleep_time_sec)
sleep_time_sec = min(1, sleep_time_sec * 2) # Don't wait more than 1 sec.
return False
def _CheckPortAvailable(port):
  """Returns True if |port| is available."""
  # Polls via _WaitUntil because the OS may release the port asynchronously.
  return _WaitUntil(lambda: ports.IsHostPortAvailable(port))
def _CheckPortNotAvailable(port):
  """Returns True if |port| is not available."""
  # Used to confirm a just-launched server has actually bound its port.
  return _WaitUntil(lambda: not ports.IsHostPortAvailable(port))
def _CheckDevicePortStatus(device, port):
  """Returns whether the provided port is used (polling until it is)."""
  return _WaitUntil(lambda: ports.IsDevicePortUsed(device, port))
def _GetServerTypeCommandLine(server_type):
  """Returns the testserver command-line flag for the given server type.

  Args:
    server_type: the server type to be used (e.g. 'http').

  Returns:
    A string containing the command-line argument ('' for no flag).

  Raises:
    NotImplementedError: if |server_type| is not in SERVER_TYPES.
    Exception: for 'udpecho', which is unsupported (no UDP forwarder tool).
  """
  try:
    type_flag = SERVER_TYPES[server_type]
  except KeyError:
    raise NotImplementedError('Unknown server type: %s' % server_type)
  if server_type == 'udpecho':
    raise Exception('Please do not run UDP echo tests because we do not have '
                    'a UDP forwarder tool.')
  return type_flag
class TestServerThread(threading.Thread):
  """A thread to run the test server in a separate process."""

  def __init__(self, ready_event, arguments, device, tool):
    """Initialize TestServerThread with the following argument.

    Args:
      ready_event: event which will be set when the test server is ready.
      arguments: dictionary of arguments to run the test server.
      device: An instance of DeviceUtils.
      tool: instance of runtime error detection tool.
    """
    threading.Thread.__init__(self)
    # Set by run() once the server has been torn down; Stop() waits on it.
    self.wait_event = threading.Event()
    self.stop_flag = False
    self.ready_event = ready_event
    self.ready_event.clear()
    self.arguments = arguments
    self.device = device
    self.tool = tool
    self.test_server_process = None
    self.is_ready = False
    # 0 means "let the test server pick a port" (see run()).
    self.host_port = self.arguments['port']
    assert isinstance(self.host_port, int)
    # The forwarder device port now is dynamically allocated.
    self.forwarder_device_port = 0
    # Anonymous pipe in order to get port info from test server.
    self.pipe_in = None
    self.pipe_out = None
    self.process = None
    self.command_line = []

  def _WaitToStartAndGetPortFromTestServer(self):
    """Waits for the Python test server to start and gets the port it is using.

    The port information is passed by the Python test server with a pipe given
    by self.pipe_out. It is written as a result to |self.host_port|.

    Returns:
      Whether the port used by the test server was successfully fetched.
    """
    assert self.host_port == 0 and self.pipe_out and self.pipe_in
    (in_fds, _, _) = select.select([self.pipe_in, ], [], [],
                                   TEST_SERVER_STARTUP_TIMEOUT)
    if len(in_fds) == 0:
      logging.error('Failed to wait to the Python test server to be started.')
      return False
    # First read the data length as an unsigned 4-byte value. This
    # is _not_ using network byte ordering since the Python test server packs
    # size as native byte order and all Chromium platforms so far are
    # configured to use little-endian.
    # TODO(jnd): Change the Python test server and local_test_server_*.cc to
    # use a unified byte order (either big-endian or little-endian).
    data_length = os.read(self.pipe_in, struct.calcsize('=L'))
    if data_length:
      (data_length,) = struct.unpack('=L', data_length)
      assert data_length
    if not data_length:
      logging.error('Failed to get length of server data.')
      return False
    port_json = os.read(self.pipe_in, data_length)
    if not port_json:
      logging.error('Failed to get server data.')
      return False
    logging.info('Got port json data: %s', port_json)
    port_json = json.loads(port_json)
    if port_json.has_key('port') and isinstance(port_json['port'], int):
      self.host_port = port_json['port']
      # Sanity-check: the reported port should now be bound by the server.
      return _CheckPortNotAvailable(self.host_port)
    logging.error('Failed to get port information from the server data.')
    return False

  def _GenerateCommandLineArguments(self):
    """Generates the command line to run the test server.

    Note that all options are processed by following the definitions in
    testserver.py.
    """
    if self.command_line:
      return

    args_copy = dict(self.arguments)

    # Translate the server type.
    type_cmd = _GetServerTypeCommandLine(args_copy.pop('server-type'))
    if type_cmd:
      self.command_line.append(type_cmd)

    # Use a pipe to get the port given by the instance of Python test server
    # if the test does not specify the port.
    assert self.host_port == args_copy['port']
    if self.host_port == 0:
      (self.pipe_in, self.pipe_out) = os.pipe()
      self.command_line.append('--startup-pipe=%d' % self.pipe_out)

    # Pass the remaining arguments as-is. A None value becomes a bare flag;
    # a list value is expanded into one --key=value per element.
    for key, values in args_copy.iteritems():
      if not isinstance(values, list):
        values = [values]
      for value in values:
        if value is None:
          self.command_line.append('--%s' % key)
        else:
          self.command_line.append('--%s=%s' % (key, value))

  def _CloseUnnecessaryFDsForTestServerProcess(self):
    # This is required to avoid subtle deadlocks that could be caused by the
    # test server child process inheriting undesirable file descriptors such as
    # file lock file descriptors. Only the startup pipe's write end is kept.
    for fd in xrange(0, 1024):
      if fd != self.pipe_out:
        try:
          os.close(fd)
        except:
          pass

  def run(self):
    """Thread main: launches the test server, maps the forwarder port,
    signals readiness, then idles until Stop() raises stop_flag."""
    logging.info('Start running the thread!')
    self.wait_event.clear()
    self._GenerateCommandLineArguments()
    command = host_paths.DIR_SOURCE_ROOT
    # Sync uses its own launcher script; everything else goes through
    # net/tools/testserver/testserver.py.
    if self.arguments['server-type'] == 'sync':
      command = [os.path.join(command, 'components', 'sync', 'tools',
                              'testserver',
                              'sync_testserver.py')] + self.command_line
    else:
      command = [os.path.join(command, 'net', 'tools', 'testserver',
                              'testserver.py')] + self.command_line
    logging.info('Running: %s', command)

    # Disable PYTHONUNBUFFERED because it has a bad interaction with the
    # testserver. Remove once this interaction is fixed.
    unbuf = os.environ.pop('PYTHONUNBUFFERED', None)

    # Pass DIR_SOURCE_ROOT as the child's working directory so that relative
    # paths in the arguments are resolved correctly.
    self.process = subprocess.Popen(
        command, preexec_fn=self._CloseUnnecessaryFDsForTestServerProcess,
        cwd=host_paths.DIR_SOURCE_ROOT)
    if unbuf:
      os.environ['PYTHONUNBUFFERED'] = unbuf
    if self.process:
      if self.pipe_out:
        # Dynamic port: wait for the server to report which port it chose.
        self.is_ready = self._WaitToStartAndGetPortFromTestServer()
      else:
        # Fixed port: ready once the port is no longer available (i.e. bound).
        self.is_ready = _CheckPortNotAvailable(self.host_port)
    if self.is_ready:
      forwarder.Forwarder.Map([(0, self.host_port)], self.device, self.tool)
      # Check whether the forwarder is ready on the device.
      self.is_ready = False
      device_port = forwarder.Forwarder.DevicePortForHostPort(self.host_port)
      if device_port and _CheckDevicePortStatus(self.device, device_port):
        self.is_ready = True
        self.forwarder_device_port = device_port
    # Wake up the request handler thread.
    self.ready_event.set()
    # Keep thread running until Stop() gets called.
    _WaitUntil(lambda: self.stop_flag, max_attempts=sys.maxint)
    if self.process.poll() is None:
      self.process.kill()
    forwarder.Forwarder.UnmapDevicePort(self.forwarder_device_port, self.device)
    self.process = None
    self.is_ready = False
    if self.pipe_out:
      os.close(self.pipe_in)
      os.close(self.pipe_out)
      self.pipe_in = None
      self.pipe_out = None
    logging.info('Test-server has died.')
    self.wait_event.set()

  def Stop(self):
    """Blocks until the loop has finished.

    Note that this must be called in another thread.
    """
    if not self.process:
      return
    self.stop_flag = True
    self.wait_event.wait()
class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler used to process http GET/POST request."""
def _SendResponse(self, response_code, response_reason, additional_headers,
contents):
"""Generates a response sent to the client from the provided parameters.
Args:
response_code: number of the response status.
response_reason: string of reason description of the response.
additional_headers: dict of additional headers. Each key is the name of
the header, each value is the content of the header.
contents: string of the contents we want to send to client.
"""
self.send_response(response_code, response_reason)
self.send_header('Content-Type', 'text/html')
# Specify the content-length as without it the http(s) response will not
# be completed properly (and the browser keeps expecting data).
self.send_header('Content-Length', len(contents))
for header_name in additional_headers:
self.send_header(header_name, additional_headers[header_name])
self.end_headers()
self.wfile.write(contents)
self.wfile.flush()
def _StartTestServer(self):
"""Starts the test server thread."""
logging.info('Handling request to spawn a test server.')
content_type = self.headers.getheader('content-type')
if content_type != 'application/json':
raise Exception('Bad content-type for start request.')
content_length = self.headers.getheader('content-length')
if not content_length:
content_length = 0
try:
content_length = int(content_length)
except:
raise Exception('Bad content-length for start request.')
logging.info(content_length)
test_server_argument_json = self.rfile.read(content_length)
logging.info(test_server_argument_json)
# There should only be one test server instance at a time. However it may
# be possible that a previous instance was not cleaned up properly
# (crbug.com/665686)
if self.server.test_server_instance:
port = self.server.test_server_instance.host_port
logging.info('Killing lingering test server instance on port: %d', port)
self.server.test_server_instance.Stop()
self.server.test_server_instance = None
ready_event = threading.Event()
self.server.test_server_instance = TestServerThread(
ready_event,
json.loads(test_server_argument_json),
self.server.device,
self.server.tool)
self.server.test_server_instance.setDaemon(True)
self.server.test_server_instance.start()
ready_event.wait()
if self.server.test_server_instance.is_ready:
self._SendResponse(200, 'OK', {}, json.dumps(
{'port': self.server.test_server_instance.forwarder_device_port,
'message': 'started'}))
logging.info('Test server is running on port: %d.',
self.server.test_server_instance.host_port)
else:
self.server.test_server_instance.Stop()
self.server.test_server_instance = None
self._SendResponse(500, 'Test Server Error.', {}, '')
logging.info('Encounter problem during starting a test server.')
def _KillTestServer(self):
  """Stops the currently running test server instance, if one exists."""
  # There should only ever be one test server at a time; nothing to do when
  # none is registered.
  instance = self.server.test_server_instance
  if not instance:
    return
  port = instance.host_port
  logging.info('Handling request to kill a test server on port: %d.', port)
  instance.Stop()
  # Confirm the port was actually released before reporting success.
  if _CheckPortAvailable(port):
    self._SendResponse(200, 'OK', {}, 'killed')
    logging.info('Test server on port %d is killed', port)
  else:
    self._SendResponse(500, 'Test Server Error.', {}, '')
    logging.info('Encounter problem during killing a test server.')
  self.server.test_server_instance = None
def do_POST(self):
  """Dispatches POST requests; only the /start action is recognized."""
  action = urlparse.urlparse(self.path).path
  logging.info('Action for POST method is: %s.', action)
  if action == '/start':
    self._StartTestServer()
    return
  self._SendResponse(400, 'Unknown request.', {}, '')
  logging.info('Encounter unknown request: %s.', action)
def do_GET(self):
  """Dispatches GET requests: /kill stops the server, /ping health-checks."""
  parsed = urlparse.urlparse(self.path)
  action = parsed.path
  query = urlparse.parse_qs(parsed.query, keep_blank_values=1)
  logging.info('Action for GET method is: %s.', action)
  for key in query:
    logging.info('%s=%s', key, query[key][0])
  if action == '/kill':
    self._KillTestServer()
  elif action == '/ping':
    # The ping handler only reports spawner readiness; it deliberately does
    # not inspect the state of any running test server.
    self._SendResponse(200, 'OK', {}, 'ready')
    logging.info('Handled ping request and sent response.')
  else:
    self._SendResponse(400, 'Unknown request', {}, '')
    logging.info('Encounter unknown request: %s.', action)
class SpawningServer(object):
  """An HTTP server that spawns and kills test servers on request."""

  def __init__(self, test_server_spawner_port, device, tool):
    logging.info('Creating new spawner on port: %d.', test_server_spawner_port)
    self.server = BaseHTTPServer.HTTPServer(('', test_server_spawner_port),
                                            SpawningServerRequestHandler)
    # Handlers reach this shared state through their `server` attribute.
    self.server.device = device
    self.server.tool = tool
    self.server.test_server_instance = None
    self.server.build_type = constants.GetBuildType()

  def _Listen(self):
    logging.info('Starting test server spawner')
    self.server.serve_forever()

  def Start(self):
    """Runs the spawner's serve loop on a daemon thread."""
    serving_thread = threading.Thread(target=self._Listen)
    serving_thread.setDaemon(True)
    serving_thread.start()

  def Stop(self):
    """Shuts the spawner down, stopping any test server it owns."""
    self.CleanupState()
    self.server.shutdown()

  def CleanupState(self):
    """Stops and forgets the current test server instance, if any.

    Call this whenever the spawner is reused, so successive tests never
    share a test server instance.
    """
    instance = self.server.test_server_instance
    if instance:
      instance.Stop()
      self.server.test_server_instance = None
|
mqtt_sub.py | import json
import logging
import os
import random
import subprocess
import threading
import signal
import time
import toml
import pickledb
from forms import convert
from paho.mqtt import client as mqtt_client
# Static configuration loaded once at import time.
data = toml.load("data.toml")
# Tiny key/value store holding the PID of the spawned hk_runner.py process.
PID = pickledb.load('pid.db', False, True)
# db = pickledb.load('data.db', False, True)
logging.basicConfig(level=logging.DEBUG, format="[%(module)s] %(message)s")
log = logging.getLogger(__name__)
# MQTT connection parameters read from data.toml.
broker = data['mqtt']['broker']
port = data['mqtt']['port']
topic = data['mqtt']['topic']
# Random suffix avoids client-id collisions with other running instances.
client_id = f'python-mqtt-{random.randint(0, 100)}'
username = data['mqtt']['username']
password = data['mqtt']['password']
# db = {}
def subscribe(client: mqtt_client):
    """Attach the message handler and subscribe to the configured topic.

    Each incoming MQTT message is converted via forms.convert() and merged
    into the pickledb 'data.db' store keyed by device id.  Whenever the set
    of known devices/types changes, the hk_runner.py child process is
    restarted so it picks up the new accessory layout.
    """

    def on_message(client, userdata, msg):
        dict_msg = json.loads(msg.payload.decode())
        dev_id, _form, _type = convert(dict_msg)
        log.debug(f'Inner dev_id: {dev_id}')
        log.debug(f'Inner _type: {_type}')
        log.debug(f'Inner _form: {_form}')
        if not dev_id:
            return
        db = pickledb.load('data.db', False, True)
        ids = list(db.getall())
        restart = False
        log.debug('/ ' * 40)
        if dev_id not in ids:
            # First time this device is seen: create its entry.
            db.set(dev_id, {_type: _form})
            db.dump()
            restart = True
        else:
            acc_data = db.get(dev_id)
            if _type not in acc_data:
                restart = True
            acc_data[_type] = _form
            db.set(dev_id, acc_data)
            db.dump()
        log.debug(f'dev_id: {dev_id}')
        log.debug(f'ids: {ids}')
        # Drop devices/types that are no longer active.
        for _id in ids:
            acc_data = db.get(_id)
            # BUG FIX: iterate over a snapshot. The original iterated
            # acc_data.items() while popping from the very same dict (the
            # "new_data" name was an alias, not a copy), which raises
            # "RuntimeError: dictionary changed size during iteration".
            for _type, _value in list(acc_data.items()):
                if not _value['active']:
                    log.debug(f'--- ! --- Deleted TYPE: {_id} {_type}')
                    acc_data.pop(_type)
                    if len(acc_data) == 0:
                        log.debug(f'--- ! --- Deleted ID: {_id} {acc_data}')
                        db.rem(_id)
                        db.dump()
                    else:
                        db.set(_id, acc_data)
                        db.dump()
                    restart = True
        if restart:
            # Kill the previous runner (its PID is persisted in pid.db) and
            # spawn a fresh one in the background.
            try:
                pid = PID.get('PID')
                if pid:
                    os.kill(pid, signal.SIGINT)
            except Exception:
                # Best effort: the process may already be gone.
                pass
            t = threading.Thread(target=start_proc, args=())
            t.start()
        log.debug('/ ' * 40)

    client.subscribe(topic)
    client.on_message = on_message
def connect_mqtt() -> mqtt_client:
    """Create an MQTT client, connect it to the configured broker, return it."""

    def on_connect(client, userdata, flags, rc):
        # rc == 0 signals a successful CONNACK from the broker.
        if rc == 0:
            log.info("Connected to MQTT Broker!")
        else:
            log.error("Failed to connect, return code %d\n", rc)

    mqtt = mqtt_client.Client(client_id)
    mqtt.username_pw_set(username, password)
    mqtt.on_connect = on_connect
    mqtt.connect(broker, port)
    return mqtt
def start_proc():
    """Launch hk_runner.py after a short delay and persist its PID."""
    time.sleep(1)  # give the previous runner a moment to shut down
    runner = subprocess.Popen(['python3', 'hk_runner.py'])
    PID.set('PID', runner.pid)
    PID.dump()
if __name__ == '__main__':
    # If accessories are already known from a previous run, (re)start the
    # runner process before listening for new MQTT messages.
    db = pickledb.load('data.db', False, True)
    if len(list(db.getall())) != 0:
        try:
            pid = PID.get('PID')
            if pid:
                os.kill(pid, signal.SIGINT)
        except Exception:
            # Best effort: the stored PID may be stale or already dead.
            pass
        t = threading.Thread(target=start_proc, args=())
        t.start()
    # Connect, register the subscription handler, and block forever.
    client = connect_mqtt()
    subscribe(client)
    client.loop_forever()
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-few-public-methods,no-self-use,too-many-locals,line-too-long,unused-argument
import errno
try:
import msvcrt
except ImportError:
# Not supported for Linux machines.
pass
import platform
import select
import shlex
import signal
import sys
import threading
import time
try:
import termios
import tty
except ImportError:
# Not supported for Windows machines.
pass
import websocket
import yaml
from knack.log import get_logger
from knack.prompting import prompt_pass, prompt, NoTTYException
from knack.util import CLIError
from azure.mgmt.containerinstance.models import (AzureFileVolume, Container, ContainerGroup, ContainerGroupNetworkProtocol,
ContainerPort, ImageRegistryCredential, IpAddress, Port, ResourceRequests,
ResourceRequirements, Volume, VolumeMount, ContainerExecRequestTerminalSize,
GitRepoVolume, LogAnalytics, ContainerGroupDiagnostics, ContainerGroupNetworkProfile,
ContainerGroupIpAddressType, ResourceIdentityType, ContainerGroupIdentity)
from azure.cli.core.util import sdk_no_wait
from ._client_factory import (cf_container_groups, cf_container, cf_log_analytics_workspace,
cf_log_analytics_workspace_shared_keys, cf_resource, cf_network)
# Module-level logger for this command module.
logger = get_logger(__name__)
# Value reported by platform.system() on Windows hosts.
WINDOWS_NAME = 'Windows'
SERVER_DELIMITER = '.'
# Suffix identifying an Azure Container Registry login server.
ACR_SERVER_DELIMITER = '.azurecr.io'
# Well-known names used for the generated volumes and their mounts.
AZURE_FILE_VOLUME_NAME = 'azurefile'
SECRETS_VOLUME_NAME = 'secrets'
GITREPO_VOLUME_NAME = 'gitrepo'
# Sentinel identity id meaning the system-assigned managed identity.
MSI_LOCAL_ID = '[system]'
def list_containers(client, resource_group_name=None):
    """List container groups, scoped to a resource group when one is given."""
    if resource_group_name is not None:
        return client.list_by_resource_group(resource_group_name)
    return client.list()
def get_container(client, resource_group_name, name):
    """Fetch the details of a single container group."""
    group = client.get(resource_group_name, name)
    return group
def delete_container(client, resource_group_name, name, **kwargs):
    """Delete a container group; extra keyword args are accepted and ignored."""
    result = client.delete(resource_group_name, name)
    return result
# pylint: disable=too-many-statements
def create_container(cmd,
                     resource_group_name,
                     name=None,
                     image=None,
                     location=None,
                     cpu=1,
                     memory=1.5,
                     restart_policy='Always',
                     ports=None,
                     protocol=None,
                     os_type='Linux',
                     ip_address=None,
                     dns_name_label=None,
                     command_line=None,
                     environment_variables=None,
                     secure_environment_variables=None,
                     registry_login_server=None,
                     registry_username=None,
                     registry_password=None,
                     azure_file_volume_share_name=None,
                     azure_file_volume_account_name=None,
                     azure_file_volume_account_key=None,
                     azure_file_volume_mount_path=None,
                     log_analytics_workspace=None,
                     log_analytics_workspace_key=None,
                     vnet=None,
                     vnet_name=None,
                     vnet_address_prefix='10.0.0.0/16',
                     subnet=None,
                     subnet_address_prefix='10.0.0.0/24',
                     network_profile=None,
                     gitrepo_url=None,
                     gitrepo_dir='.',
                     gitrepo_revision=None,
                     gitrepo_mount_path=None,
                     secrets=None,
                     secrets_mount_path=None,
                     file=None,
                     assign_identity=None,
                     identity_scope=None,
                     identity_role='Contributor',
                     no_wait=False):
    """Create a container group.

    When --file is given the group is created straight from the YAML
    definition; otherwise it is assembled from the individual arguments
    (image, resources, registry credentials, volumes, diagnostics,
    networking and managed identity).
    """
    if file:
        return _create_update_from_file(cmd.cli_ctx, resource_group_name, name, location, file, no_wait)
    # --name and --image are mandatory unless a YAML file supplies them.
    if not name:
        raise CLIError("error: the --name/-n argument is required unless specified with a passed in file.")
    if not image:
        raise CLIError("error: the --image argument is required unless specified with a passed in file.")
    ports = ports or [80]
    protocol = protocol or ContainerGroupNetworkProtocol.tcp
    container_resource_requirements = _create_resource_requirements(cpu=cpu, memory=memory)
    image_registry_credentials = _create_image_registry_credentials(registry_login_server=registry_login_server,
                                                                    registry_username=registry_username,
                                                                    registry_password=registry_password,
                                                                    image=image)
    command = shlex.split(command_line) if command_line else None
    volumes = []
    mounts = []
    # Azure File share volume (optional).
    azure_file_volume = _create_azure_file_volume(azure_file_volume_share_name=azure_file_volume_share_name,
                                                  azure_file_volume_account_name=azure_file_volume_account_name,
                                                  azure_file_volume_account_key=azure_file_volume_account_key)
    azure_file_volume_mount = _create_azure_file_volume_mount(azure_file_volume=azure_file_volume,
                                                              azure_file_volume_mount_path=azure_file_volume_mount_path)
    if azure_file_volume:
        volumes.append(azure_file_volume)
        mounts.append(azure_file_volume_mount)
    # Secrets volume (optional).
    secrets_volume = _create_secrets_volume(secrets)
    secrets_volume_mount = _create_secrets_volume_mount(secrets_volume=secrets_volume,
                                                        secrets_mount_path=secrets_mount_path)
    if secrets_volume:
        volumes.append(secrets_volume)
        mounts.append(secrets_volume_mount)
    diagnostics = None
    tags = {}
    # Log Analytics diagnostics: either both workspace id and key are given,
    # or the key is looked up from the workspace name/id.
    if log_analytics_workspace and log_analytics_workspace_key:
        log_analytics = LogAnalytics(
            workspace_id=log_analytics_workspace, workspace_key=log_analytics_workspace_key)
        diagnostics = ContainerGroupDiagnostics(
            log_analytics=log_analytics
        )
    elif log_analytics_workspace and not log_analytics_workspace_key:
        diagnostics, tags = _get_diagnostics_from_workspace(
            cmd.cli_ctx, log_analytics_workspace)
        if not diagnostics:
            raise CLIError('Log Analytics workspace "' + log_analytics_workspace + '" not found.')
    elif not log_analytics_workspace and log_analytics_workspace_key:
        raise CLIError('"--log-analytics-workspace-key" requires "--log-analytics-workspace".')
    # Git repo volume (optional).
    gitrepo_volume = _create_gitrepo_volume(gitrepo_url=gitrepo_url, gitrepo_dir=gitrepo_dir, gitrepo_revision=gitrepo_revision)
    gitrepo_volume_mount = _create_gitrepo_volume_mount(gitrepo_volume=gitrepo_volume, gitrepo_mount_path=gitrepo_mount_path)
    if gitrepo_volume:
        volumes.append(gitrepo_volume)
        mounts.append(gitrepo_volume_mount)
    # Concatenate secure and standard environment variables
    if environment_variables and secure_environment_variables:
        environment_variables = environment_variables + secure_environment_variables
    else:
        environment_variables = environment_variables or secure_environment_variables
    identity = None
    if assign_identity is not None:
        identity = _build_identities_info(assign_identity)
    # Set up VNET, subnet and network profile if needed
    if subnet and not network_profile:
        network_profile = _get_vnet_network_profile(cmd, location, resource_group_name, vnet, vnet_address_prefix, subnet, subnet_address_prefix)
    cg_network_profile = None
    if network_profile:
        cg_network_profile = ContainerGroupNetworkProfile(id=network_profile)
    cgroup_ip_address = _create_ip_address(ip_address, ports, protocol, dns_name_label, network_profile)
    container = Container(name=name,
                          image=image,
                          resources=container_resource_requirements,
                          command=command,
                          ports=[ContainerPort(
                              port=p, protocol=protocol) for p in ports] if cgroup_ip_address else None,
                          environment_variables=environment_variables,
                          volume_mounts=mounts or None)
    cgroup = ContainerGroup(location=location,
                            identity=identity,
                            containers=[container],
                            os_type=os_type,
                            restart_policy=restart_policy,
                            ip_address=cgroup_ip_address,
                            image_registry_credentials=image_registry_credentials,
                            volumes=volumes or None,
                            network_profile=cg_network_profile,
                            diagnostics=diagnostics,
                            tags=tags)
    container_group_client = cf_container_groups(cmd.cli_ctx)
    lro = sdk_no_wait(no_wait, container_group_client.create_or_update, resource_group_name,
                      name, cgroup)
    # NOTE(review): the import below shadows the `assign_identity` parameter;
    # the parameter's value was already consumed above, but the shadowing is
    # confusing and worth a future rename.
    if assign_identity is not None and identity_scope:
        from azure.cli.core.commands.arm import assign_identity
        cg = container_group_client.get(resource_group_name, name)
        assign_identity(cmd.cli_ctx, lambda: cg, lambda cg: cg, identity_role, identity_scope)
    return lro
def _build_identities_info(identities):
    """Translate a list of identity ids into a ContainerGroupIdentity.

    The MSI_LOCAL_ID sentinel (or an empty list) selects the system-assigned
    identity; any other entries are treated as user-assigned identity ids.
    """
    identities = identities or []
    use_system = not identities or MSI_LOCAL_ID in identities
    user_ids = [ident for ident in identities if ident != MSI_LOCAL_ID]
    if use_system and user_ids:
        kind = ResourceIdentityType.system_assigned_user_assigned
    elif use_system:
        kind = ResourceIdentityType.system_assigned
    elif user_ids:
        kind = ResourceIdentityType.user_assigned
    else:
        kind = ResourceIdentityType.none
    identity = ContainerGroupIdentity(type=kind)
    if user_ids:
        identity.user_assigned_identities = {uid: {} for uid in user_ids}
    return identity
def _get_resource(client, resource_group_name, *subresources):
    """Fetch a resource, returning None when it does not exist."""
    from azure.core.exceptions import HttpResponseError
    try:
        return client.get(resource_group_name, *subresources)
    except HttpResponseError as ex:
        # Treat both "not found" error codes as an absent resource;
        # re-raise anything else (auth failures, throttling, ...).
        if ex.error.code in ("NotFound", "ResourceNotFound"):
            return None
        raise
def _get_vnet_network_profile(cmd, location, resource_group_name, vnet, vnet_address_prefix, subnet, subnet_address_prefix):
    """Resolve or create the network profile used for a VNET deployment.

    Accepts either names or full resource ids for the vnet/subnet, creates
    the vnet and an ACI-delegated subnet when they do not exist, and returns
    the id of a network profile attached to that subnet.
    """
    from azure.cli.core.profiles import ResourceType
    from msrestazure.tools import parse_resource_id, is_valid_resource_id
    aci_delegation_service_name = "Microsoft.ContainerInstance/containerGroups"
    Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
    aci_delegation = Delegation(
        name=aci_delegation_service_name,
        service_name=aci_delegation_service_name
    )
    ncf = cf_network(cmd.cli_ctx)
    vnet_name = vnet
    subnet_name = subnet
    # Full resource ids override the separate name / resource-group arguments.
    if is_valid_resource_id(subnet):
        parsed_subnet_id = parse_resource_id(subnet)
        subnet_name = parsed_subnet_id['resource_name']
        vnet_name = parsed_subnet_id['name']
        resource_group_name = parsed_subnet_id['resource_group']
    elif is_valid_resource_id(vnet):
        parsed_vnet_id = parse_resource_id(vnet)
        vnet_name = parsed_vnet_id['resource_name']
        resource_group_name = parsed_vnet_id['resource_group']
    default_network_profile_name = "aci-network-profile-{}-{}".format(vnet_name, subnet_name)
    subnet = _get_resource(ncf.subnets, resource_group_name, vnet_name, subnet_name)
    # For an existing subnet, validate and add delegation if needed
    if subnet:
        logger.info('Using existing subnet "%s" in resource group "%s"', subnet.name, resource_group_name)
        # A subnet already linked to a non-ACI service cannot be reused.
        for sal in (subnet.service_association_links or []):
            if sal.linked_resource_type != aci_delegation_service_name:
                raise CLIError("Can not use subnet with existing service association links other than {}.".format(aci_delegation_service_name))
        if not subnet.delegations:
            logger.info('Adding ACI delegation to the existing subnet.')
            subnet.delegations = [aci_delegation]
            subnet = ncf.subnets.begin_create_or_update(resource_group_name, vnet_name, subnet_name, subnet).result()
        else:
            # Any pre-existing delegations must all be for ACI.
            for delegation in subnet.delegations:
                if delegation.service_name != aci_delegation_service_name:
                    raise CLIError("Can not use subnet with existing delegations other than {}".format(aci_delegation_service_name))
        # Reuse a previously created network profile for this vnet/subnet pair.
        network_profile = _get_resource(ncf.network_profiles, resource_group_name, default_network_profile_name)
        if network_profile:
            logger.info('Using existing network profile "%s"', default_network_profile_name)
            return network_profile.id
    # Create new subnet and Vnet if not exists
    else:
        Subnet, VirtualNetwork, AddressSpace = cmd.get_models('Subnet', 'VirtualNetwork',
                                                              'AddressSpace', resource_type=ResourceType.MGMT_NETWORK)
        vnet = _get_resource(ncf.virtual_networks, resource_group_name, vnet_name)
        if not vnet:
            logger.info('Creating new vnet "%s" in resource group "%s"', vnet_name, resource_group_name)
            ncf.virtual_networks.begin_create_or_update(resource_group_name,
                                                        vnet_name,
                                                        VirtualNetwork(name=vnet_name,
                                                                       location=location,
                                                                       address_space=AddressSpace(address_prefixes=[vnet_address_prefix])))
        subnet = Subnet(
            name=subnet_name,
            location=location,
            address_prefix=subnet_address_prefix,
            delegations=[aci_delegation])
        logger.info('Creating new subnet "%s" in resource group "%s"', subnet_name, resource_group_name)
        subnet = ncf.subnets.begin_create_or_update(resource_group_name, vnet_name, subnet_name, subnet).result()
    NetworkProfile, ContainerNetworkInterfaceConfiguration, IPConfigurationProfile = cmd.get_models('NetworkProfile',
                                                                                                    'ContainerNetworkInterfaceConfiguration',
                                                                                                    'IPConfigurationProfile',
                                                                                                    resource_type=ResourceType.MGMT_NETWORK)
    # In all cases, create the network profile with aci NIC
    network_profile = NetworkProfile(
        name=default_network_profile_name,
        location=location,
        container_network_interface_configurations=[ContainerNetworkInterfaceConfiguration(
            name="eth0",
            ip_configurations=[IPConfigurationProfile(
                name="ipconfigprofile",
                subnet=subnet
            )]
        )]
    )
    logger.info('Creating network profile "%s" in resource group "%s"', default_network_profile_name, resource_group_name)
    network_profile = ncf.network_profiles.create_or_update(resource_group_name, default_network_profile_name, network_profile)
    return network_profile.id
def _get_diagnostics_from_workspace(cli_ctx, log_analytics_workspace):
    """Resolve a workspace name or customer id into diagnostics settings.

    Returns a (ContainerGroupDiagnostics, tags) pair when the workspace is
    found, or (None, {}) otherwise.
    """
    from msrestazure.tools import parse_resource_id
    workspace_client = cf_log_analytics_workspace(cli_ctx)
    shared_keys_client = cf_log_analytics_workspace_shared_keys(cli_ctx)
    for workspace in workspace_client.list():
        if log_analytics_workspace not in (workspace.name, workspace.customer_id):
            continue
        # Matched: fetch the shared key so the agent can push logs.
        resource_group = parse_resource_id(workspace.id)['resource_group']
        keys = shared_keys_client.get_shared_keys(resource_group, workspace.name)
        diagnostics = ContainerGroupDiagnostics(
            log_analytics=LogAnalytics(
                workspace_id=workspace.customer_id,
                workspace_key=keys.primary_shared_key))
        return (diagnostics, {'oms-resource-link': workspace.id})
    return None, {}
def _create_update_from_file(cli_ctx, resource_group_name, name, location, file, no_wait):
    """Create or update a container group from a YAML definition file."""
    resource_client = cf_resource(cli_ctx)
    container_group_client = cf_container_groups(cli_ctx)
    cg_definition = None
    try:
        with open(file, 'r') as f:
            cg_definition = yaml.safe_load(f)
    except OSError:  # FileNotFoundError introduced in Python 3
        raise CLIError("No such file or directory: " + file)
    except yaml.YAMLError as e:
        raise CLIError("Error while parsing yaml file:\n\n" + str(e))
    yaml_name = cg_definition.get('name', None)
    if name and yaml_name:
        # Both the CLI argument and the YAML supplied a name: they must agree.
        if name != yaml_name:
            raise CLIError("The name parameter and name from yaml definition must match.")
    else:
        # At least one source must provide the name.
        name = name or yaml_name
        if yaml_name is None and not name:
            raise CLIError("The name of the container group is required")
    cg_definition['name'] = name
    # Fall back to the resource group's location when none was given anywhere.
    location = location or cg_definition.get('location', None)
    if not location:
        location = resource_client.resource_groups.get(resource_group_name).location
    cg_definition['location'] = location
    api_version = cg_definition.get('apiVersion', None) or container_group_client.api_version
    return sdk_no_wait(no_wait,
                       resource_client.resources.create_or_update,
                       resource_group_name,
                       "Microsoft.ContainerInstance",
                       '',
                       "containerGroups",
                       name,
                       api_version,
                       cg_definition)
# pylint: disable=inconsistent-return-statements
def _create_resource_requirements(cpu, memory):
    """Build ResourceRequirements, or None when neither cpu nor memory is set."""
    if not (cpu or memory):
        return None
    requests = ResourceRequests(memory_in_gb=memory, cpu=cpu)
    return ResourceRequirements(requests=requests)
def _create_image_registry_credentials(registry_login_server, registry_username, registry_password, image):
    """Create image registry credentials.

    Resolution order: an explicit --registry-login-server wins; otherwise an
    ACR server is inferred from the image name ('*.azurecr.io'); otherwise a
    generic private registry is inferred when both credentials were supplied
    and the image's first path segment contains a dot. Missing username or
    password values are prompted for when a TTY is available. Returns a
    one-element list of ImageRegistryCredential, or None for public images.
    """
    image_registry_credentials = None
    if registry_login_server:
        if not registry_username:
            raise CLIError('Please specify --registry-username in order to use custom image registry.')
        if not registry_password:
            try:
                registry_password = prompt_pass(msg='Image registry password: ')
            except NoTTYException:
                raise CLIError('Please specify --registry-password in order to use custom image registry.')
        image_registry_credentials = [ImageRegistryCredential(server=registry_login_server,
                                                              username=registry_username,
                                                              password=registry_password)]
    elif ACR_SERVER_DELIMITER in image.split("/")[0]:
        # Image hosted on Azure Container Registry: prompt for any missing
        # credential parts.
        if not registry_username:
            try:
                registry_username = prompt(msg='Image registry username: ')
            except NoTTYException:
                raise CLIError('Please specify --registry-username in order to use Azure Container Registry.')
        if not registry_password:
            try:
                registry_password = prompt_pass(msg='Image registry password: ')
            except NoTTYException:
                raise CLIError('Please specify --registry-password in order to use Azure Container Registry.')
        acr_server = image.split("/")[0] if image.split("/") else None
        if acr_server:
            image_registry_credentials = [ImageRegistryCredential(server=acr_server,
                                                                  username=registry_username,
                                                                  password=registry_password)]
    elif registry_username and registry_password and SERVER_DELIMITER in image.split("/")[0]:
        # Explicit credentials for a non-ACR private registry: the server is
        # taken from the image name's first path segment.
        login_server = image.split("/")[0] if image.split("/") else None
        if login_server:
            image_registry_credentials = [ImageRegistryCredential(server=login_server,
                                                                  username=registry_username,
                                                                  password=registry_password)]
        else:
            raise CLIError('Failed to parse login server from image name; please explicitly specify --registry-server.')
    return image_registry_credentials
def _create_azure_file_volume(azure_file_volume_share_name, azure_file_volume_account_name, azure_file_volume_account_key):
    """Build the Azure File volume, or None when no share name was given.

    Prompts for the storage account key when it was omitted and a TTY is
    available.
    """
    if not azure_file_volume_share_name:
        return None
    if not azure_file_volume_account_name:
        raise CLIError('Please specify --azure-file-volume-account-name in order to use Azure File volume.')
    if not azure_file_volume_account_key:
        try:
            azure_file_volume_account_key = prompt_pass(msg='Azure File storage account key: ')
        except NoTTYException:
            raise CLIError('Please specify --azure-file-volume-account-key in order to use Azure File volume.')
    file_volume = AzureFileVolume(share_name=azure_file_volume_share_name,
                                  storage_account_name=azure_file_volume_account_name,
                                  storage_account_key=azure_file_volume_account_key)
    return Volume(name=AZURE_FILE_VOLUME_NAME, azure_file=file_volume)
def _create_secrets_volume(secrets):
    """Wrap the given secrets in a named Volume, or return None."""
    if not secrets:
        return None
    return Volume(name=SECRETS_VOLUME_NAME, secret=secrets)
def _create_gitrepo_volume(gitrepo_url, gitrepo_dir, gitrepo_revision):
    """Build the Git repo volume, or None when no repository URL was given."""
    if not gitrepo_url:
        return None
    repo = GitRepoVolume(repository=gitrepo_url, directory=gitrepo_dir, revision=gitrepo_revision)
    return Volume(name=GITREPO_VOLUME_NAME, git_repo=repo)
# pylint: disable=inconsistent-return-statements
def _create_azure_file_volume_mount(azure_file_volume, azure_file_volume_mount_path):
    """Build the Azure File volume mount, or None when no mount path was given."""
    if not azure_file_volume_mount_path:
        return None
    if not azure_file_volume:
        raise CLIError('Please specify --azure-file-volume-share-name --azure-file-volume-account-name --azure-file-volume-account-key '
                       'to enable Azure File volume mount.')
    return VolumeMount(name=AZURE_FILE_VOLUME_NAME, mount_path=azure_file_volume_mount_path)
def _create_secrets_volume_mount(secrets_volume, secrets_mount_path):
    """Build the secrets volume mount, or None when there is no secrets volume."""
    if not secrets_volume:
        return None
    if not secrets_mount_path:
        raise CLIError('Please specify --secrets --secrets-mount-path '
                       'to enable secrets volume mount.')
    return VolumeMount(name=SECRETS_VOLUME_NAME, mount_path=secrets_mount_path)
def _create_gitrepo_volume_mount(gitrepo_volume, gitrepo_mount_path):
    """Build the Git repo volume mount, or None when no mount path was given."""
    if not gitrepo_mount_path:
        return None
    if not gitrepo_volume:
        raise CLIError('Please specify --gitrepo-url (--gitrepo-dir --gitrepo-revision) '
                       'to enable Git Repo volume mount.')
    return VolumeMount(name=GITREPO_VOLUME_NAME, mount_path=gitrepo_mount_path)
# pylint: disable=inconsistent-return-statements
def _create_ip_address(ip_address, ports, protocol, dns_name_label, network_profile):
    """Build the group's IpAddress: public, private, or None at all."""
    wants_public = (ip_address and ip_address.lower() == 'public') or dns_name_label
    if wants_public:
        port_list = [Port(protocol=protocol, port=p) for p in ports]
        return IpAddress(ports=port_list, dns_name_label=dns_name_label,
                         type=ContainerGroupIpAddressType.public)
    if network_profile:
        # VNET deployments get a private address instead.
        port_list = [Port(protocol=protocol, port=p) for p in ports]
        return IpAddress(ports=port_list, type=ContainerGroupIpAddressType.private)
    return None
# pylint: disable=inconsistent-return-statements
def container_logs(cmd, resource_group_name, name, container_name=None, follow=False):
    """Print a container instance's log, optionally following it live."""
    container_client = cf_container(cmd.cli_ctx)
    container_group_client = cf_container_groups(cmd.cli_ctx)
    container_group = container_group_client.get(resource_group_name, name)
    # Default to the first container when no name was given.
    target = container_name if container_name is not None else container_group.containers[0].name
    if not follow:
        print(container_client.list_logs(resource_group_name, name, target).content)
        return
    _start_streaming(
        terminate_condition=_is_container_terminated,
        terminate_condition_args=(container_group_client, resource_group_name, name, target),
        shupdown_grace_period=5,
        stream_target=_stream_logs,
        stream_args=(container_client, resource_group_name, name, target, container_group.restart_policy))
def container_export(cmd, resource_group_name, name, file):
    """Export a container group to a YAML file usable with `create --file`."""
    resource_client = cf_resource(cmd.cli_ctx)
    container_group_client = cf_container_groups(cmd.cli_ctx)
    resource = resource_client.resources.get(resource_group_name,
                                             "Microsoft.ContainerInstance",
                                             '',
                                             "containerGroups",
                                             name,
                                             container_group_client.api_version).__dict__
    # Remove read-only properties that cannot round-trip through create.
    resource['properties'].pop('instanceView', None)
    resource.pop('sku', None)
    resource.pop('id', None)
    resource.pop('plan', None)
    resource.pop('kind', None)
    resource.pop('managed_by', None)
    resource['properties'].pop('provisioningState', None)
    # Correctly export the identity
    try:
        identity = resource['identity'].type
        if identity != ResourceIdentityType.none:
            resource['identity'] = resource['identity'].__dict__
            identity_entry = {'type': resource['identity']['type'].value}
            if resource['identity']['user_assigned_identities']:
                identity_entry['user_assigned_identities'] = {k: {} for k in resource['identity']['user_assigned_identities']}
            resource['identity'] = identity_entry
    except (KeyError, AttributeError):
        # BUG FIX: this previously popped the misspelled key 'indentity',
        # so a half-converted 'identity' entry could leak into the YAML.
        resource.pop('identity', None)
    # Remove container instance views
    for container in resource['properties']['containers']:
        container['properties'].pop('instanceView', None)
    # Add the api version
    resource['apiVersion'] = container_group_client.api_version
    with open(file, 'w+') as f:
        yaml.safe_dump(resource, f, default_flow_style=False)
def container_exec(cmd, resource_group_name, name, exec_command, container_name=None, terminal_row_size=20, terminal_col_size=80):
    """Start an interactive exec session in a container.

    Opens a websocket to the requested command inside the container.
    --container-name may be omitted only when the group has exactly one
    container.
    """
    container_client = cf_container(cmd.cli_ctx)
    container_group_client = cf_container_groups(cmd.cli_ctx)
    container_group = container_group_client.get(resource_group_name, name)
    if container_name or container_name is None and len(container_group.containers) == 1:
        # If only one container in container group, use that container.
        if container_name is None:
            container_name = container_group.containers[0].name
        terminal_size = ContainerExecRequestTerminalSize(rows=terminal_row_size, cols=terminal_col_size)
        execContainerResponse = container_client.execute_command(resource_group_name, name, container_name, exec_command, terminal_size)
        # BUG FIX: the original used "is" for this string comparison, which
        # tests object identity and is not guaranteed to be True even on
        # Windows; "==" compares the values.
        if platform.system() == WINDOWS_NAME:
            _start_exec_pipe_win(execContainerResponse.web_socket_uri, execContainerResponse.password)
        else:
            _start_exec_pipe(execContainerResponse.web_socket_uri, execContainerResponse.password)
    else:
        raise CLIError('--container-name required when container group has more than one container.')
def _start_exec_pipe_win(web_socket_uri, password):
    """Windows variant: pump console keystrokes into the exec websocket."""

    def _on_open(ws):
        # Authenticate first, then forward stdin from a daemon thread.
        ws.send(password)
        stdin_thread = threading.Thread(target=_capture_stdin, args=[ws])
        stdin_thread.daemon = True
        stdin_thread.start()

    ws = websocket.WebSocketApp(web_socket_uri, on_open=_on_open, on_message=_on_ws_msg)
    ws.run_forever()
def _on_ws_msg(ws, msg):
    """Echo a websocket message straight to the local terminal, unbuffered."""
    print(msg, end='', flush=True)
def _capture_stdin(ws):
    """Forward console keystrokes to the websocket (Windows only).

    Runs on a daemon thread; msvcrt.getch() blocks until a key is pressed,
    so the loop does not busy-wait.
    """
    while True:
        # BUG FIX: the original guarded on `msvcrt.kbhit` - the function
        # object itself, which is always truthy - so the check was a no-op.
        # getch() alone blocks for the next key, which is what we want.
        key = msvcrt.getch()
        ws.send(key)
def _start_exec_pipe(web_socket_uri, password):
    """POSIX variant: bridge the local TTY and the exec websocket.

    Puts stdin into raw/cbreak mode so keystrokes are forwarded unbuffered,
    and restores the terminal settings and the SIGWINCH handler on exit.
    """
    ws = websocket.create_connection(web_socket_uri)
    # Save terminal state up front so it can be restored even if the pipe
    # loop raises.
    oldtty = termios.tcgetattr(sys.stdin)
    old_handler = signal.getsignal(signal.SIGWINCH)
    try:
        tty.setraw(sys.stdin.fileno())
        tty.setcbreak(sys.stdin.fileno())
        # The server expects the password as the first websocket frame.
        ws.send(password)
        while True:
            try:
                if not _cycle_exec_pipe(ws):
                    break
            except (select.error, IOError) as e:
                # EINTR: select() was interrupted by a signal (e.g. a window
                # resize) - simply retry; anything else is a real error.
                if e.args and e.args[0] == errno.EINTR:
                    pass
                else:
                    raise
    except websocket.WebSocketException:
        # Connection closed by the remote end; fall through to cleanup.
        pass
    finally:
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
        signal.signal(signal.SIGWINCH, old_handler)
def _cycle_exec_pipe(ws):
    """Run one select() round of the exec pipe; returns True to keep looping."""
    readable, _, _ = select.select([ws.sock, sys.stdin], [], [])
    if ws.sock in readable:
        # Remote output: relay to the local terminal immediately.
        incoming = ws.recv()
        sys.stdout.write(incoming)
        sys.stdout.flush()
    if sys.stdin in readable:
        keystroke = sys.stdin.read(1)
        if keystroke:
            ws.send(keystroke)
    return True
def attach_to_container(cmd, resource_group_name, name, container_name=None):
    """Stream a container's events and logs until it terminates."""
    container_client = cf_container(cmd.cli_ctx)
    container_group_client = cf_container_groups(cmd.cli_ctx)
    container_group = container_group_client.get(resource_group_name, name)
    # Default to the first container when no name was given.
    target = container_name if container_name is not None else container_group.containers[0].name
    _start_streaming(
        terminate_condition=_is_container_terminated,
        terminate_condition_args=(container_group_client, resource_group_name, name, target),
        shupdown_grace_period=5,
        stream_target=_stream_container_events_and_logs,
        stream_args=(container_group_client, container_client, resource_group_name, name, target))
def _start_streaming(terminate_condition, terminate_condition_args, shupdown_grace_period, stream_target, stream_args):
    """Run stream_target on a daemon thread until terminate_condition holds.

    Polls the terminate condition every 10 seconds, then waits the grace
    period so the stream can flush before colorama is torn down.
    """
    import colorama
    colorama.init()
    try:
        worker = threading.Thread(target=stream_target, args=stream_args)
        worker.daemon = True
        worker.start()
        # Evaluate the terminate condition first each round (it may query the
        # service), then check whether the streaming thread is still running.
        while not terminate_condition(*terminate_condition_args) and worker.is_alive():
            time.sleep(10)
        time.sleep(shupdown_grace_period)
    finally:
        colorama.deinit()
def _stream_logs(client, resource_group_name, name, container_name, restart_policy):
    """Poll the container's logs every 2 seconds and repaint them in place."""
    previous_line_count = 0
    while True:
        log = client.list_logs(resource_group_name, name, container_name)
        current_line_count = len(log.content.split('\n'))
        # A shrinking log can only mean the container restarted.
        if current_line_count < previous_line_count and restart_policy != 'Never':
            print("Warning: you're having '--restart-policy={}'; the container '{}' was just restarted; the tail of the current log might be missing. Exiting...".format(restart_policy, container_name))
            break
        # Overwrite the previously printed tail instead of scrolling.
        _move_console_cursor_up(previous_line_count)
        print(log.content)
        previous_line_count = current_line_count
        time.sleep(2)
def _stream_container_events_and_logs(container_group_client, container_client, resource_group_name, name, container_name):
    """Stream container events until the container runs, then stream its logs."""
    # Lines printed by the previous iteration (so they can be repainted over)
    # and the last observed container state.
    lastOutputLines = 0
    lastContainerState = None
    while True:
        container_group, container = _find_container(container_group_client, resource_group_name, name, container_name)
        container_state = 'Unknown'
        if container.instance_view and container.instance_view.current_state and container.instance_view.current_state.state:
            container_state = container.instance_view.current_state.state
        # Repaint over the previously printed event list.
        _move_console_cursor_up(lastOutputLines)
        if container_state != lastContainerState:
            print("Container '{}' is in state '{}'...".format(container_name, container_state))
        currentOutputLines = 0
        if container.instance_view and container.instance_view.events:
            # Show events oldest-first.
            for event in sorted(container.instance_view.events, key=lambda e: e.last_timestamp):
                print('(count: {}) (last timestamp: {}) {}'.format(event.count, event.last_timestamp, event.message))
                currentOutputLines += 1
        lastOutputLines = currentOutputLines
        lastContainerState = container_state
        if container_state == 'Running':
            print('\nStart streaming logs:')
            break
        time.sleep(2)
    _stream_logs(container_client, resource_group_name, name, container_name, container_group.restart_policy)
def _is_container_terminated(client, resource_group_name, name, container_name):
    """Check if a container should be considered terminated.

    A terminated (Succeeded/Failed) container group implies the container is
    done; an 'Always' restart policy implies it never is; otherwise only a
    'Terminated' container state counts.
    """
    container_group, container = _find_container(client, resource_group_name, name, container_name)
    group_view = container_group.instance_view
    if group_view and group_view.state in ('Succeeded', 'Failed'):
        return True
    if container_group.restart_policy == 'Always':
        return False
    current = container.instance_view.current_state if container.instance_view else None
    return bool(current and current.state == 'Terminated')
def _find_container(client, resource_group_name, name, container_name):
"""Find a container in a container group. """
container_group = client.get(resource_group_name, name)
containers = [c for c in container_group.containers if c.name == container_name]
if len(containers) != 1:
raise CLIError("Found 0 or more than 1 container with name '{}'".format(container_name))
return container_group, containers[0]
def _move_console_cursor_up(lines):
"""Move console cursor up. """
if lines > 0:
# Use stdout.write to support Python 2
sys.stdout.write('\033[{}A\033[K\033[J'.format(lines))
def _gen_guid():
import uuid
return uuid.uuid4()
|
tello.py | # coding=utf-8
import logging
import socket
import time
import threading
import cv2
from threading import Thread
from .decorators import accepts
class Tello:
    """Python wrapper to interact with the Ryze Tello drone using the official Tello api.
    Tello API documentation:
    https://dl-cdn.ryzerobotics.com/downloads/tello/20180910/Tello%20SDK%20Documentation%20EN_1.3.pdf
    """
    # Send and receive commands, client socket
    UDP_IP = '192.168.10.1'
    UDP_PORT = 8889
    RESPONSE_TIMEOUT = 7  # in seconds
    TIME_BTW_COMMANDS = 1  # in seconds
    TIME_BTW_RC_CONTROL_COMMANDS = 0.5  # in seconds
    RETRY_COUNT = 3
    # NOTE(review): initialised in seconds here, but send_command_with_return
    # compares it against time.time() * 1000 (milliseconds) -- confirm the
    # intended unit.
    last_received_command = time.time()
    # Shared logger configuration for all Tello instances.
    HANDLER = logging.StreamHandler()
    FORMATTER = logging.Formatter('%(filename)s - %(lineno)d - %(message)s')
    HANDLER.setFormatter(FORMATTER)
    LOGGER = logging.getLogger('djitellopy')
    LOGGER.addHandler(HANDLER)
    LOGGER.setLevel(logging.INFO)
    # use logging.getLogger('djitellopy').setLevel(logging.<LEVEL>) in YOUR CODE
    # to only receive logs of the desired level and higher
    # Video stream, server socket
    VS_UDP_IP = '0.0.0.0'
    VS_UDP_PORT = 11111
    STATE_UDP_PORT = 8890
    # VideoCapture object
    cap = None
    background_frame_read = None
    stream_on = False
    is_flying = False
    # Tello state, refreshed by get_states(); -1 / -1.0 means "not received yet"
    pitch = -1
    roll = -1
    yaw = -1
    speed_x = -1
    speed_y = -1
    speed_z = -1
    temperature_lowest = -1
    temperature_highest = -1
    distance_tof = -1
    height = -1
    battery = -1
    barometer = -1.0
    flight_time = -1.0
    acceleration_x = -1.0
    acceleration_y = -1.0
    acceleration_z = -1.0
    attitude = {'pitch': -1, 'roll': -1, 'yaw': -1}

    def __init__(self,
                 host='192.168.10.1',
                 port=8889,
                 client_socket=None,
                 enable_exceptions=True,
                 retry_count=3):
        """Bind the command and state UDP sockets and start the background
        receiver threads.

        Arguments:
            host: drone IP address
            port: drone command UDP port
            client_socket: optional pre-configured UDP socket (e.g. for tests)
            enable_exceptions: raise on failed commands instead of returning False
            retry_count: how often control commands are retried
        """
        self.address = (host, port)
        self.response = None
        self.response_state = None  # to attain the response of the states
        self.stream_on = False
        self.enable_exceptions = enable_exceptions
        self.retry_count = retry_count
        if client_socket:
            self.clientSocket = client_socket
        else:
            self.clientSocket = socket.socket(socket.AF_INET,  # Internet
                                              socket.SOCK_DGRAM)  # UDP
            self.clientSocket.bind(('', self.UDP_PORT))  # For UDP response (receiving data)
        self.stateSocket = socket.socket(socket.AF_INET,
                                         socket.SOCK_DGRAM)
        self.stateSocket.bind(('', self.STATE_UDP_PORT))  # for accessing the states of Tello
        # Run tello udp receiver on background
        thread1 = threading.Thread(target=self.run_udp_receiver, args=())
        # Run state reciever on background
        thread2 = threading.Thread(target=self.get_states, args=())
        thread1.daemon = True
        thread2.daemon = True
        thread1.start()
        thread2.start()
    def run_udp_receiver(self):
        """Setup drone UDP receiver. This method listens for responses of Tello. Must be run from a background thread
        in order to not block the main thread."""
        while True:
            try:
                self.response, _ = self.clientSocket.recvfrom(1024)  # buffer size is 1024 bytes
            except Exception as e:
                # Socket closed or errored; stop the receiver thread.
                self.LOGGER.error(e)
                break

    def get_states(self):
        """This runs on background to recieve the state of Tello"""
        while True:
            try:
                self.response_state, _ = self.stateSocket.recvfrom(256)
                # NOTE(review): recvfrom returns bytes, so this never equals
                # the str 'ok' -- the guard is effectively always true.
                # Confirm whether b'ok' was intended.
                if self.response_state != 'ok':
                    self.response_state = self.response_state.decode('ASCII')
                    # 'key:value;key:value;...' -> flat [key, value, key, ...]
                    # NOTE(review): 'list' shadows the builtin of the same name.
                    list = self.response_state.replace(';', ':').split(':')
                    self.pitch = int(list[1])
                    self.roll = int(list[3])
                    self.yaw = int(list[5])
                    self.speed_x = int(list[7])
                    self.speed_y = int(list[9])
                    self.speed_z = int(list[11])
                    self.temperature_lowest = int(list[13])
                    self.temperature_highest = int(list[15])
                    self.distance_tof = int(list[17])
                    self.height = int(list[19])
                    self.battery = int(list[21])
                    self.barometer = float(list[23])
                    self.flight_time = float(list[25])
                    self.acceleration_x = float(list[27])
                    self.acceleration_y = float(list[29])
                    self.acceleration_z = float(list[31])
                    self.attitude = {'pitch': int(list[1]), 'roll': int(list[3]), 'yaw': int(list[5])}
            except Exception as e:
                # Any socket or parse error stops the state thread.
                self.LOGGER.error(e)
                self.LOGGER.error(f"Response was is {self.response_state}")
                break
    def get_udp_video_address(self):
        """Return the UDP URL that cv2.VideoCapture uses for the video stream."""
        return 'udp://@' + self.VS_UDP_IP + ':' + str(self.VS_UDP_PORT)  # + '?overrun_nonfatal=1&fifo_size=5000'

    def get_video_capture(self):
        """Get the VideoCapture object from the camera drone
        Returns:
            VideoCapture
        """
        # Lazily create the capture and (re)open it if it is not active.
        if self.cap is None:
            self.cap = cv2.VideoCapture(self.get_udp_video_address())
        if not self.cap.isOpened():
            self.cap.open(self.get_udp_video_address())
        return self.cap

    def get_frame_read(self):
        """Get the BackgroundFrameRead object from the camera drone. Then, you just need to call
        backgroundFrameRead.frame to get the actual frame received by the drone.
        Returns:
            BackgroundFrameRead
        """
        if self.background_frame_read is None:
            self.background_frame_read = BackgroundFrameRead(self, self.get_udp_video_address()).start()
        return self.background_frame_read

    def stop_video_capture(self):
        """Alias for streamoff()."""
        return self.streamoff()
@accepts(command=str, printinfo=bool, timeout=int)
def send_command_with_return(self, command, printinfo=True, timeout=RESPONSE_TIMEOUT):
"""Send command to Tello and wait for its response.
Return:
bool: True for successful, False for unsuccessful
"""
# Commands very consecutive makes the drone not respond to them. So wait at least self.TIME_BTW_COMMANDS seconds
diff = time.time() * 1000 - self.last_received_command
if diff < self.TIME_BTW_COMMANDS:
time.sleep(diff)
if printinfo:
self.LOGGER.info('Send command: ' + command)
timestamp = int(time.time() * 1000)
self.clientSocket.sendto(command.encode('utf-8'), self.address)
while self.response is None:
if (time.time() * 1000) - timestamp > timeout * 1000:
self.LOGGER.warning('Timeout exceed on command ' + command)
return False
try:
response = self.response.decode('utf-8').rstrip("\r\n")
except UnicodeDecodeError as e:
self.LOGGER.error(e)
return None
if printinfo:
self.LOGGER.info(f'Response {command}: {response}')
self.response = None
self.last_received_command = time.time() * 1000
return response
    @accepts(command=str)
    def send_command_without_return(self, command):
        """Send command to Tello without expecting a response (returns None).
        Use this method when you want to send a command continuously, e.g.:
        - go x y z speed: Tello fly to x y z in speed (cm/s)
            x: 20-500
            y: 20-500
            z: 20-500
            speed: 10-100
        - curve x1 y1 z1 x2 y2 z2 speed: Tello fly a curve defined by the current and two given coordinates with
            speed (cm/s). If the arc radius is not within the range of 0.5-10 meters, it responses false.
            x/y/z can't be between -20 - 20 at the same time.
            x1, x2: 20-500
            y1, y2: 20-500
            z1, z2: 20-500
            speed: 10-60
        - rc a b c d: Send RC control via four channels.
            a: left/right (-100~100)
            b: forward/backward (-100~100)
            c: up/down (-100~100)
            d: yaw (-100~100)
        """
        # Fire-and-forget: no throttling and no wait for an ack.
        self.LOGGER.info('Send command (no expect response): ' + command)
        self.clientSocket.sendto(command.encode('utf-8'), self.address)
@accepts(command=str, timeout=int)
def send_control_command(self, command, timeout=RESPONSE_TIMEOUT):
"""Send control command to Tello and wait for its response. Possible control commands:
- command: entry SDK mode
- takeoff: Tello auto takeoff
- land: Tello auto land
- streamon: Set video stream on
- streamoff: Set video stream off
- emergency: Stop all motors immediately
- up x: Tello fly up with distance x cm. x: 20-500
- down x: Tello fly down with distance x cm. x: 20-500
- left x: Tello fly left with distance x cm. x: 20-500
- right x: Tello fly right with distance x cm. x: 20-500
- forward x: Tello fly forward with distance x cm. x: 20-500
- back x: Tello fly back with distance x cm. x: 20-500
- cw x: Tello rotate x degree clockwise x: 1-3600
- ccw x: Tello rotate x degree counter- clockwise. x: 1-3600
- flip x: Tello fly flip x
l (left)
r (right)
f (forward)
b (back)
- speed x: set speed to x cm/s. x: 10-100
- wifi ssid pass: Set Wi-Fi with SSID password
Return:
bool: True for successful, False for unsuccessful
"""
response = None
for i in range(0, self.retry_count):
response = self.send_command_with_return(command, timeout=timeout)
if response == 'OK' or response == 'ok':
return True
return self.return_error_on_send_command(command, response, self.enable_exceptions)
    @accepts(command=str, printinfo=bool)
    def send_read_command(self, command, printinfo=True):
        """Send a read command to Tello and parse its response. Possible read commands:
        - speed?: get current speed (cm/s): x: 1-100
        - battery?: get current battery percentage: x: 0-100
        - time?: get current fly time (s): time
        - height?: get height (cm): x: 0-3000
        - temp?: get temperature (°C): x: 0-90
        - attitude?: get IMU attitude data: pitch roll yaw
        - baro?: get barometer value (m): x
        - tof?: get distance value from TOF (cm): x: 30-1000
        - wifi?: get Wi-Fi SNR: snr
        Return:
            int/float/str: the parsed value for successful
            False: for unsuccessful
        """
        response = self.send_command_with_return(command, printinfo=printinfo)
        try:
            response = str(response)
        except TypeError as e:
            self.LOGGER.error(e)
            pass
        # Error detection is purely string-based: any response containing
        # 'error'/'ERROR'/'False' is treated as a failure.
        if ('error' not in response) and ('ERROR' not in response) and ('False' not in response):
            if response.isdigit():
                return int(response)
            else:
                try:
                    return float(response)  # isdigit() is False when the number is a float(barometer)
                except ValueError:
                    # Non-numeric responses (e.g. snr, attitude) pass through.
                    return response
        else:
            return self.return_error_on_send_command(command, response, self.enable_exceptions)
def return_error_on_send_command(self, command, response, enable_exceptions):
"""Returns False and print an informative result code to show unsuccessful response"""
msg = 'Command ' + command + ' was unsuccessful. Message: ' + str(response)
if enable_exceptions:
raise Exception(msg)
else:
self.LOGGER.error(msg)
return False
def connect(self):
"""Entry SDK mode
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("command")
def takeoff(self):
"""Tello auto takeoff
Returns:
bool: True for successful, False for unsuccessful
False: Unsuccessful
"""
# Something it takes a looooot of time to take off and return a succesful take off. So we better wait. If not, is going to give us error on the following calls.
if self.send_control_command("takeoff", timeout=20):
self.is_flying = True
return True
else:
return False
def land(self):
"""Tello auto land
Returns:
bool: True for successful, False for unsuccessful
"""
if self.send_control_command("land"):
self.is_flying = False
return True
else:
return False
def streamon(self):
"""Set video stream on. If the response is 'Unknown command' means you have to update the Tello firmware. That
can be done through the Tello app.
Returns:
bool: True for successful, False for unsuccessful
"""
result = self.send_control_command("streamon")
if result is True:
self.stream_on = True
return result
def streamoff(self):
"""Set video stream off
Returns:
bool: True for successful, False for unsuccessful
"""
result = self.send_control_command("streamoff")
if result is True:
self.stream_on = False
return result
def emergency(self):
"""Stop all motors immediately
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("emergency")
@accepts(direction=str, x=int)
def move(self, direction, x):
"""Tello fly up, down, left, right, forward or back with distance x cm.
Arguments:
direction: up, down, left, right, forward or back
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command(direction + ' ' + str(x))
@accepts(x=int)
def move_up(self, x):
"""Tello fly up with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("up", x)
@accepts(x=int)
def move_down(self, x):
"""Tello fly down with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("down", x)
@accepts(x=int)
def move_left(self, x):
"""Tello fly left with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("left", x)
@accepts(x=int)
def move_right(self, x):
"""Tello fly right with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("right", x)
@accepts(x=int)
def move_forward(self, x):
"""Tello fly forward with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("forward", x)
@accepts(x=int)
def move_back(self, x):
"""Tello fly back with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("back", x)
    @accepts(x=int)
    def rotate_clockwise(self, x):
        """Tello rotate x degree clockwise.
        Arguments:
            x: 1-3600
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command("cw " + str(x))

    @accepts(x=int)
    def rotate_counter_clockwise(self, x):
        """Tello rotate x degree counter-clockwise.
        Arguments:
            x: 1-3600
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command("ccw " + str(x))

    # NOTE(review): the decorator names the argument 'x' but the parameter is
    # 'direction' -- confirm the accepts() decorator matches by position.
    @accepts(x=str)
    def flip(self, direction):
        """Tello fly flip.
        Arguments:
            direction: l (left), r (right), f (forward) or b (back)
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command("flip " + direction)

    def flip_left(self):
        """Tello fly flip left.
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.flip("l")

    def flip_right(self):
        """Tello fly flip right.
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.flip("r")

    def flip_forward(self):
        """Tello fly flip forward.
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.flip("f")

    def flip_back(self):
        """Tello fly flip back.
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.flip("b")
    @accepts(x=int, y=int, z=int, speed=int)
    def go_xyz_speed(self, x, y, z, speed):
        """Tello fly to x y z in speed (cm/s).
        Arguments:
            x: 20-500
            y: 20-500
            z: 20-500
            speed: 10-100
        Returns:
            None (fire-and-forget; sent without waiting for an ack)
        """
        return self.send_command_without_return('go %s %s %s %s' % (x, y, z, speed))

    @accepts(x1=int, y1=int, z1=int, x2=int, y2=int, z2=int, speed=int)
    def curve_xyz_speed(self, x1, y1, z1, x2, y2, z2, speed):
        """Tello fly a curve defined by the current and two given coordinates with speed (cm/s).
        - If the arc radius is not within the range of 0.5-10 meters, it responses false.
        - x/y/z can't be between -20 - 20 at the same time.
        Arguments:
            x1, x2: 20-500
            y1, y2: 20-500
            z1, z2: 20-500
            speed: 10-60
        Returns:
            None (fire-and-forget; sent without waiting for an ack)
        """
        return self.send_command_without_return('curve %s %s %s %s %s %s %s' % (x1, y1, z1, x2, y2, z2, speed))

    @accepts(x=int, y=int, z=int, speed=int, mid=int)
    def go_xyz_speed_mid(self, x, y, z, speed, mid):
        """Tello fly to x y z in speed (cm/s) relative to mission pad with id mid.
        Arguments:
            x: -500-500
            y: -500-500
            z: -500-500
            speed: 10-100
            mid: 1-8
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command('go %s %s %s %s m%s' % (x, y, z, speed, mid))

    @accepts(x1=int, y1=int, z1=int, x2=int, y2=int, z2=int, speed=int, mid=int)
    def curve_xyz_speed_mid(self, x1, y1, z1, x2, y2, z2, speed, mid):
        """Tello fly to x2 y2 z2 over x1 y1 z1 in speed (cm/s) relative to mission pad with id mid.
        Arguments:
            x1: -500-500
            y1: -500-500
            z1: -500-500
            x2: -500-500
            y2: -500-500
            z2: -500-500
            speed: 10-60
            mid: 1-8
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command('curve %s %s %s %s %s %s %s m%s' % (x1, y1, z1, x2, y2, z2, speed, mid))

    @accepts(x=int, y=int, z=int, speed=int, yaw=int, mid1=int, mid2=int)
    def go_xyz_speed_yaw_mid(self, x, y, z, speed, yaw, mid1, mid2):
        """Tello fly to x y z in speed (cm/s) relative to mid1.
        Then fly to 0 0 z over mid2 and rotate to yaw relative to mid2's rotation.
        Arguments:
            x: -500-500
            y: -500-500
            z: -500-500
            speed: 10-100
            yaw: -360-360
            mid1: 1-8
            mid2: 1-8
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command('jump %s %s %s %s %s m%s m%s' % (x, y, z, speed, yaw, mid1, mid2))
    def enable_mission_pads(self):
        """Enable mission pad detection ('mon').
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command("mon")

    def disable_mission_pads(self):
        """Disable mission pad detection ('moff').
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command("moff")

    def set_mission_pad_detection_direction(self, x):
        """Set the mission pad detection direction ('mdirection x').
        Returns:
            bool: True for successful, False for unsuccessful
        """
        # NOTE(review): presumably 0=downward, 1=forward, 2=both per the SDK --
        # confirm against the Tello SDK documentation. Also note this method
        # lacks the @accepts(x=int) check its siblings have.
        return self.send_control_command("mdirection " + str(x))

    @accepts(x=int)
    def set_speed(self, x):
        """Set speed to x cm/s.
        Arguments:
            x: 10-100
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command("speed " + str(x))
last_rc_control_sent = 0
@accepts(left_right_velocity=int, forward_backward_velocity=int, up_down_velocity=int, yaw_velocity=int)
def send_rc_control(self, left_right_velocity, forward_backward_velocity, up_down_velocity, yaw_velocity):
"""Send RC control via four channels. Command is sent every self.TIME_BTW_RC_CONTROL_COMMANDS seconds.
Arguments:
left_right_velocity: -100~100 (left/right)
forward_backward_velocity: -100~100 (forward/backward)
up_down_velocity: -100~100 (up/down)
yaw_velocity: -100~100 (yaw)
Returns:
bool: True for successful, False for unsuccessful
"""
if int(time.time() * 1000) - self.last_rc_control_sent < self.TIME_BTW_RC_CONTROL_COMMANDS:
pass
else:
self.last_rc_control_sent = int(time.time() * 1000)
return self.send_command_without_return('rc %s %s %s %s' % (self.round_to_100(left_right_velocity),
self.round_to_100(forward_backward_velocity),
self.round_to_100(up_down_velocity),
self.round_to_100(yaw_velocity)))
@accepts(x=int)
def round_to_100(self, x):
if x > 100:
return 100
elif x < -100:
return -100
else:
return x
    def set_wifi_credentials(self, ssid, password):
        """Set the Wi-Fi SSID and password. The Tello will reboot afterwords.
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command('wifi %s %s' % (ssid, password))

    def connect_to_wifi(self, ssid, password):
        """Connects to the Wi-Fi with SSID and password ('ap' command).
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command('ap %s %s' % (ssid, password))

    def get_speed(self):
        """Get current speed (cm/s)
        Returns:
            False: Unsuccessful
            int: 1-100
        """
        return self.send_read_command('speed?')

    def get_battery(self):
        """Get current battery percentage
        Returns:
            False: Unsuccessful
            int: 0-100
        """
        return self.send_read_command('battery?')
    def get_flight_time(self):
        """Get current fly time (s)
        Returns:
            False: Unsuccessful
            int: Seconds elapsed during flight.
        """
        return self.send_read_command('time?')

    def get_height(self):
        """Get height (cm)
        Returns:
            False: Unsuccessful
            int: 0-3000
        """
        return self.send_read_command('height?')

    def get_temperature(self):
        """Get temperature (°C)
        Returns:
            False: Unsuccessful
            int: 0-90
        """
        return self.send_read_command('temp?')

    def get_attitude(self):
        """Get IMU attitude data
        Returns:
            dict: {'pitch': int, 'roll': int, 'yaw': int}
        """
        # NOTE(review): if the read fails, send_read_command may return False,
        # and .replace() would raise AttributeError here -- confirm callers
        # only use this when connected.
        r = self.send_read_command('attitude?').replace(';', ':').split(':')
        return dict(zip(r[::2], [int(i) for i in r[1::2]]))  # {'pitch': xxx, 'roll': xxx, 'yaw': xxx}

    def get_barometer(self):
        """Get barometer value (m)
        Returns:
            False: Unsuccessful
            float: barometer reading (parsed as float by send_read_command)
        """
        return self.send_read_command('baro?')

    def get_distance_tof(self):
        """Get distance value from TOF (cm)
        Returns:
            False: Unsuccessful
            int: 30-1000
        """
        return self.send_read_command('tof?')

    def get_wifi(self):
        """Get Wi-Fi SNR
        Returns:
            False: Unsuccessful
            str: snr
        """
        return self.send_read_command('wifi?')

    def get_sdk_version(self):
        """Get SDK Version
        Returns:
            False: Unsuccessful
            str: SDK Version
        """
        return self.send_read_command('sdk?')

    def get_serial_number(self):
        """Get Serial Number
        Returns:
            False: Unsuccessful
            str: Serial Number
        """
        return self.send_read_command('sn?')

    def end(self):
        """Call this method when you want to end the tello object"""
        # Land first, then release streaming and capture resources.
        if self.is_flying:
            self.land()
        if self.stream_on:
            self.streamoff()
        if self.background_frame_read is not None:
            self.background_frame_read.stop()
        if self.cap is not None:
            self.cap.release()

    def __del__(self):
        # Best-effort cleanup when the object is garbage collected.
        self.end()
class BackgroundFrameRead:
    """
    This class read frames from a VideoCapture in background. Then, just call backgroundFrameRead.frame to get the
    actual one.
    """

    def __init__(self, tello, address):
        # The VideoCapture is shared with the Tello instance so Tello.end()
        # can release it.
        tello.cap = cv2.VideoCapture(address)
        self.cap = tello.cap
        if not self.cap.isOpened():
            self.cap.open(address)
        # NOTE(review): frame/grabbed are read and written from two threads
        # without a lock -- fine for display use, not for frame-exact work.
        self.grabbed, self.frame = self.cap.read()
        self.stopped = False

    def start(self):
        # NOTE(review): the reader thread is not daemonized; stop() must be
        # called for the process to exit cleanly.
        Thread(target=self.update_frame, args=()).start()
        return self

    def update_frame(self):
        # Keep grabbing frames until stopped or the capture fails/closes.
        while not self.stopped:
            if not self.grabbed or not self.cap.isOpened():
                self.stop()
            else:
                (self.grabbed, self.frame) = self.cap.read()

    def stop(self):
        # Signal update_frame() to finish its loop.
        self.stopped = True
|
termplay.py | # ------------------------------------------------------------
# TermPlay is licensed under the Apache License, Version 2.0.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# A copy of the "License" is also provided with the source
# of this project. Unless required by applicable law or agreed
# to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing
# permissions and limitations under the License.
# ------------------------------------------------------------
# ------------------------------------------------------------
# This project uses components from the
# https://github.com/termux/play-audio project.
# All licenses and copyrights to the aforementioned
# project reside with its author(s) and/or owners.
# ------------------------------------------------------------
import os
import sys
import subprocess
import datetime
import multiprocessing
import glob
# glob for folder list
from rich.console import Console
from tinytag import TinyTag
from time import sleep
from awesome_progress_bar import ProgressBar
# Initialise for console output
console = Console()
# Supported audio extensions (lower and upper case variants), used by
# folderInput() via str.endswith.
ext = ("mp3", "MP3", "FLAC", "flac", "wav", "WAV",
       "m4a", "M4A", "OGG", "ogg", "wma", "WMA", "aac", "AAC",
       "opus", "OPUS", "aiff", "AIFF")
def folderInput():
    """Prompt for a music directory under /storage and collect its audio files.

    Sets the module globals `userfolder` (chosen directory, with trailing
    slash) and `file_list` (audio files in it matching the supported
    extensions). Special inputs: :q/:quit exit, :all prints suggestions.
    """
    # list of music files
    global file_list
    global userfolder
    try:
        while True:
            console.print("Type :q or :quit to exit\n", style="b green", justify="center")
            console.print("Type :all to list a few suggestions\n", style="b green", justify="center")
            console.print("Enter name of directory:\n", style="b magenta")
            console.print(">>> ", style="b red", end="")
            # Candidate folders shown for the :all suggestion command.
            listcomp = [":q", ":quit", ":all"] + glob.glob("/storage/*/") + glob.glob("/storage/*/*/") + glob.glob("/storage/*/*/*/") + glob.glob("/storage/emulated/0/*/") + glob.glob("/storage/emulated/0/*/*/")
            userfolder = (input())
            if len(userfolder) != 0 and list(userfolder)[-1] != "/":
                userfolder += "/"  # Add / to end if not present
            userfolder = "/storage/" + userfolder
            if os.path.isdir(userfolder):
                x = os.listdir(userfolder)  # list all files in the folder
                file_list = []
                for i in x:
                    name = str(userfolder + i)
                    # Check if extension names in filename and then add to file_list
                    if os.path.isfile(name) and name.endswith(ext):
                        file_list.append(i)
                if len(file_list) != 0:
                    break
                else:
                    console.print("\nDirectory does not contain any music file", style="b red")
            elif userfolder == "/storage/:q/" or userfolder == "/storage/:quit/":
                console.print("\nExiting Now", style="b red")
                sleep(2)
                clearScreen()
                sys.exit()
            elif userfolder == "/storage/:all/":
                for i in listcomp:
                    console.print(i)
                print()
            elif userfolder == "/storage/emulated/":
                console.print("This folder cannot be accessed\n", style="b red")
                sys.exit()
            else:
                console.print("\nEnter valid directory name", style="b red")
    except PermissionError:
        console.print("\nPrivileges not present to access folder", style="b red")
    except (KeyboardInterrupt, ValueError):
        console.print("\n")
def clearScreen():
    """Clear the terminal via the external 'clear' command."""
    # print("\033c", end="")
    os.system("clear")
def printDir(dir):
    """ Print Directory and Metadata, and start playback if a track is chosen. """
    # NOTE(review): parameter 'dir' shadows the builtin and is unused; the
    # function reads the globals userfolder/file_list instead.
    # proc used to get data about play-audio globally
    global proc
    # tag.duration() used by progress bar
    global tag
    # t1 is name of multiprocessing - progress bar process
    global t1
    try:
        console.print("TERMPLAY\n\n", style="b magenta", justify="center")
        console.print("To play from a file, choose the corresponding value", style="b green", justify="center")
        console.print("To stop playback, but not close the program, type 0.2", style="b green", justify="center")
        console.print("To stop playback and exit, type 0.1\n", style="b green", justify="center")
        # Print directory as [0] file_name and so on
        for i in file_list:
            print("[" + str(file_list.index(i)) + "] ", end="")
            print(i)
        # check if choice is defined (for first execution of program)
        # To differentiate between 1.1 and int input, check if int_choice as float == choice:
        if 'int_choice' in globals() and 'choice' in globals() and float(str(int_choice) + ".0") == choice:
            # File Metadata
            console.print("\nNow Playing:" + str(file_list[int_choice]), style="b green")
            tag = TinyTag.get(str(userfolder + str(file_list[int_choice])))
            console.print("\nArtist:", tag.artist, "\nTitle:", tag.title, "\nAlbum:", tag.album, "\nDuration:", datetime.timedelta(seconds=round(tag.duration)), style="b blue", justify="center")
            console.print("\n")
            # NOTE(review): relies on the external 'play-audio' binary being
            # available on PATH (Termux).
            proc = subprocess.Popen(["play-audio", str(userfolder + str(file_list[int_choice]))], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            t1 = multiprocessing.Process(target=progress)
            if 'proc' in globals():
                t1.start()
    except TypeError:
        console.print("\nThe file chosen is not of the correct format", style="b red")
        sleep(2)
        clearScreen()
        sys.exit()
def progress():
    """ Print Progress bar (without inbuilt threading).

    Runs in a separate process; reads the global `tag` for the track length
    and ticks the bar once per second.
    """
    try:
        total = round(tag.duration)  # Total time to print progress
        bar = ProgressBar(total, bar_length=os.get_terminal_size()[0], use_thread=False)
        for x in range(total):
            sleep(1)
            bar.iter()  # iter to update animation
        console.print('\nPlayback finished', style="b cyan")
    except KeyboardInterrupt:
        print("\n")
def killAll():
    """Terminate the audio player subprocess and the progress-bar process."""
    if 'proc' in globals():
        # if proc is defined, i.e., input taken at least once,
        # kill it so that no interference occurs
        proc.terminate()
    if 't1' in globals():
        t1.terminate()
        # join to connect parent python process and background process
        t1.join()
def startScreenInput():
    """ Input Prompt.

    Loops until a valid track index is entered (then returns so the caller
    can start playback), or exits the program on choice 0.1.
    """
    # int_choice is used for accessing list index
    global int_choice
    # choice is float and if == 0.1, exit, if 0.2, kill all processes
    global choice
    while True:
        try:
            console.print(">>> ", style="b red", end="")
            choice = float((input()).replace(" ", ""))
            int_choice = int(choice)
            # Stop any currently playing track before acting on the input.
            killAll()
        except (ValueError, TypeError):
            console.print("\tPlease enter a valid integer or decimal-point value", style="b red")
        except KeyboardInterrupt:
            console.print("\n")
            pass
        else:
            try:
                # isinstance is for checking data type
                # check if choice is less than actual length of list
                if choice in (0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9) or (isinstance(int_choice, int) and (int_choice >= len(file_list) or int_choice < 0)):
                    console.print("\tPlease enter an integer below {} or more than -1".format(len(file_list)), style="b red")
                    continue
                elif isinstance(choice, float) and choice == 0.1:
                    # 0.1: stop playback and exit the program.
                    clearScreen()
                    console.print("\n\tStopping Playback", style="b cyan")
                    console.print("\n\tExiting Now\n", style="b red")
                    killAll()
                    sleep(2)
                    clearScreen()
                    sys.exit()
                elif isinstance(choice, float) and choice == 0.2:
                    # 0.2: stop playback but keep the program running.
                    console.print("\n\tStopping Playback", style="b cyan")
                    killAll()
                else:
                    # Valid track index: hand control back to the caller.
                    break
            except Exception:
                pass
def main():
    """ Main function: pick a folder once, then loop menu -> input -> play. """
    clearScreen()
    folderInput()
    while True:
        try:
            clearScreen()
            printDir(userfolder)
            startScreenInput()
        except KeyboardInterrupt:
            pass
        except NameError:
            # userfolder/file_list never got set (e.g. folderInput aborted).
            sys.exit()

if __name__ == "__main__":
    main()
|
http_server.py | # Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
""" This module defines util functions for testing offchain inbound request processor with http server
Not recommended for production
"""
from http import server
from .http_header import X_REQUEST_ID, X_REQUEST_SENDER_ADDRESS
import logging, socket, threading, typing
logger: logging.Logger = logging.getLogger(__name__)
def start_local(
    port: int,
    process_inbound_request: typing.Callable[[str, str, bytes], typing.Tuple[int, bytes]],
) -> server.HTTPServer:
    """Launch an HTTPServer on localhost:`port` that feeds POSTs to a callback.

    Warning: this is not recommended for production.

    The `process_inbound_request` callable receives:
      1. the X_REQUEST_ID header value (for logging),
      2. the DIP-5 account id used to verify the JWS signature,
      3. the raw JWS bytes from the request body,
    and must return an (HTTP status code, response body bytes) pair.
    """

    class InboundHandler(server.BaseHTTPRequestHandler):
        def do_POST(self):
            request_id = self.headers[X_REQUEST_ID]
            sender_address = self.headers[X_REQUEST_SENDER_ADDRESS]
            try:
                body = self.rfile.read(int(self.headers["content-length"]))
                status, response = process_inbound_request(request_id, sender_address, body)
                self.send_response(status)
                self.send_header(X_REQUEST_ID, request_id)
                self.end_headers()
                self.wfile.write(response)
            except Exception as exc:
                # Any callback failure is reported to the client as a 500.
                logger.exception(exc)
                self.send_error(500, str(exc))

    httpd = server.HTTPServer(("localhost", port), InboundHandler)
    worker = threading.Thread(target=httpd.serve_forever, daemon=True)
    worker.start()
    return httpd
def get_available_port() -> int:
    """Return a TCP port that is currently free on localhost."""
    sock = socket.socket()
    try:
        # Binding to port 0 asks the OS for an unused ephemeral port.
        sock.bind(("localhost", 0))
        return sock.getsockname()[1]
    finally:
        sock.close()
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Carboncoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
# Seconds to back off after an RPC failure before retrying.
ERR_SLEEP = 15
# Initial nonce-scan ceiling per getwork round (retuned at runtime by
# Miner.iterate to match the configured scantime). Python 2 long literal.
MAX_NONCE = 1000000L
# Key/value settings parsed from the config file in __main__.
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class CarboncoinRPC:
    """Minimal JSON-RPC 1.1 client for a carboncoind node (Python 2 only:
    uses httplib and print statements)."""
    # JSON-RPC request id counter.
    OBJID = 1

    def __init__(self, host, port, username, password):
        # HTTP Basic auth header built from the RPC credentials.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # strict=False, 30 second timeout; plain HTTP (no TLS).
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC call.

        Returns the 'result' field on success, the 'error' object on RPC
        error, or None on transport/decode failure (after printing why).
        """
        # NOTE(review): += on a class attribute creates an instance
        # attribute shadowing the shared class counter.
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
                          { 'Authorization' : self.authhdr,
                            'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        # RPC wrapper: current best block height.
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        # RPC wrapper: fetch work, or submit a solution when data is given.
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate x to an unsigned 32-bit value.

    The plain 0xffffffff literal behaves identically to the old 0xffffffffL
    long literal under Python 2 (ints auto-promote to long), and unlike the
    L-suffixed form it is also valid Python 3 syntax.
    """
    return x & 0xffffffff
def bytereverse(x):
    """Swap the byte order of a 32-bit word (endianness flip)."""
    upper = ((x) << 24) | (((x) << 8) & 0x00ff0000)
    lower = (((x) >> 8) & 0x0000ff00) | ((x) >> 24)
    return uint32(upper | lower)
def bufreverse(in_buf):
    """Byte-swap every 32-bit word of in_buf (Python 2 str buffer).

    NOTE(review): relies on Python 2 semantics where struct.pack returns
    str and ''.join concatenates those; under Python 3 this would need a
    bytes join instead.
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 4-byte words in in_buf; bytes within each
    word keep their order."""
    chunks = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    return ''.join(reversed(chunks))
class Miner:
    """Single-threaded getwork CPU miner (Python 2 only: uses xrange,
    long, str.decode('hex') and print statements)."""

    def __init__(self, id):
        self.id = id
        # Upper bound on nonces scanned per getwork round; retuned in iterate().
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces for one work unit.

        Returns (hashes_done, nonce_bin): nonce_bin is the packed winning
        nonce, or None when no solution was found within max_nonce.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)
        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the work data and submit via getwork."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # Hex-string offsets: the nonce occupies characters 152..160.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One mining round: fetch work, scan, retune max_nonce, maybe submit."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # Retune so the next round takes about settings['scantime'] seconds.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Connect to the configured RPC endpoint and mine forever."""
        rpc = CarboncoinRPC(settings['host'], settings['port'],
                            settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Process entry point: run one Miner identified by `id` until killed."""
    Miner(id).loop()
if __name__ == '__main__':
    # Usage: pyminer.py CONFIG-FILE  (Python 2 script).
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # Defaults for the optional settings.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)
    # Values read from the file are strings; normalize to numbers.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])
    # Spawn one worker process per configured thread.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)			# stagger threads
    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        # Block until the workers exit (they normally never do).
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
helper_all.py | """
:module:
:synopsis: Module provide functions for easier testing communication.
:author: Julian Sobott
public classes
---------------
.. autoclass:: XXX
:members:
public functions
-----------------
.. autofunction:: XXX
private classes
----------------
private functions
------------------
"""
import os
import shutil
from multiprocessing import Process, Queue
from typing import Tuple
from pynetworking import server
import OpenDrive.client_side.net_start
import OpenDrive.server_side.net_start
from OpenDrive.client_side import paths as client_paths
from OpenDrive.server_side import paths as server_paths
# Shared queue used to tell the debug server process to shut down
# (see h_start_server_process / h_stop_server_process).
server_stop_queue = Queue()
def h_start_server_process() -> Process:
    """Start the debug server in a child process and return the started process.

    Stop it later with h_stop_server_process().

    The previous implementation created the process inside try/finally and
    called ``server_process.start()`` in the finally block: if
    ``Process(...)`` itself raised, the finally clause called
    ``None.start()`` and masked the original error with an AttributeError.
    Creating, starting and returning sequentially has the same
    successful-path behavior without that failure mode.
    """
    server_process = Process(target=_debug_server_routine, args=(server_stop_queue,))
    server_process.start()
    return server_process
def h_stop_server_process(process: Process):
    """Signal the server process to stop, then wait for it to exit."""
    # The server routine polls server_stop_queue; any item triggers shutdown.
    server_stop_queue.put("Stop")
    process.join()
def h_client_routine(clear_server_db: bool = False, clear_folders: bool = True):
    """Decorator factory: run a test inside a connected client session.

    Optionally resets the local folders and/or the server database first,
    connects the client (ConnectionError on failure), runs the wrapped
    function, then closes the connection and returns its result.
    """
    from tests.server_side.helper_server import h_delete_recreate_server_db

    def decorator(func):
        def wrapper(*args, **kwargs):
            if clear_folders:
                h_clear_init_all_folders()
            if clear_server_db:
                h_delete_recreate_server_db()
            client_net = OpenDrive.client_side.net_start
            if not client_net.connect(timeout=5):
                raise ConnectionError
            result = func(*args, **kwargs)
            client_net.close_connection()
            return result
        return wrapper
    return decorator
def h_clear_init_all_folders(client=True, server=True):
    """Delete and recreate the local data folders.

    server: OpenDrive/local/server_side/ROOT/
    client: OpenDrive/local/client_side
    """
    targets = []
    if server:
        targets.append(server_paths.FOLDERS_ROOT)
    if client:
        targets.append(client_paths.LOCAL_CLIENT_DATA)
    for folder in targets:
        shutil.rmtree(folder, ignore_errors=True)
        os.makedirs(folder, exist_ok=True)
def h_clear_init_dummy_folders() -> Tuple[str, str]:
    """Recreate empty dummy folders on both sides and return their paths.

    client: OpenDrive/local/client_side/DUMMY_FOLDER/
    server: OpenDrive/local/server_side/DUMMY_FOLDER/
    """
    client_dummy = os.path.join(client_paths.LOCAL_CLIENT_DATA, "DUMMY_FOLDER")
    server_dummy = os.path.join(server_paths.LOCAL_SERVER_DATA, "DUMMY_FOLDER")
    for folder in (client_dummy, server_dummy):
        h_create_empty(folder)
    return client_dummy, server_dummy
def h_create_empty(abs_path: str):
    """Ensure abs_path exists as an empty directory, wiping prior contents."""
    # Remove whatever is there (a missing path is fine), then recreate fresh.
    shutil.rmtree(abs_path, ignore_errors=True)
    os.makedirs(abs_path, exist_ok=True)
def _debug_server_routine(queue: Queue):
    """Child-process entry point: run the server until `queue` receives a stop item."""
    OpenDrive.server_side.net_start.start(queue)
|
test_multiprocessing_value.py | import logging
from multiprocessing import Process, Value
import os
import sys
import time
# Default root handler plus a module logger at DEBUG for the demo output.
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class Monitor:
    """Toggles a shared heartbeat flag until told to stop.

    Intended to run inside a child multiprocessing.Process; communicates
    through two multiprocessing.Value('b') objects.
    """

    def __init__(self):
        # True only while monitor() is actively looping.
        self.monitor_active = False

    def start(self, monitor_state, kill_monitor):
        """Process entry point: run the monitor loop."""
        self.monitor(monitor_state, kill_monitor)

    def stop(self):
        pass

    def monitor(self, monitor_state, kill_monitor):
        """Toggle monitor_state.value (0 <-> 1) every 5 seconds.

        Exits when kill_monitor.value is set to 1, after 60 seconds, or on
        KeyboardInterrupt.

        Fixes vs. the original:
          * the kill check compared the Value *object* to 1
            (``kill_monitor == 1``), which is never true, so the kill
            switch silently never fired; it must read ``.value``.
          * the per-iteration ``finally`` reset monitor_active to False on
            every loop pass, defeating its purpose; it is now cleared once,
            when the loop actually ends.
        """
        self.monitor_active = True
        loop_start = time.time()
        try:
            while True:
                try:
                    # Flip the shared heartbeat flag.
                    if monitor_state.value == 0:
                        monitor_state.value = 1
                    elif monitor_state.value == 1:
                        monitor_state.value = 0
                    if kill_monitor.value == 1 or (time.time() - loop_start) > 60:
                        break
                    time.sleep(5)
                except KeyboardInterrupt:
                    logger.info('Exit signal received.')
                    break
                except Exception as e:
                    logger.exception(e)
        finally:
            self.monitor_active = False
if __name__ == '__main__':
    try:
        # 'b' = signed char; both flags are shared with the child process.
        monitor_state = Value('b', 0)
        kill_monitor = Value('b', 0)
        monitor = Monitor()
        arguments = tuple()
        keyword_arguments = {'monitor_state': monitor_state, 'kill_monitor': kill_monitor}
        monitor_proc = Process(target=monitor.start, args=arguments, kwargs=keyword_arguments)
        logger.info('Starting process.')
        monitor_proc.start()
        loop_start = time.time()
        # Parent just observes the shared flag; Ctrl-C ends the demo.
        while (True):
            logger.info('monitor_state.value: ' + str(monitor_state.value))
            time.sleep(5)
        #logger.info('Joining process.')
        #monitor_proc.join()
    except Exception as e:
        logger.exception('Unhandled exception in heartbeatmonitor.monitor.')
        logger.exception(e)
    except KeyboardInterrupt:
        logger.info('Exit signal received.')
    finally:
        # Always tear the child process down before exiting.
        monitor_proc.terminate()
        monitor_proc.join()
        logger.info('Done.')
|
Server.py | #! /usr/bin/env python3
import socket, sqlite3 as lite
from multiprocessing import Process
class IP_To_Name_Control(object):
    """SQLite-backed bidirectional name <-> IP registry for the chat server.

    Fixes vs. the original:
      * ``DROP TABLE CONTROL`` raised OperationalError on a fresh database;
        now uses ``DROP TABLE IF EXISTS``.
      * the connection, cursor and scratch list were module globals; they
        are now instance attributes, so instances don't trample each other.
      * the lookup scratch list was a global that appended every row on
        every call, growing without bound; rows are now fetched per call.
    """

    def __init__(self):
        self.con = lite.connect('itnc.db')
        self.cur = self.con.cursor()
        # Start from a clean table each run.
        self.cur.execute("DROP TABLE IF EXISTS CONTROL;")
        self.cur.execute("CREATE TABLE CONTROL (id INTEGER PRIMARY KEY AUTOINCREMENT, name text, ip text);")
        self.con.commit()

    def lookup(self, name, typ=None):
        """Resolve a name to an IP (typ='ip') or an IP to a name (typ='user').

        Returns the first match in insertion order, or None when nothing
        matches or typ is unrecognized.
        """
        self.cur.execute("SELECT name, ip FROM CONTROL;")
        rows = self.cur.fetchall()
        if typ == 'ip':
            for row_name, row_ip in rows:
                if row_name == name:
                    return row_ip
        if typ == 'user':
            for row_name, row_ip in rows:
                if row_ip == name:
                    return row_name
        return None

    def add(self, name, ip):
        """Insert a (name, ip) pair into the registry."""
        print("ADDING: ", "NAME -->", name, "IP -->", ip)
        self.cur.execute("INSERT INTO CONTROL(name, ip) values(?, ?);", (name, ip))
        self.con.commit()
class Chat_Server(object):
    """Relay chat server: accepts logins on port 8090, records name -> IP,
    and forwards each parsed message to the recipient's IP on port 9090.

    NOTE(review): state (listening socket, registry, name map) lives in
    module globals, and each connection's child Process is join()ed
    immediately, so connections are effectively handled one at a time.
    """

    def __init__(self):
        global s, itnc, Name_And_IP
        Name_And_IP = {}
        itnc = IP_To_Name_Control()
        # Hard-coded LAN address of this server -- adjust per deployment.
        host = '192.168.1.115'
        port = 8090
        s = socket.socket()
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((host, port))
        s.listen(5)

    def _connection_handler(self):
        # Accept one client and handle it in a child process; join() blocks
        # until that chat exchange completes.
        sock, addr = s.accept()
        if sock and addr:
            p = Process(target=self._chat_handler, args=(addr, sock))
            p.start()
            p.join()

    def _recv_data(self, sock):
        # Single recv; assumes the whole message fits in 9999 bytes.
        return sock.recv(9999).decode('utf-8')

    def _construct_headers(self, MSGFROM, MSG):
        # Build the outbound wire format: "MSGFROM: <user>\n\rMSG: <text>".
        header = []
        header.append("MSGFROM: "+ MSGFROM + "\n\r")
        header.append("MSG: " + MSG)
        return ''.join(header)

    def _send_data(self, ip, data):
        # Deliver the message to the recipient's client listening on 9090.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((ip, 9090))
        sock.send(data.encode('utf-8'))

    def _parse_msg(self, msg, ip):
        """Extract the MSGTO/MSG headers and resolve sender + recipient.

        Returns ([msgfrom, "\\n\\r", message_text], recipient_ip).
        NOTE(review): assumes both headers are present -- msgto/old_msg are
        unbound otherwise; confirm the client always sends them.
        """
        new_msg = []
        msg_data = msg.splitlines()
        for header in msg_data:
            if 'MSGTO: ' in header:
                msgto = header.replace("MSGTO: ", '')
            if 'MSG: ' in header:
                old_msg = header.replace("MSG: ", '')
        msgtoip = itnc.lookup(msgto, typ='ip')
        msgfrom = itnc.lookup(ip, typ='user')
        new_msg = [msgfrom, "\n\r", old_msg]
        return new_msg, msgtoip

    def _chat_handler(self, addr, sock):
        """Child-process body: login handshake, receive one message, relay it."""
        print("Connection From -->", addr)
        print("Connection Sock -->", sock)
        sock.send("Login".encode('utf-8'))
        print("Sent Login Message")
        login_name = self._recv_data(sock)
        print("Recvied Login Name For", addr[0], "It is", login_name)
        #Code for login valadation
        print("Adding to database")
        itnc.add(login_name, addr[0])
        print("Added to Database")
        sock.send("Ready For Message".encode('utf-8'))
        msg = self._recv_data(sock)
        print("Closing Socket")
        sock.close()
        print("Socket Closed Succesfully")
        print("Recived Message")
        msg_with_headers, mstoip = self._parse_msg(msg, addr[0])
        print("Succesfully Parsed Message")
        print(msg_with_headers, "-->" ,mstoip)
        self._send_data(mstoip, ''.join(msg_with_headers))
        print("Sent Message")
if __name__ == '__main__':
    # Serve forever, one connection at a time.
    c = Chat_Server()
    while True:
        c._connection_handler()
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from PyQt5.QtWidgets import *
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds, PrintError,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
class StatusBarButton(QPushButton):
    """Flat, fixed-width status-bar button that invokes `func` on click or
    when Return is pressed while the button has focus."""

    def __init__(self, icon, tooltip, func):
        QPushButton.__init__(self, icon, '')
        self.func = func
        self.setToolTip(tooltip)
        self.setFlat(True)
        self.setMaximumWidth(25)
        self.setIconSize(QSize(25, 25))
        self.setCursor(QCursor(Qt.PointingHandCursor))
        self.clicked.connect(self.onPress)

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()

    def keyPressEvent(self, e):
        # Treat Return like a click.
        if e.key() == Qt.Key_Return:
            self.func()
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
    def __init__(self, gui_object, wallet: Abstract_Wallet):
        """Build the main wallet window.

        Order matters: the status bar and tabs must exist before network
        callbacks are registered, and load_wallet() runs only once every
        widget has been created.
        """
        QMainWindow.__init__(self)
        self.gui_object = gui_object
        self.config = config = gui_object.config  # type: SimpleConfig
        self.gui_thread = gui_object.gui_thread
        self.setup_exception_hook()
        self.network = gui_object.daemon.network  # type: Network
        assert wallet, "no wallet"
        self.wallet = wallet
        self.fx = gui_object.daemon.fx  # type: FxThread
        self.invoices = wallet.invoices
        self.contacts = wallet.contacts
        self.tray = gui_object.tray
        self.app = gui_object.app
        # --- plain state flags ---
        self.cleaned_up = False
        self.is_max = False
        self.payment_request = None
        self.checking_accounts = False
        self.qr_window = None
        self.not_enough_funds = False
        self.pluginsdialog = None
        self.require_fee_update = False
        self.tl_windows = []
        self.tx_external_keypairs = {}
        self.tx_notification_queue = queue.Queue()
        self.tx_notification_last_time = 0
        self.create_status_bar()
        self.need_update = threading.Event()
        # Validate the configured base-unit decimal point; fall back to the
        # default when the stored value is unknown.
        self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
        try:
            decimal_point_to_base_unit_name(self.decimal_point)
        except UnknownBaseUnit:
            self.decimal_point = DECIMAL_POINT_DEFAULT
        self.num_zeros = int(config.get('num_zeros', 0))
        self.completions = QStringListModel()
        # --- tabs ---
        self.tabs = tabs = QTabWidget(self)
        self.send_tab = self.create_send_tab()
        self.receive_tab = self.create_receive_tab()
        self.addresses_tab = self.create_addresses_tab()
        self.utxo_tab = self.create_utxo_tab()
        self.console_tab = self.create_console_tab()
        self.contacts_tab = self.create_contacts_tab()
        tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
        tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
        tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))

        def add_optional_tab(tabs, tab, icon, description, name):
            # Optional tabs are shown only when enabled in config; the
            # metadata stored on the widget lets toggle_tab() re-insert
            # them at their canonical position later.
            tab.tab_icon = icon
            tab.tab_description = description
            tab.tab_pos = len(tabs)
            tab.tab_name = name
            if self.config.get('show_{}_tab'.format(name), False):
                tabs.addTab(tab, icon, description.replace("&", ""))

        add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
        add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
        add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
        add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
        tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.setCentralWidget(tabs)
        if self.config.get("is_maximized"):
            self.showMaximized()
        self.setWindowIcon(QIcon(":icons/electrum.png"))
        self.init_menubar()
        # Weak proxy so the shortcut lambdas don't keep the tab widget alive.
        wrtabs = weakref.proxy(tabs)
        QShortcut(QKeySequence("Ctrl+W"), self, self.close)
        QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
        QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
        QShortcut(QKeySequence("F5"), self, self.update_wallet)
        QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
        QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
        for i in range(wrtabs.count()):
            # i=i default binds the loop variable (late-binding pitfall).
            QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
        self.payment_request_ok_signal.connect(self.payment_request_ok)
        self.payment_request_error_signal.connect(self.payment_request_error)
        self.history_list.setFocus(True)
        # network callbacks
        if self.network:
            self.network_signal.connect(self.on_network_qt)
            interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                         'new_transaction', 'status',
                         'banner', 'verified', 'fee', 'fee_histogram']
            # To avoid leaking references to "self" that prevent the
            # window from being GC-ed when closed, callbacks should be
            # methods of this class only, and specifically not be
            # partials, lambdas or methods of subobjects. Hence...
            self.network.register_callback(self.on_network, interests)
            # set initial message
            self.console.showMessage(self.network.banner)
            self.network.register_callback(self.on_quotes, ['on_quotes'])
            self.network.register_callback(self.on_history, ['on_history'])
            self.new_fx_quotes_signal.connect(self.on_fx_quotes)
            self.new_fx_history_signal.connect(self.on_fx_history)
            # update fee slider in case we missed the callback
            self.fee_slider.update()
        self.load_wallet(wallet)
        gui_object.timer.timeout.connect(self.timer_actions)
        self.fetch_alias()
        # If the option hasn't been set yet
        if config.get('check_updates') is None:
            choice = QMessageBox.question(self,
                                          "Electrum - " + _("Enable update check"),
                                          _("For security reasons we advise that you always use the latest version of Electrum.") + " " +
                                          _("Would you like to be notified when there is a newer version of Electrum available?"),
                                          QMessageBox.Yes,
                                          QMessageBox.No)
            config.set_key('check_updates', choice == QMessageBox.Yes, save=True)
        if config.get('check_updates', False):
            # The references to both the thread and the window need to be stored somewhere
            # to prevent GC from getting in our way.
            def on_version_received(v):
                if UpdateCheck.is_newer(v):
                    self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
                    self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
                    self.update_check_button.show()
            self._update_check_thread = UpdateCheckThread(self)
            self._update_check_thread.checked.connect(on_version_received)
            self._update_check_thread.start()
    def on_history(self, b):
        """Network-thread callback for 'on_history': invalidate the fiat
        price cache and hand off to the GUI thread via the Qt signal."""
        self.wallet.clear_coin_price_cache()
        self.new_fx_history_signal.emit()
    def setup_exception_hook(self):
        # Install the crash-reporter excepthook for this window.
        Exception_Hook(self)
    def on_fx_history(self):
        """GUI-thread handler: refresh views after new fiat exchange-rate
        history arrived."""
        self.history_model.refresh('fx_history')
        self.address_list.update()
    def on_quotes(self, b):
        # Network-thread callback: marshal fresh fiat quotes to the GUI thread.
        self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
    def push_top_level_window(self, window):
        '''Used for e.g. tx dialog box to ensure new dialogs are appropriately
        parented. This used to be done by explicitly providing the parent
        window, but that isn't something hardware wallet prompts know.'''
        # Paired with pop_top_level_window(); top_level_window() reads the top.
        self.tl_windows.append(window)
    def pop_top_level_window(self, window):
        # Counterpart of push_top_level_window; ValueError if window absent.
        self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
    def is_hidden(self):
        # Minimized counts as hidden for show_or_hide() purposes.
        return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
    def bring_to_top(self):
        # show() un-minimizes/un-hides; raise_() stacks above sibling windows.
        self.show()
        self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
traceback.print_exception(*exc_info)
except OSError:
pass # see #4418
self.show_error(str(e))
    def on_network(self, event, *args):
        """Network-thread callback dispatcher.

        Must not touch Qt widgets directly; GUI work is marshalled to the
        main thread via need_update / network_signal.
        """
        if event == 'wallet_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.need_update.set()
        elif event == 'network_updated':
            self.gui_object.network_updated_signal_obj.network_updated_signal \
                .emit(event, args)
            self.network_signal.emit('status', None)
        elif event == 'blockchain_updated':
            # to update number of confirmations in history
            self.need_update.set()
        elif event == 'new_transaction':
            wallet, tx = args
            if wallet == self.wallet:
                # Queued for batched notification in the GUI timer.
                self.tx_notification_queue.put(tx)
        elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
            # Handle in GUI thread
            self.network_signal.emit(event, args)
        else:
            self.print_error("unexpected network message:", event, args)
    def on_network_qt(self, event, args=None):
        """GUI-thread counterpart of on_network: safe to touch widgets here."""
        # Handle a network message in the GUI thread
        if event == 'status':
            self.update_status()
        elif event == 'banner':
            self.console.showMessage(args[0])
        elif event == 'verified':
            wallet, tx_hash, tx_mined_status = args
            if wallet == self.wallet:
                self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
        elif event == 'fee':
            if self.config.is_dynfee():
                self.fee_slider.update()
                self.do_update_fee()
        elif event == 'fee_histogram':
            if self.config.is_dynfee():
                self.fee_slider.update()
                self.do_update_fee()
            # The histogram also affects mempool-based confirmation estimates.
            self.history_model.on_fee_histogram()
        else:
            self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
    def close_wallet(self):
        """Log the close and give plugins a chance to clean up."""
        if self.wallet:
            self.print_error('close_wallet', self.wallet.storage.path)
        run_hook('close_wallet', self.wallet)
    @profiler
    def load_wallet(self, wallet):
        """Attach `wallet` to this window and bring every view up to date."""
        wallet.thread = TaskThread(self, self.on_error)
        self.update_recently_visited(wallet.storage.path)
        self.need_update.set()
        # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
        # update menus
        self.seed_menu.setEnabled(self.wallet.has_seed())
        self.update_lock_icon()
        self.update_buttons_on_seed()
        self.update_console()
        self.clear_receive_tab()
        self.request_list.update()
        self.tabs.show()
        self.init_geometry()
        # Optionally start minimized to tray.
        if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
            self.hide()
        else:
            self.show()
        self.watching_only_changed()
        run_hook('load_wallet', wallet, self)
        try:
            wallet.try_detecting_internal_addresses_corruption()
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
    def backup_wallet(self):
        """Copy the current wallet file to a user-chosen location."""
        path = self.wallet.storage.path
        wallet_folder = os.path.dirname(path)
        filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
        if not filename:
            return
        new_path = os.path.join(wallet_folder, filename)
        if new_path != path:
            try:
                shutil.copy2(path, new_path)
                self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
            except BaseException as reason:
                # NOTE(review): deliberately broad -- any failure during the
                # copy is surfaced in a dialog rather than crashing the GUI.
                self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
    def update_recently_visited(self, filename):
        """Promote `filename` in the recently-open list (max 5 entries,
        existing files only) and rebuild the corresponding menu."""
        recent = self.config.get('recently_open', [])
        try:
            sorted(recent)
        except:
            # NOTE(review): bare except kept -- guards against a corrupted
            # config value whose entries cannot be compared.
            recent = []
        if filename in recent:
            recent.remove(filename)
        recent.insert(0, filename)
        recent = [path for path in recent if os.path.exists(path)]
        recent = recent[:5]
        self.config.set_key('recently_open', recent)
        self.recently_visited_menu.clear()
        for i, k in enumerate(sorted(recent)):
            b = os.path.basename(k)
            def loader(k):
                # Bind k now to avoid the late-binding closure pitfall.
                return lambda: self.gui_object.new_window(k)
            self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
        self.recently_visited_menu.setEnabled(len(recent))
    def get_wallet_folder(self):
        # Directory containing the currently configured wallet file.
        return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
    """Start the new/restore-wallet wizard with a fresh default filename."""
    try:
        wallet_folder = self.get_wallet_folder()
    except FileNotFoundError as e:
        self.show_error(str(e))
        return
    default_name = get_new_wallet_name(wallet_folder)
    self.gui_object.start_new_window(os.path.join(wallet_folder, default_name), None)
def init_menubar(self):
    """Build the application menu bar (File / Wallet / View / Tools / Help)."""
    menubar = QMenuBar()
    file_menu = menubar.addMenu(_("&File"))
    self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
    file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
    file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
    file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
    file_menu.addAction(_("Delete"), self.remove_wallet)
    file_menu.addSeparator()
    file_menu.addAction(_("&Quit"), self.close)
    wallet_menu = menubar.addMenu(_("&Wallet"))
    wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
    wallet_menu.addSeparator()
    # kept on self so other code can enable/disable them per wallet type
    self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
    self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
    self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
    self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
    self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
    self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
    self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
    wallet_menu.addSeparator()
    addresses_menu = wallet_menu.addMenu(_("&Addresses"))
    addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
    labels_menu = wallet_menu.addMenu(_("&Labels"))
    labels_menu.addAction(_("&Import"), self.do_import_labels)
    labels_menu.addAction(_("&Export"), self.do_export_labels)
    history_menu = wallet_menu.addMenu(_("&History"))
    history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
    history_menu.addAction(_("&Summary"), self.history_list.show_summary)
    history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
    history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
    contacts_menu = wallet_menu.addMenu(_("Contacts"))
    contacts_menu.addAction(_("&New"), self.new_contact_dialog)
    contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
    contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
    invoices_menu = wallet_menu.addMenu(_("Invoices"))
    invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
    invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
    wallet_menu.addSeparator()
    wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
    def add_toggle_action(view_menu, tab):
        # menu label reflects the tab's current visibility
        is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
        item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
        tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
    view_menu = menubar.addMenu(_("&View"))
    add_toggle_action(view_menu, self.addresses_tab)
    add_toggle_action(view_menu, self.utxo_tab)
    add_toggle_action(view_menu, self.contacts_tab)
    add_toggle_action(view_menu, self.console_tab)
    tools_menu = menubar.addMenu(_("&Tools"))
    # Settings / Preferences are all reserved keywords in macOS using this as work around
    tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
    tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
    tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
    tools_menu.addSeparator()
    tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
    tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
    tools_menu.addSeparator()
    paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
    raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
    raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
    raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
    raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
    raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
    self.raw_transaction_menu = raw_transaction_menu
    # let plugins extend the Tools menu
    run_hook('init_menubar_tools', self, tools_menu)
    help_menu = menubar.addMenu(_("&Help"))
    help_menu.addAction(_("&About"), self.show_about)
    help_menu.addAction(_("&Check for updates"), self.show_update_check)
    help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://electrum.org"))
    help_menu.addSeparator()
    help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
    help_menu.addAction(_("&Report Bug"), self.show_report_bug)
    help_menu.addSeparator()
    help_menu.addAction(_("&Donate to server"), self.donate_to_server)
    self.setMenuBar(menubar)
def donate_to_server(self):
    """Open the Send tab pre-filled with the current server's donation address."""
    addr = self.network.get_donation_address()
    if not addr:
        self.show_error(_('No donation address for this server'))
        return
    host = self.network.get_parameters().host
    self.pay_to_URI('bitcoin:%s?message=donation for %s' % (addr, host))
def show_about(self):
    """Show the About dialog with the version number and project blurb."""
    QMessageBox.about(self, "Electrum",
        (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
         _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
         _("You do not need to perform regular backups, because your wallet can be "
           "recovered from a secret phrase that you can memorize or write on paper.") + " " +
         _("Startup times are instant because it operates in conjunction with high-performance "
           "servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
         _("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
    """Open the update-check window; keep a reference so it is not garbage-collected."""
    self._update_check = UpdateCheck(self, version)
def show_report_bug(self):
    """Display rich-text instructions for filing bug reports on GitHub."""
    parts = [
        _("Please report any bugs as issues on github:<br/>"),
        "<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
        _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
        _("Try to explain not only what the bug is, but how it occurs.")
    ]
    self.show_message(' '.join(parts), title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
    """Show desktop notifications for newly received transactions.

    Skipped while the wallet is still syncing, and rate-limited to one
    notification burst every 20 seconds. Three or more queued transactions
    are combined into a single summary notification.
    """
    if self.tx_notification_queue.qsize() == 0:
        return
    if not self.wallet.up_to_date:
        return  # no notifications while syncing
    now = time.time()
    rate_limit = 20  # seconds
    if self.tx_notification_last_time + rate_limit > now:
        return
    self.tx_notification_last_time = now
    self.print_error("Notifying GUI about new transactions")
    txns = []
    # drain the queue without blocking
    while True:
        try:
            txns.append(self.tx_notification_queue.get_nowait())
        except queue.Empty:
            break
    # Combine the transactions if there are at least three
    if len(txns) >= 3:
        total_amount = 0
        for tx in txns:
            is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
            if is_relevant:
                total_amount += v
        self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
                    .format(len(txns), self.format_amount_and_units(total_amount)))
    else:
        for tx in txns:
            is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
            if is_relevant:
                self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
    """Show *message* as a system-tray notification, with a fallback for older Qt."""
    if self.tray:
        try:
            # this requires Qt 5.9
            self.tray.showMessage("Electrum", message, QIcon(":icons/electrum_dark_icon"), 20000)
        except TypeError:
            # older Qt overload: pass a message icon enum instead of a QIcon
            self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# Custom wrappers for getOpenFileName and getSaveFileName that remember the
# directory last selected by the user.
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
    """Periodic UI maintenance driven by the GUI-thread timer."""
    # Note this runs in the GUI thread
    if self.need_update.is_set():
        self.need_update.clear()
        self.update_wallet()
    # resolve aliases
    # FIXME this is a blocking network call that has a timeout of 5 sec
    self.payto_e.resolve()
    # update fee
    if self.require_fee_update:
        self.do_update_fee()
        self.require_fee_update = False
    self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
    """Format a satoshi amount using the window's configured precision/unit."""
    return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
    """Return *amount* with its BTC unit, plus a fiat equivalent when available."""
    text = self.format_amount(amount) + ' ' + self.base_unit()
    fiat = self.fx.format_amount_and_units(amount) if self.fx else None
    if text and fiat:
        text = '{} ({})'.format(text, fiat)
    return text
def format_fee_rate(self, fee_rate):
    """Render a fee rate given in sat/kbyte as a 'sat/byte' string."""
    per_byte = fee_rate / 1000
    return format_fee_satoshis(per_byte, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
    """Return the decimal point (unit precision) currently in use."""
    return self.decimal_point
def base_unit(self):
    """Return the display unit name for the configured decimal point."""
    return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
    """Keep a BTC amount edit and a fiat amount edit mutually in sync.

    Editing one field recomputes the other using the current exchange rate.
    The ``follows`` flag marks programmatic updates so the change handler
    returns early instead of feeding back into another update.
    """
    def edit_changed(edit):
        if edit.follows:
            # programmatic update from the other field; do not echo back
            return
        edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
        fiat_e.is_last_edited = (edit == fiat_e)
        amount = edit.get_amount()
        rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
        if rate.is_nan() or amount is None:
            # no usable rate or no amount: blank the dependent fields
            if edit is fiat_e:
                btc_e.setText("")
                if fee_e:
                    fee_e.setText("")
            else:
                fiat_e.setText("")
        else:
            if edit is fiat_e:
                btc_e.follows = True
                btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                btc_e.follows = False
                if fee_e:
                    window.update_fee()
            else:
                fiat_e.follows = True
                fiat_e.setText(self.fx.ccy_amount_str(
                    amount * Decimal(rate) / COIN, False))
                fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                fiat_e.follows = False
    btc_e.follows = False
    fiat_e.follows = False
    fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
    btc_e.textChanged.connect(partial(edit_changed, btc_e))
    fiat_e.is_last_edited = False
def update_status(self):
    """Refresh the status-bar text/icon and tray tooltip from network and wallet state."""
    if not self.wallet:
        return
    if self.network is None:
        text = _("Offline")
        icon = QIcon(":icons/status_disconnected.png")
    elif self.network.is_connected():
        server_height = self.network.get_server_height()
        server_lag = self.network.get_local_height() - server_height
        fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
        # Server height can be 0 after switching to a new server
        # until we get a headers subscription request response.
        # Display the synchronizing message in that case.
        if not self.wallet.up_to_date or server_height == 0:
            text = _("Synchronizing...")
            icon = QIcon(":icons/status_waiting.png")
        elif server_lag > 1:
            text = _("Server is lagging ({} blocks)").format(server_lag)
            icon = QIcon(":icons/status_lagging%s.png"%fork_str)
        else:
            c, u, x = self.wallet.get_balance()
            text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
            if u:
                text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
            if x:
                text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
            # append fiat balance and price
            if self.fx.is_enabled():
                text += self.fx.get_fiat_status_text(c + u + x,
                    self.base_unit(), self.get_decimal_point()) or ''
            if not self.network.proxy:
                icon = QIcon(":icons/status_connected%s.png"%fork_str)
            else:
                icon = QIcon(":icons/status_connected_proxy%s.png"%fork_str)
    else:
        if self.network.proxy:
            text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
        else:
            text = _("Not connected")
        icon = QIcon(":icons/status_disconnected.png")
    self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
    self.balance_label.setText(text)
    self.status_button.setIcon( icon )
def update_wallet(self):
    """Refresh the status bar, and the tabs when synced (or offline)."""
    self.update_status()
    synced_or_offline = (self.wallet.up_to_date
                         or not self.network
                         or not self.network.is_connected())
    if synced_or_offline:
        self.update_tabs()
def update_tabs(self, wallet=None):
    """Refresh all wallet views; no-op if *wallet* is not the active wallet."""
    if wallet is not None and wallet != self.wallet:
        return
    self.history_model.refresh('update_tabs')
    for view in (self.request_list, self.address_list, self.utxo_list,
                 self.contact_list, self.invoice_list):
        view.update()
    self.update_completions()
def create_history_tab(self):
    """Build the History tab: model, list view and its toolbar."""
    self.history_model = HistoryModel(self)
    hist_list = HistoryList(self, self.history_model)
    self.history_list = hist_list
    self.history_model.set_view(hist_list)
    hist_list.searchable_list = hist_list
    toolbar = hist_list.create_toolbar(self.config)
    hist_list.show_toolbar(self.config.get('show_toolbar_history', False))
    return self.create_list_tab(hist_list, toolbar)
def show_address(self, addr):
    """Open a modal dialog with details about *addr*."""
    from . import address_dialog
    dlg = address_dialog.AddressDialog(self, addr)
    dlg.exec_()
def show_transaction(self, tx, tx_desc=None):
    """Open the transaction dialog for *tx*.

    tx_desc is set only for txs created in the Send tab.
    """
    # delegates to the module-level show_transaction helper
    show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
    """Build the Receive tab: payment-request form, QR preview and request list."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.receive_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    self.receive_address_e = ButtonsLineEdit()
    self.receive_address_e.addCopyButton(self.app)
    self.receive_address_e.setReadOnly(True)
    msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
    self.receive_address_label = HelpLabel(_('Receiving address'), msg)
    self.receive_address_e.textChanged.connect(self.update_receive_qr)
    self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
    grid.addWidget(self.receive_address_label, 0, 0)
    grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
    self.receive_message_e = QLineEdit()
    grid.addWidget(QLabel(_('Description')), 1, 0)
    grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
    self.receive_message_e.textChanged.connect(self.update_receive_qr)
    self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
    grid.addWidget(QLabel(_('Requested amount')), 2, 0)
    grid.addWidget(self.receive_amount_e, 2, 1)
    self.receive_amount_e.textChanged.connect(self.update_receive_qr)
    self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_receive_e.setVisible(False)
    grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
    # keep BTC and fiat amount fields in sync (no fee field on this tab)
    self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
    self.expires_combo = QComboBox()
    self.expires_combo.addItems([i[0] for i in expiration_values])
    self.expires_combo.setCurrentIndex(3)
    self.expires_combo.setFixedWidth(self.receive_amount_e.width())
    msg = ' '.join([
        _('Expiration date of your request.'),
        _('This information is seen by the recipient if you send them a signed payment request.'),
        _('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
        _('The bitcoin address never expires and will always be part of this electrum wallet.'),
    ])
    grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
    grid.addWidget(self.expires_combo, 3, 1)
    # read-only label shown in place of the combo for saved requests
    self.expires_label = QLineEdit('')
    self.expires_label.setReadOnly(1)
    self.expires_label.setFocusPolicy(Qt.NoFocus)
    self.expires_label.hide()
    grid.addWidget(self.expires_label, 3, 1)
    self.save_request_button = QPushButton(_('Save'))
    self.save_request_button.clicked.connect(self.save_payment_request)
    self.new_request_button = QPushButton(_('New'))
    self.new_request_button.clicked.connect(self.new_payment_request)
    self.receive_qr = QRCodeWidget(fixedSize=200)
    self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
    self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
    self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
    self.receive_buttons = buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.save_request_button)
    buttons.addWidget(self.new_request_button)
    grid.addLayout(buttons, 4, 1, 1, 2)
    self.receive_requests_label = QLabel(_('Requests'))
    from .request_list import RequestList
    self.request_list = RequestList(self)
    # layout
    vbox_g = QVBoxLayout()
    vbox_g.addLayout(grid)
    vbox_g.addStretch()
    hbox = QHBoxLayout()
    hbox.addLayout(vbox_g)
    hbox.addWidget(self.receive_qr)
    w = QWidget()
    w.searchable_list = self.request_list
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.receive_requests_label)
    vbox.addWidget(self.request_list)
    vbox.setStretchFactor(self.request_list, 1000)
    return w
def delete_payment_request(self, addr):
    """Remove the payment request at *addr* and reset the receive tab."""
    self.wallet.remove_payment_request(addr, self.config)
    self.request_list.update()
    self.clear_receive_tab()
def get_request_URI(self, addr):
    """Build the BIP21 URI (with optional time/exp/signature fields) for *addr*."""
    req = self.wallet.receive_requests[addr]
    message = self.wallet.labels.get(addr, '')
    URI = util.create_URI(addr, req['amount'], message)
    timestamp = req.get('time')
    if timestamp:
        URI += "&time=%d" % timestamp
    expiry = req.get('exp')
    if expiry:
        URI += "&exp=%d" % expiry
    if req.get('name') and req.get('sig'):
        # signature is stored hex-encoded; BIP70-style URIs carry it base58
        sig = bitcoin.base_encode(bfh(req.get('sig')), base=58)
        URI += "&name=" + req['name'] + "&sig=" + sig
    return str(URI)
def sign_payment_request(self, addr):
    """Sign the payment request at *addr* with the configured alias key.

    Does nothing unless an alias is configured, its info has been resolved,
    and the alias address belongs to this wallet. May prompt for the wallet
    password when the keystore is encrypted.
    """
    alias = self.config.get('alias')
    alias_privkey = None  # NOTE(review): assigned but never used in this method
    if alias and self.alias_info:
        alias_addr, alias_name, validated = self.alias_info
        if alias_addr:
            if self.wallet.is_mine(alias_addr):
                msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                password = None
                if self.wallet.has_keystore_encryption():
                    password = self.password_dialog(msg)
                    if not password:
                        # user cancelled the password prompt
                        return
                try:
                    self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                except Exception as e:
                    self.show_error(str(e))
                    return
            else:
                # alias address is not ours; cannot sign
                return
def save_payment_request(self):
    """Create and store a payment request from the receive-tab fields.

    Returns False (after showing an error) when neither a message nor an
    amount was entered. On success the request is stored, optionally signed
    with the configured alias, and the views are refreshed.
    """
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    if not message and not amount:
        self.show_error(_('No message or amount'))
        return False
    i = self.expires_combo.currentIndex()
    # index directly into the (label, seconds) pairs instead of building a
    # throwaway list via map()
    expiration = expiration_values[i][1]
    req = self.wallet.make_payment_request(addr, amount, message, expiration)
    try:
        self.wallet.add_payment_request(req, self.config)
    except Exception as e:
        traceback.print_exc(file=sys.stderr)
        self.show_error(_('Error adding payment request') + ':\n' + str(e))
    else:
        self.sign_payment_request(addr)
        self.save_request_button.setEnabled(False)
    finally:
        self.request_list.update()
        self.address_list.update()
def view_and_paste(self, title, msg, data):
    """Show *data* in a modal dialog with a QR/text view and a copy button."""
    dlg = WindowModalDialog(self, title)
    caption = QLabel(msg)
    caption.setWordWrap(True)
    text_view = ShowQRTextEdit(text=data)
    layout = QVBoxLayout()
    layout.addWidget(caption)
    layout.addWidget(text_view)
    layout.addLayout(Buttons(CopyCloseButton(text_view.text, self.app, dlg)))
    dlg.setLayout(layout)
    dlg.exec_()
def export_payment_request(self, addr):
    """Serialize the payment request at *addr* as BIP70 and save it to a file."""
    r = self.wallet.receive_requests.get(addr)
    pr = paymentrequest.serialize_request(r).SerializeToString()
    fileName = self.getSaveFileName(_("Select where to save your payment request"), r['id'] + '.bip70', "*.bip70")
    if not fileName:
        return
    with open(fileName, "wb+") as f:
        f.write(util.to_bytes(pr))
    self.show_message(_("Request saved successfully"))
    self.saved = True
def new_payment_request(self):
    """Point the receive tab at a fresh unused address, creating one if necessary."""
    addr = self.wallet.get_unused_address()
    if addr is None:
        if not self.wallet.is_deterministic():
            # imported/non-deterministic wallets cannot mint new addresses
            msg = [
                _('No more addresses in your wallet.'),
                _('You are using a non-deterministic wallet, which cannot create new addresses.'),
                _('If you want to create new addresses, use a deterministic wallet instead.')
            ]
            self.show_message(' '.join(msg))
            return
        # deterministic wallet but gap limit reached: ask before extending
        if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
            return
        addr = self.wallet.create_new_address(False)
    self.set_receive_address(addr)
    self.expires_label.hide()
    self.expires_combo.show()
    self.new_request_button.setEnabled(False)
    self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
    """Show *addr* in the receive tab and clear the message/amount fields."""
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
    """Reset the receive tab to the wallet's current receiving address."""
    try:
        addr = self.wallet.get_receiving_address() or ''
    except InternalAddressCorruption as e:
        # wallet inconsistency: warn the user but still clear the form
        self.show_error(str(e))
        addr = ''
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
    self.expires_label.hide()
    self.expires_combo.show()
def toggle_qr_window(self):
    """Show/hide the detached QR-code window, preserving its on-screen geometry."""
    from . import qrwindow
    if not self.qr_window:
        # first use: create and show the window, remember where it opened
        self.qr_window = qrwindow.QR_Window(self)
        self.qr_window.setVisible(True)
        self.qr_window_geometry = self.qr_window.geometry()
    elif self.qr_window.isVisible():
        # hide: remember where the user left it
        self.qr_window_geometry = self.qr_window.geometry()
        self.qr_window.setVisible(False)
    else:
        # re-show at the remembered position
        self.qr_window.setVisible(True)
        self.qr_window.setGeometry(self.qr_window_geometry)
    self.update_receive_qr()
def show_send_tab(self):
    """Switch the main tab widget to the Send tab."""
    index = self.tabs.indexOf(self.send_tab)
    self.tabs.setCurrentIndex(index)
def show_receive_tab(self):
    """Switch the main tab widget to the Receive tab."""
    index = self.tabs.indexOf(self.receive_tab)
    self.tabs.setCurrentIndex(index)
def receive_at(self, addr):
    """Open the Receive tab showing *addr*; ignores invalid addresses."""
    if not bitcoin.is_address(addr):
        return
    self.show_receive_tab()
    self.receive_address_e.setText(addr)
    self.new_request_button.setEnabled(True)
def update_receive_qr(self):
    """Rebuild the receive QR code from the current address/amount/message."""
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    # a request is only saveable once it has an amount or a message
    can_save = (amount is not None) or (message != "")
    self.save_request_button.setEnabled(can_save)
    uri = util.create_URI(addr, amount, message)
    self.receive_qr.setData(uri)
    # keep the detached QR window in sync when it is open
    if self.qr_window and self.qr_window.isVisible():
        self.qr_window.qrw.setData(uri)
def set_feerounding_text(self, num_satoshis_added):
    """Store the explanation text shown by the fee-rounding info icon."""
    template = _('Additional {} satoshis are going to be added.')
    self.feerounding_text = template.format(num_satoshis_added)
def create_send_tab(self):
    """Build the Send tab: recipient/amount/fee controls plus the invoice list."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.send_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    from .paytoedit import PayToEdit
    self.amount_e = BTCAmountEdit(self.get_decimal_point)
    self.payto_e = PayToEdit(self)
    msg = _('Recipient of the funds.') + '\n\n'\
          + _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
    payto_label = HelpLabel(_('Pay to'), msg)
    grid.addWidget(payto_label, 1, 0)
    grid.addWidget(self.payto_e, 1, 1, 1, -1)
    completer = QCompleter()
    completer.setCaseSensitivity(False)
    self.payto_e.set_completer(completer)
    completer.setModel(self.completions)
    msg = _('Description of the transaction (not mandatory).') + '\n\n'\
          + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
    description_label = HelpLabel(_('Description'), msg)
    grid.addWidget(description_label, 2, 0)
    self.message_e = MyLineEdit()
    grid.addWidget(self.message_e, 2, 1, 1, -1)
    self.from_label = QLabel(_('From'))
    grid.addWidget(self.from_label, 3, 0)
    self.from_list = FromList(self, self.from_list_menu)
    grid.addWidget(self.from_list, 3, 1, 1, -1)
    self.set_pay_from([])
    msg = _('Amount to be sent.') + '\n\n' \
          + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
          + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
          + _('Keyboard shortcut: type "!" to send all your coins.')
    amount_label = HelpLabel(_('Amount'), msg)
    grid.addWidget(amount_label, 4, 0)
    grid.addWidget(self.amount_e, 4, 1)
    self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_send_e.setVisible(False)
    grid.addWidget(self.fiat_send_e, 4, 2)
    self.amount_e.frozen.connect(
        lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
    self.max_button = EnterButton(_("Max"), self.spend_max)
    self.max_button.setFixedWidth(140)
    grid.addWidget(self.max_button, 4, 3)
    hbox = QHBoxLayout()
    hbox.addStretch(1)
    grid.addLayout(hbox, 4, 4)
    msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
          + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
          + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
    self.fee_e_label = HelpLabel(_('Fee'), msg)
    def fee_cb(dyn, pos, fee_rate):
        # persist the slider position / manual rate, then refresh the fee fields
        if dyn:
            if self.config.use_mempool_fees():
                self.config.set_key('depth_level', pos, False)
            else:
                self.config.set_key('fee_level', pos, False)
        else:
            self.config.set_key('fee_per_kb', fee_rate, False)
        if fee_rate:
            fee_rate = Decimal(fee_rate)
            self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
        else:
            self.feerate_e.setAmount(None)
        self.fee_e.setModified(False)
        self.fee_slider.activate()
        self.spend_max() if self.is_max else self.update_fee()
    self.fee_slider = FeeSlider(self, self.config, fee_cb)
    self.fee_slider.setFixedWidth(140)
    def on_fee_or_feerate(edit_changed, editing_finished):
        edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
        if editing_finished:
            if edit_changed.get_amount() is None:
                # This is so that when the user blanks the fee and moves on,
                # we go back to auto-calculate mode and put a fee back.
                edit_changed.setModified(False)
        else:
            # edit_changed was edited just now, so make sure we will
            # freeze the correct fee setting (this)
            edit_other.setModified(False)
        self.fee_slider.deactivate()
        self.update_fee()
    class TxSizeLabel(QLabel):
        # read-only label showing the estimated tx size in the fee row
        def setAmount(self, byte_size):
            self.setText(('x %s bytes =' % byte_size) if byte_size else '')
    self.size_e = TxSizeLabel()
    self.size_e.setAlignment(Qt.AlignCenter)
    self.size_e.setAmount(0)
    self.size_e.setFixedWidth(140)
    self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
    self.feerate_e = FeerateEdit(lambda: 0)
    self.feerate_e.setAmount(self.config.fee_per_byte())
    self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
    self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
    self.fee_e = BTCAmountEdit(self.get_decimal_point)
    self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
    self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
    def feerounding_onclick():
        text = (self.feerounding_text + '\n\n' +
                _('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
                _('At most 100 satoshis might be lost due to this rounding.') + ' ' +
                _("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
                _('Also, dust is not kept as change, but added to the fee.') + '\n' +
                _('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
        QMessageBox.information(self, 'Fee rounding', text)
    self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
    self.feerounding_icon.setFixedWidth(20)
    self.feerounding_icon.setFlat(True)
    self.feerounding_icon.clicked.connect(feerounding_onclick)
    self.feerounding_icon.setVisible(False)
    self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
    vbox_feelabel = QVBoxLayout()
    vbox_feelabel.addWidget(self.fee_e_label)
    vbox_feelabel.addStretch(1)
    grid.addLayout(vbox_feelabel, 5, 0)
    self.fee_adv_controls = QWidget()
    hbox = QHBoxLayout(self.fee_adv_controls)
    hbox.setContentsMargins(0, 0, 0, 0)
    hbox.addWidget(self.feerate_e)
    hbox.addWidget(self.size_e)
    hbox.addWidget(self.fee_e)
    hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
    hbox.addStretch(1)
    vbox_feecontrol = QVBoxLayout()
    vbox_feecontrol.addWidget(self.fee_adv_controls)
    vbox_feecontrol.addWidget(self.fee_slider)
    grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
    if not self.config.get('show_fee', False):
        self.fee_adv_controls.setVisible(False)
    self.preview_button = EnterButton(_("Preview"), self.do_preview)
    self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
    self.send_button = EnterButton(_("Send"), self.do_send)
    self.clear_button = EnterButton(_("Clear"), self.do_clear)
    buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.clear_button)
    buttons.addWidget(self.preview_button)
    buttons.addWidget(self.send_button)
    grid.addLayout(buttons, 6, 1, 1, 3)
    self.amount_e.shortcut.connect(self.spend_max)
    self.payto_e.textChanged.connect(self.update_fee)
    self.amount_e.textEdited.connect(self.update_fee)
    def reset_max(text):
        # any manual edit cancels "send max" mode
        self.is_max = False
        enable = not bool(text) and not self.amount_e.isReadOnly()
        self.max_button.setEnabled(enable)
    self.amount_e.textEdited.connect(reset_max)
    self.fiat_send_e.textEdited.connect(reset_max)
    def entry_changed():
        # recolor amount/fee fields to reflect funds and auto-fill state
        text = ""
        amt_color = ColorScheme.DEFAULT
        fee_color = ColorScheme.DEFAULT
        feerate_color = ColorScheme.DEFAULT
        if self.not_enough_funds:
            amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
            feerate_color = ColorScheme.RED
            text = _( "Not enough funds" )
            c, u, x = self.wallet.get_frozen_balance()
            if c+u+x:
                text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
        # blue color denotes auto-filled values
        elif self.fee_e.isModified():
            feerate_color = ColorScheme.BLUE
        elif self.feerate_e.isModified():
            fee_color = ColorScheme.BLUE
        elif self.amount_e.isModified():
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        else:
            amt_color = ColorScheme.BLUE
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        self.statusBar().showMessage(text)
        self.amount_e.setStyleSheet(amt_color.as_stylesheet())
        self.fee_e.setStyleSheet(fee_color.as_stylesheet())
        self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
    self.amount_e.textChanged.connect(entry_changed)
    self.fee_e.textChanged.connect(entry_changed)
    self.feerate_e.textChanged.connect(entry_changed)
    self.invoices_label = QLabel(_('Invoices'))
    from .invoice_list import InvoiceList
    self.invoice_list = InvoiceList(self)
    vbox0 = QVBoxLayout()
    vbox0.addLayout(grid)
    hbox = QHBoxLayout()
    hbox.addLayout(vbox0)
    w = QWidget()
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.invoices_label)
    vbox.addWidget(self.invoice_list)
    vbox.setStretchFactor(self.invoice_list, 1000)
    w.searchable_list = self.invoice_list
    run_hook('create_send_tab', grid)
    return w
def spend_max(self):
    """Switch to "send max" mode and recompute the fee, unless a plugin aborts."""
    if run_hook('abort_send', self):
        return
    self.is_max = True
    self.do_update_fee()
def update_fee(self):
    """Request a fee recomputation; the timer picks the flag up on the GUI thread."""
    self.require_fee_update = True
def get_payto_or_dummy(self):
    """Return the entered recipient, or a dummy wallet address for fee estimation."""
    recipient = self.payto_e.get_recipient()
    if recipient:
        return recipient
    return (TYPE_ADDRESS, self.wallet.dummy_address())
    def do_update_fee(self):
        '''Recalculate the fee. If the fee was manually input, retain it, but
        still build the TX to see if there are enough funds.

        Builds a throw-away unsigned transaction from the current send-tab
        state and updates the fee, feerate, size and amount widgets plus the
        fee-rounding indicator. Sets ``self.not_enough_funds`` as a side
        effect.
        '''
        freeze_fee = self.is_send_fee_frozen()
        freeze_feerate = self.is_send_feerate_frozen()
        # '!' is the sentinel the wallet understands as "spend everything".
        amount = '!' if self.is_max else self.amount_e.get_amount()
        if amount is None:
            # No amount entered: clear derived fields and bail out.
            if not freeze_fee:
                self.fee_e.setAmount(None)
            self.not_enough_funds = False
            self.statusBar().showMessage('')
        else:
            fee_estimator = self.get_send_fee_estimator()
            outputs = self.payto_e.get_outputs(self.is_max)
            if not outputs:
                # No recipient yet: estimate against a dummy output.
                _type, addr = self.get_payto_or_dummy()
                outputs = [TxOutput(_type, addr, amount)]
            is_sweep = bool(self.tx_external_keypairs)
            make_tx = lambda fee_est: \
                self.wallet.make_unsigned_transaction(
                    self.get_coins(), outputs, self.config,
                    fixed_fee=fee_est, is_sweep=is_sweep)
            try:
                tx = make_tx(fee_estimator)
                self.not_enough_funds = False
            except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
                if not freeze_fee:
                    self.fee_e.setAmount(None)
                if not freeze_feerate:
                    self.feerate_e.setAmount(None)
                self.feerounding_icon.setVisible(False)

                if isinstance(e, NotEnoughFunds):
                    self.not_enough_funds = True
                elif isinstance(e, NoDynamicFeeEstimates):
                    # No fee estimate available: still show the tx size by
                    # building with a zero fee.
                    try:
                        tx = make_tx(0)
                        size = tx.estimated_size()
                        self.size_e.setAmount(size)
                    except BaseException:
                        pass
                return
            except BaseException:
                traceback.print_exc(file=sys.stderr)
                return

            size = tx.estimated_size()
            self.size_e.setAmount(size)

            fee = tx.get_fee()
            fee = None if self.not_enough_funds else fee

            # Displayed fee/fee_rate values are set according to user input.
            # Due to rounding or dropping dust in CoinChooser,
            # actual fees often differ somewhat.
            if freeze_feerate or self.fee_slider.is_active():
                displayed_feerate = self.feerate_e.get_amount()
                if displayed_feerate is not None:
                    displayed_feerate = quantize_feerate(displayed_feerate)
                else:
                    # fallback to actual fee
                    displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
                    self.feerate_e.setAmount(displayed_feerate)
                displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
                self.fee_e.setAmount(displayed_fee)
            else:
                if freeze_fee:
                    displayed_fee = self.fee_e.get_amount()
                else:
                    # fallback to actual fee if nothing is frozen
                    displayed_fee = fee
                    self.fee_e.setAmount(displayed_fee)
                displayed_fee = displayed_fee if displayed_fee else 0
                displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
                self.feerate_e.setAmount(displayed_feerate)

            # show/hide fee rounding icon
            feerounding = (fee - displayed_fee) if fee else 0
            self.set_feerounding_text(int(feerounding))
            self.feerounding_icon.setToolTip(self.feerounding_text)
            self.feerounding_icon.setVisible(abs(feerounding) >= 1)

            if self.is_max:
                amount = tx.output_value()
                # x_fee is an optional extra fee charged by a plugin (e.g.
                # trustedcoin); subtract it from the displayed max amount.
                __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
                amount_after_all_fees = amount - x_fee_amount
                self.amount_e.setAmount(amount_after_all_fees)
    def from_list_delete(self, item):
        """Remove one coin row from the explicit 'pay from' selection and
        refresh the list and the fee."""
        i = self.from_list.indexOfTopLevelItem(item)
        self.pay_from.pop(i)
        self.redraw_from_list()
        self.update_fee()
    def from_list_menu(self, position):
        """Context menu for the 'pay from' coin list: a single Remove action."""
        item = self.from_list.itemAt(position)
        menu = QMenu()
        menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
        menu.exec_(self.from_list.viewport().mapToGlobal(position))
    def set_pay_from(self, coins):
        """Replace the explicit coin selection (stored as a copy) and redraw
        the 'pay from' list."""
        self.pay_from = list(coins)
        self.redraw_from_list()
    def redraw_from_list(self):
        """Rebuild the 'pay from' tree widget from ``self.pay_from``; the
        label and list are hidden when the selection is empty."""
        self.from_list.clear()
        self.from_label.setHidden(len(self.pay_from) == 0)
        self.from_list.setHidden(len(self.pay_from) == 0)

        def format(x):
            # Abbreviated outpoint (txid:index) followed by the address.
            h = x.get('prevout_hash')
            return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')

        for item in self.pay_from:
            self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
    def read_send_tab(self):
        """Collect and validate the send-tab inputs.

        Returns ``(outputs, fee_estimator, label, coins)`` on success, or
        None after showing an error/warning dialog when validation fails or
        the user declines the unverified-alias prompt.
        """
        if self.payment_request and self.payment_request.has_expired():
            self.show_error(_('Payment request has expired'))
            return
        label = self.message_e.text()

        if self.payment_request:
            # BIP70 payment request dictates the outputs.
            outputs = self.payment_request.get_outputs()
        else:
            errors = self.payto_e.get_errors()
            if errors:
                self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
                return
            outputs = self.payto_e.get_outputs(self.is_max)

            if self.payto_e.is_alias and self.payto_e.validated is False:
                alias = self.payto_e.toPlainText()
                msg = _('WARNING: the alias "{}" could not be validated via an additional '
                        'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
                msg += _('Do you wish to continue?')
                if not self.question(msg):
                    return

        if not outputs:
            self.show_error(_('No outputs'))
            return

        for o in outputs:
            if o.address is None:
                self.show_error(_('Bitcoin Address is None'))
                return
            if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
                self.show_error(_('Invalid Bitcoin Address'))
                return
            if o.value is None:
                self.show_error(_('Invalid Amount'))
                return

        fee_estimator = self.get_send_fee_estimator()
        coins = self.get_coins()
        return outputs, fee_estimator, label, coins
    def do_preview(self):
        """Build the transaction and show it in a dialog instead of sending."""
        self.do_send(preview = True)
    def do_send(self, preview = False):
        """Build, confirm, sign and broadcast the send-tab transaction.

        With ``preview=True`` the unsigned transaction is only shown in a
        dialog. Otherwise the user is asked to confirm (and enter the wallet
        password if the keystore is encrypted) before signing; broadcasting
        happens in the sign-done callback.
        """
        if run_hook('abort_send', self):
            return
        r = self.read_send_tab()
        if not r:
            return
        outputs, fee_estimator, tx_desc, coins = r
        try:
            is_sweep = bool(self.tx_external_keypairs)
            tx = self.wallet.make_unsigned_transaction(
                coins, outputs, self.config, fixed_fee=fee_estimator,
                is_sweep=is_sweep)
        except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
            self.show_message(str(e))
            return
        except InternalAddressCorruption as e:
            # Serious wallet-state problem: surface it and re-raise.
            self.show_error(str(e))
            raise
        except BaseException as e:
            traceback.print_exc(file=sys.stdout)
            self.show_message(str(e))
            return

        amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
        fee = tx.get_fee()

        use_rbf = self.config.get('use_rbf', True)
        if use_rbf:
            tx.set_rbf(True)

        # Reject fees below the server's relay fee; such a tx would not
        # propagate.
        if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
            self.show_error('\n'.join([
                _("This transaction requires a higher fee, or it will not be propagated by your current server"),
                _("Try to raise your transaction fee, or use a server with a lower relay fee.")
            ]))
            return

        if preview:
            self.show_transaction(tx, tx_desc)
            return

        if not self.network:
            self.show_error(_("You can't broadcast a transaction without a live network connection."))
            return

        # confirmation dialog
        msg = [
            _("Amount to be sent") + ": " + self.format_amount_and_units(amount),
            _("Mining fee") + ": " + self.format_amount_and_units(fee),
        ]

        x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
        if x_fee:
            x_fee_address, x_fee_amount = x_fee
            msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )

        confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
        if fee > confirm_rate * tx.estimated_size() / 1000:
            msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))

        if self.wallet.has_keystore_encryption():
            msg.append("")
            msg.append(_("Enter your password to proceed"))
            password = self.password_dialog('\n'.join(msg))
            if not password:
                return
        else:
            msg.append(_('Proceed?'))
            password = None
            if not self.question('\n'.join(msg)):
                return

        def sign_done(success):
            if success:
                if not tx.is_complete():
                    # Partially signed (e.g. multisig/watch-only): show it
                    # instead of broadcasting.
                    self.show_transaction(tx)
                    self.do_clear()
                else:
                    self.broadcast_transaction(tx, tx_desc)
        self.sign_tx_with_password(tx, sign_done, password)
    @protected
    def sign_tx(self, tx, callback, password):
        """Password-gated wrapper around :meth:`sign_tx_with_password`; the
        ``password`` argument is injected by the ``@protected`` decorator."""
        self.sign_tx_with_password(tx, callback, password)
    def sign_tx_with_password(self, tx, callback, password):
        '''Sign the transaction in a separate thread. When done, calls
        the callback with a success code of True or False.
        '''
        def on_success(result):
            callback(True)
        def on_failure(exc_info):
            self.on_error(exc_info)
            callback(False)
        # Plugins (e.g. two-factor signing) may wrap the success handler.
        on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
        if self.tx_external_keypairs:
            # can sign directly
            task = partial(Transaction.sign, tx, self.tx_external_keypairs)
        else:
            task = partial(self.wallet.sign_transaction, tx, password)
        msg = _('Signing transaction...')
        WaitingDialog(self, msg, task, on_success, on_failure)
    def broadcast_transaction(self, tx, tx_desc):
        """Broadcast ``tx`` on a background thread behind a waiting dialog.

        On success, labels the tx with ``tx_desc``, marks any pending BIP70
        invoice paid (sending the payment ACK), and clears the send tab.
        """
        def broadcast_thread():
            # non-GUI thread
            pr = self.payment_request
            if pr and pr.has_expired():
                self.payment_request = None
                return False, _("Payment request has expired")
            status = False
            try:
                self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
            except TxBroadcastError as e:
                msg = e.get_message_for_gui()
            except BestEffortRequestFailed as e:
                msg = repr(e)
            else:
                status, msg = True, tx.txid()
            if pr and status is True:
                self.invoices.set_paid(pr, tx.txid())
                self.invoices.save()
                self.payment_request = None
                refund_address = self.wallet.get_receiving_address()
                coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
                fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
                ack_status, ack_msg = fut.result(timeout=20)
                self.print_error(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
            return status, msg

        # Capture current TL window; override might be removed on return
        parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))

        def broadcast_done(result):
            # GUI thread
            if result:
                status, msg = result
                if status:
                    if tx_desc is not None and tx.is_complete():
                        self.wallet.set_label(tx.txid(), tx_desc)
                    parent.show_message(_('Payment sent.') + '\n' + msg)
                    self.invoice_list.update()
                    self.do_clear()
                else:
                    msg = msg or ''
                    parent.show_error(msg)

        WaitingDialog(self, _('Broadcasting transaction...'),
                      broadcast_thread, broadcast_done, self.on_error)
    def query_choice(self, msg, choices):
        """Modal single-choice dialog; returns the selected index or None if
        cancelled."""
        # Needed by QtHandler for hardware wallets
        dialog = WindowModalDialog(self.top_level_window())
        clayout = ChoicesLayout(msg, choices)
        vbox = QVBoxLayout(dialog)
        vbox.addLayout(clayout.layout())
        vbox.addLayout(Buttons(OkButton(dialog)))
        if not dialog.exec_():
            return None
        return clayout.selected_index()
    def lock_amount(self, b):
        """Freeze/unfreeze the amount field; the Max button is disabled while
        the amount is frozen."""
        self.amount_e.setFrozen(b)
        self.max_button.setEnabled(not b)
    def prepare_for_payment_request(self):
        """Put the send tab into payment-request mode: freeze the payto,
        message and amount fields while the request is being fetched."""
        self.show_send_tab()
        self.payto_e.is_pr = True
        for e in [self.payto_e, self.message_e]:
            e.setFrozen(True)
        self.lock_amount(True)
        self.payto_e.setText(_("please wait..."))
        return True
    def delete_invoice(self, key):
        """Remove the invoice identified by ``key`` and refresh the list."""
        self.invoices.remove(key)
        self.invoice_list.update()
    def payment_request_ok(self):
        """Populate the send tab from a verified payment request.

        Already-paid invoices are reported and the tab is cleared; otherwise
        the requestor/amount/memo fields are filled in (green when still
        valid, marked expired otherwise).
        """
        pr = self.payment_request
        key = self.invoices.add(pr)
        status = self.invoices.get_status(key)
        self.invoice_list.update()
        if status == PR_PAID:
            self.show_message("invoice already paid")
            self.do_clear()
            self.payment_request = None
            return
        self.payto_e.is_pr = True
        if not pr.has_expired():
            self.payto_e.setGreen()
        else:
            self.payto_e.setExpired()
        self.payto_e.setText(pr.get_requestor())
        self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
        self.message_e.setText(pr.get_memo())
        # signal to set fee
        self.amount_e.textEdited.emit("")
    def payment_request_error(self):
        """Report a failed payment-request verification and reset the tab."""
        self.show_message(self.payment_request.error)
        self.payment_request = None
        self.do_clear()
    def on_pr(self, request):
        """Callback for a fetched payment request; verification result is
        dispatched to the GUI thread via Qt signals."""
        self.payment_request = request
        if self.payment_request.verify(self.contacts):
            self.payment_request_ok_signal.emit()
        else:
            self.payment_request_error_signal.emit()
    def pay_to_URI(self, URI):
        """Fill the send tab from a ``bitcoin:`` URI.

        A URI carrying a payment-request reference ('r', or a signed 'name')
        switches to payment-request mode; otherwise address/amount/message
        are copied into the form fields.
        """
        if not URI:
            return
        try:
            out = util.parse_URI(URI, self.on_pr)
        except BaseException as e:
            self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
            return
        self.show_send_tab()
        r = out.get('r')
        sig = out.get('sig')
        name = out.get('name')
        if r or (name and sig):
            self.prepare_for_payment_request()
            return
        address = out.get('address')
        amount = out.get('amount')
        label = out.get('label')
        message = out.get('message')
        # use label as description (not BIP21 compliant)
        if label and not message:
            message = label
        if address:
            self.payto_e.setText(address)
        if message:
            self.message_e.setText(message)
        if amount:
            self.amount_e.setAmount(amount)
            self.amount_e.textEdited.emit("")
    def do_clear(self):
        """Reset the send tab to its default (empty, unfrozen) state."""
        self.is_max = False
        self.not_enough_funds = False
        self.payment_request = None
        self.payto_e.is_pr = False
        for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
                  self.fee_e, self.feerate_e]:
            e.setText('')
            e.setFrozen(False)
        self.fee_slider.activate()
        self.feerate_e.setAmount(self.config.fee_per_byte())
        self.size_e.setAmount(0)
        self.feerounding_icon.setVisible(False)
        self.set_pay_from([])
        self.tx_external_keypairs = {}
        self.update_status()
        run_hook('do_clear', self)
    def set_frozen_state(self, addrs, freeze):
        """Freeze/unfreeze the given addresses in the wallet and refresh the
        affected views and the fee."""
        self.wallet.set_frozen_state(addrs, freeze)
        self.address_list.update()
        self.utxo_list.update()
        self.update_fee()
    def create_list_tab(self, l, toolbar=None):
        """Wrap a list widget (plus optional toolbar) in a tab container and
        expose it as the tab's ``searchable_list``."""
        w = QWidget()
        w.searchable_list = l
        vbox = QVBoxLayout()
        w.setLayout(vbox)
        vbox.setContentsMargins(0, 0, 0, 0)
        vbox.setSpacing(0)
        if toolbar:
            vbox.addLayout(toolbar)
        vbox.addWidget(l)
        return w
    def create_addresses_tab(self):
        """Build the Addresses tab (toolbar visibility comes from config)."""
        from .address_list import AddressList
        self.address_list = l = AddressList(self)
        toolbar = l.create_toolbar(self.config)
        toolbar_shown = self.config.get('show_toolbar_addresses', False)
        l.show_toolbar(toolbar_shown)
        return self.create_list_tab(l, toolbar)
    def create_utxo_tab(self):
        """Build the Coins (UTXO) tab."""
        from .utxo_list import UTXOList
        self.utxo_list = l = UTXOList(self)
        return self.create_list_tab(l)
    def create_contacts_tab(self):
        """Build the Contacts tab."""
        from .contact_list import ContactList
        self.contact_list = l = ContactList(self)
        return self.create_list_tab(l)
    def remove_address(self, addr):
        """Delete ``addr`` from the wallet after user confirmation and flag
        the views for refresh."""
        if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
            self.wallet.delete_address(addr)
            self.need_update.set()  # history, addresses, coins
            self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
    def spend_coins(self, coins):
        """Select ``coins`` as the explicit inputs and open the send tab."""
        self.set_pay_from(coins)
        self.show_send_tab()
        self.update_fee()
    def paytomany(self):
        """Switch the payto field into multi-output mode and explain the
        expected 'address, amount' line format."""
        self.show_send_tab()
        self.payto_e.paytomany()
        msg = '\n'.join([
            _('Enter a list of outputs in the \'Pay to\' field.'),
            _('One output per line.'),
            _('Format: address, amount'),
            _('You may load a CSV file using the file icon.')
        ])
        self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
    def set_contact(self, label, address):
        """Add/update an address-type contact; returns True on success,
        False (after an error dialog) when the address is invalid."""
        if not is_address(address):
            self.show_error(_('Invalid Address'))
            self.contact_list.update()  # Displays original unchanged value
            return False
        self.contacts[address] = ('address', label)
        self.contact_list.update()
        self.history_list.update()
        self.update_completions()
        return True
    def delete_contacts(self, labels):
        """Remove the given contact keys after one combined confirmation."""
        if not self.question(_("Remove {} from your list of contacts?")
                             .format(" + ".join(labels))):
            return
        for label in labels:
            self.contacts.pop(label)
        self.history_list.update()
        self.contact_list.update()
        self.update_completions()
    def show_invoice(self, key):
        """Look up a stored invoice by key, re-verify it, and show details."""
        pr = self.invoices.get(key)
        if pr is None:
            self.show_error('Cannot find payment request in wallet.')
            return
        pr.verify(self.contacts)
        self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.raw)
self.show_message(_('Invoice saved as' + ' ' + fn))
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
    def do_pay_invoice(self, key):
        """Load a stored invoice into the send tab and re-verify it."""
        pr = self.invoices.get(key)
        self.payment_request = pr
        self.prepare_for_payment_request()
        pr.error = None  # this forces verify() to re-run
        if pr.verify(self.contacts):
            self.payment_request_ok()
        else:
            self.payment_request_error()
    def create_console_tab(self):
        """Build the interactive Python console tab."""
        from .console import Console
        self.console = console = Console()
        return console
    def update_console(self):
        """Populate the console namespace with wallet/network objects and
        wrap the public Commands methods so they run through the console."""
        console = self.console
        console.history = self.config.get("console-history",[])
        console.history_index = len(console.history)

        console.updateNamespace({
            'wallet': self.wallet,
            'network': self.network,
            'plugins': self.gui_object.plugins,
            'window': self,
            'config': self.config,
            'electrum': electrum,
            'daemon': self.gui_object.daemon,
            'util': util,
            'bitcoin': bitcoin,
        })

        c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
        methods = {}
        def mkfunc(f, method):
            # Bind each command name to its runner (binds `method` eagerly to
            # avoid the late-binding closure pitfall).
            return lambda *args: f(method, args, self.password_dialog)
        for m in dir(c):
            if m[0]=='_' or m in ['network','wallet','config']: continue
            methods[m] = mkfunc(c._run, m)

        console.updateNamespace(methods)
    def create_status_bar(self):
        """Build the main-window status bar: balance label, hidden search box,
        update-check button, and password/preferences/seed/network buttons."""
        sb = QStatusBar()
        sb.setFixedHeight(35)
        self.balance_label = QLabel("Loading wallet...")
        self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
        sb.addWidget(self.balance_label)

        self.search_box = QLineEdit()
        self.search_box.textChanged.connect(self.do_search)
        self.search_box.hide()
        sb.addPermanentWidget(self.search_box)

        self.update_check_button = QPushButton("")
        self.update_check_button.setFlat(True)
        self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
        self.update_check_button.setIcon(QIcon(":icons/update.png"))
        self.update_check_button.hide()
        sb.addPermanentWidget(self.update_check_button)

        self.lock_icon = QIcon()
        self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
        sb.addPermanentWidget(self.password_button)

        sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
        self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
        sb.addPermanentWidget(self.seed_button)
        self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
        sb.addPermanentWidget(self.status_button)
        run_hook('create_status_bar', sb)
        self.setStatusBar(sb)
    def update_lock_icon(self):
        """Show a locked or unlocked padlock depending on wallet password."""
        icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
        self.password_button.setIcon(icon)
    def update_buttons_on_seed(self):
        """Toggle seed/password/send buttons based on wallet capabilities."""
        self.seed_button.setVisible(self.wallet.has_seed())
        self.password_button.setVisible(self.wallet.may_have_password())
        self.send_button.setVisible(not self.wallet.is_watching_only())
    def change_password_dialog(self):
        """Run the change-password flow.

        Hardware wallets with xpub-password storage encryption use a
        dedicated dialog (the password is derived from the device); software
        wallets prompt for old/new passwords. Updates the lock icon when
        done.
        """
        from electrum.storage import STO_EV_XPUB_PW
        if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
            from .password_dialog import ChangePasswordDialogForHW
            d = ChangePasswordDialogForHW(self, self.wallet)
            ok, encrypt_file = d.run()
            if not ok:
                return

            try:
                hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
            except UserCancelled:
                return
            except BaseException as e:
                traceback.print_exc(file=sys.stderr)
                self.show_error(str(e))
                return
            old_password = hw_dev_pw if self.wallet.has_password() else None
            new_password = hw_dev_pw if encrypt_file else None
        else:
            from .password_dialog import ChangePasswordDialogForSW
            d = ChangePasswordDialogForSW(self, self.wallet)
            ok, old_password, new_password, encrypt_file = d.run()

        if not ok:
            return
        try:
            self.wallet.update_password(old_password, new_password, encrypt_file)
        except InvalidPassword as e:
            self.show_error(str(e))
            return
        except BaseException:
            traceback.print_exc(file=sys.stdout)
            self.show_error(_('Failed to update password'))
            return
        msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
        self.show_message(msg, title=_("Success"))
        self.update_lock_icon()
    def toggle_search(self):
        """Show/hide the status-bar search box; clears the filter on hide."""
        tab = self.tabs.currentWidget()
        #if hasattr(tab, 'searchable_list'):
        #    tab.searchable_list.toggle_toolbar()
        #return
        self.search_box.setHidden(not self.search_box.isHidden())
        if not self.search_box.isHidden():
            self.search_box.setFocus(1)
        else:
            self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
    def new_contact_dialog(self):
        """Modal dialog asking for an address and a name; on OK the pair is
        stored via :meth:`set_contact`."""
        d = WindowModalDialog(self, _("New Contact"))
        vbox = QVBoxLayout(d)
        vbox.addWidget(QLabel(_('New Contact') + ':'))
        grid = QGridLayout()
        line1 = QLineEdit()
        line1.setFixedWidth(280)
        line2 = QLineEdit()
        line2.setFixedWidth(280)
        grid.addWidget(QLabel(_("Address")), 1, 0)
        grid.addWidget(line1, 1, 1)
        grid.addWidget(QLabel(_("Name")), 2, 0)
        grid.addWidget(line2, 2, 1)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if d.exec_():
            self.set_contact(line2.text(), line1.text())
    def show_master_public_keys(self):
        """Show the 'Wallet Information' dialog: name, type, script type,
        seed availability, keystore type(s) and master public key(s)."""
        dialog = WindowModalDialog(self, _("Wallet Information"))
        dialog.setMinimumSize(500, 100)
        mpk_list = self.wallet.get_master_public_keys()
        vbox = QVBoxLayout()
        wallet_type = self.wallet.storage.get('wallet_type', '')
        if self.wallet.is_watching_only():
            wallet_type += ' [{}]'.format(_('watching-only'))
        seed_available = _('True') if self.wallet.has_seed() else _('False')
        keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
        grid = QGridLayout()
        basename = os.path.basename(self.wallet.storage.path)
        grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
        grid.addWidget(QLabel(basename), 0, 1)
        grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
        grid.addWidget(QLabel(wallet_type), 1, 1)
        grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
        grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
        grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
        grid.addWidget(QLabel(str(seed_available)), 3, 1)
        if len(keystore_types) <= 1:
            grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
            ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
            grid.addWidget(QLabel(ks_type), 4, 1)
        vbox.addLayout(grid)
        if self.wallet.is_deterministic():
            mpk_text = ShowQRTextEdit()
            mpk_text.setMaximumHeight(150)
            mpk_text.addCopyButton(self.app)
            def show_mpk(index):
                mpk_text.setText(mpk_list[index])
            # only show the combobox in case multiple accounts are available
            if len(mpk_list) > 1:
                def label(key):
                    if isinstance(self.wallet, Multisig_Wallet):
                        return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
                    return ''
                labels = [label(i) for i in range(len(mpk_list))]
                on_click = lambda clayout: show_mpk(clayout.selected_index())
                labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
                vbox.addLayout(labels_clayout.layout())
            else:
                vbox.addWidget(QLabel(_("Master Public Key")))
            show_mpk(0)
            vbox.addWidget(mpk_text)
        vbox.addStretch(1)
        vbox.addLayout(Buttons(CloseButton(dialog)))
        dialog.setLayout(vbox)
        dialog.exec_()
    def remove_wallet(self):
        """Ask for confirmation, then delete the wallet file from disk."""
        if self.question('\n'.join([
                _('Delete wallet file?'),
                "%s"%self.wallet.storage.path,
                _('If your wallet contains funds, make sure you have saved its seed.')])):
            self._delete_wallet()
    @protected
    def _delete_wallet(self, password):
        """Stop the wallet in the daemon, close this window, and unlink the
        wallet file (password prompt injected by ``@protected``)."""
        wallet_path = self.wallet.storage.path
        basename = os.path.basename(wallet_path)
        self.gui_object.daemon.stop_wallet(wallet_path)
        self.close()
        os.unlink(wallet_path)
        self.show_error(_("Wallet removed: {}").format(basename))
    @protected
    def show_seed_dialog(self, password):
        """Show the wallet seed (and passphrase) after password entry."""
        if not self.wallet.has_seed():
            self.show_message(_('This wallet has no seed'))
            return
        keystore = self.wallet.get_keystore()
        try:
            seed = keystore.get_seed(password)
            passphrase = keystore.get_passphrase(password)
        except BaseException as e:
            self.show_error(str(e))
            return
        from .seed_dialog import SeedDialog
        d = SeedDialog(self, seed, passphrase)
        d.exec_()
    def show_qrcode(self, data, title = _("QR code"), parent=None):
        """Show ``data`` as a QR code in a modal dialog; no-op when empty.

        NOTE(review): the default title is translated once at class creation
        time, not per call -- confirm whether that matters for runtime
        language switching.
        """
        if not data:
            return
        d = QRDialog(data, parent or self, title)
        d.exec_()
    @protected
    def show_private_key(self, address, password):
        """Export and display the private key (and redeem script, if any)
        for ``address`` after password entry."""
        if not address:
            return
        try:
            pk, redeem_script = self.wallet.export_private_key(address, password)
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
            self.show_message(str(e))
            return
        xtype = bitcoin.deserialize_privkey(pk)[0]
        d = WindowModalDialog(self, _("Private key"))
        d.setMinimumSize(600, 150)
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(_("Address") + ': ' + address))
        vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
        vbox.addWidget(QLabel(_("Private key") + ':'))
        keys_e = ShowQRTextEdit(text=pk)
        keys_e.addCopyButton(self.app)
        vbox.addWidget(keys_e)
        if redeem_script:
            vbox.addWidget(QLabel(_("Redeem Script") + ':'))
            rds_e = ShowQRTextEdit(text=redeem_script)
            rds_e.addCopyButton(self.app)
            vbox.addWidget(rds_e)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.setLayout(vbox)
        d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
    @protected
    def do_sign(self, address, message, signature, password):
        """Sign ``message`` with the key of ``address`` (widgets passed in);
        the base64 signature is written into the signature widget from the
        wallet worker thread."""
        address = address.text().strip()
        message = message.toPlainText().strip()
        if not bitcoin.is_address(address):
            self.show_message(_('Invalid Bitcoin address.'))
            return
        if self.wallet.is_watching_only():
            self.show_message(_('This is a watching-only wallet.'))
            return
        if not self.wallet.is_mine(address):
            self.show_message(_('Address not in wallet.'))
            return
        txin_type = self.wallet.get_txin_type(address)
        if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
            self.show_message(_('Cannot sign messages with this type of address:') + \
                              ' ' + txin_type + '\n\n' + self.msg_sign)
            return
        task = partial(self.wallet.sign_message, address, message, password)

        def show_signed_message(sig):
            try:
                signature.setText(base64.b64encode(sig).decode('ascii'))
            except RuntimeError:
                # (signature) wrapped C/C++ object has been deleted
                pass
        self.wallet.thread.add(task, on_success=show_signed_message)
    def do_verify(self, address, message, signature):
        """Verify a base64 message signature against ``address`` (widgets
        passed in) and report the result in a dialog."""
        address = address.text().strip()
        message = message.toPlainText().strip().encode('utf-8')
        if not bitcoin.is_address(address):
            self.show_message(_('Invalid Bitcoin address.'))
            return
        try:
            # This can throw on invalid base64
            sig = base64.b64decode(str(signature.toPlainText()))
            verified = ecc.verify_message_with_address(address, sig, message)
        except Exception as e:
            verified = False
        if verified:
            self.show_message(_("Signature verified"))
        else:
            self.show_error(_("Wrong signature"))
    def sign_verify_message(self, address=''):
        """Modal dialog with message/address/signature fields and Sign /
        Verify / Close buttons (wired to do_sign / do_verify)."""
        d = WindowModalDialog(self, _('Sign/verify Message'))
        d.setMinimumSize(610, 290)

        layout = QGridLayout(d)

        message_e = QTextEdit()
        layout.addWidget(QLabel(_('Message')), 1, 0)
        layout.addWidget(message_e, 1, 1)
        layout.setRowStretch(2,3)

        address_e = QLineEdit()
        address_e.setText(address)
        layout.addWidget(QLabel(_('Address')), 2, 0)
        layout.addWidget(address_e, 2, 1)

        signature_e = QTextEdit()
        layout.addWidget(QLabel(_('Signature')), 3, 0)
        layout.addWidget(signature_e, 3, 1)
        layout.setRowStretch(3,1)

        hbox = QHBoxLayout()

        b = QPushButton(_("Sign"))
        b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
        hbox.addWidget(b)

        b = QPushButton(_("Verify"))
        b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
        hbox.addWidget(b)

        b = QPushButton(_("Close"))
        b.clicked.connect(d.accept)
        hbox.addWidget(b)
        layout.addLayout(hbox, 4, 1)
        d.exec_()
    @protected
    def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
        """Decrypt the ciphertext with the wallet key for the given pubkey;
        the plaintext is written back into ``message_e`` from the wallet
        worker thread."""
        if self.wallet.is_watching_only():
            self.show_message(_('This is a watching-only wallet.'))
            return
        cyphertext = encrypted_e.toPlainText()
        task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)

        def setText(text):
            try:
                message_e.setText(text.decode('utf-8'))
            except RuntimeError:
                # (message_e) wrapped C/C++ object has been deleted
                pass
        self.wallet.thread.add(task, on_success=setText)
    def do_encrypt(self, message_e, pubkey_e, encrypted_e):
        """Encrypt the message widget's text to the given public key and put
        the result in the encrypted widget."""
        message = message_e.toPlainText()
        message = message.encode('utf-8')
        try:
            public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
        except BaseException as e:
            traceback.print_exc(file=sys.stdout)
            self.show_warning(_('Invalid Public key'))
            return
        encrypted = public_key.encrypt_message(message)
        encrypted_e.setText(encrypted.decode('ascii'))
    def encrypt_message(self, address=''):
        """Modal dialog with message/pubkey/encrypted fields and Encrypt /
        Decrypt / Close buttons (wired to do_encrypt / do_decrypt)."""
        d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
        d.setMinimumSize(610, 490)

        layout = QGridLayout(d)

        message_e = QTextEdit()
        layout.addWidget(QLabel(_('Message')), 1, 0)
        layout.addWidget(message_e, 1, 1)
        layout.setRowStretch(2,3)

        pubkey_e = QLineEdit()
        if address:
            pubkey = self.wallet.get_public_key(address)
            pubkey_e.setText(pubkey)
        layout.addWidget(QLabel(_('Public key')), 2, 0)
        layout.addWidget(pubkey_e, 2, 1)

        encrypted_e = QTextEdit()
        layout.addWidget(QLabel(_('Encrypted')), 3, 0)
        layout.addWidget(encrypted_e, 3, 1)
        layout.setRowStretch(3,1)

        hbox = QHBoxLayout()
        b = QPushButton(_("Encrypt"))
        b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
        hbox.addWidget(b)

        b = QPushButton(_("Decrypt"))
        b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
        hbox.addWidget(b)

        b = QPushButton(_("Close"))
        b.clicked.connect(d.accept)
        hbox.addWidget(b)

        layout.addLayout(hbox, 4, 1)
        d.exec_()
    def password_dialog(self, msg=None, parent=None):
        """Run a modal password prompt; returns the entered password, or
        None if the user cancelled (per PasswordDialog.run)."""
        from .password_dialog import PasswordDialog
        parent = parent or self
        d = PasswordDialog(parent, msg)
        return d.run()
    def tx_from_text(self, txt):
        """Parse raw transaction text into a Transaction, or show an error
        dialog and return None when parsing fails."""
        from electrum.transaction import tx_from_str
        try:
            tx = tx_from_str(txt)
            return Transaction(tx)
        except BaseException as e:
            self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
            return
def read_tx_from_qrcode(self):
    """Scan a QR code and act on the result.

    A "bitcoin:" URI is routed to the payment flow; anything else is
    treated as a base43-encoded offline signed transaction and shown
    in the transaction dialog.
    """
    from electrum import qrscanner
    try:
        data = qrscanner.scan_barcode(self.config.get_video_device())
    except BaseException as e:
        self.show_error(str(e))
        return
    if not data:
        return
    # if the user scanned a bitcoin URI
    if str(data).startswith("bitcoin:"):
        self.pay_to_URI(data)
        return
    # else if the user scanned an offline signed tx
    try:
        # QR codes carry the tx in base43; convert to hex for parsing
        data = bh2u(bitcoin.base_decode(data, length=None, base=43))
    except BaseException as e:
        self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
        return
    tx = self.tx_from_text(data)
    if not tx:
        return
    self.show_transaction(tx)
def read_tx_from_file(self):
    """Let the user pick a *.txn file and parse it into a Transaction.

    Returns None if the user cancels or the file cannot be read.
    """
    path = self.getOpenFileName(_("Select your transaction file"), "*.txn")
    if not path:
        return
    try:
        with open(path, "r") as f:
            contents = f.read()
    except (ValueError, IOError, os.error) as reason:
        self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
        return
    return self.tx_from_text(contents)
def do_process_from_text(self):
    """Ask the user to paste a raw transaction and display it if it parses."""
    raw = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
    if not raw:
        return
    parsed = self.tx_from_text(raw)
    if parsed:
        self.show_transaction(parsed)
def do_process_from_file(self):
    """Load a transaction from a user-chosen file and display it if valid."""
    loaded = self.read_tx_from_file()
    if loaded:
        self.show_transaction(loaded)
def do_process_from_txid(self):
    """Prompt for a txid, fetch the raw tx from the network, and show it."""
    from electrum import transaction
    txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
    if ok and txid:
        txid = str(txid).strip()
        try:
            # network call runs on the network's event loop; block here
            # (GUI thread) for up to 10 seconds
            raw_tx = self.network.run_from_another_thread(
                self.network.get_transaction(txid, timeout=10))
        except Exception as e:
            self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
            return
        tx = transaction.Transaction(raw_tx)
        self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
    """Show the export-private-keys dialog and optionally write them to a file.

    The @protected decorator prompts for the wallet password and passes it
    in as *password*. Keys are derived on a background thread; progress and
    completion are reported back to the GUI via the
    computing_privkeys_signal / show_privkeys_signal Qt signals.
    """
    if self.wallet.is_watching_only():
        self.show_message(_("This is a watching-only wallet"))
        return
    if isinstance(self.wallet, Multisig_Wallet):
        self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                          _('It cannot be "backed up" by simply exporting these private keys.'))
    d = WindowModalDialog(self, _('Private keys'))
    d.setMinimumSize(980, 300)
    vbox = QVBoxLayout(d)
    msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                          _("Exposing a single private key can compromise your entire wallet!"),
                          _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
    vbox.addWidget(QLabel(msg))
    e = QTextEdit()
    e.setReadOnly(True)
    vbox.addWidget(e)
    defaultname = 'electrum-private-keys.csv'
    select_msg = _('Select file to export your private keys to')
    hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
    vbox.addLayout(hbox)
    b = OkButton(d, _('Export'))
    b.setEnabled(False)  # enabled only once all keys are computed
    vbox.addLayout(Buttons(CancelButton(d), b))
    private_keys = {}
    addresses = self.wallet.get_addresses()
    done = False       # set when all keys are computed / dialog accepted
    cancelled = False  # set when the dialog is closed early
    def privkeys_thread():
        # background worker: derive one key at a time, notifying the GUI
        for addr in addresses:
            time.sleep(0.1)  # keep the GUI responsive / rate-limit updates
            if done or cancelled:
                break
            privkey = self.wallet.export_private_key(addr, password)[0]
            private_keys[addr] = privkey
            self.computing_privkeys_signal.emit()
        if not cancelled:
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.emit()
    def show_privkeys():
        # runs on the GUI thread when the worker finishes
        s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
        e.setText(s)
        b.setEnabled(True)
        self.show_privkeys_signal.disconnect()
        nonlocal done
        done = True
    def on_dialog_closed(*args):
        nonlocal done
        nonlocal cancelled
        if not done:
            # dialog closed before the worker finished: tell it to stop
            cancelled = True
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.disconnect()
    self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
    self.show_privkeys_signal.connect(show_privkeys)
    d.finished.connect(on_dialog_closed)
    threading.Thread(target=privkeys_thread).start()
    if not d.exec_():
        done = True
        return
    filename = filename_e.text()
    if not filename:
        return
    try:
        self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
    except (IOError, os.error) as reason:
        txt = "\n".join([
            _("Electrum was unable to produce a private key-export."),
            str(reason)
        ])
        self.show_critical(txt, title=_("Unable to create csv"))
    except Exception as e:
        self.show_message(str(e))
        return
    self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
    """Write the address -> private-key mapping *pklist* to *fileName*.

    When *is_csv* is true the output is a two-column CSV with a header
    row; otherwise the mapping is dumped as indented JSON.
    """
    with open(fileName, "w+") as out:
        if is_csv:
            writer = csv.writer(out)
            writer.writerow(["address", "private_key"])
            for addr, pk in pklist.items():
                # right-pad the address to 34 chars for column alignment
                writer.writerow(["%34s" % addr, pk])
        else:
            out.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
    """Import wallet labels from a user-chosen JSON file via import_meta_gui."""
    def import_labels(path):
        def _validate(data):
            # no schema validation yet; data is accepted as-is
            return data # TODO
        def import_labels_assign(data):
            # apply each imported (txid/address -> label) pair to the wallet
            for key, value in data.items():
                self.wallet.set_label(key, value)
        import_meta(path, _validate, import_labels_assign)
    def on_import():
        # trigger a GUI refresh after the import completes
        self.need_update.set()
    import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
    """Export the wallet's labels to a user-chosen file via export_meta_gui."""
    export_meta_gui(self, _('labels'),
                    lambda filename: export_meta(self.wallet.labels, filename))
def sweep_key_dialog(self):
    """Show the "Sweep private keys" dialog and prepare a sweep transaction.

    The user enters WIF private keys and a destination address; on accept
    the spendable coins of those keys are loaded into the send tab with
    the amount maximised and the fields frozen.
    """
    d = WindowModalDialog(self, title=_('Sweep private keys'))
    d.setMinimumSize(600, 300)
    vbox = QVBoxLayout(d)
    hbox_top = QHBoxLayout()
    hbox_top.addWidget(QLabel(_("Enter private keys:")))
    hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    vbox.addLayout(hbox_top)
    keys_e = ScanQRTextEdit(allow_multi=True)
    keys_e.setTabChangesFocus(True)
    vbox.addWidget(keys_e)
    # destination: prefer an unused address, falling back for wallet types
    # that don't expose get_receiving_addresses()
    addresses = self.wallet.get_unused_addresses()
    if not addresses:
        try:
            addresses = self.wallet.get_receiving_addresses()
        except AttributeError:
            addresses = self.wallet.get_addresses()
    h, address_e = address_field(addresses)
    vbox.addLayout(h)
    vbox.addStretch(1)
    button = OkButton(d, _('Sweep'))
    vbox.addLayout(Buttons(CancelButton(d), button))
    button.setEnabled(False)
    def get_address():
        # return the destination address, or None if invalid
        addr = str(address_e.text()).strip()
        if bitcoin.is_address(addr):
            return addr
    def get_pk():
        # parse the pasted text into a list of private keys (None if invalid)
        text = str(keys_e.toPlainText())
        return keystore.get_private_keys(text)
    # enable Sweep only when both the address and the keys are valid
    f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
    on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
    keys_e.textChanged.connect(f)
    address_e.textChanged.connect(f)
    address_e.textChanged.connect(on_address)
    on_address(str(address_e.text()))
    if not d.exec_():
        return
    # user pressed "sweep"
    addr = get_address()
    try:
        self.wallet.check_address(addr)
    except InternalAddressCorruption as e:
        # serious wallet corruption: surface the error and re-raise
        self.show_error(str(e))
        raise
    try:
        coins, keypairs = sweep_preparations(get_pk(), self.network)
    except Exception as e:  # FIXME too broad...
        #traceback.print_exc(file=sys.stderr)
        self.show_message(str(e))
        return
    # load the sweep into the send tab with the fields locked down
    self.do_clear()
    self.tx_external_keypairs = keypairs
    self.spend_coins(coins)
    self.payto_e.setText(addr)
    self.spend_max()
    self.payto_e.setFrozen(True)
    self.amount_e.setFrozen(True)
    self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
    """Shared import flow for addresses/private keys.

    Shows a multi-line text dialog, splits the input on whitespace, passes
    the list to *func* (which returns (good_inputs, bad_inputs)), reports
    the first ten of each, and refreshes the address/history lists.
    """
    text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
    if not text:
        return
    keys = str(text).split()
    good_inputs, bad_inputs = func(keys)
    if good_inputs:
        msg = '\n'.join(good_inputs[:10])
        if len(good_inputs) > 10: msg += '\n...'
        self.show_message(_("The following addresses were added")
                          + f' ({len(good_inputs)}):\n' + msg)
    if bad_inputs:
        # bad_inputs is a list of (key, error-message) pairs
        msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
        if len(bad_inputs) > 10: msg += '\n...'
        self.show_error(_("The following inputs could not be imported")
                        + f' ({len(bad_inputs)}):\n' + msg)
    self.address_list.update()
    self.history_list.update()
def import_addresses(self):
    """Prompt for addresses and add them to an address-import wallet."""
    if not self.wallet.can_import_address():
        return
    self._do_import(_('Import addresses'), _("Enter addresses") + ':',
                    self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
    """Prompt for WIF private keys and import them into the wallet.

    The @protected decorator prompts for the wallet password and supplies
    it as *password*, which is forwarded to import_private_keys().
    """
    if not self.wallet.can_import_privkey():
        return
    title = _('Import private keys')
    header_layout = QHBoxLayout()
    header_layout.addWidget(QLabel(_("Enter private keys")+':'))
    header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
    """Toggle fiat amount fields and refresh views after a fiat-config change."""
    fiat_on = self.fx and self.fx.is_enabled()
    self.fiat_send_e.setVisible(fiat_on)
    self.fiat_receive_e.setVisible(fiat_on)
    # repaint everything that displays fiat values
    self.history_list.update()
    self.address_list.refresh_headers()
    self.address_list.update()
    self.update_status()
def settings_dialog(self):
    """Show the Preferences dialog (Fees / Transactions / General / Fiat / Identity).

    Each tab is built as a list of (label, widget) rows; most widgets write
    straight to self.config via closures. Settings that need a restart set
    self.need_restart, which triggers a warning after the dialog closes.
    """
    self.need_restart = False
    d = WindowModalDialog(self, _('Preferences'))
    vbox = QVBoxLayout()
    tabs = QTabWidget()
    gui_widgets = []
    fee_widgets = []
    tx_widgets = []
    id_widgets = []
    # language
    lang_help = _('Select which language is used in the GUI (after restart).')
    lang_label = HelpLabel(_('Language') + ':', lang_help)
    lang_combo = QComboBox()
    from electrum.i18n import languages
    lang_combo.addItems(list(languages.values()))
    lang_keys = list(languages.keys())
    lang_cur_setting = self.config.get("language", '')
    try:
        index = lang_keys.index(lang_cur_setting)
    except ValueError: # not in list
        index = 0
    lang_combo.setCurrentIndex(index)
    if not self.config.is_modifiable('language'):
        for w in [lang_combo, lang_label]: w.setEnabled(False)
    def on_lang(x):
        lang_request = list(languages.keys())[lang_combo.currentIndex()]
        if lang_request != self.config.get('language'):
            self.config.set_key("language", lang_request, True)
            self.need_restart = True
    lang_combo.currentIndexChanged.connect(on_lang)
    gui_widgets.append((lang_label, lang_combo))
    # number of zeros after the decimal point
    nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
    nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
    nz = QSpinBox()
    nz.setMinimum(0)
    nz.setMaximum(self.decimal_point)
    nz.setValue(self.num_zeros)
    if not self.config.is_modifiable('num_zeros'):
        for w in [nz, nz_label]: w.setEnabled(False)
    def on_nz():
        value = nz.value()
        if self.num_zeros != value:
            self.num_zeros = value
            self.config.set_key('num_zeros', value, True)
            self.history_list.update()
            self.address_list.update()
    nz.valueChanged.connect(on_nz)
    gui_widgets.append((nz_label, nz))
    # fee estimation method
    msg = '\n'.join([
        _('Time based: fee rate is based on average confirmation time estimates'),
        _('Mempool based: fee rate is targeting a depth in the memory pool')
        ]
    )
    fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
    fee_type_combo = QComboBox()
    fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
    fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
    def on_fee_type(x):
        # combo index encodes both flags: 0=static, 1=ETA, 2=mempool
        self.config.set_key('mempool_fees', x==2)
        self.config.set_key('dynamic_fees', x>0)
        self.fee_slider.update()
    fee_type_combo.currentIndexChanged.connect(on_fee_type)
    fee_widgets.append((fee_type_label, fee_type_combo))
    # manual fee editing
    feebox_cb = QCheckBox(_('Edit fees manually'))
    feebox_cb.setChecked(self.config.get('show_fee', False))
    feebox_cb.setToolTip(_("Show fee edit box in send tab."))
    def on_feebox(x):
        self.config.set_key('show_fee', x == Qt.Checked)
        self.fee_adv_controls.setVisible(bool(x))
    feebox_cb.stateChanged.connect(on_feebox)
    fee_widgets.append((feebox_cb, None))
    # Replace-By-Fee
    use_rbf = self.config.get('use_rbf', True)
    use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
    use_rbf_cb.setChecked(use_rbf)
    use_rbf_cb.setToolTip(
        _('If you check this box, your transactions will be marked as non-final,') + '\n' + \
        _('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
        _('Note that some merchants do not accept non-final transactions until they are confirmed.'))
    def on_use_rbf(x):
        self.config.set_key('use_rbf', bool(x))
        # batching only makes sense when RBF is on
        batch_rbf_cb.setEnabled(bool(x))
    use_rbf_cb.stateChanged.connect(on_use_rbf)
    fee_widgets.append((use_rbf_cb, None))
    batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
    batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
    batch_rbf_cb.setEnabled(use_rbf)
    batch_rbf_cb.setToolTip(
        _('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
        _('This will save fees.'))
    def on_batch_rbf(x):
        self.config.set_key('batch_rbf', bool(x))
    batch_rbf_cb.stateChanged.connect(on_batch_rbf)
    fee_widgets.append((batch_rbf_cb, None))
    # OpenAlias identity
    msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
          + _('The following alias providers are available:') + '\n'\
          + '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
          + 'For more information, see https://openalias.org'
    alias_label = HelpLabel(_('OpenAlias') + ':', msg)
    alias = self.config.get('alias','')
    alias_e = QLineEdit(alias)
    def set_alias_color():
        # green once the alias is resolved+validated, red otherwise
        if not self.config.get('alias'):
            alias_e.setStyleSheet("")
            return
        if self.alias_info:
            alias_addr, alias_name, validated = self.alias_info
            alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
        else:
            alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
    def on_alias_edit():
        alias_e.setStyleSheet("")
        alias = str(alias_e.text())
        self.config.set_key('alias', alias, True)
        if alias:
            self.fetch_alias()
    set_alias_color()
    self.alias_received_signal.connect(set_alias_color)
    alias_e.editingFinished.connect(on_alias_edit)
    id_widgets.append((alias_label, alias_e))
    # SSL certificate
    msg = ' '.join([
        _('SSL certificate used to sign payment requests.'),
        _('Use setconfig to set ssl_chain and ssl_privkey.'),
    ])
    if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
        try:
            SSL_identity = paymentrequest.check_ssl_config(self.config)
            SSL_error = None
        except BaseException as e:
            SSL_identity = "error"
            SSL_error = str(e)
    else:
        SSL_identity = ""
        SSL_error = None
    SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
    SSL_id_e = QLineEdit(SSL_identity)
    SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
    if SSL_error:
        SSL_id_e.setToolTip(SSL_error)
    SSL_id_e.setReadOnly(True)
    id_widgets.append((SSL_id_label, SSL_id_e))
    # base unit
    units = base_units_list
    msg = (_('Base unit of your wallet.')
           + '\n1 BTC = 1000 mBTC. 1 mBTC = 1000 bits. 1 bit = 100 sat.\n'
           + _('This setting affects the Send tab, and all balance related fields.'))
    unit_label = HelpLabel(_('Base unit') + ':', msg)
    unit_combo = QComboBox()
    unit_combo.addItems(units)
    unit_combo.setCurrentIndex(units.index(self.base_unit()))
    def on_unit(x, nz):
        unit_result = units[unit_combo.currentIndex()]
        if self.base_unit() == unit_result:
            return
        # preserve the entered amounts across the unit change
        edits = self.amount_e, self.fee_e, self.receive_amount_e
        amounts = [edit.get_amount() for edit in edits]
        self.decimal_point = base_unit_name_to_decimal_point(unit_result)
        self.config.set_key('decimal_point', self.decimal_point, True)
        nz.setMaximum(self.decimal_point)
        self.history_list.update()
        self.request_list.update()
        self.address_list.update()
        for edit, amount in zip(edits, amounts):
            edit.setAmount(amount)
        self.update_status()
    unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
    gui_widgets.append((unit_label, unit_combo))
    # block explorer
    block_explorers = sorted(util.block_explorer_info().keys())
    msg = _('Choose which online block explorer to use for functions that open a web browser')
    block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
    block_ex_combo = QComboBox()
    block_ex_combo.addItems(block_explorers)
    block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
    def on_be(x):
        be_result = block_explorers[block_ex_combo.currentIndex()]
        self.config.set_key('block_explorer', be_result, True)
    block_ex_combo.currentIndexChanged.connect(on_be)
    gui_widgets.append((block_ex_label, block_ex_combo))
    # QR-scanner video device
    from electrum import qrscanner
    system_cameras = qrscanner._find_system_cameras()
    qr_combo = QComboBox()
    qr_combo.addItem("Default","default")
    for camera, device in system_cameras.items():
        qr_combo.addItem(camera, device)
    #combo.addItem("Manually specify a device", config.get("video_device"))
    index = qr_combo.findData(self.config.get("video_device"))
    qr_combo.setCurrentIndex(index)
    msg = _("Install the zbar package to enable this.")
    qr_label = HelpLabel(_('Video Device') + ':', msg)
    qr_combo.setEnabled(qrscanner.libzbar is not None)
    on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
    qr_combo.currentIndexChanged.connect(on_video_device)
    gui_widgets.append((qr_label, qr_combo))
    # color theme (requires restart)
    colortheme_combo = QComboBox()
    colortheme_combo.addItem(_('Light'), 'default')
    colortheme_combo.addItem(_('Dark'), 'dark')
    index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
    colortheme_combo.setCurrentIndex(index)
    colortheme_label = QLabel(_('Color theme') + ':')
    def on_colortheme(x):
        self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
        self.need_restart = True
    colortheme_combo.currentIndexChanged.connect(on_colortheme)
    gui_widgets.append((colortheme_label, colortheme_combo))
    # update check
    updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
    updatecheck_cb.setChecked(self.config.get('check_updates', False))
    def on_set_updatecheck(v):
        self.config.set_key('check_updates', v == Qt.Checked, save=True)
    updatecheck_cb.stateChanged.connect(on_set_updatecheck)
    gui_widgets.append((updatecheck_cb, None))
    # change addresses
    usechange_cb = QCheckBox(_('Use change addresses'))
    usechange_cb.setChecked(self.wallet.use_change)
    if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
    def on_usechange(x):
        usechange_result = x == Qt.Checked
        if self.wallet.use_change != usechange_result:
            self.wallet.use_change = usechange_result
            self.wallet.storage.put('use_change', self.wallet.use_change)
            # multiple-change only applies when change addresses are used
            multiple_cb.setEnabled(self.wallet.use_change)
    usechange_cb.stateChanged.connect(on_usechange)
    usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
    tx_widgets.append((usechange_cb, None))
    def on_multiple(x):
        multiple = x == Qt.Checked
        if self.wallet.multiple_change != multiple:
            self.wallet.multiple_change = multiple
            self.wallet.storage.put('multiple_change', multiple)
    multiple_change = self.wallet.multiple_change
    multiple_cb = QCheckBox(_('Use multiple change addresses'))
    multiple_cb.setEnabled(self.wallet.use_change)
    multiple_cb.setToolTip('\n'.join([
        _('In some cases, use up to 3 change addresses in order to break '
          'up large coin amounts and obfuscate the recipient address.'),
        _('This may result in higher transactions fees.')
    ]))
    multiple_cb.setChecked(multiple_change)
    multiple_cb.stateChanged.connect(on_multiple)
    tx_widgets.append((multiple_cb, None))
    # coin selection method (shown only if more than one chooser exists)
    def fmt_docs(key, klass):
        lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
        return '\n'.join([key, "", " ".join(lines)])
    choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
    if len(choosers) > 1:
        chooser_name = coinchooser.get_name(self.config)
        msg = _('Choose coin (UTXO) selection method.  The following are available:\n\n')
        msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
        chooser_label = HelpLabel(_('Coin selection') + ':', msg)
        chooser_combo = QComboBox()
        chooser_combo.addItems(choosers)
        i = choosers.index(chooser_name) if chooser_name in choosers else 0
        chooser_combo.setCurrentIndex(i)
        def on_chooser(x):
            chooser_name = choosers[chooser_combo.currentIndex()]
            self.config.set_key('coin_chooser', chooser_name)
        chooser_combo.currentIndexChanged.connect(on_chooser)
        tx_widgets.append((chooser_label, chooser_combo))
    # spend only confirmed coins
    def on_unconf(x):
        self.config.set_key('confirmed_only', bool(x))
    conf_only = self.config.get('confirmed_only', False)
    unconf_cb = QCheckBox(_('Spend only confirmed coins'))
    unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
    unconf_cb.setChecked(conf_only)
    unconf_cb.stateChanged.connect(on_unconf)
    tx_widgets.append((unconf_cb, None))
    # output value rounding
    def on_outrounding(x):
        self.config.set_key('coin_chooser_output_rounding', bool(x))
    enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
    outrounding_cb = QCheckBox(_('Enable output value rounding'))
    outrounding_cb.setToolTip(
        _('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
        _('This might improve your privacy somewhat.') + '\n' +
        _('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
    outrounding_cb.setChecked(enable_outrounding)
    outrounding_cb.stateChanged.connect(on_outrounding)
    tx_widgets.append((outrounding_cb, None))
    # Fiat Currency
    hist_checkbox = QCheckBox()
    hist_capgains_checkbox = QCheckBox()
    fiat_address_checkbox = QCheckBox()
    ccy_combo = QComboBox()
    ex_combo = QComboBox()
    def update_currencies():
        if not self.fx: return
        currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
        ccy_combo.clear()
        ccy_combo.addItems([_('None')] + currencies)
        if self.fx.is_enabled():
            ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
    def update_history_cb():
        if not self.fx: return
        hist_checkbox.setChecked(self.fx.get_history_config())
        hist_checkbox.setEnabled(self.fx.is_enabled())
    def update_fiat_address_cb():
        if not self.fx: return
        fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
    def update_history_capgains_cb():
        if not self.fx: return
        hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
        hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
    def update_exchanges():
        if not self.fx: return
        b = self.fx.is_enabled()
        ex_combo.setEnabled(b)
        if b:
            # only list exchanges supporting the chosen currency/history mode
            h = self.fx.get_history_config()
            c = self.fx.get_currency()
            exchanges = self.fx.get_exchanges_by_ccy(c, h)
        else:
            exchanges = self.fx.get_exchanges_by_ccy('USD', False)
        # avoid firing on_exchange while repopulating
        ex_combo.blockSignals(True)
        ex_combo.clear()
        ex_combo.addItems(sorted(exchanges))
        ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
        ex_combo.blockSignals(False)
    def on_currency(hh):
        if not self.fx: return
        # index 0 is _('None') -> fiat disabled
        b = bool(ccy_combo.currentIndex())
        ccy = str(ccy_combo.currentText()) if b else None
        self.fx.set_enabled(b)
        if b and ccy != self.fx.ccy:
            self.fx.set_currency(ccy)
        update_history_cb()
        update_exchanges()
        self.update_fiat()
    def on_exchange(idx):
        exchange = str(ex_combo.currentText())
        if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
            self.fx.set_exchange(exchange)
    def on_history(checked):
        if not self.fx: return
        self.fx.set_history_config(checked)
        update_exchanges()
        self.history_model.refresh('on_history')
        if self.fx.is_enabled() and checked:
            # reset timeout to get historical rates
            self.fx.trigger_update()
        update_history_capgains_cb()
    def on_history_capgains(checked):
        if not self.fx: return
        self.fx.set_history_capital_gains_config(checked)
        self.history_model.refresh('on_history_capgains')
    def on_fiat_address(checked):
        if not self.fx: return
        self.fx.set_fiat_address_config(checked)
        self.address_list.refresh_headers()
        self.address_list.update()
    update_currencies()
    update_history_cb()
    update_history_capgains_cb()
    update_fiat_address_cb()
    update_exchanges()
    ccy_combo.currentIndexChanged.connect(on_currency)
    hist_checkbox.stateChanged.connect(on_history)
    hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
    fiat_address_checkbox.stateChanged.connect(on_fiat_address)
    ex_combo.currentIndexChanged.connect(on_exchange)
    fiat_widgets = []
    fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
    fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
    fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
    fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
    fiat_widgets.append((QLabel(_('Source')), ex_combo))
    # assemble the tabs from the per-tab widget lists
    tabs_info = [
        (fee_widgets, _('Fees')),
        (tx_widgets, _('Transactions')),
        (gui_widgets, _('General')),
        (fiat_widgets, _('Fiat')),
        (id_widgets, _('Identity')),
    ]
    for widgets, name in tabs_info:
        tab = QWidget()
        grid = QGridLayout(tab)
        grid.setColumnStretch(0,1)
        for a,b in widgets:
            i = grid.rowCount()
            if b:
                if a:
                    grid.addWidget(a, i, 0)
                grid.addWidget(b, i, 1)
            else:
                grid.addWidget(a, i, 0, 1, 2)
        tabs.addTab(tab, name)
    vbox.addWidget(tabs)
    vbox.addStretch(1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    # run the dialog
    d.exec_()
    if self.fx:
        self.fx.trigger_update()
    self.alias_received_signal.disconnect(set_alias_color)
    run_hook('close_settings_dialog')
    if self.need_restart:
        self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
    """Qt close hook: run clean_up() exactly once, then accept the event."""
    # It seems in some rare cases this closeEvent() is called twice
    if self.cleaned_up:
        event.accept()
        return
    self.cleaned_up = True
    self.clean_up()
    event.accept()
def clean_up(self):
    """Tear down the window: stop threads, persist geometry/history, close wallet."""
    self.wallet.thread.stop()
    if self.network:
        self.network.unregister_callback(self.on_network)
        self.network.unregister_callback(self.on_quotes)
        self.network.unregister_callback(self.on_history)
    self.config.set_key("is_maximized", self.isMaximized())
    if not self.isMaximized():
        # remember window position/size for next launch
        g = self.geometry()
        self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
                                              g.width(),g.height()])
    # keep only the last 50 console commands
    self.config.set_key("console-history", self.console.history[-50:],
                        True)
    if self.qr_window:
        self.qr_window.close()
    self.close_wallet()
    self.gui_object.timer.timeout.disconnect(self.timer_actions)
    self.gui_object.close_window(self)
def plugins_dialog(self):
    """Show the plugins dialog: one checkbox row per plugin, plus optional
    per-plugin settings widgets and help buttons."""
    self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
    plugins = self.gui_object.plugins
    vbox = QVBoxLayout(d)
    # plugins
    scroll = QScrollArea()
    scroll.setEnabled(True)
    scroll.setWidgetResizable(True)
    scroll.setMinimumSize(400,250)
    vbox.addWidget(scroll)
    w = QWidget()
    scroll.setWidget(w)
    w.setMinimumHeight(plugins.count() * 35)
    grid = QGridLayout()
    grid.setColumnStretch(0,1)
    w.setLayout(grid)
    settings_widgets = {}
    def enable_settings_widget(p, name, i):
        # lazily create the plugin's settings widget; enable it only while
        # the plugin is loaded and enabled
        widget = settings_widgets.get(name)
        if not widget and p and p.requires_settings():
            widget = settings_widgets[name] = p.settings_widget(d)
            grid.addWidget(widget, i, 1)
        if widget:
            widget.setEnabled(bool(p and p.is_enabled()))
    def do_toggle(cb, name, i):
        # toggle() returns the plugin object when enabled, None when disabled
        p = plugins.toggle(name)
        cb.setChecked(bool(p))
        enable_settings_widget(p, name, i)
        run_hook('init_qt', self.gui_object)
    for i, descr in enumerate(plugins.descriptions.values()):
        full_name = descr['__name__']
        prefix, _separator, name = full_name.rpartition('.')
        p = plugins.get(name)
        if descr.get('registers_keystore'):
            # hardware-keystore plugins are managed elsewhere
            continue
        try:
            cb = QCheckBox(descr['fullname'])
            plugin_is_loaded = p is not None
            cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                          or plugin_is_loaded and p.can_user_disable())
            cb.setEnabled(cb_enabled)
            cb.setChecked(plugin_is_loaded and p.is_enabled())
            grid.addWidget(cb, i, 0)
            enable_settings_widget(p, name, i)
            cb.clicked.connect(partial(do_toggle, cb, name, i))
            msg = descr['description']
            if descr.get('requires'):
                msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
            grid.addWidget(HelpButton(msg), i, 2)
        except Exception:
            # never let one broken plugin break the whole dialog
            self.print_msg("error: cannot display plugin", name)
            traceback.print_exc(file=sys.stdout)
    grid.setRowStretch(len(plugins.descriptions.values()), 1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
def cpfp(self, parent_tx, new_tx):
    """Show the Child-Pays-For-Parent dialog and build the child transaction.

    *parent_tx* is the unconfirmed parent; *new_tx* is the candidate child
    spending one of its outputs back to the wallet. The fee chosen by the
    user must cover the combined size of both transactions.
    """
    total_size = parent_tx.estimated_size() + new_tx.estimated_size()
    d = WindowModalDialog(self, _('Child Pays for Parent'))
    vbox = QVBoxLayout(d)
    msg = (
        "A CPFP is a transaction that sends an unconfirmed output back to "
        "yourself, with a high fee. The goal is to have miners confirm "
        "the parent transaction in order to get the fee attached to the "
        "child transaction.")
    vbox.addWidget(WWLabel(_(msg)))
    msg2 = ("The proposed fee is computed using your "
            "fee/kB settings, applied to the total size of both child and "
            "parent transactions. After you broadcast a CPFP transaction, "
            "it is normal to see a new unconfirmed transaction in your history.")
    vbox.addWidget(WWLabel(_(msg2)))
    grid = QGridLayout()
    grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
    grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
    # the child can spend at most the value of the output it consumes
    max_fee = new_tx.output_value()
    grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
    grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
    output_amount = QLabel('')
    grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
    grid.addWidget(output_amount, 2, 1)
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # FIXME with dyn fees, without estimates, there are all kinds of crashes here
    def f(x):
        # live-update the output amount as the fee is edited
        a = max_fee - fee_e.get_amount()
        output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
    fee_e.textChanged.connect(f)
    fee = self.config.fee_per_kb() * total_size / 1000
    fee_e.setAmount(fee)
    grid.addWidget(QLabel(_('Fee' + ':')), 3, 0)
    grid.addWidget(fee_e, 3, 1)
    def on_rate(dyn, pos, fee_rate):
        fee = fee_rate * total_size / 1000
        fee = min(max_fee, fee)
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    fee_slider.update()
    grid.addWidget(fee_slider, 4, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    fee = fee_e.get_amount()
    if fee > max_fee:
        self.show_error(_('Max fee exceeded'))
        return
    new_tx = self.wallet.cpfp(parent_tx, fee)
    new_tx.set_rbf(True)
    self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
    """Show the RBF fee-bump dialog for *tx* and build the replacement tx."""
    is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
    if fee is None:
        self.show_error(_("Can't bump fee: unknown fee for original transaction."))
        return
    tx_label = self.wallet.get_label(tx.txid())
    tx_size = tx.estimated_size()
    d = WindowModalDialog(self, _('Bump Fee'))
    vbox = QVBoxLayout(d)
    vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
    vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
    vbox.addWidget(QLabel(_('New fee' + ':')))
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # suggest 1.5x the current fee as a starting point
    fee_e.setAmount(fee * 1.5)
    vbox.addWidget(fee_e)
    def on_rate(dyn, pos, fee_rate):
        fee = fee_rate * tx_size / 1000
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    vbox.addWidget(fee_slider)
    cb = QCheckBox(_('Final'))
    vbox.addWidget(cb)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    is_final = cb.isChecked()
    new_fee = fee_e.get_amount()
    delta = new_fee - fee
    if delta < 0:
        self.show_error("fee too low")
        return
    try:
        new_tx = self.wallet.bump_fee(tx, delta)
    except CannotBumpFee as e:
        self.show_error(str(e))
        return
    if is_final:
        # "final" opts the replacement out of further RBF
        new_tx.set_rbf(False)
    self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
    """Add an offline transaction *tx* to the wallet history.

    Returns True on success, False when the transaction conflicts with
    the existing history or cannot be added.
    """
    win = self.top_level_window()
    try:
        if not self.wallet.add_transaction(tx.txid(), tx):
            win.show_error(_("Transaction could not be saved.") + "\n" +
                           _("It conflicts with current history."))
            return False
    except AddTransactionException as e:
        win.show_error(e)
        return False
    else:
        self.wallet.save_transactions(write=True)
        # need to update at least: history_list, utxo_list, address_list
        self.need_update.set()
        msg = (_("Transaction added to wallet history.") + '\n\n' +
               _("Note: this is an offline transaction, if you want the network "
                 "to see it, you need to broadcast it."))
        win.msg_box(QPixmap(":icons/offline_tx.png"), None, _('Success'), msg)
        return True
|
methods.py | import logging
import multiprocessing
import threading
def add_process(name, function, *args):
    """Start *function(*args)* in a new process and return the Process.

    *name* is only used for the log message, not as the process name.
    """
    logging.info(f"Starting new process - {name}")
    proc = multiprocessing.Process(target=function, args=args)
    proc.start()
    return proc
def add_thread(name, function, *args):
    """Start *function(*args)* on a new thread and return the Thread.

    *name* is only used for the log message, not as the thread name.
    """
    logging.info(f"Starting new thread - {name}")
    worker = threading.Thread(target=function, args=args)
    worker.start()
    return worker
class thread_function:
    """Decorator that makes the wrapped callable run on a new thread.

    Calling the decorated object starts the callable via add_thread()
    (with the generic name "Unnamed") and returns the started Thread
    instead of the callable's result.
    """
    def __init__(self, function):
        # the callable to execute on a background thread
        self.function = function
    def __call__(self, *args):
        # returns the threading.Thread created by add_thread()
        return add_thread("Unnamed", self.function, *args)
idf_monitor.py | #!/usr/bin/env python
#
# esp-idf serial output monitor tool. Does some helpful things:
# - Looks up hex addresses in ELF file with addr2line
# - Reset ESP32 via serial RTS line (Ctrl-T Ctrl-R)
# - Run flash build target to rebuild and flash entire project (Ctrl-T Ctrl-F)
# - Run app-flash build target to rebuild and flash app only (Ctrl-T Ctrl-A)
# - If gdbstub output is detected, gdb is automatically loaded
# - If core dump output is detected, it is converted to a human-readable report
# by espcoredump.py.
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contains elements taken from miniterm "Very simple serial terminal" which
# is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# Originally released under BSD-3-Clause license.
#
from __future__ import print_function, division
from __future__ import unicode_literals
from builtins import chr
from builtins import object
from builtins import bytes
import subprocess
import argparse
import codecs
import datetime
import re
import os
try:
import queue
except ImportError:
import Queue as queue
import shlex
import time
import sys
import serial
import serial.tools.list_ports
import serial.tools.miniterm as miniterm
import threading
import ctypes
import types
from distutils.version import StrictVersion
from io import open
import textwrap
import tempfile
# shortcut to miniterm's helper producing a human-readable key name
key_description = miniterm.key_description

# Control-key characters (raw byte each Ctrl+<letter> produces on a terminal)
CTRL_A = '\x01'
CTRL_B = '\x02'
CTRL_F = '\x06'
CTRL_H = '\x08'
CTRL_R = '\x12'
CTRL_T = '\x14'
CTRL_Y = '\x19'
CTRL_P = '\x10'
CTRL_X = '\x18'
CTRL_L = '\x0c'
CTRL_RBRACKET = '\x1d'  # Ctrl+]

# Command parsed from console inputs (dispatched in Monitor.handle_commands)
CMD_STOP = 1
CMD_RESET = 2
CMD_MAKE = 3
CMD_APP_FLASH = 4
CMD_OUTPUT_TOGGLE = 5
CMD_TOGGLE_LOGGING = 6
CMD_ENTER_BOOT = 7
# ANSI terminal codes (if changed, regular expressions in LineMatcher need to be updated)
ANSI_RED = '\033[1;31m'
ANSI_YELLOW = '\033[0;33m'
ANSI_NORMAL = '\033[0m'


def color_print(message, color, newline='\n'):
    """Write *message* to stderr wrapped in the given ANSI *color* code."""
    sys.stderr.write(color + message + ANSI_NORMAL + newline)


def yellow_print(message, newline='\n'):
    """Highlight *message* on stderr in yellow."""
    color_print(message, ANSI_YELLOW, newline)


def red_print(message, newline='\n'):
    """Highlight *message* on stderr in red."""
    color_print(message, ANSI_RED, newline)
__version__ = "1.1"

# Tags for tuples in queues (first element of every queued item)
TAG_KEY = 0
TAG_SERIAL = 1
TAG_SERIAL_FLUSH = 2
TAG_CMD = 3

# regex matches a potential PC value (0x4xxxxxxx)
MATCH_PCADDR = re.compile(r'0x4[0-9a-f]{7}', re.IGNORECASE)

DEFAULT_TOOLCHAIN_PREFIX = "xtensa-esp32-elf-"

DEFAULT_PRINT_FILTER = ""

# coredump related messages (exact marker lines emitted by the target)
COREDUMP_UART_START = b"================= CORE DUMP START ================="
COREDUMP_UART_END = b"================= CORE DUMP END ================="
COREDUMP_UART_PROMPT = b"Press Enter to print core dump to UART..."

# coredump states (Monitor._reading_coredump state machine)
COREDUMP_IDLE = 0
COREDUMP_READING = 1
COREDUMP_DONE = 2

# coredump decoding options (--decode-coredumps argument values)
COREDUMP_DECODE_DISABLE = "disable"
COREDUMP_DECODE_INFO = "info"
class StoppableThread(object):
    """
    Thread-like helper that can be started and stopped repeatedly.

    Unlike ``threading.Thread`` (which may only be started once), each call
    to ``start()`` spawns a fresh worker thread.  Subclasses override
    ``run()`` for the main behaviour and ``_cancel()`` to unblock it when
    ``stop()`` is requested.
    """

    def __init__(self):
        self._thread = None

    @property
    def alive(self):
        """True whenever the internal worker thread object exists."""
        return self._thread is not None

    def start(self):
        """Spawn the worker thread, unless one is already running."""
        if self._thread is not None:
            return
        self._thread = threading.Thread(target=self._run_outer)
        self._thread.start()

    def _cancel(self):
        """Subclass hook: unblock run() so it can exit promptly."""
        pass

    def run(self):
        """Subclass hook: the worker thread's main behaviour."""
        pass

    def _run_outer(self):
        try:
            self.run()
        finally:
            # mark ourselves not-alive even if run() raised
            self._thread = None

    def stop(self):
        """Request cancellation and wait for the worker thread to finish."""
        worker = self._thread
        if worker is None:
            return
        self._thread = None
        self._cancel()
        worker.join()
class ConsoleReader(StoppableThread):
    """ Read input keys from the console and push them to the queue,
    until stopped.
    """

    def __init__(self, console, event_queue, cmd_queue, parser, test_mode):
        # console: miniterm.Console providing raw getkey()/setup()/cleanup()
        # event_queue: receives (TAG_KEY, ...) tuples and the stop command
        # cmd_queue: receives all other (TAG_CMD, ...) tuples
        # parser: ConsoleParser converting raw keys into tagged tuples
        # test_mode: True when stdin is a PTY under test (getkey is skipped)
        super(ConsoleReader, self).__init__()
        self.console = console
        self.event_queue = event_queue
        self.cmd_queue = cmd_queue
        self.parser = parser
        self.test_mode = test_mode

    def run(self):
        """Poll the console for keys and enqueue the parsed results."""
        self.console.setup()
        try:
            while self.alive:
                try:
                    if os.name == 'nt':
                        # Windows kludge: because the console.cancel() method doesn't
                        # seem to work to unblock getkey() on the Windows implementation.
                        #
                        # So we only call getkey() if we know there's a key waiting for us.
                        import msvcrt
                        while not msvcrt.kbhit() and self.alive:
                            time.sleep(0.1)
                        if not self.alive:
                            break
                    elif self.test_mode:
                        # In testing mode the stdin is connected to PTY but is not used for input anything. For PTY
                        # the canceling by fcntl.ioctl isn't working and would hang in self.console.getkey().
                        # Therefore, we avoid calling it.
                        while self.alive:
                            time.sleep(0.1)
                        break
                    c = self.console.getkey()
                except KeyboardInterrupt:
                    c = '\x03'
                if c is not None:
                    ret = self.parser.parse(c)
                    if ret is not None:
                        (tag, cmd) = ret
                        # stop command should be executed last
                        if tag == TAG_CMD and cmd != CMD_STOP:
                            self.cmd_queue.put(ret)
                        else:
                            self.event_queue.put(ret)
        finally:
            self.console.cleanup()

    def _cancel(self):
        # Unblock the blocking getkey() by injecting a NUL byte into the tty.
        if os.name == 'posix' and not self.test_mode:
            # this is the way cancel() is implemented in pyserial 3.3 or newer,
            # older pyserial (3.1+) has cancellation implemented via 'select',
            # which does not work when console sends an escape sequence response
            #
            # even older pyserial (<3.1) does not have this method
            #
            # on Windows there is a different (also hacky) fix, applied above.
            #
            # note that TIOCSTI is not implemented in WSL / bash-on-Windows.
            # TODO: introduce some workaround to make it work there.
            #
            # Note: This would throw exception in testing mode when the stdin is connected to PTY.
            import fcntl
            import termios
            fcntl.ioctl(self.console.fd, termios.TIOCSTI, b'\0')
class ConsoleParser(object):
    """Convert raw console key presses into tagged key/command tuples.

    Tracks whether the menu escape key (Ctrl-T) was just pressed so the next
    key can be interpreted as a menu command.
    """

    def __init__(self, eol="CRLF"):
        # Map the configured EOL mode to a translation applied to typed keys
        # before they are sent to the target.
        self.translate_eol = {
            "CRLF": lambda c: c.replace("\n", "\r\n"),
            "CR": lambda c: c.replace("\n", "\r"),
            "LF": lambda c: c.replace("\r", "\n"),
        }[eol]
        self.menu_key = CTRL_T
        self.exit_key = CTRL_RBRACKET
        self._pressed_menu_key = False

    def parse(self, key):
        """Parse one key; return (TAG_KEY, key), (TAG_CMD, cmd) or None."""
        ret = None
        if self._pressed_menu_key:
            ret = self._handle_menu_key(key)
        elif key == self.menu_key:
            self._pressed_menu_key = True
        elif key == self.exit_key:
            ret = (TAG_CMD, CMD_STOP)
        else:
            key = self.translate_eol(key)
            ret = (TAG_KEY, key)
        return ret

    def _handle_menu_key(self, c):
        """Interpret the key that follows the menu escape key."""
        ret = None
        if c == self.exit_key or c == self.menu_key:  # send verbatim
            ret = (TAG_KEY, c)
        elif c in [CTRL_H, 'h', 'H', '?']:
            red_print(self.get_help_text())
        elif c == CTRL_R:  # Reset device via RTS
            ret = (TAG_CMD, CMD_RESET)
        elif c == CTRL_F:  # Recompile & upload
            ret = (TAG_CMD, CMD_MAKE)
        elif c in [CTRL_A, 'a', 'A']:  # Recompile & upload app only
            # "CTRL-A" cannot be captured with the default settings of the Windows command line, therefore, "A" can be used
            # instead
            ret = (TAG_CMD, CMD_APP_FLASH)
        elif c == CTRL_Y:  # Toggle output display
            ret = (TAG_CMD, CMD_OUTPUT_TOGGLE)
        elif c == CTRL_L:  # Toggle saving output into file
            ret = (TAG_CMD, CMD_TOGGLE_LOGGING)
        elif c == CTRL_P:
            yellow_print("Pause app (enter bootloader mode), press Ctrl-T Ctrl-R to restart")
            # to fast trigger pause without press menu key
            ret = (TAG_CMD, CMD_ENTER_BOOT)
        elif c in [CTRL_X, 'x', 'X']:  # Exiting from within the menu
            ret = (TAG_CMD, CMD_STOP)
        else:
            red_print('--- unknown menu character {} --'.format(key_description(c)))
        self._pressed_menu_key = False
        return ret

    def get_help_text(self):
        """Return the menu help text shown for Ctrl-T followed by H/?."""
        text = """\
            --- idf_monitor ({version}) - ESP-IDF monitor tool
            --- based on miniterm from pySerial
            ---
            --- {exit:8} Exit program
            --- {menu:8} Menu escape key, followed by:
            --- Menu keys:
            --- {menu:14} Send the menu character itself to remote
            --- {exit:14} Send the exit character itself to remote
            --- {reset:14} Reset target board via RTS line
            --- {makecmd:14} Build & flash project
            --- {appmake:14} Build & flash app only
            --- {output:14} Toggle output display
            --- {log:14} Toggle saving output into file
            --- {pause:14} Reset target into bootloader to pause app via RTS line
            --- {menuexit:14} Exit program
        """.format(version=__version__,
                   exit=key_description(self.exit_key),
                   menu=key_description(self.menu_key),
                   reset=key_description(CTRL_R),
                   makecmd=key_description(CTRL_F),
                   appmake=key_description(CTRL_A) + ' (or A)',
                   output=key_description(CTRL_Y),
                   log=key_description(CTRL_L),
                   pause=key_description(CTRL_P),
                   menuexit=key_description(CTRL_X) + ' (or X)')
        return textwrap.dedent(text)

    def get_next_action_text(self):
        """Return the prompt shown after a build failure / gdb exit."""
        text = """\
            --- Press {} to exit monitor.
            --- Press {} to build & flash project.
            --- Press {} to build & flash app.
            --- Press any other key to resume monitor (resets target).
        """.format(key_description(self.exit_key),
                   key_description(CTRL_F),
                   key_description(CTRL_A))
        return textwrap.dedent(text)

    def parse_next_action_key(self, c):
        """Parse the key answering the next-action prompt; None resumes."""
        ret = None
        if c == self.exit_key:
            ret = (TAG_CMD, CMD_STOP)
        elif c == CTRL_F:  # Recompile & upload
            ret = (TAG_CMD, CMD_MAKE)
        elif c in [CTRL_A, 'a', 'A']:  # Recompile & upload app only
            # "CTRL-A" cannot be captured with the default settings of the Windows command line, therefore, "A" can be used
            # instead
            ret = (TAG_CMD, CMD_APP_FLASH)
        return ret
class SerialReader(StoppableThread):
    """ Read serial data from the serial port and push to the
    event queue, until stopped.
    """

    def __init__(self, serial, event_queue):
        # serial: a (possibly unopened) pyserial instance
        # event_queue: receives (TAG_SERIAL, data) tuples
        super(SerialReader, self).__init__()
        self.baud = serial.baudrate
        self.serial = serial
        self.event_queue = event_queue
        if not hasattr(self.serial, 'cancel_read'):
            # enable timeout for checking alive flag,
            # if cancel_read not available
            self.serial.timeout = 0.25

    def run(self):
        """Open the port (forcing an RTS reset) and pump bytes to the queue."""
        if not self.serial.is_open:
            self.serial.baudrate = self.baud
            # RTS is asserted before open and released after, resetting the
            # target on connect; keep this exact ordering.
            self.serial.rts = True  # Force an RTS reset on open
            self.serial.open()
            self.serial.rts = False
            self.serial.dtr = self.serial.dtr  # usbser.sys workaround
        try:
            while self.alive:
                # read whatever is buffered, blocking for at least one byte
                data = self.serial.read(self.serial.in_waiting or 1)
                if len(data):
                    self.event_queue.put((TAG_SERIAL, data), False)
        finally:
            self.serial.close()

    def _cancel(self):
        # Unblock the blocking read() so run() notices it should exit.
        if hasattr(self.serial, 'cancel_read'):
            try:
                self.serial.cancel_read()
            except Exception:
                pass
class LineMatcher(object):
    """
    Assembles a dictionary of filtering rules based on the --print_filter
    argument of idf_monitor. Then later it is used to match lines and
    determine whether they should be shown on screen or not.
    """
    LEVEL_N = 0
    LEVEL_E = 1
    LEVEL_W = 2
    LEVEL_I = 3
    LEVEL_D = 4
    LEVEL_V = 5

    level = {'N': LEVEL_N, 'E': LEVEL_E, 'W': LEVEL_W, 'I': LEVEL_I, 'D': LEVEL_D,
             'V': LEVEL_V, '*': LEVEL_V, '': LEVEL_V}

    def __init__(self, print_filter):
        # tag -> most verbose level still shown for that tag
        self._dict = dict()
        # Matches "X (123) tag: ..." ESP_LOG output, optionally preceded by
        # an ANSI color escape; group 1 = level letter, group 2 = tag.
        self._re = re.compile(r'^(?:\033\[[01];?[0-9]+m?)?([EWIDV]) \([0-9]+\) ([^:]+): ')
        entries = print_filter.split()
        if not entries:
            # default is to print everything
            self._dict["*"] = self.LEVEL_V
        for entry in entries:
            parts = entry.split(':')
            if len(parts) == 1:
                # specifying no warning level defaults to verbose level
                verdict = self.LEVEL_V
            elif len(parts) == 2:
                if not parts[0]:
                    raise ValueError('No tag specified in filter ' + entry)
                try:
                    verdict = self.level[parts[1].upper()]
                except KeyError:
                    raise ValueError('Unknown warning level in filter ' + entry)
            else:
                raise ValueError('Missing ":" in filter ' + entry)
            self._dict[parts[0]] = verdict

    def match(self, line):
        """Return True when *line* should be displayed under the filter."""
        try:
            m = self._re.search(line)
            if m:
                line_level = self.level[m.group(1)]
                tag = m.group(2)
                if tag in self._dict:
                    return self._dict[tag] >= line_level
                return self._dict.get("*", self.LEVEL_N) >= line_level
        except (KeyError, IndexError):
            # Regular line written with something else than ESP_LOG*
            # or an empty line.
            pass
        # We need something more than "*.N" for printing.
        return self._dict.get("*", self.LEVEL_N) > self.LEVEL_N
class SerialStopException(Exception):
    """
    This exception is used for stopping the IDF monitor in testing mode.

    Raised by Monitor.handle_serial_input when serial data matches the exit
    key while the monitor runs in socket (testing) mode.
    """
    pass
class Monitor(object):
    """
    Monitor application main class.
    This was originally derived from miniterm.Miniterm, but it turned out to be easier to write from scratch for this
    purpose.
    Main difference is that all event processing happens in the main thread, not the worker threads.
    """

    def __init__(self, serial_instance, elf_file, print_filter, make="make", toolchain_prefix=DEFAULT_TOOLCHAIN_PREFIX, eol="CRLF",
                 decode_coredumps=COREDUMP_DECODE_INFO):
        # serial_instance: serial port object (opened lazily by SerialReader)
        # elf_file: path to the application ELF, used by addr2line/gdb/coredump
        # print_filter: filter spec string, see LineMatcher
        # make: make command string, or string of args split for idf.py
        # toolchain_prefix: cross-toolchain triplet prefix for addr2line/gdb
        # eol: EOL translation mode for keys sent to the target
        # decode_coredumps: COREDUMP_DECODE_INFO or COREDUMP_DECODE_DISABLE
        super(Monitor, self).__init__()
        self.event_queue = queue.Queue()
        self.cmd_queue = queue.Queue()
        self.console = miniterm.Console()
        if os.name == 'nt':
            sys.stderr = ANSIColorConverter(sys.stderr, decode_output=True)
            self.console.output = ANSIColorConverter(self.console.output)
            self.console.byte_output = ANSIColorConverter(self.console.byte_output)
        if StrictVersion(serial.VERSION) < StrictVersion('3.3.0'):
            # Use Console.getkey implementation from 3.3.0 (to be in sync with the ConsoleReader._cancel patch above)
            def getkey_patched(self):
                c = self.enc_stdin.read(1)
                if c == chr(0x7f):
                    c = chr(8)  # map the BS key (which yields DEL) to backspace
                return c
            self.console.getkey = types.MethodType(getkey_patched, self.console)
        socket_mode = serial_instance.port.startswith("socket://")  # testing hook - data from serial can make exit the monitor
        self.serial = serial_instance
        self.console_parser = ConsoleParser(eol)
        self.console_reader = ConsoleReader(self.console, self.event_queue, self.cmd_queue, self.console_parser, socket_mode)
        self.serial_reader = SerialReader(self.serial, self.event_queue)
        self.elf_file = elf_file
        if not os.path.exists(make):
            self.make = shlex.split(make)  # allow for possibility the "make" arg is a list of arguments (for idf.py)
        else:
            self.make = make
        self.toolchain_prefix = toolchain_prefix

        # internal state
        self._last_line_part = b""
        self._gdb_buffer = b""
        self._pc_address_buffer = b""
        self._line_matcher = LineMatcher(print_filter)
        self._invoke_processing_last_line_timer = None
        self._force_line_print = False
        self._output_enabled = True
        self._serial_check_exit = socket_mode
        self._log_file = None
        self._decode_coredumps = decode_coredumps
        self._reading_coredump = COREDUMP_IDLE
        self._coredump_buffer = b""

    def invoke_processing_last_line(self):
        # Queue a flush event so a pending (EOL-less) partial line is finalized.
        self.event_queue.put((TAG_SERIAL_FLUSH, b''), False)

    def main_loop(self):
        """Run the event loop: dispatch console commands, keys and serial data."""
        self.console_reader.start()
        self.serial_reader.start()
        try:
            while self.console_reader.alive and self.serial_reader.alive:
                # commands first, then events, with a short poll timeout
                try:
                    item = self.cmd_queue.get_nowait()
                except queue.Empty:
                    try:
                        item = self.event_queue.get(True, 0.03)
                    except queue.Empty:
                        continue

                (event_tag, data) = item
                if event_tag == TAG_CMD:
                    self.handle_commands(data)
                elif event_tag == TAG_KEY:
                    try:
                        self.serial.write(codecs.encode(data))
                    except serial.SerialException:
                        pass  # this shouldn't happen, but sometimes port has closed in serial thread
                    except UnicodeEncodeError:
                        pass  # this can happen if a non-ascii character was passed, ignoring
                elif event_tag == TAG_SERIAL:
                    self.handle_serial_input(data)
                    if self._invoke_processing_last_line_timer is not None:
                        self._invoke_processing_last_line_timer.cancel()
                    self._invoke_processing_last_line_timer = threading.Timer(0.1, self.invoke_processing_last_line)
                    self._invoke_processing_last_line_timer.start()
                    # If no further data is received in the next short period
                    # of time then the _invoke_processing_last_line_timer
                    # generates an event which will result in the finishing of
                    # the last line. This is fix for handling lines sent
                    # without EOL.
                elif event_tag == TAG_SERIAL_FLUSH:
                    self.handle_serial_input(data, finalize_line=True)
                else:
                    raise RuntimeError("Bad event data %r" % ((event_tag,data),))
        except SerialStopException:
            sys.stderr.write(ANSI_NORMAL + "Stopping condition has been received\n")
        finally:
            try:
                self.console_reader.stop()
                self.serial_reader.stop()
                self.stop_logging()
                # Cancelling _invoke_processing_last_line_timer is not
                # important here because receiving empty data doesn't matter.
                self._invoke_processing_last_line_timer = None
            except Exception:
                pass
            sys.stderr.write(ANSI_NORMAL + "\n")

    def handle_serial_input(self, data, finalize_line=False):
        """Split raw serial bytes into lines; filter, print and inspect them.

        finalize_line forces the trailing partial line to be processed (used
        by the flush timer for output sent without EOL).
        """
        sp = data.split(b'\n')
        if self._last_line_part != b"":
            # add unprocessed part from previous "data" to the first line
            sp[0] = self._last_line_part + sp[0]
            self._last_line_part = b""
        if sp[-1] != b"":
            # last part is not a full line
            self._last_line_part = sp.pop()
        for line in sp:
            if line != b"":
                if self._serial_check_exit and line == self.console_parser.exit_key.encode('latin-1'):
                    raise SerialStopException()
                self.check_coredump_trigger_before_print(line)
                if self._force_line_print or self._line_matcher.match(line.decode(errors="ignore")):
                    self._print(line + b'\n')
                    self.handle_possible_pc_address_in_line(line)
                self.check_coredump_trigger_after_print(line)
                self.check_gdbstub_trigger(line)
                self._force_line_print = False
        # Now we have the last part (incomplete line) in _last_line_part. By
        # default we don't touch it and just wait for the arrival of the rest
        # of the line. But after some time when we didn't received it we need
        # to make a decision.
        if self._last_line_part != b"":
            if self._force_line_print or (finalize_line and self._line_matcher.match(self._last_line_part.decode(errors="ignore"))):
                self._force_line_print = True
                self._print(self._last_line_part)
                self.handle_possible_pc_address_in_line(self._last_line_part)
                self.check_gdbstub_trigger(self._last_line_part)
                # It is possible that the incomplete line cuts in half the PC
                # address. A small buffer is kept and will be used the next time
                # handle_possible_pc_address_in_line is invoked to avoid this problem.
                # MATCH_PCADDR matches 10 character long addresses. Therefore, we
                # keep the last 9 characters.
                self._pc_address_buffer = self._last_line_part[-9:]
                # GDB sequence can be cut in half also. GDB sequence is 7
                # characters long, therefore, we save the last 6 characters.
                self._gdb_buffer = self._last_line_part[-6:]
                self._last_line_part = b""
        # else: keeping _last_line_part and it will be processed the next time
        # handle_serial_input is invoked

    def handle_possible_pc_address_in_line(self, line):
        # Prepend the saved tail of the previous partial line in case a PC
        # address was cut in half at a chunk boundary.
        line = self._pc_address_buffer + line
        self._pc_address_buffer = b""
        for m in re.finditer(MATCH_PCADDR, line.decode(errors="ignore")):
            self.lookup_pc_address(m.group())

    def __enter__(self):
        """ Use 'with self' to temporarily disable monitoring behaviour """
        self.serial_reader.stop()
        self.console_reader.stop()

    def __exit__(self, *args, **kwargs):
        """ Use 'with self' to temporarily disable monitoring behaviour """
        self.console_reader.start()
        self.serial_reader.start()

    def prompt_next_action(self, reason):
        """Show *reason* and ask the user what to do next (exit/build/resume)."""
        self.console.setup()  # set up console to trap input characters
        try:
            red_print("--- {}".format(reason))
            red_print(self.console_parser.get_next_action_text())

            k = CTRL_T  # ignore CTRL-T here, so people can muscle-memory Ctrl-T Ctrl-F, etc.
            while k == CTRL_T:
                k = self.console.getkey()
        finally:
            self.console.cleanup()
        ret = self.console_parser.parse_next_action_key(k)
        if ret is not None:
            cmd = ret[1]
            if cmd == CMD_STOP:
                # the stop command should be handled last
                self.event_queue.put(ret)
            else:
                self.cmd_queue.put(ret)

    def run_make(self, target):
        """Run the configured make command with *target*, monitor paused."""
        with self:
            if isinstance(self.make, list):
                popen_args = self.make + [target]
            else:
                popen_args = [self.make, target]
            yellow_print("Running %s..." % " ".join(popen_args))
            p = subprocess.Popen(popen_args)
            try:
                p.wait()
            except KeyboardInterrupt:
                p.wait()
            if p.returncode != 0:
                self.prompt_next_action("Build failed")
            else:
                self.output_enable(True)

    def lookup_pc_address(self, pc_addr):
        """Translate a hex PC value to a source location via addr2line."""
        cmd = ["%saddr2line" % self.toolchain_prefix,
               "-pfiaC", "-e", self.elf_file, pc_addr]
        try:
            translation = subprocess.check_output(cmd, cwd=".")
            if b"?? ??:0" not in translation:
                self._print(translation.decode(), console_printer=yellow_print)
        except OSError as e:
            red_print("%s: %s" % (" ".join(cmd), e))

    def check_gdbstub_trigger(self, line):
        """Detect a gdbstub break packet ($T..#..) and launch gdb if valid."""
        line = self._gdb_buffer + line
        self._gdb_buffer = b""
        m = re.search(b"\\$(T..)#(..)", line)  # look for a gdb "reason" for a break
        if m is not None:
            try:
                chsum = sum(ord(bytes([p])) for p in m.group(1)) & 0xFF
                calc_chsum = int(m.group(2), 16)
            except ValueError:
                return  # payload wasn't valid hex digits
            if chsum == calc_chsum:
                self.run_gdb()
            else:
                red_print("Malformed gdb message... calculated checksum %02x received %02x" % (chsum, calc_chsum))

    def check_coredump_trigger_before_print(self, line):
        """Track core dump markers in *line*; mute output while one is read."""
        if self._decode_coredumps == COREDUMP_DECODE_DISABLE:
            return

        if COREDUMP_UART_PROMPT in line:
            yellow_print("Initiating core dump!")
            self.event_queue.put((TAG_KEY, '\n'))
            return

        if COREDUMP_UART_START in line:
            yellow_print("Core dump started (further output muted)")
            self._reading_coredump = COREDUMP_READING
            self._coredump_buffer = b""
            self._output_enabled = False
            return

        if COREDUMP_UART_END in line:
            self._reading_coredump = COREDUMP_DONE
            yellow_print("\nCore dump finished!")
            self.process_coredump()
            return

        if self._reading_coredump == COREDUMP_READING:
            kb = 1024
            buffer_len_kb = len(self._coredump_buffer) // kb
            self._coredump_buffer += line.replace(b'\r', b'') + b'\n'
            new_buffer_len_kb = len(self._coredump_buffer) // kb
            if new_buffer_len_kb > buffer_len_kb:
                yellow_print("Received %3d kB..." % (new_buffer_len_kb), newline='\r')

    def check_coredump_trigger_after_print(self, line):
        """Re-enable muted output once the core dump END line was consumed."""
        if self._decode_coredumps == COREDUMP_DECODE_DISABLE:
            return

        # Re-enable output after the last line of core dump has been consumed
        if not self._output_enabled and self._reading_coredump == COREDUMP_DONE:
            self._reading_coredump = COREDUMP_IDLE
            self._output_enabled = True
            self._coredump_buffer = b""

    def process_coredump(self):
        """Decode the collected core dump buffer with espcoredump.py."""
        if self._decode_coredumps != COREDUMP_DECODE_INFO:
            raise NotImplementedError("process_coredump: %s not implemented" % self._decode_coredumps)

        coredump_script = os.path.join(os.path.dirname(__file__), "..", "components", "espcoredump", "espcoredump.py")
        coredump_file = None
        try:
            # On Windows, the temporary file can't be read unless it is closed.
            # Set delete=False and delete the file manually later.
            with tempfile.NamedTemporaryFile(mode="wb", delete=False) as coredump_file:
                coredump_file.write(self._coredump_buffer)
                coredump_file.flush()

            cmd = [sys.executable,
                   coredump_script,
                   "info_corefile",
                   "--core", coredump_file.name,
                   "--core-format", "b64",
                   self.elf_file
                   ]
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
            self._output_enabled = True
            self._print(output)
            self._output_enabled = False  # Will be reenabled in check_coredump_trigger_after_print
        except subprocess.CalledProcessError as e:
            yellow_print("Failed to run espcoredump script: {}\n\n".format(e))
            # fall back to printing the raw (base64) dump so it isn't lost
            self._output_enabled = True
            self._print(COREDUMP_UART_START + b'\n')
            self._print(self._coredump_buffer)
            # end line will be printed in handle_serial_input
        finally:
            if coredump_file is not None:
                try:
                    os.unlink(coredump_file.name)
                except OSError as e:
                    yellow_print("Couldn't remote temporary core dump file ({})".format(e))

    def run_gdb(self):
        """Launch gdb attached to the target over serial, monitor paused."""
        with self:  # disable console control
            sys.stderr.write(ANSI_NORMAL)
            try:
                cmd = ["%sgdb" % self.toolchain_prefix,
                       "-ex", "set serial baud %d" % self.serial.baudrate,
                       "-ex", "target remote %s" % self.serial.port,
                       "-ex", "interrupt",  # monitor has already parsed the first 'reason' command, need a second
                       self.elf_file]
                process = subprocess.Popen(cmd, cwd=".")
                process.wait()
            except OSError as e:
                red_print("%s: %s" % (" ".join(cmd), e))
            except KeyboardInterrupt:
                pass  # happens on Windows, maybe other OSes
            finally:
                try:
                    # on Linux, maybe other OSes, gdb sometimes seems to be alive even after wait() returns...
                    process.terminate()
                except Exception:
                    pass
                try:
                    # also on Linux, maybe other OSes, gdb sometimes exits uncleanly and breaks the tty mode
                    subprocess.call(["stty", "sane"])
                except Exception:
                    pass  # don't care if there's no stty, we tried...
            self.prompt_next_action("gdb exited")

    def output_enable(self, enable):
        # enable: whether serial output is echoed to the console
        self._output_enabled = enable

    def output_toggle(self):
        """Flip the output display on/off and tell the user how to undo it."""
        self._output_enabled = not self._output_enabled
        yellow_print("\nToggle output display: {}, Type Ctrl-T Ctrl-Y to show/disable output again.".format(self._output_enabled))

    def toggle_logging(self):
        """Start logging to file if not logging, stop it otherwise."""
        if self._log_file:
            self.stop_logging()
        else:
            self.start_logging()

    def start_logging(self):
        """Open a timestamped log file named after the ELF and log into it."""
        if not self._log_file:
            try:
                name = "log.{}.{}.txt".format(os.path.splitext(os.path.basename(self.elf_file))[0],
                                              datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
                self._log_file = open(name, "wb+")
                yellow_print("\nLogging is enabled into file {}".format(name))
            except Exception as e:
                red_print("\nLog file {} cannot be created: {}".format(name, e))

    def stop_logging(self):
        """Close the log file, if one is open."""
        if self._log_file:
            try:
                name = self._log_file.name
                self._log_file.close()
                yellow_print("\nLogging is disabled and file {} has been closed".format(name))
            except Exception as e:
                red_print("\nLog file cannot be closed: {}".format(e))
            finally:
                self._log_file = None

    def _print(self, string, console_printer=None):
        """Write *string* to the console (when enabled) and to the log file."""
        if console_printer is None:
            console_printer = self.console.write_bytes
        if self._output_enabled:
            console_printer(string)
        if self._log_file:
            try:
                if isinstance(string, type(u'')):
                    string = string.encode()
                self._log_file.write(string)
            except Exception as e:
                red_print("\nCannot write to file: {}".format(e))
                # don't fill-up the screen with the previous errors (probably consequent prints would fail also)
                self.stop_logging()

    def handle_commands(self, cmd):
        """Execute one CMD_* command parsed from console input."""
        if cmd == CMD_STOP:
            self.console_reader.stop()
            self.serial_reader.stop()
        elif cmd == CMD_RESET:
            self.serial.setRTS(True)
            self.serial.setDTR(self.serial.dtr)  # usbser.sys workaround
            time.sleep(0.2)
            self.serial.setRTS(False)
            self.serial.setDTR(self.serial.dtr)  # usbser.sys workaround
            self.output_enable(True)
        elif cmd == CMD_MAKE:
            self.run_make("flash")
        elif cmd == CMD_APP_FLASH:
            self.run_make("app-flash")
        elif cmd == CMD_OUTPUT_TOGGLE:
            self.output_toggle()
        elif cmd == CMD_TOGGLE_LOGGING:
            self.toggle_logging()
        elif cmd == CMD_ENTER_BOOT:
            self.serial.setDTR(False)  # IO0=HIGH
            self.serial.setRTS(True)  # EN=LOW, chip in reset
            self.serial.setDTR(self.serial.dtr)  # usbser.sys workaround
            time.sleep(1.3)  # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.1
            self.serial.setDTR(True)  # IO0=LOW
            self.serial.setRTS(False)  # EN=HIGH, chip out of reset
            self.serial.setDTR(self.serial.dtr)  # usbser.sys workaround
            time.sleep(0.45)  # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.05
            self.serial.setDTR(False)  # IO0=HIGH, done
        else:
            raise RuntimeError("Bad command data %d" % (cmd))
def main():
    """Entry point: parse arguments, build the Monitor and run its loop."""
    def _get_default_serial_port():
        """
        Same logic for detecting serial port as esptool.py and idf.py: reverse sort by name and choose the first port.
        """
        try:
            ports = list(reversed(sorted(p.device for p in serial.tools.list_ports.comports())))
            return ports[0]
        except Exception:
            return '/dev/ttyUSB0'

    parser = argparse.ArgumentParser("idf_monitor - a serial output monitor for esp-idf")

    parser.add_argument(
        '--port', '-p',
        help='Serial port device',
        default=os.environ.get('ESPTOOL_PORT', _get_default_serial_port())
    )

    parser.add_argument(
        '--baud', '-b',
        help='Serial port baud rate',
        type=int,
        default=os.getenv('IDF_MONITOR_BAUD', os.getenv('MONITORBAUD', 115200)))

    parser.add_argument(
        '--make', '-m',
        help='Command to run make',
        type=str, default='make')

    parser.add_argument(
        '--toolchain-prefix',
        help="Triplet prefix to add before cross-toolchain names",
        default=DEFAULT_TOOLCHAIN_PREFIX)

    parser.add_argument(
        "--eol",
        choices=['CR', 'LF', 'CRLF'],
        type=lambda c: c.upper(),
        help="End of line to use when sending to the serial port",
        default='CR')

    parser.add_argument(
        'elf_file', help='ELF file of application',
        type=argparse.FileType('rb'))

    parser.add_argument(
        '--print_filter',
        help="Filtering string",
        default=DEFAULT_PRINT_FILTER)

    parser.add_argument(
        '--decode-coredumps',
        choices=[COREDUMP_DECODE_INFO, COREDUMP_DECODE_DISABLE],
        default=COREDUMP_DECODE_INFO,
        help="Handling of core dumps found in serial output"
    )

    args = parser.parse_args()

    # macOS: prefer the /dev/cu.* device over /dev/tty.* (see warning below)
    if args.port.startswith("/dev/tty."):
        args.port = args.port.replace("/dev/tty.", "/dev/cu.")
        yellow_print("--- WARNING: Serial ports accessed as /dev/tty.* will hang gdb if launched.")
        yellow_print("--- Using %s instead..." % args.port)

    serial_instance = serial.serial_for_url(args.port, args.baud,
                                            do_not_open=True)
    serial_instance.dtr = False
    serial_instance.rts = False

    args.elf_file.close()  # don't need this as a file

    # remove the parallel jobserver arguments from MAKEFLAGS, as any
    # parent make is only running 1 job (monitor), so we can re-spawn
    # all of the child makes we need (the -j argument remains part of
    # MAKEFLAGS)
    try:
        makeflags = os.environ["MAKEFLAGS"]
        makeflags = re.sub(r"--jobserver[^ =]*=[0-9,]+ ?", "", makeflags)
        os.environ["MAKEFLAGS"] = makeflags
    except KeyError:
        pass  # not running a make jobserver

    monitor = Monitor(serial_instance, args.elf_file.name, args.print_filter, args.make, args.toolchain_prefix, args.eol,
                      args.decode_coredumps)

    yellow_print('--- idf_monitor on {p.name} {p.baudrate} ---'.format(
        p=serial_instance))
    yellow_print('--- Quit: {} | Menu: {} | Help: {} followed by {} ---'.format(
        key_description(monitor.console_parser.exit_key),
        key_description(monitor.console_parser.menu_key),
        key_description(monitor.console_parser.menu_key),
        key_description(CTRL_H)))
    if args.print_filter != DEFAULT_PRINT_FILTER:
        yellow_print('--- Print filter: {} ---'.format(args.print_filter))

    monitor.main_loop()
if os.name == 'nt':
    # Windows console stuff
    STD_OUTPUT_HANDLE = -11
    STD_ERROR_HANDLE = -12

    # wincon.h values
    FOREGROUND_INTENSITY = 8
    FOREGROUND_GREY = 7

    # matches the ANSI color change sequences that IDF sends
    RE_ANSI_COLOR = re.compile(b'\033\\[([01]);3([0-7])m')

    # list mapping the 8 ANSI colors (the indexes) to Windows Console colors
    ANSI_TO_WINDOWS_COLOR = [0, 4, 2, 6, 1, 5, 3, 7]

    GetStdHandle = ctypes.windll.kernel32.GetStdHandle
    SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute

    class ANSIColorConverter(object):
        """Class to wrap a file-like output stream, intercept ANSI color codes,
        and convert them into calls to Windows SetConsoleTextAttribute.

        Doesn't support all ANSI terminal code escape sequences, only the sequences IDF uses.

        Ironically, in Windows this console output is normally wrapped by winpty which will then detect the console text
        color changes and convert these back to ANSI color codes for MSYS' terminal to display. However this is the
        least-bad working solution, as winpty doesn't support any "passthrough" mode for raw output.
        """

        def __init__(self, output=None, decode_output=False):
            # output: wrapped stream; decode_output: write str instead of bytes
            self.output = output
            self.decode_output = decode_output
            self.handle = GetStdHandle(STD_ERROR_HANDLE if self.output == sys.stderr else STD_OUTPUT_HANDLE)
            # bytes of a partially-matched ANSI escape sequence
            self.matched = b''

        def _output_write(self, data):
            try:
                if self.decode_output:
                    self.output.write(data.decode())
                else:
                    self.output.write(data)
            except (IOError, OSError):
                # Windows 10 bug since the Fall Creators Update, sometimes writing to console randomly throws
                # an exception (however, the character is still written to the screen)
                # Ref https://github.com/espressif/esp-idf/issues/1163
                #
                # Also possible for Windows to throw an OSError error if the data is invalid for the console
                # (garbage bytes, etc)
                pass

        def write(self, data):
            """Write *data*, converting recognized ANSI color sequences into
            SetConsoleTextAttribute calls; other bytes pass through."""
            if isinstance(data, bytes):
                data = bytearray(data)
            else:
                data = bytearray(data, 'utf-8')
            for b in data:
                b = bytes([b])
                length = len(self.matched)
                if b == b'\033':  # ESC
                    self.matched = b
                elif (length == 1 and b == b'[') or (1 < length < 7):
                    self.matched += b
                    if self.matched == ANSI_NORMAL.encode('latin-1'):  # reset console
                        # Flush is required only with Python3 - switching color before it is printed would mess up the console
                        self.flush()
                        SetConsoleTextAttribute(self.handle, FOREGROUND_GREY)
                        self.matched = b''
                    elif len(self.matched) == 7:  # could be an ANSI sequence
                        m = re.match(RE_ANSI_COLOR, self.matched)
                        if m is not None:
                            color = ANSI_TO_WINDOWS_COLOR[int(m.group(2))]
                            if m.group(1) == b'1':
                                color |= FOREGROUND_INTENSITY
                            # Flush is required only with Python3 - switching color before it is printed would mess up the console
                            self.flush()
                            SetConsoleTextAttribute(self.handle, color)
                        else:
                            self._output_write(self.matched)  # not an ANSI color code, display verbatim
                        self.matched = b''
                else:
                    self._output_write(b)
                    self.matched = b''

        def flush(self):
            try:
                self.output.flush()
            except OSError:
                # Account for Windows Console refusing to accept garbage bytes (serial noise, etc)
                pass
if __name__ == "__main__":
main()
|
mp_demo8.py | from multiprocessing import Process, Value, Array
def f(n, a):
    """Store pi in the shared value *n* and negate each element of *a* in place."""
    n.value = 3.1415927
    for idx, item in enumerate(a):
        a[idx] = -item
if __name__ == '__main__':
    # Shared-memory primitives: 'd' -> C double, 'i' -> array of C signed ints.
    num = Value('d', 0.0)
    arr = Array('i', range(10))
    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()  # wait for the child to finish before reading shared state
    print(num.value)
    print(arr[:])
|
test_credentials.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import uuid
import threading
import os
import math
import time
import mock
import tempfile
import shutil
from datetime import datetime, timedelta
import sys
from dateutil.tz import tzlocal
from ibm_botocore.exceptions import CredentialRetrievalError
from tests import unittest, IntegerRefresher, BaseEnvVar, random_chars
from tests import temporary_file, StubbedSession, SessionHTTPStubber
from ibm_botocore.credentials import EnvProvider, ContainerProvider
from ibm_botocore.credentials import InstanceMetadataProvider
from ibm_botocore.credentials import Credentials, ReadOnlyCredentials
from ibm_botocore.credentials import AssumeRoleProvider, ProfileProviderBuilder
from ibm_botocore.credentials import CanonicalNameCredentialSourcer
from ibm_botocore.credentials import DeferredRefreshableCredentials
from ibm_botocore.credentials import create_credential_resolver
from ibm_botocore.session import Session
from ibm_botocore.exceptions import InvalidConfigError, InfiniteLoopConfigError
from ibm_botocore.stub import Stubber
from ibm_botocore.awsrequest import AWSResponse
class TestCredentialRefreshRaces(unittest.TestCase):
    """Thread-safety tests for refreshable credential objects.

    Each test hammers a credential object from many threads and verifies
    that observed credential triples are internally consistent (i.e. no
    reader ever sees a partially-refreshed credential set).
    """

    def assert_consistent_credentials_seen(self, creds, func):
        """Run *func* on 20 threads; assert every collected triple is uniform."""
        collected = []
        self._run_threads(20, func, collected)
        for creds in collected:
            # During testing, the refresher uses it's current
            # refresh count as the values for the access, secret, and
            # token value.  This means that at any given point in time,
            # the credentials should be something like:
            #
            # ReadOnlyCredentials('1', '1', '1')
            # ReadOnlyCredentials('2', '2', '2')
            # ...
            # ReadOnlyCredentials('30', '30', '30')
            #
            # This makes it really easy to verify we see a consistent
            # set of credentials from the same time period.  We just
            # check if all the credential values are the same.  If
            # we ever see something like:
            #
            # ReadOnlyCredentials('1', '2', '1')
            #
            # We fail.  This is because we're using the access_key
            # from the first refresh ('1'), the secret key from
            # the second refresh ('2'), and the token from the
            # first refresh ('1').
            self.assertTrue(creds[0] == creds[1] == creds[2], creds)

    def assert_non_none_retrieved_credentials(self, func):
        """Run *func* on 50 threads; assert no thread ever saw None."""
        collected = []
        self._run_threads(50, func, collected)
        for cred in collected:
            self.assertIsNotNone(cred)

    def _run_threads(self, num_threads, func, collected):
        # Start all threads first, then join; each thread appends its
        # observations into the shared *collected* list.
        threads = []
        for _ in range(num_threads):
            threads.append(threading.Thread(target=func, args=(collected,)))
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

    def test_has_no_race_conditions(self):
        """Concurrent readers never observe a torn refresh, and the number
        of refreshes stays bounded by elapsed wall-clock time."""
        creds = IntegerRefresher(
            creds_last_for=2,
            advisory_refresh=1,
            mandatory_refresh=0
        )

        def _run_in_thread(collected):
            for _ in range(4000):
                frozen = creds.get_frozen_credentials()
                collected.append((frozen.access_key,
                                  frozen.secret_key,
                                  frozen.token))

        start = time.time()
        self.assert_consistent_credentials_seen(creds, _run_in_thread)
        end = time.time()
        # creds_last_for = 2 seconds (from above)
        # So, for example, if execution time took 6.1 seconds, then
        # we should see a maximum number of refreshes being (6 / 2.0) + 1 = 4
        max_calls_allowed = math.ceil((end - start) / 2.0) + 1
        self.assertTrue(creds.refresh_counter <= max_calls_allowed,
                        "Too many cred refreshes, max: %s, actual: %s, "
                        "time_delta: %.4f" % (max_calls_allowed,
                                              creds.refresh_counter,
                                              (end - start)))

    def test_no_race_for_immediate_advisory_expiration(self):
        """Same consistency check when credentials expire immediately."""
        creds = IntegerRefresher(
            creds_last_for=1,
            advisory_refresh=1,
            mandatory_refresh=0
        )

        def _run_in_thread(collected):
            for _ in range(100):
                frozen = creds.get_frozen_credentials()
                collected.append((frozen.access_key,
                                  frozen.secret_key,
                                  frozen.token))

        self.assert_consistent_credentials_seen(creds, _run_in_thread)

    def test_no_race_for_initial_refresh_of_deferred_refreshable(self):
        """The very first (deferred) refresh never yields None to any thread."""
        def get_credentials():
            expiry_time = (
                datetime.now(tzlocal()) + timedelta(hours=24)).isoformat()
            return {
                'access_key': 'my-access-key',
                'secret_key': 'my-secret-key',
                'token': 'my-token',
                'expiry_time': expiry_time
            }

        deferred_creds = DeferredRefreshableCredentials(
            get_credentials, 'fixed')

        def _run_in_thread(collected):
            frozen = deferred_creds.get_frozen_credentials()
            collected.append(frozen)

        self.assert_non_none_retrieved_credentials(_run_in_thread)
class BaseAssumeRoleTest(BaseEnvVar):
    """Shared fixture for assume-role tests.

    Provides an isolated AWS config file in a temp directory plus helpers
    for fabricating credentials and stubbed AssumeRole API responses.
    """

    def setUp(self):
        super(BaseAssumeRoleTest, self).setUp()
        # Point the SDK at a throw-away config file; use a random name for
        # the shared credentials file so it can never resolve.
        self.tempdir = tempfile.mkdtemp()
        self.config_file = os.path.join(self.tempdir, 'config')
        self.environ['AWS_CONFIG_FILE'] = self.config_file
        self.environ['AWS_SHARED_CREDENTIALS_FILE'] = str(uuid.uuid4())

    def tearDown(self):
        shutil.rmtree(self.tempdir)
        super(BaseAssumeRoleTest, self).tearDown()

    def some_future_time(self):
        """Return a tz-aware datetime 24 hours from now."""
        return datetime.now(tzlocal()) + timedelta(hours=24)

    def create_assume_role_response(self, credentials, expiration=None):
        """Build a stubbed AssumeRole response carrying *credentials*."""
        if expiration is None:
            expiration = self.some_future_time()
        creds_block = {
            'AccessKeyId': credentials.access_key,
            'SecretAccessKey': credentials.secret_key,
            'SessionToken': credentials.token,
            'Expiration': expiration
        }
        return {
            'Credentials': creds_block,
            'AssumedRoleUser': {
                'AssumedRoleId': 'myroleid',
                'Arn': 'arn:aws:iam::1234567890:user/myuser'
            }
        }

    def create_random_credentials(self):
        """Return Credentials with unique random key/secret/token values."""
        access, secret, token = (
            'fake-%s' % random_chars(width) for width in (15, 35, 45))
        return Credentials(access, secret, token)

    def assert_creds_equal(self, c1, c2):
        """Assert two credential objects hold identical frozen values."""
        def frozen(creds):
            if isinstance(creds, ReadOnlyCredentials):
                return creds
            return creds.get_frozen_credentials()

        self.assertEqual(frozen(c1), frozen(c2))

    def write_config(self, config):
        """Overwrite the test's AWS config file with *config*."""
        with open(self.config_file, 'w') as fp:
            fp.write(config)
class TestAssumeRole(BaseAssumeRoleTest):
    """End-to-end tests of the AssumeRole credential provider chain,
    driven through profile configuration files and a stubbed STS client."""

    def setUp(self):
        """Install mocked low-level providers and a mock STS client factory."""
        super(TestAssumeRole, self).setUp()
        self.environ['AWS_ACCESS_KEY_ID'] = 'access_key'
        self.environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key'
        self.metadata_provider = self.mock_provider(InstanceMetadataProvider)
        self.env_provider = self.mock_provider(EnvProvider)
        self.container_provider = self.mock_provider(ContainerProvider)
        self.mock_client_creator = mock.Mock(spec=Session.create_client)
        self.actual_client_region = None
        # Command line used by tests exercising `credential_process`.
        current_dir = os.path.dirname(os.path.abspath(__file__))
        credential_process = os.path.join(
            current_dir, 'utils', 'credentialprocess.py'
        )
        self.credential_process = '%s %s' % (
            sys.executable, credential_process
        )

    def mock_provider(self, provider_cls):
        """Return a Mock of *provider_cls* whose load() yields no credentials."""
        mock_instance = mock.Mock(spec=provider_cls)
        mock_instance.load.return_value = None
        mock_instance.METHOD = provider_cls.METHOD
        mock_instance.CANONICAL_NAME = provider_cls.CANONICAL_NAME
        return mock_instance

    def create_session(self, profile=None):
        """Build a StubbedSession whose credential resolver uses our mocked
        providers and a stubbed STS client; return (session, sts_stubber)."""
        session = StubbedSession(profile=profile)
        # We have to set bogus credentials here or otherwise we'll trigger
        # an early credential chain resolution.
        sts = session.create_client(
            's3',
            aws_access_key_id='spam',
            aws_secret_access_key='eggs',
        )
        self.mock_client_creator.return_value = sts
        assume_role_provider = AssumeRoleProvider(
            load_config=lambda: session.full_config,
            client_creator=self.mock_client_creator,
            cache={},
            profile_name=profile,
            credential_sourcer=CanonicalNameCredentialSourcer([
                self.env_provider, self.container_provider,
                self.metadata_provider
            ]),
            profile_provider_builder=ProfileProviderBuilder(session),
        )
        stubber = session.stub('sts')
        stubber.activate()
        # Swap the session's real providers for our mocks, preserving their
        # positions in the resolver chain.
        component_name = 'credential_provider'
        resolver = session.get_component(component_name)
        available_methods = [p.METHOD for p in resolver.providers]
        replacements = {
            'env': self.env_provider,
            'iam-role': self.metadata_provider,
            'container-role': self.container_provider,
            'assume-role': assume_role_provider
        }
        for name, provider in replacements.items():
            try:
                index = available_methods.index(name)
            except ValueError:
                # The provider isn't in the session
                continue
            resolver.providers[index] = provider
        session.register_component(
            'credential_provider', resolver
        )
        return session, stubber

    def test_assume_role(self):
        """Basic assume-role via source_profile with static credentials."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = B\n\n'
            '[profile B]\n'
            'aws_access_key_id = abc123\n'
            'aws_secret_access_key = def456\n'
        )
        self.write_config(config)
        expected_creds = self.create_random_credentials()
        response = self.create_assume_role_response(expected_creds)
        session, stubber = self.create_session(profile='A')
        stubber.add_response('assume_role', response)
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, expected_creds)
        stubber.assert_no_pending_responses()

    def test_environment_credential_source(self):
        """credential_source = Environment pulls from the env provider."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'credential_source = Environment\n'
        )
        self.write_config(config)
        environment_creds = self.create_random_credentials()
        self.env_provider.load.return_value = environment_creds
        expected_creds = self.create_random_credentials()
        response = self.create_assume_role_response(expected_creds)
        session, stubber = self.create_session(profile='A')
        stubber.add_response('assume_role', response)
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, expected_creds)
        stubber.assert_no_pending_responses()
        self.assertEqual(self.env_provider.load.call_count, 1)

    def test_instance_metadata_credential_source(self):
        """credential_source = Ec2InstanceMetadata uses the metadata provider."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'credential_source = Ec2InstanceMetadata\n'
        )
        self.write_config(config)
        metadata_creds = self.create_random_credentials()
        self.metadata_provider.load.return_value = metadata_creds
        expected_creds = self.create_random_credentials()
        response = self.create_assume_role_response(expected_creds)
        session, stubber = self.create_session(profile='A')
        stubber.add_response('assume_role', response)
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, expected_creds)
        stubber.assert_no_pending_responses()
        self.assertEqual(self.metadata_provider.load.call_count, 1)

    def test_container_credential_source(self):
        """credential_source = EcsContainer uses the container provider."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'credential_source = EcsContainer\n'
        )
        self.write_config(config)
        container_creds = self.create_random_credentials()
        self.container_provider.load.return_value = container_creds
        expected_creds = self.create_random_credentials()
        response = self.create_assume_role_response(expected_creds)
        session, stubber = self.create_session(profile='A')
        stubber.add_response('assume_role', response)
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, expected_creds)
        stubber.assert_no_pending_responses()
        self.assertEqual(self.container_provider.load.call_count, 1)

    def test_invalid_credential_source(self):
        """An unknown credential_source raises InvalidConfigError."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'credential_source = CustomInvalidProvider\n'
        )
        self.write_config(config)
        with self.assertRaises(InvalidConfigError):
            session, _ = self.create_session(profile='A')
            session.get_credentials()

    def test_misconfigured_source_profile(self):
        """A source profile with no credentials raises InvalidConfigError."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = B\n'
            '[profile B]\n'
            'region = us-west-2\n'
        )
        self.write_config(config)
        with self.assertRaises(InvalidConfigError):
            session, _ = self.create_session(profile='A')
            session.get_credentials().get_frozen_credentials()

    def test_recursive_assume_role(self):
        """A -> B -> C chains two assume-role calls, innermost first."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = B\n\n'
            '[profile B]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleB\n'
            'source_profile = C\n\n'
            '[profile C]\n'
            'aws_access_key_id = abc123\n'
            'aws_secret_access_key = def456\n'
        )
        self.write_config(config)
        profile_b_creds = self.create_random_credentials()
        profile_b_response = self.create_assume_role_response(profile_b_creds)
        profile_a_creds = self.create_random_credentials()
        profile_a_response = self.create_assume_role_response(profile_a_creds)
        session, stubber = self.create_session(profile='A')
        stubber.add_response('assume_role', profile_b_response)
        stubber.add_response('assume_role', profile_a_response)
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, profile_a_creds)
        stubber.assert_no_pending_responses()

    def test_recursive_assume_role_stops_at_static_creds(self):
        """Static keys on an intermediate profile end the chain: only one
        assume-role call is expected despite B also declaring a role_arn."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = B\n\n'
            '[profile B]\n'
            'aws_access_key_id = abc123\n'
            'aws_secret_access_key = def456\n'
            'role_arn = arn:aws:iam::123456789:role/RoleB\n'
            'source_profile = C\n\n'
            '[profile C]\n'
            'aws_access_key_id = abc123\n'
            'aws_secret_access_key = def456\n'
        )
        self.write_config(config)
        profile_a_creds = self.create_random_credentials()
        profile_a_response = self.create_assume_role_response(profile_a_creds)
        session, stubber = self.create_session(profile='A')
        stubber.add_response('assume_role', profile_a_response)
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, profile_a_creds)
        stubber.assert_no_pending_responses()

    def test_infinitely_recursive_assume_role(self):
        """A profile sourcing itself (with no static keys) is an infinite loop."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = A\n'
        )
        self.write_config(config)
        with self.assertRaises(InfiniteLoopConfigError):
            session, _ = self.create_session(profile='A')
            session.get_credentials()

    def test_process_source_profile(self):
        """source_profile backed by credential_process feeds the STS client."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = B\n'
            '[profile B]\n'
            'credential_process = %s\n' % self.credential_process
        )
        self.write_config(config)
        expected_creds = self.create_random_credentials()
        response = self.create_assume_role_response(expected_creds)
        session, stubber = self.create_session(profile='A')
        stubber.add_response('assume_role', response)
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, expected_creds)
        stubber.assert_no_pending_responses()
        # Assert that the client was created with the credentials from the
        # credential process.
        self.assertEqual(self.mock_client_creator.call_count, 1)
        _, kwargs = self.mock_client_creator.call_args_list[0]
        expected_kwargs = {
            'aws_access_key_id': 'spam',
            'aws_secret_access_key': 'eggs',
            'aws_session_token': None,
        }
        self.assertEqual(kwargs, expected_kwargs)

    def test_web_identity_source_profile(self):
        """A web-identity source profile first exchanges the token, then the
        resulting credentials are used for the outer assume-role call."""
        token_path = os.path.join(self.tempdir, 'token')
        with open(token_path, 'w') as token_file:
            token_file.write('a.token')
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = B\n'
            '[profile B]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleB\n'
            'web_identity_token_file = %s\n' % token_path
        )
        self.write_config(config)
        session, stubber = self.create_session(profile='A')
        identity_creds = self.create_random_credentials()
        identity_response = self.create_assume_role_response(identity_creds)
        stubber.add_response(
            'assume_role_with_web_identity',
            identity_response,
        )
        expected_creds = self.create_random_credentials()
        assume_role_response = self.create_assume_role_response(expected_creds)
        stubber.add_response('assume_role', assume_role_response)
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, expected_creds)
        stubber.assert_no_pending_responses()
        # Assert that the client was created with the credentials from the
        # assume role with web identity call.
        self.assertEqual(self.mock_client_creator.call_count, 1)
        _, kwargs = self.mock_client_creator.call_args_list[0]
        expected_kwargs = {
            'aws_access_key_id': identity_creds.access_key,
            'aws_secret_access_key': identity_creds.secret_key,
            'aws_session_token': identity_creds.token,
        }
        self.assertEqual(kwargs, expected_kwargs)

    def test_web_identity_source_profile_ignores_env_vars(self):
        """Profile-sourced web identity must not merge env-var settings."""
        token_path = os.path.join(self.tempdir, 'token')
        with open(token_path, 'w') as token_file:
            token_file.write('a.token')
        self.environ['AWS_ROLE_ARN'] = 'arn:aws:iam::123456789:role/RoleB'
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = B\n'
            '[profile B]\n'
            'web_identity_token_file = %s\n' % token_path
        )
        self.write_config(config)
        session, _ = self.create_session(profile='A')
        # The config is split between the profile and the env, we
        # should only be looking at the profile so this should raise
        # a configuration error.
        with self.assertRaises(InvalidConfigError):
            session.get_credentials()

    def test_web_identity_credential_source_ignores_env_vars(self):
        """credential_source = Environment must not fall back to env-var
        web-identity settings."""
        token_path = os.path.join(self.tempdir, 'token')
        with open(token_path, 'w') as token_file:
            token_file.write('a.token')
        self.environ['AWS_ROLE_ARN'] = 'arn:aws:iam::123456789:role/RoleB'
        self.environ['AWS_WEB_IDENTITY_TOKEN_FILE'] = token_path
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'credential_source = Environment\n'
        )
        self.write_config(config)
        session, _ = self.create_session(profile='A')
        # We should not get credentials from web-identity configured in the
        # environment when the Environment credential_source is set.
        # There are no Environment credentials, so this should raise a
        # retrieval error.
        with self.assertRaises(CredentialRetrievalError):
            session.get_credentials()

    def test_self_referential_profile(self):
        """A profile that sources itself but has static keys is valid."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = A\n'
            'aws_access_key_id = abc123\n'
            'aws_secret_access_key = def456\n'
        )
        self.write_config(config)
        expected_creds = self.create_random_credentials()
        response = self.create_assume_role_response(expected_creds)
        session, stubber = self.create_session(profile='A')
        stubber.add_response('assume_role', response)
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, expected_creds)
        stubber.assert_no_pending_responses()

    def create_stubbed_sts_client(self, session):
        """Return (client_factory, expected_creds): the factory wraps the
        session's real create_client, records the client region used, and
        stubs the assume_role call."""
        expected_creds = self.create_random_credentials()
        _original_create_client = session.create_client

        def create_client_sts_stub(service, *args, **kwargs):
            client = _original_create_client(service, *args, **kwargs)
            stub = Stubber(client)
            response = self.create_assume_role_response(expected_creds)
            # Capture the region so the test can assert on it afterwards.
            self.actual_client_region = client.meta.region_name
            stub.add_response('assume_role', response)
            stub.activate()
            return client

        return create_client_sts_stub, expected_creds

    def test_assume_role_uses_correct_region(self):
        """The STS client is created in the session's configured region."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = B\n\n'
            '[profile B]\n'
            'aws_access_key_id = abc123\n'
            'aws_secret_access_key = def456\n'
        )
        self.write_config(config)
        session = Session(profile='A')
        # Verify that when we configure the session with a specific region
        # that we use that region when creating the sts client.
        session.set_config_variable('region', 'cn-north-1')
        create_client, expected_creds = self.create_stubbed_sts_client(session)
        session.create_client = create_client
        resolver = create_credential_resolver(session)
        provider = resolver.get_provider('assume-role')
        creds = provider.load()
        self.assert_creds_equal(creds, expected_creds)
        self.assertEqual(self.actual_client_region, 'cn-north-1')
class TestAssumeRoleWithWebIdentity(BaseAssumeRoleTest):
    """Tests for AssumeRoleWithWebIdentity configured via profile or env vars."""

    def setUp(self):
        super(TestAssumeRoleWithWebIdentity, self).setUp()
        self.token_file = os.path.join(self.tempdir, 'token.jwt')
        self.write_token('totally.a.token')

    def write_token(self, token, path=None):
        """Write *token* to *path* (defaults to the fixture's token file)."""
        if path is None:
            path = self.token_file
        with open(path, 'w') as f:
            f.write(token)

    def assert_session_credentials(self, expected_params, **kwargs):
        """Create a stubbed session (kwargs forwarded to StubbedSession) and
        assert the web-identity call is made with *expected_params* and yields
        the stubbed credentials."""
        expected_creds = self.create_random_credentials()
        response = self.create_assume_role_response(expected_creds)
        session = StubbedSession(**kwargs)
        stubber = session.stub('sts')
        stubber.add_response(
            'assume_role_with_web_identity',
            response,
            expected_params
        )
        stubber.activate()
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, expected_creds)
        stubber.assert_no_pending_responses()

    def test_assume_role(self):
        """Profile-configured web identity passes role, session name, token."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'role_session_name = sname\n'
            'web_identity_token_file = %s\n'
        ) % self.token_file
        self.write_config(config)
        expected_params = {
            'RoleArn': 'arn:aws:iam::123456789:role/RoleA',
            'RoleSessionName': 'sname',
            'WebIdentityToken': 'totally.a.token',
        }
        self.assert_session_credentials(expected_params, profile='A')

    def test_assume_role_env_vars(self):
        """Env-var configuration works when the profile supplies none."""
        config = (
            '[profile B]\n'
            'region = us-west-2\n'
        )
        self.write_config(config)
        self.environ['AWS_ROLE_ARN'] = 'arn:aws:iam::123456789:role/RoleB'
        self.environ['AWS_WEB_IDENTITY_TOKEN_FILE'] = self.token_file
        self.environ['AWS_ROLE_SESSION_NAME'] = 'bname'
        expected_params = {
            'RoleArn': 'arn:aws:iam::123456789:role/RoleB',
            'RoleSessionName': 'bname',
            'WebIdentityToken': 'totally.a.token',
        }
        self.assert_session_credentials(expected_params)

    def test_assume_role_env_vars_do_not_take_precedence(self):
        """Profile settings win over conflicting env-var settings."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'role_session_name = aname\n'
            'web_identity_token_file = %s\n'
        ) % self.token_file
        self.write_config(config)
        different_token = os.path.join(self.tempdir, str(uuid.uuid4()))
        self.write_token('totally.different.token', path=different_token)
        self.environ['AWS_ROLE_ARN'] = 'arn:aws:iam::123456789:role/RoleC'
        self.environ['AWS_WEB_IDENTITY_TOKEN_FILE'] = different_token
        self.environ['AWS_ROLE_SESSION_NAME'] = 'cname'
        expected_params = {
            'RoleArn': 'arn:aws:iam::123456789:role/RoleA',
            'RoleSessionName': 'aname',
            'WebIdentityToken': 'totally.a.token',
        }
        self.assert_session_credentials(expected_params, profile='A')
class TestProcessProvider(unittest.TestCase):
    """Tests for the credential_process provider, driven by a helper script
    (tests/utils/credentialprocess.py) invoked as a subprocess."""

    def setUp(self):
        # Build the command line for the credential-process helper script.
        current_dir = os.path.dirname(os.path.abspath(__file__))
        credential_process = os.path.join(
            current_dir, 'utils', 'credentialprocess.py'
        )
        self.credential_process = '%s %s' % (
            sys.executable, credential_process
        )
        # Patch os.environ with a copy so tests can mutate it freely.
        self.environ = os.environ.copy()
        self.environ_patch = mock.patch('os.environ', self.environ)
        self.environ_patch.start()

    def tearDown(self):
        self.environ_patch.stop()

    def test_credential_process(self):
        """The helper's JSON output becomes the session's credentials."""
        config = (
            '[profile processcreds]\n'
            'credential_process = %s\n'
        )
        config = config % self.credential_process
        with temporary_file('w') as f:
            f.write(config)
            f.flush()
            self.environ['AWS_CONFIG_FILE'] = f.name
            credentials = Session(profile='processcreds').get_credentials()
            self.assertEqual(credentials.access_key, 'spam')
            self.assertEqual(credentials.secret_key, 'eggs')

    def test_credential_process_returns_error(self):
        """A failing credential process surfaces a decoded (non-bytes) error."""
        config = (
            '[profile processcreds]\n'
            'credential_process = %s --raise-error\n'
        )
        config = config % self.credential_process
        with temporary_file('w') as f:
            f.write(config)
            f.flush()
            self.environ['AWS_CONFIG_FILE'] = f.name
            session = Session(profile='processcreds')
            # This regex validates that there is no substring: b'
            # The reason why we want to validate that is that we want to
            # make sure that stderr is actually decoded so that in
            # exceptional cases the error is properly formatted.
            # As for how the regex works:
            # `(?!b').` is a negative lookahead, meaning that it will only
            # match if it is not followed by the pattern `b'`. Since it is
            # followed by a `.` it will match any character not followed by
            # that pattern. `((?!hede).)*` does that zero or more times. The
            # final pattern adds `^` and `$` to anchor the beginning and end
            # of the string so we can know the whole string is consumed.
            # Finally `(?s)` at the beginning makes dots match newlines so
            # we can handle a multi-line string.
            reg = r"(?s)^((?!b').)*$"
            with self.assertRaisesRegexp(CredentialRetrievalError, reg):
                session.get_credentials()
class TestSTSRegional(BaseAssumeRoleTest):
    """Tests that sts_regional_endpoints = regional routes STS calls to the
    same regional endpoint as the client making the original request."""

    def add_assume_role_http_response(self, stubber):
        """Queue a raw HTTP AssumeRole response on the HTTP-level stubber."""
        stubber.add_response(
            body=self._get_assume_role_body('AssumeRole'))

    def add_assume_role_with_web_identity_http_response(self, stubber):
        """Queue a raw HTTP AssumeRoleWithWebIdentity response."""
        stubber.add_response(
            body=self._get_assume_role_body('AssumeRoleWithWebIdentity'))

    def _get_assume_role_body(self, method_name):
        # Build a minimal XML body for the given STS operation.
        expiration = self.some_future_time()
        body = (
            '<{method_name}Response>'
            '  <{method_name}Result>'
            '    <AssumedRoleUser>'
            '      <Arn>arn:aws:sts::0123456:user</Arn>'
            '      <AssumedRoleId>AKID:mysession-1567020004</AssumedRoleId>'
            '    </AssumedRoleUser>'
            '    <Credentials>'
            '      <AccessKeyId>AccessKey</AccessKeyId>'
            '      <SecretAccessKey>SecretKey</SecretAccessKey>'
            '      <SessionToken>SessionToken</SessionToken>'
            '      <Expiration>{expiration}</Expiration>'
            '    </Credentials>'
            '  </{method_name}Result>'
            '</{method_name}Response>'
        ).format(method_name=method_name, expiration=expiration)
        return body.encode('utf-8')

    def make_stubbed_client_call_to_region(self, session, stubber, region):
        """Make an arbitrary (stubbed) EC2 call in *region* to trigger the
        credential chain."""
        ec2 = session.create_client('ec2', region_name=region)
        stubber.add_response(body=b'<DescribeRegionsResponse/>')
        ec2.describe_regions()

    def test_assume_role_uses_same_region_as_client(self):
        """AssumeRole goes to the regional STS endpoint of the client."""
        config = (
            '[profile A]\n'
            'sts_regional_endpoints = regional\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = B\n\n'
            '[profile B]\n'
            'aws_access_key_id = abc123\n'
            'aws_secret_access_key = def456\n'
        )
        self.write_config(config)
        session = Session(profile='A')
        with SessionHTTPStubber(session) as stubber:
            self.add_assume_role_http_response(stubber)
            # Make an arbitrary client and API call as we are really only
            # looking to make sure the STS assume role call uses the correct
            # endpoint.
            self.make_stubbed_client_call_to_region(
                session, stubber, 'us-west-2')
            self.assertEqual(
                stubber.requests[0].url,
                'https://sts.us-west-2.amazonaws.com/'
            )

    def test_assume_role_web_identity_uses_same_region_as_client(self):
        """AssumeRoleWithWebIdentity also honors the regional endpoint."""
        token_file = os.path.join(self.tempdir, 'token.jwt')
        with open(token_file, 'w') as f:
            f.write('some-token')
        config = (
            '[profile A]\n'
            'sts_regional_endpoints = regional\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'web_identity_token_file = %s\n'
            'source_profile = B\n\n'
            '[profile B]\n'
            'aws_access_key_id = abc123\n'
            'aws_secret_access_key = def456\n' % token_file
        )
        self.write_config(config)
        # Make an arbitrary client and API call as we are really only
        # looking to make sure the STS assume role call uses the correct
        # endpoint.
        session = Session(profile='A')
        with SessionHTTPStubber(session) as stubber:
            self.add_assume_role_with_web_identity_http_response(stubber)
            # Make an arbitrary client and API call as we are really only
            # looking to make sure the STS assume role call uses the correct
            # endpoint.
            self.make_stubbed_client_call_to_region(
                session, stubber, 'us-west-2')
            self.assertEqual(
                stubber.requests[0].url,
                'https://sts.us-west-2.amazonaws.com/'
            )
|
LibSync.py | from yaml import load, dump
import requests
import os
import urllib
import threading
from queue import Queue
import asyncio
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
class LibSync:
    """Synchronises a local directory against a server-published file summary.

    The server exposes a ``summary.yml`` listing file entries (``url`` +
    ``sha512``) plus a ``version`` string.  ``doUpdateSummaries`` diffs the
    server state against the local state into ``self.differentials``, and
    ``doPerformSync`` applies those actions.  The ``async`` wrappers queue
    the same work onto a single background worker thread so operations are
    serialised.
    """

    def __init__(self, serverRootURL, localRootPath):
        self.serverRootURL = serverRootURL
        self.localRootPath = localRootPath
        self.serverSummary = {'files': []}
        self.localSummary = {'files': [], 'version': '0.0.0'}
        # Pending actions, each ['download'|'remove', relative_path].
        self.differentials = []
        self.__operations = Queue()
        # Daemon worker thread serialises all queued sync operations.
        threading.Thread(target=self.__loop, daemon=True).start()

    def __loop(self):
        # Worker loop: each queued item is a list of callables executed in
        # order (the trailing callable resolves the caller's asyncio future).
        while True:
            operation = self.__operations.get()
            for step in operation:
                step()

    async def updateSummaries(self):
        """Run doUpdateSummaries on the worker thread and await completion."""
        future = asyncio.Future()
        self.__operations.put([self.doUpdateSummaries, lambda: future.set_result(None)])
        await future

    def doUpdateSummaries(self):
        """Fetch the server summary, load the local one, and compute the diff.

        Populates ``self.differentials`` with download actions for new or
        checksum-changed files and remove actions for files the server no
        longer publishes.
        """
        self.serverSummary = load(requests.get(self.serverRootURL + 'summary.yml').content, Loader=Loader)
        localSummaryPath = os.path.join(self.localRootPath, 'summary.yml')
        if os.path.exists(localSummaryPath):
            # `with` guarantees the handle is closed even if parsing fails.
            with open(localSummaryPath) as summary_file:
                self.localSummary = load(summary_file.read(), Loader=Loader)

        def fileMap(summary):
            # Index a summary's file entries by their url.
            return {entry['url']: entry for entry in summary['files']}

        serverFileMap = fileMap(self.serverSummary)
        localFileMap = fileMap(self.localSummary)
        actions = []
        for serverFile in serverFileMap:
            # Download anything new, or anything whose checksum changed.
            if serverFile not in localFileMap:
                actions.append(['download', serverFile])
            elif serverFileMap[serverFile]['sha512'] != localFileMap[serverFile]['sha512']:
                actions.append(['download', serverFile])
        for localFile in localFileMap:
            if localFile not in serverFileMap:
                actions.append(['remove', localFile])
        self.differentials = actions

    def getLocalVersion(self):
        """Return the version string of the currently-installed summary."""
        return self.localSummary['version']

    def isUptodate(self):
        """Return True when no differential actions are pending."""
        return len(self.differentials) == 0

    async def performSync(self):
        """Run doPerformSync on the worker thread and await completion."""
        future = asyncio.Future()
        self.__operations.put([self.doPerformSync, lambda: future.set_result(None)])
        await future

    def doPerformSync(self):
        """Apply the pending differentials, then persist the server summary.

        Bug fix: 'remove' actions previously attempted to *download* the
        removed file from the server instead of deleting the local copy.
        """
        for kind, relPath in self.differentials:
            localPath = os.path.join(self.localRootPath, relPath)
            if kind == 'remove':
                # File no longer published by the server: delete local copy.
                if os.path.exists(localPath):
                    os.remove(localPath)
                continue
            url = urllib.parse.urljoin(self.serverRootURL, relPath)
            data = requests.get(url).content
            os.makedirs(os.path.dirname(localPath), exist_ok=True)
            with open(localPath, 'wb') as out_file:
                out_file.write(data)
        with open(os.path.join(self.localRootPath, 'summary.yml'), 'w') as summary_file:
            summary_file.write(dump(self.serverSummary))
        self.localSummary = self.serverSummary
        self.differentials = []
if __name__ == '__main__':
    # Ad-hoc smoke test against an internal server; calls the synchronous
    # do* methods directly, bypassing the async wrappers and worker thread.
    sync = LibSync('http://172.16.60.200/IFAssets/IFNodeApplications/', 'C:\\Users\\Administrator\\Downloads\\LibSyncTest\\')
    sync.doUpdateSummaries()
    sync.doPerformSync()
|
socket_server.py | import time
import json
from threading import Thread, Event
import socket
from data_stream import send_request
"""
SocketServer is a multithreaded socket server
receiving n amount of connections and proxy the messages
to flask
"""
class SocketServer(Thread):
    """Multithreaded TCP server: accepts connections and forwards each
    received JSON message to the Flask side via ``send_request``."""

    def __init__(self):
        super(SocketServer, self).__init__()

    def handle_connection(self, conn):
        """Service one client socket until it disconnects.

        Each recv'd chunk is parsed as a JSON object; required fields are
        ``id`` and ``value``, everything else is optional (looked up via the
        module-level ``safe`` helper).
        NOTE(review): assumes each recv(1024) returns exactly one complete
        JSON message — a message split across TCP segments would fail to
        parse; confirm senders keep messages small/framed.
        """
        with conn:
            while True:
                data = conn.recv(1024)
                if not data:
                    break  # peer closed the connection
                else:
                    #print(data)
                    try:
                        data = json.loads(data)
                        send_request(id = data["id"], data=data["value"], type =safe(data, "type"), active_points =safe(data, "active_points"),
                                     _label=safe(data, "label"), _legend=safe(data, "legend"), _width = safe(data, "width"), _height = safe(data, "height"),
                                     _name = safe(data, "name"), fill = safe(data, "fill"), backgroundColor = safe(data, "backgroundColor"), borderColor = safe(data, "borderColor"))
                    except Exception as e:
                        # Best-effort: log and keep the connection alive.
                        print(data)
                        print(" WARNING: an error occured in socket_server: ", e)

    def run(self):
        """Bind to the configured host/port and spawn a thread per client."""
        # Imported here to avoid a circular import at module load time
        # (presumably — config_handler is project-local; verify).
        from config_handler import ConfigHandler
        (HOST, PORT) = ConfigHandler().get_all("SocketServer") # pylint: disable=unbalanced-tuple-unpacking
        #HOST = '127.0.0.1'  # Standard loopback interface address (localhost)
        #PORT = 65432        # Port to listen on (non-privileged ports are > 1023)
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind((str(HOST), int(PORT)))
            s.listen()
            try:
                while True:
                    conn, addr = s.accept()
                    print('Connected by', addr)
                    # One handler thread per accepted connection.
                    Thread(target=self.handle_connection, args=(conn,)).start()
            except Exception as e:
                print(e)
def safe(json, value):
    """Return json[value], or None when the key/index is absent or the
    container does not support item lookup.

    The first parameter is named ``json`` for backward compatibility with
    existing callers; it shadows the stdlib module name inside this function.
    """
    try:
        return json[value]
    except (KeyError, IndexError, TypeError):
        # Narrowed from a bare Exception: only lookup failures mean "absent";
        # anything else should surface as a real error.
        return None
|
assistant_library_with_button_demo.py | #!/usr/bin/env python3
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run a recognizer using the Google Assistant Library with button support.
The Google Assistant Library has direct access to the audio API, so this Python
code doesn't need to record audio. Hot word detection "OK, Google" is supported.
The Google Assistant Library can be installed with:
env/bin/pip install google-assistant-library==0.0.2
It is available for Raspberry Pi 2/3 only; Pi Zero is not supported.
"""
import logging
import sys
import threading
import aiy.assistant.auth_helpers
import aiy.voicehat
from google.assistant.library import Assistant
from google.assistant.library.event import EventType
# Log INFO and above with a timestamped "[time] LEVEL:logger:message" format.
logging.basicConfig(
    level=logging.INFO,
    format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
)
class MyAssistant(object):
    """An assistant that runs in the background.

    The Google Assistant Library event loop blocks the thread it runs on, so
    it is hosted on a dedicated worker thread; that keeps the main thread
    free, letting the on_button_pressed() callback actually get invoked.
    """

    def __init__(self):
        self._task = threading.Thread(target=self._run_task)
        self._can_start_conversation = False
        self._assistant = None

    def start(self):
        """Start the worker thread that runs the assistant event loop."""
        self._task.start()

    def _run_task(self):
        credentials = aiy.assistant.auth_helpers.get_assistant_credentials()
        with Assistant(credentials) as assistant:
            self._assistant = assistant
            for event in assistant.start():
                self._process_event(event)

    def _process_event(self, event):
        status_ui = aiy.voicehat.get_status_ui()
        event_type = event.type

        if event_type == EventType.ON_START_FINISHED:
            # Library is ready: show it and arm the button trigger.
            status_ui.status('ready')
            self._can_start_conversation = True
            aiy.voicehat.get_button().on_press(self._on_button_pressed)
            if sys.stdout.isatty():
                print('Say "OK, Google" or press the button, then speak. '
                      'Press Ctrl+C to quit...')
            return

        if event_type == EventType.ON_CONVERSATION_TURN_STARTED:
            # A turn is in progress; disallow button-triggered turns.
            self._can_start_conversation = False
            status_ui.status('listening')
            return

        if event_type == EventType.ON_END_OF_UTTERANCE:
            status_ui.status('thinking')
            return

        if event_type == EventType.ON_CONVERSATION_TURN_FINISHED:
            status_ui.status('ready')
            self._can_start_conversation = True
            return

        if event_type == EventType.ON_ASSISTANT_ERROR and event.args and event.args['is_fatal']:
            sys.exit(1)

    def _on_button_pressed(self):
        # Only start a turn when the library is ready and not already in one.
        if self._can_start_conversation:
            self._assistant.start_conversation()
def main():
    """Entry point: create the assistant and run it in the background."""
    assistant = MyAssistant()
    assistant.start()


if __name__ == '__main__':
    main()
|
fork_wait.py | """This test case provides support for checking forking and wait behavior.
To test different wait behavior, override the wait_impl method.
We want fork1() semantics -- only the forking thread survives in the
child after a fork().
On some systems (e.g. Solaris without posix threads) we find that all
active threads survive in the child after a fork(); this is an error.
While BeOS doesn't officially support fork and native threading in
the same application, the present example should work just fine. DC
"""
import os, sys, time, unittest
import test.support as support
threading = support.import_module('threading')
LONGSLEEP = 2      # seconds the parent/child wait for worker threads to run
SHORTSLEEP = 0.5   # per-iteration sleep in workers and the waitpid poll
NUM_THREADS = 4    # number of worker threads spawned by test_wait
class ForkWait(unittest.TestCase):
def setUp(self):
self._threading_key = support.threading_setup()
self.alive = {}
self.stop = 0
self.threads = []
def tearDown(self):
# Stop threads
self.stop = 1
for thread in self.threads:
thread.join()
thread = None
del self.threads[:]
support.threading_cleanup(*self._threading_key)
def f(self, id):
while not self.stop:
self.alive[id] = os.getpid()
try:
time.sleep(SHORTSLEEP)
except IOError:
pass
def wait_impl(self, cpid):
for i in range(10):
# waitpid() shouldn't hang, but some of the buildbots seem to hang
# in the forking tests. This is an attempt to fix the problem.
spid, status = os.waitpid(cpid, os.WNOHANG)
if spid == cpid:
break
time.sleep(2 * SHORTSLEEP)
self.assertEqual(spid, cpid)
self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))
def test_wait(self):
for i in range(NUM_THREADS):
thread = threading.Thread(target=self.f, args=(i,))
thread.start()
self.threads.append(thread)
time.sleep(LONGSLEEP)
a = self.alive.keys()
a.sort()
self.assertEqual(a, range(NUM_THREADS))
prefork_lives = self.alive.copy()
if sys.platform in ['unixware7']:
cpid = os.fork1()
else:
cpid = os.fork()
if cpid == 0:
# Child
time.sleep(LONGSLEEP)
n = 0
for key in self.alive:
if self.alive[key] != prefork_lives[key]:
n += 1
os._exit(n)
else:
# Parent
self.wait_impl(cpid)
|
test.py | import json
import pytest
import random
import re
import string
import threading
import time
from multiprocessing.dummy import Pool
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
# Two-node ClickHouse cluster with ZooKeeper (needed by the Replicated*
# engines).  Each node mounts two small 40MB JBOD disks and a 200MB
# "external" disk as tmpfs; the storage policies used by the tests
# (small_jbod_with_external, jbods_with_external, only_jbod2, ...) refer
# to these disks via configs/.
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1',
            config_dir='configs',
            main_configs=['configs/logs_config.xml'],
            with_zookeeper=True,
            tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
            macros={"shard": 0, "replica": 1} )

node2 = cluster.add_instance('node2',
            config_dir='configs',
            main_configs=['configs/logs_config.xml'],
            with_zookeeper=True,
            tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
            macros={"shard": 0, "replica": 2} )
@pytest.fixture(scope="module")
def started_cluster():
    """Module-scoped fixture: start the cluster once for all tests and
    always shut it down afterwards."""
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()
def get_random_string(length):
    """Return a random string of ``length`` uppercase letters and digits.

    The previous implementation was Python-2 only: on Python 3,
    ``bytes(str)`` raises TypeError and ``str(bytearray)`` yields the
    "bytearray(b'...')" repr instead of the payload.  Build the string
    directly instead.
    """
    symbols = string.ascii_uppercase + string.digits
    return ''.join(random.choice(symbols) for _ in range(length))
def get_used_disks_for_table(node, table_name, partition=None):
    """List the disks holding active parts of *table_name* on *node*,
    ordered by part modification time (oldest part first).

    When *partition* is given, only parts of that partition are counted.
    """
    partition_filter = "" if partition is None else "and partition='{}'".format(partition)
    result = node.query("""
        SELECT disk_name
        FROM system.parts
        WHERE table == '{name}' AND active=1 {suffix}
        ORDER BY modification_time
        """.format(name=table_name, suffix=partition_filter))
    return result.strip().split('\n')
def check_used_disks_with_retry(node, table_name, expected_disks, retries):
    """Poll up to *retries* times (0.5s apart) until every disk used by
    *table_name* belongs to *expected_disks*; return True on success,
    False once the attempts are exhausted."""
    for _attempt in range(retries):
        current_disks = set(get_used_disks_for_table(node, table_name))
        if current_disks.issubset(expected_disks):
            return True
        time.sleep(0.5)
    return False
@pytest.mark.parametrize("name,engine,alter", [
    ("mt_test_rule_with_invalid_destination","MergeTree()",0),
    ("replicated_mt_test_rule_with_invalid_destination","ReplicatedMergeTree('/clickhouse/replicated_test_rule_with_invalid_destination', '1')",0),
    ("mt_test_rule_with_invalid_destination","MergeTree()",1),
    ("replicated_mt_test_rule_with_invalid_destination","ReplicatedMergeTree('/clickhouse/replicated_test_rule_with_invalid_destination', '1')",1),
])
def test_rule_with_invalid_destination(started_cluster, name, engine, alter):
    """TTL destinations must be validated: a CREATE or ALTER pointing a TTL
    rule at an unknown disk/volume, or at one outside the table's storage
    policy, must raise QueryRuntimeException."""
    try:
        def get_command(x, policy):
            # With alter=1 the TTL expression is applied via ALTER on an
            # existing table; otherwise it is embedded in the CREATE.
            x = x or ""
            if alter and x:
                return """
                    ALTER TABLE {name} MODIFY TTL {expression}
                """.format(expression=x, name=name)
            else:
                return """
                    CREATE TABLE {name} (
                        s1 String,
                        d1 DateTime
                    ) ENGINE = {engine}
                    ORDER BY tuple()
                    {expression}
                    SETTINGS storage_policy='{policy}'
                """.format(expression=x, name=name, engine=engine, policy=policy)

        if alter:
            # The ALTER variant needs a valid table to exist first.
            node1.query(get_command(None, "small_jbod_with_external"))

        with pytest.raises(QueryRuntimeException):
            # Disk that does not exist at all.
            node1.query(get_command("TTL d1 TO DISK 'unknown'", "small_jbod_with_external"))

        node1.query("DROP TABLE IF EXISTS {}".format(name))

        if alter:
            node1.query(get_command(None, "small_jbod_with_external"))

        with pytest.raises(QueryRuntimeException):
            # Volume that does not exist at all.
            node1.query(get_command("TTL d1 TO VOLUME 'unknown'", "small_jbod_with_external"))

        node1.query("DROP TABLE IF EXISTS {}".format(name))

        if alter:
            node1.query(get_command(None, "only_jbod2"))

        with pytest.raises(QueryRuntimeException):
            # 'jbod1' exists but is not part of the 'only_jbod2' policy.
            node1.query(get_command("TTL d1 TO DISK 'jbod1'", "only_jbod2"))

        node1.query("DROP TABLE IF EXISTS {}".format(name))

        if alter:
            node1.query(get_command(None, "only_jbod2"))

        with pytest.raises(QueryRuntimeException):
            # 'external' volume exists but not within the 'only_jbod2' policy.
            node1.query(get_command("TTL d1 TO VOLUME 'external'", "only_jbod2"))

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_inserts_to_disk_do_not_work","MergeTree()",0),
    ("replicated_mt_test_inserts_to_disk_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_do_not_work', '1')",0),
    ("mt_test_inserts_to_disk_work","MergeTree()",1),
    ("replicated_mt_test_inserts_to_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_work', '1')",1),
])
def test_inserts_to_disk_work(started_cluster, name, engine, positive):
    """Rows whose move-TTL has already expired at INSERT time must be
    written directly to the destination disk ('external'); if any row's
    TTL lies in the future (the negative case) the part stays on 'jbod1'."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        data = []  # 10MB in total
        for i in range(10):
            # positive: every row is already expired; negative: the first row
            # expires only in 300s, which keeps the whole part on 'jbod1'.
            data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1 if i > 0 or positive else time.time()+300)))  # 1MB row

        node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"

    finally:
        try:
            node1.query("DROP TABLE IF EXISTS {}".format(name))
        except:
            pass
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_moves_to_disk_do_not_work","MergeTree()",0),
    ("replicated_mt_test_moves_to_disk_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_do_not_work', '1')",0),
    ("mt_test_moves_to_disk_work","MergeTree()",1),
    ("replicated_mt_test_moves_to_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_work', '1')",1),
])
def test_moves_to_disk_work(started_cluster, name, engine, positive):
    """A part inserted on 'jbod1' must be relocated to 'external' by the
    background mover once its TTL expires; in the negative case one row's
    TTL stays in the future so the part must remain on 'jbod1'."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        # time_1 expires while the test waits; time_2 only after the final
        # check, so the negative part never becomes movable during the test.
        wait_expire_1 = 12
        wait_expire_2 = 4
        time_1 = time.time() + wait_expire_1
        time_2 = time.time() + wait_expire_1 + wait_expire_2

        # Sleep on a helper thread so the insert happens while the TTL ticks.
        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()

        data = []  # 10MB in total
        for i in range(10):
            data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1 if i > 0 or positive else time_2)))  # 1MB row

        node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}

        wait_expire_1_thread.join()
        time.sleep(wait_expire_2/2)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine", [
    ("mt_test_moves_to_volume_work","MergeTree()"),
    ("replicated_mt_test_moves_to_volume_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_volume_work', '1')"),
])
def test_moves_to_volume_work(started_cluster, name, engine):
    """Parts initially spread over both JBOD disks must all be moved to the
    'external' volume once their TTL (d1 TO VOLUME 'external') expires."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                p1 Int64,
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY p1
            TTL d1 TO VOLUME 'external'
            SETTINGS storage_policy='jbods_with_external'
        """.format(name=name, engine=engine))

        wait_expire_1 = 10
        time_1 = time.time() + wait_expire_1

        # Sleep on a helper thread so inserts proceed while the TTL ticks.
        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()

        for p in range(2):
            data = []  # 10MB in total
            for i in range(5):
                data.append((str(p), "'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1)))  # 1MB row

            node1.query("INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        # Before expiry the parts sit on both JBOD disks.
        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {'jbod1', 'jbod2'}

        wait_expire_1_thread.join()
        time.sleep(1)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_inserts_to_volume_do_not_work","MergeTree()",0),
    ("replicated_mt_test_inserts_to_volume_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_do_not_work', '1')",0),
    ("mt_test_inserts_to_volume_work","MergeTree()",1),
    ("replicated_mt_test_inserts_to_volume_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_work', '1')",1),
])
def test_inserts_to_volume_work(started_cluster, name, engine, positive):
    """Already-expired rows must go straight to the 'external' volume at
    INSERT time.  SYSTEM STOP MOVES guarantees any placement on 'external'
    came from the insert itself, not from the background mover."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                p1 Int64,
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY p1
            TTL d1 TO VOLUME 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        node1.query("SYSTEM STOP MOVES {name}".format(name=name))

        for p in range(2):
            data = []  # 20MB in total
            for i in range(10):
                # positive: every row expired; negative: one future row pins
                # the part to the default 'jbod1' disk.
                data.append((str(p), "'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1 if i > 0 or positive else time.time()+300)))  # 1MB row

            node1.query("INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "20"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine", [
    ("mt_test_moves_to_disk_eventually_work","MergeTree()"),
    ("replicated_mt_test_moves_to_disk_eventually_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_eventually_work', '1')"),
])
def test_moves_to_disk_eventually_work(started_cluster, name, engine):
    """A TTL move that cannot run for lack of space must be retried: while a
    temp table fills 'jbod2', expired parts stay on 'jbod1'; after the temp
    table is dropped the mover must relocate them to 'jbod2'."""
    try:
        name_temp = name + "_temp"

        # Fill the destination disk so the TTL move initially cannot happen.
        node1.query("""
            CREATE TABLE {name} (
                s1 String
            ) ENGINE = MergeTree()
            ORDER BY tuple()
            SETTINGS storage_policy='only_jbod2'
        """.format(name=name_temp))

        data = []  # 35MB in total
        for i in range(35):
            data.append(get_random_string(1024 * 1024))  # 1MB row

        node1.query("INSERT INTO {} VALUES {}".format(name_temp, ",".join(["('" + x + "')" for x in data])))

        used_disks = get_used_disks_for_table(node1, name_temp)
        assert set(used_disks) == {"jbod2"}

        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'jbod2'
            SETTINGS storage_policy='jbod1_with_jbod2'
        """.format(name=name, engine=engine))

        data = []  # 10MB in total
        for i in range(10):
            # Every row is already expired, but 'jbod2' has no room yet.
            data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1)))  # 1MB row

        node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}

        # Free the destination disk; the mover should now succeed.
        node1.query("DROP TABLE {}".format(name_temp))
        time.sleep(2)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod2"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name_temp))
        node1.query("DROP TABLE IF EXISTS {}".format(name))
def test_replicated_download_ttl_info(started_cluster):
    """A replica fetching a part must receive its TTL info too: node2 inserts
    an already-expired row (placed on 'external'); node1, with moves stopped,
    must still put the downloaded part on 'external'."""
    name = "test_replicated_ttl_info"
    engine = "ReplicatedMergeTree('/clickhouse/test_replicated_download_ttl_info', '{replica}')"
    try:
        for i, node in enumerate((node1, node2), start=1):
            node.query("""
                CREATE TABLE {name} (
                    s1 String,
                    d1 DateTime
                ) ENGINE = {engine}
                ORDER BY tuple()
                TTL d1 TO DISK 'external'
                SETTINGS storage_policy='small_jbod_with_external'
            """.format(name=name, engine=engine))

        # Ensure node1's placement comes from the downloaded TTL info,
        # not from its own background mover.
        node1.query("SYSTEM STOP MOVES {}".format(name))

        node2.query("INSERT INTO {} (s1, d1) VALUES ('{}', toDateTime({}))".format(name, get_random_string(1024 * 1024), time.time()-100))

        assert set(get_used_disks_for_table(node2, name)) == {"external"}

        # Give replication a moment to ship the part to node1.
        time.sleep(1)

        assert node1.query("SELECT count() FROM {}".format(name)).splitlines() == ["1"]
        assert set(get_used_disks_for_table(node1, name)) == {"external"}

    finally:
        for node in (node1, node2):
            try:
                node.query("DROP TABLE IF EXISTS {}".format(name))
            except:
                continue
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_merges_to_disk_do_not_work","MergeTree()",0),
    ("replicated_mt_test_merges_to_disk_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_do_not_work', '1')",0),
    ("mt_test_merges_to_disk_work","MergeTree()",1),
    ("replicated_mt_test_merges_to_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_work', '1')",1),
])
def test_merges_to_disk_work(started_cluster, name, engine, positive):
    """A merge of expired parts must write the merged part on the TTL
    destination disk ('external'); with an unexpired row in the mix the
    merged part must stay on 'jbod1'.  Merges and moves are stopped first
    so the two source parts survive until the TTL is due."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        node1.query("SYSTEM STOP MERGES {}".format(name))
        node1.query("SYSTEM STOP MOVES {}".format(name))

        # time_1 expires during the wait; time_2 only after the final check.
        wait_expire_1 = 16
        wait_expire_2 = 4
        time_1 = time.time() + wait_expire_1
        time_2 = time.time() + wait_expire_1 + wait_expire_2

        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()

        for _ in range(2):
            data = []  # 16MB in total
            for i in range(8):
                data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1 if i > 0 or positive else time_2)))  # 1MB row

            node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}
        assert "2" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()

        wait_expire_1_thread.join()
        time.sleep(wait_expire_2/2)

        node1.query("SYSTEM START MERGES {}".format(name))
        node1.query("OPTIMIZE TABLE {}".format(name))

        time.sleep(1)
        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}
        assert "1" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "16"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine", [
    ("mt_test_merges_with_full_disk_work","MergeTree()"),
    ("replicated_mt_test_merges_with_full_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_merges_with_full_disk_work', '1')"),
])
def test_merges_with_full_disk_work(started_cluster, name, engine):
    """When the TTL destination disk ('jbod2') is full, a merge of expired
    parts must fall back to the source disk ('jbod1') instead of failing."""
    try:
        name_temp = name + "_temp"

        # Fill 'jbod2' so the TTL destination has no free space.
        node1.query("""
            CREATE TABLE {name} (
                s1 String
            ) ENGINE = MergeTree()
            ORDER BY tuple()
            SETTINGS storage_policy='only_jbod2'
        """.format(name=name_temp))

        data = []  # 35MB in total
        for i in range(35):
            data.append(get_random_string(1024 * 1024))  # 1MB row

        node1.query("INSERT INTO {} VALUES {}".format(name_temp, ",".join(["('" + x + "')" for x in data])))

        used_disks = get_used_disks_for_table(node1, name_temp)
        assert set(used_disks) == {"jbod2"}

        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'jbod2'
            SETTINGS storage_policy='jbod1_with_jbod2'
        """.format(name=name, engine=engine))

        wait_expire_1 = 10
        time_1 = time.time() + wait_expire_1

        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()

        for _ in range(2):
            data = []  # 12MB in total
            for i in range(6):
                data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1)))  # 1MB row

            node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}
        assert "2" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()

        wait_expire_1_thread.join()

        node1.query("OPTIMIZE TABLE {}".format(name))
        time.sleep(1)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}  # Merged to the same disk against the rule.
        assert "1" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "12"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name_temp))
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_moves_after_merges_do_not_work","MergeTree()",0),
    ("replicated_mt_test_moves_after_merges_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_do_not_work', '1')",0),
    ("mt_test_moves_after_merges_work","MergeTree()",1),
    ("replicated_mt_test_moves_after_merges_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_work', '1')",1),
])
def test_moves_after_merges_work(started_cluster, name, engine, positive):
    """A part produced by a merge (before the TTL is due) must itself be
    moved to 'external' once the TTL expires; with one unexpired row merged
    in, the merged part must stay on 'jbod1'."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        # time_1 expires during the wait; time_2 only after the final check.
        wait_expire_1 = 16
        wait_expire_2 = 4
        time_1 = time.time() + wait_expire_1
        time_2 = time.time() + wait_expire_1 + wait_expire_2

        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()

        for _ in range(2):
            data = []  # 14MB in total
            for i in range(7):
                data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1 if i > 0 or positive else time_2)))  # 1MB row

            node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        # Merge the two parts while the TTL is still in the future.
        node1.query("OPTIMIZE TABLE {}".format(name))
        time.sleep(1)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}
        assert "1" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()

        wait_expire_1_thread.join()
        time.sleep(wait_expire_2/2)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "14"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine,positive,bar", [
    ("mt_test_moves_after_alter_do_not_work","MergeTree()",0,"DELETE"),
    ("replicated_mt_test_moves_after_alter_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_do_not_work', '1')",0,"DELETE"),
    ("mt_test_moves_after_alter_work","MergeTree()",1,"DELETE"),
    ("replicated_mt_test_moves_after_alter_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_work', '1')",1,"DELETE"),
    ("mt_test_moves_after_alter_do_not_work","MergeTree()",0,"TO DISK 'external'"),
    ("replicated_mt_test_moves_after_alter_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_do_not_work', '1')",0,"TO DISK 'external'"),
    ("mt_test_moves_after_alter_work","MergeTree()",1,"TO DISK 'external'"),
    ("replicated_mt_test_moves_after_alter_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_work', '1')",1,"TO DISK 'external'"),
])
def test_ttls_do_not_work_after_alter(started_cluster, name, engine, positive, bar):
    """ALTER ... MODIFY TTL must fully replace the old TTL: after replacing
    the immediate move-to-'external' rule with a far-future one (positive
    case), already-expired rows must stay on 'jbod1'; without the ALTER the
    original rule still sends them to 'external'."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        if positive:
            node1.query("""
                ALTER TABLE {name}
                    MODIFY TTL
                    d1 + INTERVAL 15 MINUTE {bar}
            """.format(name=name, bar=bar))  # That shall disable TTL.

        data = []  # 10MB in total
        for i in range(10):
            # Rows are already expired relative to the ORIGINAL TTL rule.
            data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1)))  # 1MB row

        node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1" if positive else "external"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine", [
    ("mt_test_materialize_ttl_in_partition","MergeTree()"),
    ("replicated_mt_test_materialize_ttl_in_partition","ReplicatedMergeTree('/clickhouse/test_materialize_ttl_in_partition', '1')"),
])
def test_materialize_ttl_in_partition(started_cluster, name, engine):
    """With materialize_ttl_after_modify=0, a newly added move-TTL must not
    touch existing parts; MATERIALIZE TTL IN PARTITION must then apply it
    to exactly the named partitions (2 and 4) and no others."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                p1 Int8,
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY p1
            PARTITION BY p1
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        data = []  # 5MB in total
        for i in range(5):
            # One already-expired row per partition 0..4.
            data.append((str(i), "'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1)))  # 1MB row

        node1.query("INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        time.sleep(0.5)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}

        # Add the TTL without materializing it on existing parts.
        node1.query("""
            ALTER TABLE {name}
                MODIFY TTL
                d1 TO DISK 'external' SETTINGS materialize_ttl_after_modify = 0
        """.format(name=name))

        time.sleep(0.5)

        # Nothing moved yet: the TTL is declared but not materialized.
        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}

        node1.query("""
            ALTER TABLE {name}
                MATERIALIZE TTL IN PARTITION 2
        """.format(name=name))

        node1.query("""
            ALTER TABLE {name}
                MATERIALIZE TTL IN PARTITION 4
        """.format(name=name))

        time.sleep(0.5)

        # Only partitions 2 and 4 should have moved to 'external'.
        used_disks_sets = []
        for i in range(len(data)):
            used_disks_sets.append(set(get_used_disks_for_table(node1, name, partition=i)))

        assert used_disks_sets == [{"jbod1"}, {"jbod1"}, {"external"}, {"jbod1"}, {"external"}]

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == str(len(data))

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_alter_multiple_ttls_positive", "MergeTree()", True),
    ("mt_replicated_test_alter_multiple_ttls_positive", "ReplicatedMergeTree('/clickhouse/replicated_test_alter_multiple_ttls_positive', '1')", True),
    ("mt_test_alter_multiple_ttls_negative", "MergeTree()", False),
    ("mt_replicated_test_alter_multiple_ttls_negative", "ReplicatedMergeTree('/clickhouse/replicated_test_alter_multiple_ttls_negative', '1')", False),
])
def test_alter_multiple_ttls(started_cluster, name, engine, positive):
    """Copyright 2019, Altinity LTD

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.

    Check that when multiple TTL expressions are set, and before any parts
    are inserted the TTL expressions are changed with an ALTER command, then
    all old TTL expressions are removed and the parts are moved to the
    specified disk or volume or deleted if the new TTL expression is
    triggered, and are not moved or deleted when it is not.
    """
    now = time.time()
    try:
        node1.query("""
            CREATE TABLE {name} (
                p1 Int64,
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY p1
            TTL d1 + INTERVAL 34 SECOND TO DISK 'jbod2',
                d1 + INTERVAL 64 SECOND TO VOLUME 'external'
            SETTINGS storage_policy='jbods_with_external', merge_with_ttl_timeout=0
        """.format(name=name, engine=engine))

        # Replace the original TTLs before any data exists; only the new
        # rules (move at +0s / +14s, delete at +19s) should ever apply.
        node1.query("""
            ALTER TABLE {name} MODIFY
            TTL d1 + INTERVAL 0 SECOND TO DISK 'jbod2',
                d1 + INTERVAL 14 SECOND TO VOLUME 'external',
                d1 + INTERVAL 19 SECOND DELETE
        """.format(name=name))

        for p in range(3):
            data = []  # 6MB in total
            now = time.time()
            for i in range(2):
                p1 = p
                s1 = get_random_string(1024 * 1024)  # 1MB
                # positive: both rows already past the +0s rule; negative:
                # one row per partition only expires in 300s.
                d1 = now - 1 if i > 0 or positive else now + 300
                data.append("({}, '{}', toDateTime({}))".format(p1, s1, d1))
            node1.query("INSERT INTO {name} (p1, s1, d1) VALUES {values}".format(name=name, values=",".join(data)))

        used_disks = get_used_disks_for_table(node1, name)
        # BUG FIX: the conditional expression must wrap the expected set.
        # The old form `assert x == {"jbod2"} if positive else {...}` parsed
        # as `assert (x == {"jbod2"}) if positive else {...}`, i.e. in the
        # negative case it asserted a non-empty set literal -- always true.
        assert set(used_disks) == ({"jbod2"} if positive else {"jbod1", "jbod2"})

        assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["6"]

        if positive:
            expected_disks = {"external"}
        else:
            expected_disks = {"jbod1", "jbod2"}

        # Wait for the +14s move rule to take effect (or not, negatively).
        check_used_disks_with_retry(node1, name, expected_disks, 50)

        assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["6"]

        # The +19s delete rule: positive rows disappear entirely; the three
        # unexpired negative rows must survive.
        time.sleep(5)

        for i in range(50):
            rows_count = int(node1.query("SELECT count() FROM {name}".format(name=name)).strip())
            if positive:
                if rows_count == 0:
                    break
            else:
                if rows_count == 3:
                    break
            node1.query("OPTIMIZE TABLE {name} FINAL".format(name=name))
            time.sleep(0.5)

        if positive:
            assert rows_count == 0
        else:
            assert rows_count == 3

    finally:
        node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
@pytest.mark.parametrize("name,engine", [
    ("concurrently_altering_ttl_mt","MergeTree()"),
    ("concurrently_altering_ttl_replicated_mt","ReplicatedMergeTree('/clickhouse/concurrently_altering_ttl_replicated_mt', '1')",),
])
def test_concurrent_alter_with_ttl_move(started_cluster, name, engine):
    """Stress test: run INSERTs, ALTER ... MOVE, ALTER ... UPDATE,
    ALTER ... MODIFY TTL and OPTIMIZE against the same table from 15
    workers at once and check the server stays responsive and the final
    row count matches the number of inserted rows (5 workers x 100 rows).
    """
    try:
        node1.query("""
            CREATE TABLE {name} (
                EventDate Date,
                number UInt64
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY toYYYYMM(EventDate)
            SETTINGS storage_policy='jbods_with_external'
        """.format(name=name, engine=engine))

        # Pool of unique values so concurrent inserts never collide.
        values = list({ random.randint(1, 1000000) for _ in range(0, 1000) })

        def insert(num):
            # Insert `num` single-row batches into partitions 201903/201904.
            for i in range(num):
                day = random.randint(11, 30)
                value = values.pop()
                month = '0' + str(random.choice([3, 4]))
                node1.query("INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format(name, m=month, d=day, v=value))

        def alter_move(num):
            def produce_alter_move(node, name):
                # Randomly move either a single part or a whole partition
                # to a random disk or volume; failures are expected under
                # concurrency and are ignored.
                move_type = random.choice(["PART", "PARTITION"])
                if move_type == "PART":
                    # system.parts may be briefly unreadable during
                    # concurrent mutations; retry up to 10 times.
                    for _ in range(10):
                        try:
                            parts = node1.query("SELECT name from system.parts where table = '{}' and active = 1".format(name)).strip().split('\n')
                            break
                        except QueryRuntimeException:
                            pass
                    else:
                        raise Exception("Cannot select from system.parts")

                    move_part = random.choice(["'" + part + "'" for part in parts])
                else:
                    move_part = random.choice([201903, 201904])

                move_disk = random.choice(["DISK", "VOLUME"])
                if move_disk == "DISK":
                    move_volume = random.choice(["'external'", "'jbod1'", "'jbod2'"])
                else:
                    move_volume = random.choice(["'main'", "'external'"])
                try:
                    node1.query("ALTER TABLE {} MOVE {mt} {mp} TO {md} {mv}".format(
                        name, mt=move_type, mp=move_part, md=move_disk, mv=move_volume))
                except QueryRuntimeException:
                    pass

            for i in range(num):
                produce_alter_move(node1, name)

        def alter_update(num):
            for i in range(num):
                node1.query("ALTER TABLE {} UPDATE number = number + 1 WHERE 1".format(name))

        def alter_modify_ttl(num):
            # Replace the table TTL with 1..10 random move rules; conflicts
            # with concurrent ALTERs are ignored.
            for i in range(num):
                ttls = []
                for j in range(random.randint(1, 10)):
                    what = random.choice(["TO VOLUME 'main'", "TO VOLUME 'external'", "TO DISK 'jbod1'", "TO DISK 'jbod2'", "TO DISK 'external'"])
                    when = "now()+{}".format(random.randint(-1, 5))
                    ttls.append("{} {}".format(when, what))
                try:
                    node1.query("ALTER TABLE {} MODIFY TTL {}".format(name, ", ".join(ttls)))
                except QueryRuntimeException:
                    pass

        def optimize_table(num):
            for i in range(num):
                try: # optimize may throw after concurrent alter
                    node1.query("OPTIMIZE TABLE {} FINAL".format(name), settings={'optimize_throw_if_noop': '1'})
                    break
                except:
                    pass

        p = Pool(15)
        tasks = []
        for i in range(5):
            tasks.append(p.apply_async(insert, (100,)))
            tasks.append(p.apply_async(alter_move, (100,)))
            tasks.append(p.apply_async(alter_update, (100,)))
            tasks.append(p.apply_async(alter_modify_ttl, (100,)))
            tasks.append(p.apply_async(optimize_table, (100,)))

        for task in tasks:
            task.get(timeout=120)

        # Server must still answer, and no inserted row may be lost.
        assert node1.query("SELECT 1") == "1\n"
        assert node1.query("SELECT COUNT() FROM {}".format(name)) == "500\n"
    finally:
        node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
@pytest.mark.skip(reason="Flacky test")
@pytest.mark.parametrize("name,positive", [
    ("test_double_move_while_select_negative", 0),
    ("test_double_move_while_select_positive", 1),
])
def test_double_move_while_select(started_cluster, name, positive):
    """Check interaction of a long-running SELECT with MOVE PART.

    In the positive case a slow SELECT pins the part, so a later move back
    to 'jbod1' plus jbod1 pressure must leave the part on 'jbod1'; in the
    negative case (no SELECT) the background move pushes it to 'external'.
    Timing-dependent, hence marked as flaky and skipped.
    """
    try:
        node1.query("""
            CREATE TABLE {name} (
                n Int64,
                s String
            ) ENGINE = MergeTree
            ORDER BY tuple()
            PARTITION BY n
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name))

        node1.query("INSERT INTO {name} VALUES (1, '{string}')".format(name=name, string=get_random_string(10 * 1024 * 1024)))

        parts = node1.query("SELECT name FROM system.parts WHERE table = '{name}' AND active = 1".format(name=name)).splitlines()
        assert len(parts) == 1

        node1.query("ALTER TABLE {name} MOVE PART '{part}' TO DISK 'external'".format(name=name, part=parts[0]))

        def long_select():
            # Only the positive variant holds a read lock on the part.
            if positive:
                node1.query("SELECT sleep(3), sleep(2), sleep(1), n FROM {name}".format(name=name))

        thread = threading.Thread(target=long_select)
        thread.start()

        time.sleep(1)

        node1.query("ALTER TABLE {name} MOVE PART '{part}' TO DISK 'jbod1'".format(name=name, part=parts[0]))

        # Fill jbod1 to force ClickHouse to make move of partition 1 to external.
        node1.query("INSERT INTO {name} VALUES (2, '{string}')".format(name=name, string=get_random_string(9 * 1024 * 1024)))
        node1.query("INSERT INTO {name} VALUES (3, '{string}')".format(name=name, string=get_random_string(9 * 1024 * 1024)))
        node1.query("INSERT INTO {name} VALUES (4, '{string}')".format(name=name, string=get_random_string(9 * 1024 * 1024)))

        time.sleep(1)

        # If SELECT locked old part on external, move shall fail.
        assert node1.query("SELECT disk_name FROM system.parts WHERE table = '{name}' AND active = 1 AND name = '{part}'"
                           .format(name=name, part=parts[0])).splitlines() == ["jbod1" if positive else "external"]

        thread.join()

        assert node1.query("SELECT n FROM {name} ORDER BY n".format(name=name)).splitlines() == ["1", "2", "3", "4"]
    finally:
        node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_alter_with_merge_do_not_work","MergeTree()",0),
    ("replicated_mt_test_alter_with_merge_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_alter_with_merge_do_not_work', '1')",0),
    ("mt_test_alter_with_merge_work","MergeTree()",1),
    ("replicated_mt_test_alter_with_merge_work","ReplicatedMergeTree('/clickhouse/replicated_test_alter_with_merge_work', '1')",1),
])
def test_alter_with_merge_work(started_cluster, name, engine, positive):
    """Copyright 2019, Altinity LTD
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.

    Check that TTL expressions are re-evaluated for
    existing parts after ALTER command changes TTL expressions
    and parts are merged.
    """
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 + INTERVAL 3000 SECOND TO DISK 'jbod2',
                d1 + INTERVAL 6000 SECOND TO VOLUME 'external'
            SETTINGS storage_policy='jbods_with_external', merge_with_ttl_timeout=0
        """.format(name=name, engine=engine))

        def optimize_table(num):
            # OPTIMIZE may legitimately throw right after a concurrent
            # ALTER; retry up to `num` times, stop on first success.
            for i in range(num):
                try: # optimize may throw after concurrent alter
                    node1.query("OPTIMIZE TABLE {} FINAL".format(name), settings={'optimize_throw_if_noop': '1'})
                    break
                except:
                    pass

        # Three 2-row inserts => 6 rows, ~6MB in total (one part each).
        for p in range(3):
            data = []  # 6MB in total
            now = time.time()
            for i in range(2):
                s1 = get_random_string(1024 * 1024)  # 1MB
                # positive => already expired; negative => expires far later.
                d1 = now - 1 if positive else now + 300
                data.append("('{}', toDateTime({}))".format(s1, d1))
            values = ",".join(data)
            node1.query("INSERT INTO {name} (s1, d1) VALUES {values}".format(name=name, values=values))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1", "jbod2"}

        # BUG FIX: the `assert` keyword was missing here, so the comparison
        # result was silently discarded and the check never ran.
        assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["6"]

        node1.query("""
            ALTER TABLE {name} MODIFY
            TTL d1 + INTERVAL 0 SECOND TO DISK 'jbod2',
                d1 + INTERVAL 5 SECOND TO VOLUME 'external',
                d1 + INTERVAL 10 SECOND DELETE
        """.format(name=name))

        optimize_table(20)

        # Merge must collapse everything into a single active part.
        assert node1.query("SELECT count() FROM system.parts WHERE table = '{name}' AND active = 1".format(name=name)) == "1\n"

        time.sleep(5)

        optimize_table(20)

        if positive:
            assert check_used_disks_with_retry(node1, name, set(["external"]), 50)
        else:
            assert check_used_disks_with_retry(node1, name, set(["jbod1", "jbod2"]), 50)

        time.sleep(5)

        optimize_table(20)

        # With expired timestamps the DELETE TTL removes all rows.
        if positive:
            assert node1.query("SELECT count() FROM {name}".format(name=name)) == "0\n"
        else:
            assert node1.query("SELECT count() FROM {name}".format(name=name)) == "6\n"
    finally:
        node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
|
multi_process.py | import multiprocessing
import os
# TODO This is pretty functionless for now.
def __log__(title):
"""
Debug log.
:param title: Debug title
:return:
"""
print(f'{title}\nModule: {__name__}\nParent Process ID: {os.getppid()}\nProcess ID: {os.getpid()}')
def f(name):
    """
    Multiprocess item: logs its process info and greets `name`.
    :param name: Name of process
    :return: None
    """
    __log__('Function F')
    print('Hello', name)
if __name__ == '__main__':
    # Demo: spawn one child process running f('bob') and wait for it.
    __log__('main line')
    p = multiprocessing.Process(target=f, args=('bob',))
    p.start()
    p.join()
|
autohibernate.py | #!/usr/bin/env python
#
# AutoPilot :: Sourav Badami :: http://www.souravbadami.me
# Script: AutoHibernate
# Description: This script automatically detects when theres no one nearby and
# sends the system to hibernation mode.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import time
import cv2
import sys
import time
from gi.repository import Notify
import threading
import ctypes
# Changed-pixel count above which a frame counts as "motion detected".
DELTA_COUNT_THRESHOLD = 1000
# Stillness tick counter shared with start_timer(); 0 means motion seen.
STILL_TIME = 0
# Set hibernation trigger time in seconds (Default: 30 minutes)
STT = 1800
def delta_images(t0, t1, t2):
    """Return the absolute per-pixel difference between frames t2 and t0.

    NOTE(review): t1 is accepted but unused -- kept only so the signature
    matches the classic three-frame differencing call sites.
    """
    return cv2.absdiff(t2, t0)
def started():
    """
    Gets triggered when the application starts and notifies the user.
    """
    # NOTE(review): timer_thread is declared global but never used here.
    global timer_thread
    Notify.init("Started")
    #Shows Notification on the desktop
    Notify.Notification.new("\nAutoPilot"," The system has been taken over by AutoPilot.").show()

# Announce the takeover immediately at import/run time.
started()
def timeout():
    """
    Triggers the notification and hibernates the system on timeout.
    """
    global timer_thread
    Notify.init("AutoPilot")
    #Shows Notification on the desktop
    Notify.Notification.new("\nAutoPilot"," System going down to hibernation mode.").show()
    # Suspend to disk via pm-utils (requires pm-hibernate on PATH).
    os.system("pm-hibernate")
def start_timer():
    """
    Starts the timer when there's no one around.

    Counts one tick per second while STILL_TIME stays non-zero (the main
    loop resets it to 0 when motion reappears, which ends this thread).
    """
    global STILL_TIME
    while STILL_TIME != 0:
        STILL_TIME = STILL_TIME + 1
        #print(STILL_TIME-1, "Seconds")
        # NOTE(review): with integer division this fires at ~2*STT seconds,
        # not STT, and matches on two consecutive ticks (e.g. 3600 and 3601
        # both floor-divide to 1800), so timeout() can run twice -- confirm
        # whether `if STILL_TIME >= STT` was intended.
        if STILL_TIME/2 == STT:
            timeout()
        time.sleep(1)
# Probe camera indices 0..2 and keep the first one that opens.
for cn in range(0,3):
    cam = cv2.VideoCapture(cn)
    if cam.isOpened():
        break
if not cam.isOpened():
    sys.stderr.write('ERROR: Did not open a camera.\n')
    sys.exit(1)
print ("Running with camera number %d." % cn)
print type(cam)
print str(cam)
# Property ids 3/4 are CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT.
cam.set(3,640)
cam.set(4,480)
winName = "AutoHibernation :: AutoPilot Applications"
cv2.namedWindow(winName, cv2.CV_WINDOW_AUTOSIZE)
# Prime a sliding window of three consecutive frames for differencing.
t_minus = cam.read()[1]
t_now = cam.read()[1]
t_plus = cam.read()[1]
t_now = cv2.resize(t_now, (640, 480))
t_minus = cv2.resize(t_minus, (640, 480))
t_plus = cv2.resize(t_plus, (640, 480))
delta_count_last = 1
while True:
    # Motion magnitude = count of non-zero pixels in the frame difference.
    delta_view = delta_images(t_minus, t_now, t_plus)
    retval, delta_view = cv2.threshold(delta_view, 16, 255, 3)
    cv2.normalize(delta_view, delta_view, 0, 255, cv2.NORM_MINMAX)
    img_count_view = cv2.cvtColor(delta_view, cv2.COLOR_RGB2GRAY)
    delta_count = cv2.countNonZero(img_count_view)
    delta_view = cv2.flip(delta_view, 1)
    cv2.putText(
        delta_view, "UM-PID: %d"%(
            delta_count), (5, 15), cv2.FONT_HERSHEY_PLAIN, 0.8, (255,255,255))
    cv2.putText(
        delta_view, "Pilot Status: Activated", (10, 450), cv2.FONT_HERSHEY_PLAIN, 0.8, (255,255,255))
    # Credits
    cv2.putText(
        delta_view, "Developer: Sourav Badami", (390, 450), cv2.FONT_HERSHEY_PLAIN, 0.8, (255,255,255))
    cv2.putText(
        delta_view, "Blog: http://www.souravbadami.me", (390, 460), cv2.FONT_HERSHEY_PLAIN, 0.8, (255,255,255))
    cv2.imshow(winName, delta_view)
    # Rising edge: motion resumed -> reset the stillness counter (this also
    # terminates any running start_timer thread).
    if (
        delta_count_last < DELTA_COUNT_THRESHOLD
        and delta_count >= DELTA_COUNT_THRESHOLD):
        STILL_TIME = 0
        sys.stdout.flush()
    # Falling edge: motion stopped -> arm the hibernation countdown thread.
    elif delta_count_last >= DELTA_COUNT_THRESHOLD and delta_count < DELTA_COUNT_THRESHOLD:
        STILL_TIME = 1
        timer_thread = threading.Thread(target=start_timer)
        timer_thread.start()
        sys.stdout.flush()
    # NOTE(review): `now` is assigned but never read afterwards.
    now=time.time()
    delta_count_last = delta_count
    # Slide the three-frame window and fetch/condition the next frame.
    t_minus = t_now
    t_now = t_plus
    t_plus = cam.read()[1]
    t_plus = cv2.blur(t_plus,(8,8))
    t_plus = cv2.resize(t_plus, (640, 480))
    key = cv2.waitKey(10)
    # Quit on ESC (0x1b) or 'q'.
    if key == 0x1b or key == ord('q'):
        cv2.destroyWindow(winName)
        break
|
run.py | # ©GO-PC Build
# This project is under a CC0-1.0 License
# (View the license here: https://github.com/GO-PC-Build/DiscordBot/blob/master/LICENSE)
from glob import glob
from os import name, execv, system, environ
from sys import argv, executable, stdout, exit
from distutils.util import strtobool
try:
    from utilsx.console import Prettier, Colors
    from utilsx.discord import BotX
except ImportError:
    # Bootstrap: install requirements with the platform-appropriate launcher
    # ("py -3" on Windows, "python3" elsewhere), then ask for a restart.
    print("UtilsX library is missing, attempting to install it...")
    system(("py -3" if name == "nt" else "python3") + " -m pip install -r requirements.txt")
    print("Please reboot this application")
    exit(0)
    raise # Fixes IDE error
from utils import VersionHandler, PrintHandler
from configparser import ConfigParser
from discord import Intents
# Check if the operating system is linux or windows. (nt = windows)
# If its windows, change the console clear command and the filepath delimiter.
clear, back_slash = "clear", "/"
if name == "nt":
    clear, back_slash = "cls", "\\"

# Read our configuration
cfg = ConfigParser()
cfg.read("config.cfg")
class Bot(BotX):
    """
    The main bot object, this contains our handlers and loads our extensions
    """

    def __init__(self):
        """Clear the console, optionally self-update, then load every
        extension found under extensions/*.py."""
        super().__init__(Intents.all())
        system(clear)
        stdout.flush()
        self.prettier = Prettier(colors_enabled=strtobool(cfg["CONSOLE"].get("colors", "true")), auto_strip_message=True)
        self.ph = PrintHandler(self.prettier)
        self.ph.printf("Initializing client...")
        self.prefix = cfg["BOT"].get("prefix", "!")
        self.description = "De officiele GO-AO discord bot!"
        self.vm = VersionHandler()
        if strtobool(cfg["UPDATER"].get("enabled", "true")):
            self.check_for_updates()
            self.ph.printf("No updates found, starting bot...")
        self.ph.printf("Started loading extensions.")
        # "extensions/foo.py" -> "extensions.foo" (strip ".py", swap separator).
        extensions = list(map(lambda extension: extension.replace(back_slash, ".")[:-3], glob("extensions/*.py")))
        for index, _ in enumerate(self.load_extensions(extensions)):
            self.ph.printf(f"Successfully loaded "
                           f"{Colors.light_blue.value + extensions[index].replace('extensions.', '')}")

    @staticmethod
    def restart():
        """Clear the console and re-exec this script in a fresh interpreter."""
        system(clear)
        stdout.flush()
        execv(executable, ['python'] + argv)

    def check_for_updates(self):
        """Update to the latest version and restart if one is available;
        otherwise return so startup continues."""
        self.ph.printf("Checking for updates...")
        if not self.vm.is_latest:
            self.ph.printf("Update found! Started updating bot to the latest version...")
            self.vm.update_version()
            self.ph.printf("Successfully updated to the latest version. Rebooting bot.")
            self.restart()

    # def console_handler(self):
    #     data = input("").strip()
    #     if data == "help":
    #         self.prettier.print(f"Interactive Console Help Menu v{self.vm.version}\n"
    #                             "update - Checks if it can update, if it can it will.\n"
    #                             "stop - Kills the bot instance\n"
    #                             "help - This menu")
    #     elif data == "update":
    #         self.check_for_updates()
    #         self.ph.printf("No updates found!")
    #     elif data == "stop":
    #         self.ph.printf("Stopping bot!")
    #         exit(0)
    #     else:
    #         self.ph.printf(f"Couldn't find a command called '{data}'")
    #     self.console_handler()

    async def on_ready(self):
        """Gateway handshake complete: announce the running version."""
        self.ph.printf(f"Currently running on v{self.vm.version}!")
        # self.ph.printf("Console input ready, type `help` to see all commands.")
        # Thread(target=self.console_handler).start()
        # await self.logout()
if __name__ == "__main__":
    # The bot token is read from an environment variable whose name is
    # configurable via [BOT] token_env (default GO_PC_BOT_TOKEN).
    location = cfg["BOT"].get("token_env", "GO_PC_BOT_TOKEN")
    try:
        Bot().run(environ[location])
    except KeyError:
        # Missing env var: print colored setup instructions instead of a traceback.
        print(f"{Colors.red.value}ERROR:\n"
              f"No valid bot token was provided on env `{Colors.magenta.value + location + Colors.red.value}`"
              f"{Colors.default.value}\n"
              f"Please create an env variable with name `{Colors.magenta.value + location + Colors.default.value}` "
              f"and place your bot token in it.\n"
              f"Then rerun this script.\n\n"
              f"Or check out this tutorial:\n"
              f"{Colors.light_blue.value}https://www.twilio.com/blog/2017/01/how-to-set-environment-variables.html"
              f"{Colors.default.value}")
|
resourcedirectory_test.py | import unittest
import threading
import socket
import re
import random
from time import sleep
from coapthon.resource_directory.resourceDirectory import ResourceDirectory
from coapthon.messages.response import Response
from coapthon.messages.request import Request
from coapthon import defines
from coapthon.serializer import Serializer
from pymongo import MongoClient
from coapthon.client.helperclient import HelperClient
__author__ = 'Carmelo Aparo'
class ResourceDirectoryTest(unittest.TestCase):
    def setUp(self):
        """Start a local ResourceDirectory server thread and wipe the DB."""
        self.server_address = ("127.0.0.1", 5683)
        self.current_mid = random.randint(1, 1000)
        self.server = ResourceDirectory("127.0.0.1", 5683, start_mongo=False)
        self.server_thread = threading.Thread(target=self.server.listen, args=(10,))
        self.server_thread.start()
        self.delete_database()

    def tearDown(self):
        """Stop the server and wait for its listener thread to exit."""
        self.server.close()
        self.server_thread.join(timeout=25)
        self.server = None

    @staticmethod
    def delete_database():
        """Delete every registration from the MongoDB resources collection."""
        database = defines.MONGO_DATABASE
        connection = MongoClient(defines.MONGO_HOST, defines.MONGO_PORT, username=defines.MONGO_USER,
                                 password=defines.MONGO_PWD, authSource=database, authMechanism='SCRAM-SHA-1')
        collection = connection[database].resources
        try:
            collection.delete_many({})
        except:
            # Best-effort cleanup; tests proceed even if the wipe fails.
            print("Error in delete_database")
@staticmethod
def parse_core_link_format(link_format):
data = []
while len(link_format) > 0:
pattern = "<([^>]*)>;"
result = re.match(pattern, link_format)
path = result.group(1)
link_format = link_format[result.end(1) + 2:]
pattern = "([^<,])*"
result = re.match(pattern, link_format)
attributes = result.group(0)
dict_att = {}
if len(attributes) > 0:
attributes = attributes.split(";")
for att in attributes:
a = att.split("=")
if len(a) > 1:
if a[1].isdigit():
a[1] = int(a[1])
else:
a[1] = a[1].replace('"', '')
dict_att[a[0]] = a[1]
else:
dict_att[a[0]] = a[0]
link_format = link_format[result.end(0) + 1:]
tmp = {'path': path}
dict_att.update(tmp)
data.append(dict_att)
return data
    def _test_check(self, message_list, timeout=0):
        """Send each request over UDP and compare the reply to `expected`.

        For link-format payloads the comparison is structural (attribute by
        attribute, ignoring 'lt' and ordering); otherwise it is exact.
        `timeout` seconds are slept before each send (used by expiry tests).
        """
        serializer = Serializer()
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        for message, expected in message_list:
            if message is not None:
                datagram = serializer.serialize(message)
                sleep(timeout)
                sock.sendto(datagram, message.destination)
            if expected is not None:
                datagram, source = sock.recvfrom(4096)
                received_message = serializer.deserialize(datagram, source)
                # Only fields set on `expected` are checked.
                if expected.type is not None:
                    self.assertEqual(received_message.type, expected.type)
                if expected.mid is not None:
                    self.assertEqual(received_message.mid, expected.mid)
                self.assertEqual(received_message.code, expected.code)
                if expected.source is not None:
                    self.assertEqual(received_message.source, source)
                if expected.token is not None:
                    self.assertEqual(received_message.token, expected.token)
                if expected.content_type is not None:
                    self.assertEqual(received_message.content_type, expected.content_type)
                if expected.payload is not None:
                    # Pair up entries by path, then compare attributes;
                    # 'lt' is skipped because the stored lifetime drifts.
                    expected_list = self.parse_core_link_format(expected.payload)
                    received_list = self.parse_core_link_format(received_message.payload)
                    all_list = []
                    for expected_elem in expected_list:
                        for received_elem in received_list:
                            if expected_elem['path'] == received_elem['path']:
                                all_list_elem = (expected_elem, received_elem)
                                all_list.append(all_list_elem)
                    for data in all_list:
                        for k in data[1]:
                            self.assertIn(k, data[0])
                            if (k != "lt") and (k in data[0]):
                                self.assertEqual(data[0][k], data[1][k])
                else:
                    self.assertEqual(expected.payload, received_message.payload)
        sock.close()
    def test_uri_discovery(self):
        """GET /.well-known/core lists the RD registration and lookup links."""
        print("Uri discovery")
        path = ".well-known/core"
        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.content_type = 0
        req.payload = None
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.content_type = defines.Content_types["application/link-format"]
        expected.payload = '</rd-lookup/res>;rt="core.rd-lookup-res";ct=40,</rd>;rt="core.rd";ct=40,' \
                           '</rd-lookup/ep>;rt="core.rd-lookup-ep";ct=40'
        self.current_mid += 1
        self._test_check([(req, expected)])

    def test_registration(self):
        """POST link-format payload to /rd creates a registration (2.01)."""
        print("Registration")
        path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683"
        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.content_type = defines.Content_types["application/link-format"]
        req.payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";' \
                      'anchor="coap://spurious.example.com:5683",</sensors/light>;ct=41;rt="light-lux";if="sensor"'
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CREATED.number
        expected.token = None
        expected.content_type = 0
        expected.payload = None
        self.current_mid += 1
        self._test_check([(req, expected)])
    def test_lookup_res(self):
        """Register two resources, then filter /rd-lookup/res by ep and rt."""
        print("Resource lookup")
        client = HelperClient(self.server_address)
        path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683"
        ct = {'content_type': defines.Content_types["application/link-format"]}
        payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
                  '</sensors/light>;ct=41;rt="light-lux";if="sensor"'
        client.post(path, payload, None, None, **ct)
        client.stop()
        path = "rd-lookup/res?ep=node1&rt=temperature-c"
        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.content_type = 0
        req.payload = None
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.content_type = defines.Content_types["application/link-format"]
        expected.payload = '<coap://local-proxy-old.example.com:5683/sensors/temp>;ct=41;rt="temperature-c";' \
                           'if="sensor";anchor="coap://spurious.example.com:5683"'
        self.current_mid += 1
        self._test_check([(req, expected)])

    def test_lookup_ep(self):
        """Register an endpoint with an et, then find it via /rd-lookup/ep."""
        print("Endpoint lookup")
        client = HelperClient(self.server_address)
        path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683&et=oic.d.sensor"
        ct = {'content_type': defines.Content_types["application/link-format"]}
        payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
                  '</sensors/light>;ct=41;rt="light-lux";if="sensor"'
        response = client.post(path, payload, None, None, **ct)
        loc_path = response.location_path
        client.stop()
        path = "rd-lookup/ep?et=oic.d.sensor"
        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.content_type = 0
        req.payload = None
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.content_type = defines.Content_types["application/link-format"]
        expected.payload = '</' + loc_path + '>;con="coap://local-proxy-old.example.com:5683";ep="node1";' \
                           'et="oic.d.sensor";lt=500'
        self.current_mid += 1
        self._test_check([(req, expected)])
    def test_update(self):
        """POST to a registration's location path updates it (2.04)."""
        print("Update")
        client = HelperClient(self.server_address)
        # NOTE(review): "endpoint1<=500" looks like a mangled "&lt=500"
        # lifetime parameter -- confirm against the original query string.
        path = "rd?ep=endpoint1<=500&con=coap://local-proxy-old.example.com:5683&et=oic.d.sensor"
        ct = {'content_type': defines.Content_types["application/link-format"]}
        payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
                  '</sensors/light>;ct=41;rt="light-lux";if="sensor"'
        response = client.post(path, payload, None, None, **ct)
        loc_path = response.location_path
        client.stop()
        path = loc_path + "?con=coaps://new.example.com:5684"
        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.content_type = 0
        req.payload = None
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CHANGED.number
        expected.token = None
        expected.content_type = 0
        expected.payload = None
        self.current_mid += 1
        self._test_check([(req, expected)])

    def test_read_endpoint_links(self):
        """GET on a registration's location path returns its stored links."""
        print("Read endpoint links")
        client = HelperClient(self.server_address)
        path = "rd?ep=endpoint1<=500&con=coap://local-proxy-old.example.com:5683&et=oic.d.sensor"
        ct = {'content_type': defines.Content_types["application/link-format"]}
        payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
                  '</sensors/light>;ct=41;rt="light-lux";if="sensor"'
        response = client.post(path, payload, None, None, **ct)
        loc_path = response.location_path
        client.stop()
        path = loc_path
        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.content_type = 0
        req.payload = None
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.content_type = defines.Content_types["application/link-format"]
        expected.payload = '<coap://local-proxy-old.example.com:5683/sensors/temp>;ct=41;rt="temperature-c";' \
                           'if="sensor";anchor="coap://spurious.example.com:5683",' \
                           '<coap://local-proxy-old.example.com:5683/sensors/light>;ct=41;rt="light-lux";if="sensor"'
        self.current_mid += 1
        self._test_check([(req, expected)])
    def test_delete(self):
        """DELETE on a registration's location path removes it (2.02)."""
        print("Delete")
        client = HelperClient(self.server_address)
        path = "rd?ep=endpoint1<=500&con=coap://local-proxy-old.example.com:5683&et=oic.d.sensor"
        ct = {'content_type': defines.Content_types["application/link-format"]}
        payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
                  '</sensors/light>;ct=41;rt="light-lux";if="sensor"'
        response = client.post(path, payload, None, None, **ct)
        loc_path = response.location_path
        client.stop()
        path = loc_path
        req = Request()
        req.code = defines.Codes.DELETE.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.content_type = 0
        req.payload = None
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.DELETED.number
        expected.token = None
        expected.content_type = 0
        expected.payload = None
        self.current_mid += 1
        self._test_check([(req, expected)])

    def test_lookup_expired_res(self):
        """A registration with a 60s lifetime yields no resources after 61s."""
        print("Expired resource lookup")
        client = HelperClient(self.server_address)
        # NOTE(review): "5683<=60" looks like a mangled "&lt=60" -- confirm.
        path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683<=60"
        ct = {'content_type': defines.Content_types["application/link-format"]}
        payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
                  '</sensors/light>;ct=41;rt="light-lux";if="sensor"'
        client.post(path, payload, None, None, **ct)
        client.stop()
        path = "rd-lookup/res?ep=node1&rt=temperature-c"
        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.content_type = 0
        req.payload = None
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.content_type = defines.Content_types["application/link-format"]
        expected.payload = None
        self.current_mid += 1
        self._test_check([(req, expected)], 61)
    def test_lookup_expired_ep(self):
        """A registration with a 60s lifetime yields no endpoints after 61s."""
        print("Expired endpoint lookup")
        client = HelperClient(self.server_address)
        path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683<=60"
        ct = {'content_type': defines.Content_types["application/link-format"]}
        payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
                  '</sensors/light>;ct=41;rt="light-lux";if="sensor"'
        client.post(path, payload, None, None, **ct)
        client.stop()
        path = "rd-lookup/ep?ep=node1&rt=temperature-c"
        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.content_type = 0
        req.payload = None
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.content_type = defines.Content_types["application/link-format"]
        expected.payload = None
        self.current_mid += 1
        # After 61 seconds the resource will be expired
        self._test_check([(req, expected)], 61)

    def test_update_expired(self):
        """Updating an expired registration revives it for lookups."""
        print("Update expired registration resource")
        client = HelperClient(self.server_address)
        path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683<=60"
        ct = {'content_type': defines.Content_types["application/link-format"]}
        payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
                  '</sensors/light>;ct=41;rt="light-lux";if="sensor"'
        response = client.post(path, payload, None, None, **ct)
        # After 61 seconds the resource will be expired
        sleep(61)
        loc_path = response.location_path
        client.post(loc_path, None)
        client.stop()
        path = "rd-lookup/res?ep=node1&rt=temperature-c"
        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.content_type = 0
        req.payload = None
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.content_type = defines.Content_types["application/link-format"]
        expected.payload = '<coap://local-proxy-old.example.com:5683/sensors/temp>;ct=41;rt="temperature-c";' \
                           'if="sensor";anchor="coap://spurious.example.com:5683"'
        self.current_mid += 1
        self._test_check([(req, expected)])
    def test_wrong_ep(self):
        """Re-registering an existing endpoint name returns 5.03."""
        print("Endpoint name already exists")
        client = HelperClient(self.server_address)
        path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683<=60"
        ct = {'content_type': defines.Content_types["application/link-format"]}
        payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
                  '</sensors/light>;ct=41;rt="light-lux";if="sensor"'
        client.post(path, payload, None, None, **ct)
        client.stop()
        path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683"
        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.content_type = defines.Content_types["application/link-format"]
        req.payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";' \
                      'anchor="coap://spurious.example.com:5683",</sensors/light>;ct=41;rt="light-lux";if="sensor"'
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.SERVICE_UNAVAILABLE.number
        expected.token = None
        expected.content_type = 0
        expected.payload = None
        self.current_mid += 1
        self._test_check([(req, expected)])

    def test_no_ep(self):
        """Registration without the mandatory ep parameter returns 4.00."""
        print("Registration without endpoint name")
        path = "rd?con=coap://local-proxy-old.example.com:5683"
        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.content_type = defines.Content_types["application/link-format"]
        req.payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";' \
                      'anchor="coap://spurious.example.com:5683",</sensors/light>;ct=41;rt="light-lux";if="sensor"'
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.BAD_REQUEST.number
        expected.token = None
        expected.content_type = 0
        expected.payload = None
        self.current_mid += 1
        self._test_check([(req, expected)])
def test_update_res_not_found(self):
print("Resource not found on update")
path = "rd/4521?con=coaps://new.example.com:5684"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.NOT_FOUND.number
expected.token = None
expected.content_type = 0
expected.payload = None
self.current_mid += 1
self._test_check([(req, expected)])
def test_delete_res_not_found(self):
"""Deleting a registration resource that does not exist (rd/4521) must return 4.04 Not Found."""
print("Resource not found on delete")
path = "rd/4521"
req = Request()
req.code = defines.Codes.DELETE.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.NOT_FOUND.number
expected.token = None
expected.content_type = 0
expected.payload = None
self.current_mid += 1
self._test_check([(req, expected)])
def test_wildcard_res(self):
"""Resource lookup (rd-lookup/res) with a trailing-* wildcard on rt= must match both registered endpoints' resources."""
print("Use wildcard * to find resources")
# Register two endpoints via a helper client first.
client = HelperClient(self.server_address)
path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
client.post(path, payload, None, None, **ct)
path = "rd?ep=node2&con=coap://[2001:db8:3::123]:61616"
payload = '</temp>;rt="temperature";anchor="coap://[2001:db8:3::123]:61616"'
client.post(path, payload, None, None, **ct)
client.stop()
# Query: rt=temp* should match "temperature-c" and "temperature".
path = "rd-lookup/res?rt=temp*"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.content_type = defines.Content_types["application/link-format"]
expected.payload = '<coap://local-proxy-old.example.com:5683/sensors/temp>;ct=41;rt="temperature-c";' \
'if="sensor";anchor="coap://spurious.example.com:5683",' \
'<coap://[2001:db8:3::123]:61616/temp>;rt="temperature";' \
'anchor="coap://[2001:db8:3::123]:61616"'
self.current_mid += 1
self._test_check([(req, expected)])
def test_wildcard_ep(self):
"""Endpoint lookup (rd-lookup/ep) with a trailing-* wildcard on rt= must return both registrations with their location paths."""
print("Use wildcard * to find endpoints")
# Register two endpoints and remember the location paths the RD assigns.
client = HelperClient(self.server_address)
path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
response = client.post(path, payload, None, None, **ct)
loc_path1 = response.location_path
path = "rd?ep=node2&con=coap://[2001:db8:3::123]:61616"
payload = '</temp>;rt="temperature";anchor="coap://[2001:db8:3::123]:61616"'
response = client.post(path, payload, None, None, **ct)
loc_path2 = response.location_path
client.stop()
path = "rd-lookup/ep?rt=temp*"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.content_type = defines.Content_types["application/link-format"]
# lt=500 is the lifetime echoed by the RD; loc_path values come from registration.
expected.payload = '</' + loc_path1 + '>;con="coap://local-proxy-old.example.com:5683";ep="node1";lt=500,' \
'</' + loc_path2 + '>;con="coap://[2001:db8:3::123]:61616";' \
'ep="node2";lt=500'
self.current_mid += 1
self._test_check([(req, expected)])
# Script entry point: run the resource-directory test suite with unittest.
if __name__ == '__main__':
unittest.main()
|
email.py | from threading import Thread
from flask import render_template
from flask_mail import Message
from app import mail
def send_email(subject, sender, recipients, text_body, html_body):
"""Send an email synchronously via Flask-Mail.

NOTE(review): this definition is shadowed by the asynchronous
``send_email`` defined later in this module, so only the threaded
version is actually bound to the name after import.
"""
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
mail.send(msg)
def send_password_reset_email(user):
    """Email *user* a password-reset message containing a signed token.

    :param user: user model instance providing ``get_reset_password_token()``
        and an ``email`` attribute.
    """
    # BUG FIX: `app` was referenced as a global but this module only imports
    # `mail` from the app package, so the original raised NameError at call
    # time. Import it locally to also avoid a circular import at load time.
    from app import app

    token = user.get_reset_password_token()
    send_email('[Microblog] Reset Your Password',
               sender=app.config['ADMINS'][0],
               recipients=[user.email],
               text_body=render_template('email/reset_password.txt',
                                         user=user, token=token),
               html_body=render_template('email/reset_password.html',
                                         user=user, token=token))
def send_async_email(app, msg):
"""Send *msg* from a worker thread, inside the given Flask app's context."""
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
    """Build a Flask-Mail message and dispatch it on a background thread so
    the caller is not blocked by the SMTP round trip."""
    # BUG FIX: `app` was referenced as a global but never imported (this
    # module only imports `mail` from the app package), so the original
    # raised NameError at call time. Import locally to avoid circular import.
    from app import app

    msg = Message(subject, sender=sender, recipients=recipients)
    msg.body = text_body
    msg.html = html_body
    Thread(target=send_async_email, args=(app, msg)).start()
|
base_build.py | from django.core.management.base import BaseCommand, CommandError
from django.core.management import call_command
from django.conf import settings
from django.db import connection
import datetime
import logging
from multiprocessing import Queue, Process, Value, Lock
class Command(BaseCommand):
help = 'Basic functions for build scrips'
logger = logging.getLogger(__name__)
def add_arguments(self, parser):
"""Register the common CLI flags shared by build commands: -p/--proc (worker count) and -t/--test (subset mode)."""
parser.add_argument('-p', '--proc',
type=int,
action='store',
dest='proc',
default=1,
help='Number of processes to run')
parser.add_argument('-t', '--test',
action='store_true',
dest='test',
default=False,
help='Include only a subset of data for testing')
def prepare_input(self, proc, items, iteration=1):
"""Split *items* into contiguous chunks and fan them out to `proc` worker processes running ``self.main_func``.

NOTE(review): ``main_func`` is not defined in this class — presumably
subclasses must provide it; verify before use. Returns False when
*items* is empty, otherwise returns None after all workers join.
"""
q = Queue()
procs = list()
num_items = len(items)
num = Value('i', 0)
lock = Lock()
if not num_items:
return False
# make sure not to use more jobs than proteins (chunk size will be 0, which is not good)
if proc > num_items:
proc = num_items
chunk_size = int(num_items / proc)
# Close the inherited DB connection so each forked worker opens its own.
connection.close()
for i in range(0, proc):
first = chunk_size * i
if i == proc - 1:
# `last = False` is the sentinel meaning "to the end of the list".
last = False
else:
last = chunk_size * (i + 1)
p = Process(target=self.main_func, args=([(first, last), iteration,num,lock]))
procs.append(p)
p.start()
for p in procs:
p.join() |
trainer.py | # Copyright (c) 2020 Sarthak Mittal
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import time
import glob
import random
import pdf2image
import simplejson
import numpy as np
from tqdm import tqdm
from tkinter import *
from tkinter import filedialog, messagebox
from tkinter.ttk import Progressbar
from PIL import Image, ImageTk
from .. import FIELDS, FIELD_TYPES
from ..common import util
from ..acp.acp import AttendCopyParse
from ..acp.data import InvoiceData
from .custom_widgets import HoverButton, Logger, StoppableThread
class Trainer(Frame):
"""Tkinter frame that prepares invoice PDF data and trains AttendCopyParse field-extraction models."""
def __init__(self, master=None, **kw):
"""Initialise default training arguments and build the UI."""
Frame.__init__(self, master, **kw)
self.background = '#303030'
self.border_color = '#404040'
# Defaults; refreshed from the widgets by _get_inputs().
self.args = {
"data_dir": "",
"prepared_data": "processed_data",
"field": list(FIELDS.keys())[0],
"batch_size": 4
}
self.textboxes = {}
self.thread = None       # background StoppableThread running _train
self.running = False     # True while a training run is active
self._init_ui()
def _init_ui(self):
"""Lay out the window: logo row, parameter widgets, progress bar, and the start/stop + log area."""
# Size the window to screen height with an A4-ish aspect ratio, centred.
ws = self.master.winfo_screenwidth()
hs = self.master.winfo_screenheight()
h = hs - 100
w = int(h / 1.414) + 100
x = (ws / 2) - (w / 2)
y = (hs / 2) - (h / 2)
self.master.geometry('%dx%d+%d+%d' % (w, h, x, y))
self.master.maxsize(w, h)
self.master.minsize(w, h)
self.master.title("InvoiceNet - Trainer")
self.pack(fill=BOTH, expand=True)
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=0)
self.rowconfigure(1, weight=1)
self.rowconfigure(2, weight=0)
self.rowconfigure(3, weight=1)
self.configure(bg=self.background, bd=0)
logo_frame = Frame(self, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=1)
param_frame = Frame(self, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=1)
progress_frame = Frame(self, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=0)
main_frame = Frame(self, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=1)
logo_frame.grid(row=0, column=0, sticky='news')
param_frame.grid(row=1, column=0, sticky='news')
progress_frame.grid(row=2, column=0, sticky='news', padx=50, pady=(0, 20))
main_frame.grid(row=3, column=0, sticky='news')
# Logo Frame
logo_frame.columnconfigure(0, weight=1)
logo_frame.columnconfigure(1, weight=0)
logo_frame.columnconfigure(2, weight=0)
logo_frame.columnconfigure(3, weight=1)
logo_frame.rowconfigure(0, weight=1)
# Keep a reference on self so the PhotoImage is not garbage-collected.
self.logo_img = ImageTk.PhotoImage(Image.open(r'widgets/logo.png'))
Label(logo_frame, bg=self.background, image=self.logo_img).grid(row=0, column=1, sticky='news', pady=10)
Label(logo_frame, text="InvoiceNet", bg=self.background,
fg="white", font=("Arial", 24, "bold")).grid(row=0, column=2, sticky='news', padx=20, pady=10)
# Param Frame
param_frame.columnconfigure(0, weight=1)
param_frame.columnconfigure(1, weight=0)
param_frame.columnconfigure(2, weight=0)
param_frame.columnconfigure(3, weight=1)
param_frame.rowconfigure(0, weight=1)
param_frame.rowconfigure(1, weight=0)
param_frame.rowconfigure(2, weight=0)
param_frame.rowconfigure(3, weight=0)
param_frame.rowconfigure(4, weight=1)
data_param = Frame(param_frame, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=0)
out_param = Frame(param_frame, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=0)
field_param = Frame(param_frame, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=0)
batch_param = Frame(param_frame, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=0)
data_param.grid(row=1, column=1, pady=(0, 20), padx=20)
out_param.grid(row=2, column=1, pady=20, padx=20)
field_param.grid(row=1, column=2, pady=(0, 20), padx=20)
batch_param.grid(row=2, column=2, pady=20, padx=20)
# Data-directory picker + textbox.
df = Frame(data_param, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=0)
df.pack(side=TOP, fill=BOTH)
Label(df, text="Data Folder:", bg=self.background,
fg="white", font=("Arial", 8, "bold"), anchor='w').pack(side=LEFT, fill=BOTH)
HoverButton(df, image_path=r'widgets/open_dir_small.png', command=lambda: self._open_dir("data_dir"),
width=18, height=18, bg=self.background, bd=0,
highlightthickness=0, activebackground='#558de8').pack(side=RIGHT)
self.textboxes["data_dir"] = Text(data_param, height=1, width=20)
self.textboxes["data_dir"].insert('1.0', self.args["data_dir"])
self.textboxes["data_dir"].pack(side=BOTTOM)
# Processed-data directory picker + textbox.
of = Frame(out_param, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=0)
of.pack(side=TOP, fill=BOTH)
Label(of, text="Processed Data Folder:", bg=self.background,
anchor='w', fg="white", font=("Arial", 8, "bold")).pack(side=LEFT, fill=BOTH)
HoverButton(of, image_path=r'widgets/open_dir_small.png', command=lambda: self._open_dir("prepared_data"),
width=18, height=18, bg=self.background, bd=0,
highlightthickness=0, activebackground='#558de8').pack(side=RIGHT)
self.textboxes["prepared_data"] = Text(out_param, height=1, width=20)
self.textboxes["prepared_data"].insert('1.0', self.args["prepared_data"])
self.textboxes["prepared_data"].pack(side=BOTTOM)
# Field selector (one model per invoice field).
Label(field_param, text="Field:", bg=self.background,
anchor='w', fg="white", font=("Arial", 8, "bold")).pack(side=TOP, fill=BOTH)
self.field_text = StringVar(field_param)
self.field_text.set(list(FIELDS.keys())[0])
keys = list(FIELDS.keys())
field_list = OptionMenu(field_param, self.field_text, *keys)
field_list.configure(highlightthickness=0, width=20, bg='#ffffff')
field_list.pack(side=BOTTOM)
for key in keys:
field_list['menu'].entryconfigure(key, state="normal")
# Batch-size selector (powers of two, 1..128).
Label(batch_param, text="Batch Size:", bg=self.background,
anchor='w', fg="white", font=("Arial", 8, "bold")).pack(side=TOP, fill=BOTH)
self.batch_text = StringVar(batch_param)
self.batch_text.set("4")
batch_list = OptionMenu(batch_param, self.batch_text, *[str(2 ** i) for i in range(8)])
batch_list.configure(highlightthickness=0, width=20, bg='#ffffff')
batch_list.pack(side=BOTTOM)
HoverButton(param_frame, image_path=r'widgets/prepare.png', command=self._prepare_data,
text='Prepare Data', compound='center', font=("Arial", 10, "bold"), bg=self.background,
bd=0, highlightthickness=0, activebackground=self.background).grid(row=3, column=1, columnspan=2,
padx=20, pady=(20, 0),
sticky='news')
# Progress Frame
self.progress_label = Label(progress_frame, text="Preparing data:", bg=self.background,
anchor='w', fg="white", font=("Arial", 8, "bold"), bd=0, highlightthickness=0)
self.progress_label.pack(side=TOP, expand=True, fill=X, pady=(10, 5))
self.progressbar = Progressbar(progress_frame, orient=HORIZONTAL, length=100, mode='determinate')
self.progressbar.pack(side=BOTTOM, expand=True, fill=X)
# Main Frame
main_frame.columnconfigure(0, weight=1)
main_frame.rowconfigure(0, weight=1)
main_frame.rowconfigure(1, weight=1)
button_frame = Frame(main_frame, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=0)
button_frame.grid(row=0, column=0, sticky='news')
button_frame.rowconfigure(0, weight=1)
button_frame.columnconfigure(0, weight=1)
button_frame.columnconfigure(1, weight=0)
button_frame.columnconfigure(2, weight=1)
# Start/Stop occupy the same grid cell; _start/_stop swap them.
self.start_button = HoverButton(button_frame, image_path=r'widgets/begin.png', command=self._start,
text='Start', compound='center', font=("Arial", 10, "bold"), bg=self.background,
bd=0, highlightthickness=0, activebackground=self.background)
self.stop_button = HoverButton(button_frame, image_path=r'widgets/stop.png', command=self._stop,
text='Stop', compound='center', font=("Arial", 10, "bold"), bg=self.background,
bd=0, highlightthickness=0, activebackground=self.background)
self.start_button.grid(row=0, column=1)
self.stop_button.grid(row=0, column=1)
self.stop_button.grid_forget()
self.logger = Logger(main_frame, height=18, bg=self.background, bd=0, relief=SUNKEN)
self.logger.grid(row=1, column=0, sticky='news')
def _train(self):
    """Training loop executed on the background StoppableThread.

    Builds train/val datasets from the prepared-data folder, optionally
    restores an existing checkpoint, then runs until the thread is stopped,
    validating every ``print_interval`` steps, checkpointing on improvement,
    and early-stopping after 500 validation checks without improvement.

    :raises ValueError: if the training loss becomes NaN/inf.
    """
    train_data = InvoiceData.create_dataset(
        field=self.args["field"],
        data_dir=os.path.join(self.args["prepared_data"], 'train/'),
        batch_size=self.args["batch_size"]
    )
    val_data = InvoiceData.create_dataset(
        field=self.args["field"],
        data_dir=os.path.join(self.args["prepared_data"], 'val/'),
        batch_size=self.args["batch_size"]
    )

    # Offer to restore only when a checkpoint directory for this field exists.
    restore = False
    if os.path.exists(os.path.join('./models/invoicenet/', self.args["field"])):
        restore = bool(messagebox.askyesno(
            title="Restore",
            message="A checkpoint was found! Do you want to restore checkpoint for training?"))
    model = AttendCopyParse(field=self.args["field"], restore=restore)

    print_interval = 20
    early_stop_steps = 0
    best = float("inf")  # best validation loss observed so far
    train_iter = iter(train_data)
    val_iter = iter(val_data)
    self.logger.log("Initializing training!")
    start = time.time()
    step = 0
    while True:
        train_loss = model.train_step(next(train_iter))
        if not np.isfinite(train_loss):
            raise ValueError("NaN loss")
        if step % print_interval == 0:
            took = time.time() - start
            val_loss = model.val_step(next(val_iter))
            self.logger.log("[step: %d | %.2f steps/s]: train loss: %.4f val loss: %.4f" % (
                step, (step + 1) / took, train_loss, val_loss))
            if not np.isfinite(val_loss):
                self.logger.log("ERROR: NaN loss")
                self.thread.stop()
            if val_loss < best:
                # BUG FIX: `best` was never updated in the original, so every
                # validation pass compared against inf — the model was
                # re-saved on every interval and the early-stopping counter
                # below could never advance.
                best = val_loss
                early_stop_steps = 0
                model.save("best")
            else:
                early_stop_steps += 1
                if early_stop_steps == 500:
                    self.logger.log("Validation loss has not improved for 500 steps")
                    self.thread.stop()
        step += 1
        if self.thread.stopped():
            self.logger.log("Training terminated!")
            break

    # Restore the UI to its idle state.
    self.running = False
    self.stop_button.grid_forget()
    self.start_button.grid(row=0, column=1)
def _get_inputs(self):
    """Refresh ``self.args`` from the current widget values.

    Directory entries are normalised to end with a trailing '/'; an empty
    data-dir entry is left untouched so callers can detect it.
    """
    args = self.args
    args["field"] = self.field_text.get()
    args["batch_size"] = int(self.batch_text.get())
    args["data_dir"] = self.textboxes["data_dir"].get("1.0", 'end-1c')
    args["prepared_data"] = self.textboxes["prepared_data"].get("1.0", 'end-1c')
    if not args["prepared_data"].endswith('/'):
        args["prepared_data"] = args["prepared_data"] + '/'
    if args["data_dir"] == '':
        return
    if not args["data_dir"].endswith('/'):
        args["data_dir"] = args["data_dir"] + '/'
def _start(self):
"""Validate that prepared data exists, then launch _train on a background StoppableThread."""
self._get_inputs()
if not os.path.exists(self.args["prepared_data"]):
messagebox.showerror("Error", "Prepared data folder does not exist!")
return
files = glob.glob(self.args["prepared_data"] + "**/*.json", recursive=True)
if not files:
messagebox.showerror("Error",
"Could not find processed data in \"{}\". Did you prepare training data?".format(
self.args["prepared_data"]))
return
if not self.running:
self.running = True
self.thread = StoppableThread(target=self._train)
self.thread.daemon = True
self.thread.start()
# Swap the Start button for the Stop button while training runs.
self.start_button.grid_forget()
self.stop_button.grid(row=0, column=1)
def _stop(self):
    """Request termination of the background training thread.

    Does nothing when no training run is active.
    """
    if not self.running:
        return
    self.thread.stop()
    self.running = False
    self.logger.log("Stopping training...")
def _open_dir(self, key):
"""Prompt for a directory and mirror the choice into self.args[key] and the matching textbox."""
dir_name = filedialog.askdirectory(initialdir='.', title="Select Directory Containing Invoices")
if not dir_name:
return
self.args[key] = dir_name
self.textboxes[key].delete('1.0', END)
self.textboxes[key].insert('1.0', self.args[key])
def _prepare_data(self):
"""Convert each PDF in the data folder into a PNG + n-gram/label JSON pair, split 80/20 into train/val under the prepared-data folder, updating the progress bar as it goes.

Expects a sibling ``<name>.json`` label file next to each ``<name>.pdf``.
"""
self._get_inputs()
if self.args["data_dir"] == '':
messagebox.showerror("Error", "Data folder does not exist!")
return
if not os.path.exists(self.args["data_dir"]):
messagebox.showerror("Error", "Data folder does not exist!")
return
self.progressbar["value"] = 0
self.progress_label.configure(text="Preparing Data:")
os.makedirs(os.path.join(self.args["prepared_data"], 'train'), exist_ok=True)
os.makedirs(os.path.join(self.args["prepared_data"], 'val'), exist_ok=True)
filenames = [os.path.abspath(f) for f in glob.glob(self.args["data_dir"] + "**/*.pdf", recursive=True)]
# Shuffle, then hold out the first 20% for validation.
random.shuffle(filenames)
idx = int(len(filenames) * 0.2)
train_files = filenames[idx:]
val_files = filenames[:idx]
self.logger.log("Total: {}".format(len(filenames)))
self.logger.log("Training: {}".format(len(train_files)))
self.logger.log("Validation: {}".format(len(val_files)))
total_samples = len(filenames)
sample_idx = 0
# NOTE(review): the loop variable below shadows the outer `filenames`.
for phase, filenames in [('train', train_files), ('val', val_files)]:
self.logger.log("Preparing {} data...".format(phase))
for filename in tqdm(filenames):
# try:
# Rasterise only the first page of the PDF.
page = pdf2image.convert_from_path(filename)[0]
page.save(os.path.join(self.args["prepared_data"], phase, os.path.basename(filename)[:-3] + 'png'))
height = page.size[1]
width = page.size[0]
ngrams = util.create_ngrams(page)
for ngram in ngrams:
if "amount" in ngram["parses"]:
ngram["parses"]["amount"] = util.normalize(ngram["parses"]["amount"], key="amount")
if "date" in ngram["parses"]:
ngram["parses"]["date"] = util.normalize(ngram["parses"]["date"], key="date")
# Ground-truth labels live next to the PDF as <name>.json.
with open(filename[:-3] + 'json', 'r') as fp:
labels = simplejson.loads(fp.read())
fields = {}
for field in FIELDS:
if field in labels:
if FIELDS[field] == FIELD_TYPES["amount"]:
fields[field] = util.normalize(labels[field], key="amount")
elif FIELDS[field] == FIELD_TYPES["date"]:
fields[field] = util.normalize(labels[field], key="date")
else:
fields[field] = labels[field]
else:
fields[field] = ''
data = {
"fields": fields,
"nGrams": ngrams,
"height": height,
"width": width,
"filename": os.path.abspath(
os.path.join(self.args["prepared_data"], phase, os.path.basename(filename)[:-3] + 'png'))
}
with open(os.path.join(self.args["prepared_data"], phase, os.path.basename(filename)[:-3] + 'json'),
'w') as fp:
fp.write(simplejson.dumps(data, indent=2))
# except Exception as exp:
# self.logger.log("Skipping {} : {}".format(filename, exp))
sample_idx += 1
self.progress_label.configure(text="Preparing data [{}/{}]:".format(sample_idx, total_samples))
self.progressbar["value"] = (sample_idx / total_samples) * 100
self.progressbar.update()
self.progress_label.configure(text="Completed!")
self.progressbar["value"] = 100
self.progressbar.update()
self.logger.log("Prepared data stored in '{}'".format(self.args["prepared_data"]))
|
__init__.py | import CCAPython.gov.cca
import logging
import time
import collections
# Configure Logging
logger = logging.getLogger('root')
def mape_k_loop(platform_component, reconfiguration_port):
    """Run the MAPE-K (Monitor-Analyze-Plan-Execute) loop for one computation.

    Polls the computation's progress through *reconfiguration_port* and, when
    the predicted completion time deviates too far from the contracted
    execution time, asks the contract's reconfiguration function for a new
    resource set and applies it.

    :param platform_component: component exposing ``qos_contract`` (dict or None).
    :param reconfiguration_port: port exposing ``getComputationProgress()``
        and ``updateResources(new_resources)``.
    """
    logger = logging.getLogger('root')

    # Extract contract information; without a contract there is nothing to do.
    if platform_component.qos_contract is None:
        logger.debug("No QoS Contract!!!")
        return
    contract = platform_component.qos_contract
    monitor_interval = contract["monitor_interval"]
    sample_interval = contract["sample_interval"]
    reconfiguration_interval = contract["reconfiguration_interval"]
    execution_time = contract["execution_time"]
    deviation_factor = contract["deviation_factor"]
    reconfiguration_function = contract["reconfiguration_function"]

    # progress_log maps read-timestamp -> progress in [0, 1].
    progress = 0.0
    progress_log = {}
    first_time_stamp = time.time()
    last_reconfiguration = first_time_stamp
    first_sample = progress
    progress_log[first_time_stamp] = progress
    last_sample = first_sample

    # Loop until the computation reports completion.
    while progress < 1.0:
        logger.debug("Monitoring Computation Progress.")
        # Monitor
        progress = reconfiguration_port.getComputationProgress()
        current_time_stamp = time.time()
        current_sample = progress
        progress_log[current_time_stamp] = current_sample
        logger.debug("Progress: " + "{:.2f}".format(progress))
        # Analyze: only when progress was made since the previous iteration.
        if current_sample > last_sample:
            # BUG FIX: the original indexed collections.OrderedDict(...).items(),
            # but dict-items views are not subscriptable in Python 3; build a
            # sorted list of (timestamp, progress) pairs instead.
            ordered_logs = sorted(progress_log.items())
            if len(ordered_logs) < sample_interval:
                # Not enough samples in the window: fall back to the first one.
                oldest_time_stamp, oldest_sample = first_time_stamp, first_sample
            else:
                oldest_time_stamp, oldest_sample = ordered_logs[-sample_interval]
            # average_step_interval: seconds needed to advance progress by 0.1.
            # Guard the denominator against a zero progress delta.
            progress_delta = (current_sample - oldest_sample) * 10
            if progress_delta > 0:
                average_step_interval = (current_time_stamp - oldest_time_stamp) / progress_delta
                logger.debug("Average Step Interval:" + "{:.2f}".format(average_step_interval))
                # Plan
                predicted_remaining_time = 10 * (1.0 - current_sample) * average_step_interval
                logger.debug("Predicted Remaining Time: " + "{:.2f}".format(predicted_remaining_time))
                reconfiguration_action = (False, 0)
                elapsed_time = current_time_stamp - first_time_stamp
                # The case for increasing the resources.
                if (elapsed_time + predicted_remaining_time) > deviation_factor * execution_time:
                    new_resources = reconfiguration_function(platform_component, progress_log)
                    reconfiguration_action = (True, new_resources)
                    # BUG FIX: the original applied a float format spec to
                    # str(new_resources), which raises ValueError.
                    logger.debug("Computation Must Be Reconfigured. New Resources: " + str(new_resources))
                # BUG FIX: the original keep-resources branch referenced the
                # undefined names `predicted` and `executionTime`; keeping
                # resources requires no action, so the branch is dropped.
                # Execute (rate-limited by reconfiguration_interval).
                if reconfiguration_action[0] and (current_time_stamp - last_reconfiguration > reconfiguration_interval):
                    reconfiguration_port.updateResources(reconfiguration_action[1])
                    last_reconfiguration = current_time_stamp
            # Update samples.
            last_sample = current_sample
        else:
            logger.debug("Progress Unchanged.")
        # Sleep until the next monitoring round.
        time.sleep(monitor_interval)
    elapsed_time = time.time() - first_time_stamp
    logger.debug("Elapsed Time: " + "{:.2f}".format(elapsed_time))
    return
class AllocationPort(CCAPython.gov.cca.Port):
    """Provides port through which the framework obtains the computation's
    resource description; requesting it also starts the MAPE-K monitor."""

    def __init__(self, portType, component):
        super(AllocationPort, self).__init__(portType)
        self.component = component

    def getResources(self):
        """Return the component's resource description and spawn the monitor.

        This should return a resource description for the computation,
        containing: number of nodes, cores per node, memory per node, and
        hostname (for building the host file).
        """
        # BUG FIX: `Process` was used without being imported anywhere in this
        # module; import it locally so the monitor can actually be spawned.
        from multiprocessing import Process
        logger.debug("Setting Resources: " + str(self.component.resources))
        reconfiguration_port = self.component.services.getPort("ComputationReconfigurationPort")
        mape_process = Process(target=mape_k_loop, args=(self.component, reconfiguration_port))
        # Daemonize so the monitor dies with the hosting process.
        mape_process.daemon = True
        mape_process.start()
        logger.debug("Monitoring Started.")
        return self.component.resources
class QoSConfigurationPort(CCAPython.gov.cca.Port):
"""Provides port through which the computational system hands the platform its initial resources and contextual (QoS) contract."""
def __init__(self, portType, component):
super(QoSConfigurationPort, self).__init__(portType)
self.component = component
return
def setQoSContract(self, resources = None, qos_contract = None):
"""
The contextual contract must be supplied by the computational system to the inner platform.
There are two sets of information:
- The initial resource description. The Platform will use this description to instantiate
a self.resources attribute with the initial resources.
- The QoS requirements dict. For malleable scenario, I'm considering:
- execution_time: execution time estimative given the initial resources.
- execution_cost: execution cost restriction.
- deviation_factor: deviantion factor for the above restrictions.
- monitor_interval: interval between two monitoring loops.
- sample_interval: how far back in the progress log should the analysis consider.
- reconfiguration_interval: mininum interval between two reconfigurations.
- reconfiguration_function: defined by the application provider or component developer, that,
given the contract, the cluster state and the progress log, return a new set of resources.
The reconfiguration function should take as input:
- Cluster Statistics (see base.platform.infrastructure.Cluster for formatting)
- Computation Progress Log
- Current Resources
The output should be:
- A new resource set to be allocated and sent to the Computation. (node_count, node_configuration)
"""
self.component.resources = resources
self.component.qos_contract = qos_contract
return
class MalleablePlatformComponent(CCAPython.gov.cca.Component):
"""CCA component representing a malleable platform: exposes allocation and QoS-configuration ports and uses the computation's reconfiguration port."""
def __init__(self):
# By default, there is no contract defined at component creation.
# It must be set by the QoSConfigurationPort
self.qos_contract = None
# By default, there is no resources set defined at component creation.
# It must be set by the QoSConfigurationPort
# The type of this object must be base.platform.infrastructure.Cluster
self.resources = None
self.allocation_port = AllocationPort("elastichpc.base.platform.malleable.AllocationPort", self)
self.qosConfiguration_port = QoSConfigurationPort("elastichpc.base.platform.malleable.QoSConfigurationPort", self)
return
def setServices(self, services):
"""Register provides/uses ports with the CCA services object."""
self.services = services
services.addProvidesPort(self.allocation_port, "AllocationPort", "elastichpc.base.platform.malleable.AllocationPort", None)
services.addProvidesPort(self.qosConfiguration_port, "QoSConfigurationPort", "elastichpc.base.platform.malleable.QoSConfigurationPort", None)
services.registerUsesPort("ComputationReconfigurationPort","elastichpc.base.computation.malleable.ReconfigurationPort", None)
return
|
start - Copy.py | import threading
import time
import cv2,os,sys,socket,struct,pickle
import numpy as np
from tkinter import *
#globals
# tempf: latest rotated frame shared between the mul/sub threads.
tempf=np.zeros((0,0))
start_time=time.time()
# endme: set True by the main loop to ask all worker threads to exit.
endme=False
#tkinter stufffffff
angle=0.0
def assign_angle(val):
"""Slider callback: store the selected angle in the module global sent to the server."""
global angle
angle=val
# print(val)
root=Tk()
root.geometry('600x400')
slider1= Scale(root, from_=-90, to=90, length=400, resolution=0.1, orient=HORIZONTAL, command=assign_angle)
slider1.pack()
l=Label(root, text='Angle = 0.00')
l.pack()
# socket stufffffffff
client_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
host_ip = '169.254.250.37' # paste your server ip address here
#host_ip = '192.168.43.34'
# host_name = socket.gethostname()
# host_ip = socket.gethostbyname(host_name)
port = 9999
client_socket.connect((host_ip,port)) # a tuple
# Frames arrive length-prefixed with an 8-byte unsigned long long ("Q").
payload_size = struct.calcsize("Q")
print("payload Size :",payload_size)
from tensorflow_od_saved_model import *
# camera stuffff
# Warm-up: run one local capture through the detector to fail fast if the
# model or camera is broken before starting the streaming threads.
camera = cv2.VideoCapture(0)
_,image_np=camera.read()
input_tensor = np.expand_dims(image_np, 0)
detections= detect_fn(input_tensor)
camera.release()
print("captured first image\n Everything seems fine",detections)
# All thread functions
def add():
"""Receiver thread: reads one length-prefixed pickled frame from the server per s1 permit, then sends back the current slider angle and releases s2."""
global frame,s1,s2,l,root,angle,endme
data = b""
while not endme:
print(threading.currentThread(),"waiting for s1")
s1.acquire()
print(threading.currentThread(),"got s1")
start_t=time.time()
# NOTE(review): the hard-coded 8 presumably stands for payload_size
# (struct.calcsize("Q") == 8 on this platform) — keep them in sync.
while len(data) < 8:
packet = client_socket.recv(1024) # 4K
if not packet: break
data+=packet
packed_msg_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack("Q",packed_msg_size)[0]
while len(data) < msg_size:
data += client_socket.recv(4*1024)
end_t=time.time()
print("stream delay",end_t-start_t)
frame_data = data[:msg_size]
data = data[msg_size:]
frame = pickle.loads(frame_data)
# cv2.imshow("RECEIVING VIDEO",frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# time.sleep(0.1)
#l.config(text='Angle = '+str(angle)+'°')
#root.update_idletasks()
#root.update()
client_socket.sendall(pickle.dumps(angle))
s2.release()
# time.sleep(10)
else:
# Loop exited normally (endme set): release s2 so mul() can also exit.
s2.release()
def mul(bt):
"""Inference thread: per s2 permit, rotates the received frame 180 degrees, hands it back to the pipeline (s1), runs the detector, and releases s4 for the display thread."""
print("inside mul")
global frame,detectionss,tempf,s1,s2,s3,endme
while not endme:
# bt.join()
print(threading.currentThread(),"waiting for s2")
s2.acquire()
print(threading.currentThread(),"got s2")
print(threading.currentThread(),"waiting for s3")
s3.acquire()
print(threading.currentThread(),"got s3")
frame=cv2.rotate(frame,cv2.ROTATE_180)
tempf=frame
s1.release()
input_tensor = np.expand_dims(tempf, 0)
detectionss = detect_fn(input_tensor)
# print(threading.currentThread(),"read num as",num)
# temp*=10
# time.sleep(1.8)
# print(threading.currentThread()," num= ",temp)
# num2=temp
s4.release()
# time.sleep(10)
else:
# Loop exited normally (endme set): release downstream/upstream permits.
s4.release()
s1.release()
def sub(bt):
"""Display thread: per s4 permit, draws the latest detections on the frame, shows it, and maintains a once-per-second FPS printout; ESC closes the window."""
print("inside sub")
fps=0
global detectionss,s3,s4,tempf,start_time,endme
# bt.join()
while not endme:
print(threading.currentThread(),"waiting for s4")
start_t=time.time()
s4.acquire()
print(threading.currentThread(),"got s4")
# Snapshot the shared results before releasing s3 back to mul().
detections=detectionss
tempframe=tempf
s3.release()
label_id_offset = 1
image_np_with_detections = tempframe.copy()
viz_utils.visualize_boxes_and_labels_on_image_array(
image_np_with_detections,
detections['detection_boxes'][0].numpy(),
detections['detection_classes'][0].numpy().astype(np.int32),
detections['detection_scores'][0].numpy(),
category_index,
use_normalized_coordinates=True,
max_boxes_to_draw=5,
min_score_thresh=.60,
agnostic_mode=False)
cv2.imshow("detection",image_np_with_detections )
end_time=time.time()
print("time for 1 frame",end_time-start_t)
if end_time-start_time>=1 :
print("No of frames",fps)
fps=0
start_time=time.time()
else:
fps+=1
if cv2.waitKey(1) & 0xff == 27: break
else:
# Loop exited normally (endme set): let mul() proceed so it can exit too.
s3.release()
num2=num=0
s1=threading.Semaphore(value=1)
s2=threading.Semaphore(value=0)
s3=threading.Semaphore(value=1)
s4=threading.Semaphore(value=0)
t1=threading.Thread(target=add)
t2=threading.Thread(target=mul,args=[t1])
t3=threading.Thread(target=sub,args=[t2])
t1.start()
t2.start()
t3.start()
while True:
time.sleep(1)
key=input()
if key=='q' or key == 'Q':
endme=True
break
t1.join()
t2.join()
t3.join()
# threading.excepthook(args=[])
end_t=time.time()
print(end_t-start_t)
|
server.py | # NAME : MOHAMMED FURKHAN SHAIKH
# Took base code from below link and started working on it
###https://medium.com/swlh/lets-write-a-chat-app-in-python-f6783a9ac170
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
import tkinter
import os
import errno
clients = {}  # maps connected client socket -> username
addresses = {}  # maps connected client socket -> (ip, port) tuple
usernames = []  # current list of connected usernames (not updated below)
HOST = ''  # empty string binds to all local interfaces
PORT = 33000  # arbitrary port number between 1024 and 65000
BUFSIZ = 1024  # TCP receive buffer size in bytes
ADDR = (HOST, PORT)  # address tuple used by socket bind method
SERVER = socket(AF_INET, SOCK_STREAM)
"""AF_INET is the address family of the socket
here it is Internet family (IPv4).
SOCK_STREAM is the stream socket ie with continuous flow (TCP)."""
SERVER.bind(ADDR)  # bind the listening socket to ADDR at import time
FILES_ON_SERVER = []  # names of files currently stored under server_directory/
files_to_send={}  # maps client socket -> list of filenames queued for download
# method to accept incoming connections. Always on
def accept_incoming_connections():
    """Sets up handling for incoming clients.

    Runs forever: accepts a connection, negotiates a unique username with
    the client ("ERROR1" is sent back on a duplicate or on a "{quit}"
    message), then spawns a handle_client thread for the connection.
    """
    while True:  # keep running until the process is closed manually
        try:
            client, client_address = SERVER.accept()
            print("%s:%s has connected." % client_address)
            flag = True  # set False when the client gives up during handshake
            while True:  # retry until a non-conflicting username arrives
                username = client.recv(BUFSIZ).decode("utf8")
                if username == "{quit}":  # client aborted before registering
                    client.send(bytes("ERROR1", "utf8"))
                    client.close()
                    flag = False
                    break
                elif username in clients.values():  # name already taken
                    client.send(bytes("ERROR1", "utf8"))
                    continue
                else:
                    break
            if flag == False:
                continue
        except:
            # NOTE(review): bare except silently retries on ANY error
            # (including socket teardown); consider narrowing to OSError.
            continue
        msg_list.insert(tkinter.END, username+" has joined the server!")  # GUI log
        clients[client] = username  # register username
        addresses[client] = client_address  # register client ip and port
        Thread(target=handle_client, args=(client,)).start()  # one thread per client
# method separate for each client to send and recieve messages/files
def handle_client(client):  # Takes client socket as argument.
    """Handles a single client connection.

    Sends a welcome plus the connected-client roster, then dispatches on
    control messages: "{file}" (upload), "{chk_files}" (list sync),
    "{DOWN}" (download), "{quit}" (disconnect).
    """
    name = clients[client]  # username registered by accept_incoming_connections
    welcome = 'Welcome %s! ' % name
    client.send(bytes(welcome, "utf8"))
    msg = "%s has joined the server!" % name
    print(msg)
    broadcast(bytes(msg, "utf8"))  # announce to all clients
    msg = " Connected clients:"
    for cc in clients.keys():
        msg = msg + ' ' + clients[cc]
    broadcast(bytes(msg,"utf8"))
    msg_list.insert(tkinter.END, msg)  # GUI log
    while True:
        try:
            msg = client.recv(BUFSIZ).decode("utf8")
            print("in handle loop: ", msg)
            if msg == "{file}":  # client wants to upload a file
                recieve_file(client)
            elif msg == "{chk_files}":  # client wants the server file list
                check_files(client)
            elif msg == "{DOWN}":  # client wants its queued downloads
                send_file(client)
            elif msg == "{quit}":  # client is disconnecting
                client.close()
                msg_list.insert(tkinter.END, "%s has left the server." % name)
                del clients[client]
                if (len(clients.keys()) != 0):
                    broadcast(bytes("%s has left the server." % name, "utf8"))
                    msg = "connected clients:"
                    for cc in clients.keys():
                        msg = msg + ' ' + clients[cc]
                    broadcast(bytes(msg, "utf8"))
                    msg_list.insert(tkinter.END, msg)  # GUI log
                break
        except:
            # NOTE(review): swallowing every error keeps the thread alive, but
            # if the socket dies recv() fails instantly and this loop busy-spins;
            # consider breaking out (with cleanup) on socket errors.
            pass
# this method sends list of files on server to check which of them client has
def check_files(client):  # argument is client socket
    """Sends the server's file list so the client can pick missing files.

    Protocol: "SOL" announces the list, each name is sent and ACKed, "{EOL}"
    ends it. The client replies "ALL_OK" (has everything) or "NUM" followed
    by file indexes, which are queued in files_to_send for a later "{DOWN}".

    NOTE(review): when the server has no files nothing is sent at all --
    confirm the client does not block waiting for a reply in that case.
    """
    if len(FILES_ON_SERVER) > 0:  # only respond if any files were uploaded
        client.send(bytes("SOL", "utf8"))  # start of List
        msg = client.recv(BUFSIZ).decode("utf8")
        if msg == "LIST_OK":  # client is ready to receive the list
            for i in range(len(FILES_ON_SERVER)):
                client.send(bytes(FILES_ON_SERVER[i], "utf8"))  # one name per message
                msg = client.recv(BUFSIZ).decode("utf8")  # per-name ACK
            client.send(bytes("{EOL}", "utf8"))  # End of list
            msg = client.recv(BUFSIZ).decode("utf8")
            if msg == "ALL_OK":  # client already has every file
                return
            elif msg == "NUM":  # client will send indexes of wanted files
                files_to_send[client] = []
                client.send(bytes("OK", "utf8"))
                while True:
                    msg = client.recv(BUFSIZ).decode("utf8")
                    if msg != "EOL":  # end of index list
                        files_to_send[client].append(FILES_ON_SERVER[int(msg)])  # queue name
                        client.send(bytes("OK", "utf8"))
                    else:
                        break
# method to send files to client
def send_file(client):  # argument is client socket
    """Sends the next queued file to the client over its socket.

    Protocol: announce "FILE", exchange the filename, stream the file in
    BUFSIZ chunks (waiting for a per-chunk ACK), then finish with "{EOF}".
    Sends "NO_FILE" when nothing is queued for this client.

    Args:
        client: the client's connected socket.
    """
    # .get() avoids a KeyError for a client that never queued any files: the
    # original indexed files_to_send[client] after only checking that SOME
    # client had an entry in the dict.
    queued = files_to_send.get(client)
    if queued:
        client.send(bytes("FILE", "utf8"))
        msg = client.recv(BUFSIZ).decode("utf8")  # client ACKs with OK
        filename = queued[0]
        client.send(bytes(filename, "utf8"))
        msg = client.recv(BUFSIZ).decode("utf8")  # filename ACK
        # 'rb' is sufficient for reading; the with-statement guarantees the
        # handle is closed even if the socket errors mid-transfer (the
        # original opened 'rb+' and leaked the handle on error).
        with open("server_directory/" + filename, 'rb') as f:
            while True:
                chunk = f.read(BUFSIZ)
                if chunk:
                    client.send(chunk)
                    msg = client.recv(BUFSIZ).decode("utf8")  # per-chunk ACK
                else:
                    client.send(bytes("{EOF}", "utf8"))
                    queued.remove(filename)  # dequeue once fully sent
                    msg_list.insert(tkinter.END, filename+" sent to "+ clients[client])
                    print("Done Sending")
                    break
    else:
        client.send(bytes("NO_FILE", "utf8"))
# method to recieve files from clients
def recieve_file(client):  # argument is client socket
    """Receives one file upload from the client into server_directory/.

    Protocol: ACK the request ("FILE OK"), receive the filename, ACK it
    ("FILENAME OK"), then append incoming chunks (ACKing each) until the
    "{EOF}" marker arrives. Updates the server file list and notifies all
    clients when done.

    Args:
        client: the client's connected socket.
    """
    client.send(bytes("FILE OK", "utf8"))
    filename = client.recv(BUFSIZ).decode("utf8")
    # Security: the name comes straight off the network; strip directory
    # components so a malicious "../../x" cannot write outside
    # server_directory/ (path traversal).
    filename = os.path.basename(filename)
    print("receiving file: ", filename)
    client.send(bytes("FILENAME OK", "utf8"))
    # with-statement closes the handle even if the connection drops mid-upload
    with open("server_directory/" + filename, 'wb') as f:
        while True:
            msg = client.recv(BUFSIZ)
            if msg != bytes("{EOF}", "utf8"):
                f.write(msg)
                client.send(bytes("OK", "utf8"))  # per-chunk ACK
            else:
                break
    print("Done Receiving")
    FILES_ON_SERVER.append(filename)  # update file list of server
    print(FILES_ON_SERVER)
    msg_list.insert(tkinter.END, clients[client] + ": uploaded "+filename)  # GUI log
    broadcast(bytes("uploaded file " + filename, "utf8"), clients[client] + ": ")  # notify everyone
# method to send messages to all clients
def broadcast(msg, prefix=""): # prefix is for name identification.
"""Broadcasts a message to all the clients."""
for sock in clients.keys():
sock.send(bytes(prefix, "utf8") + msg)
# for closing window and program
def on_closing(event=None):
    """Shuts the server down when the window closes or EXIT is pressed.

    Note: the original tried to send "{quit}" over a ``client_socket`` name
    that is never defined in this module (copied from the client program),
    so it always raised NameError and fell through to the except branch
    that destroyed the window. That dead code is removed; the effective
    behavior -- destroy the GUI and hard-exit -- is kept.
    """
    try:
        top.destroy()
    finally:
        # os._exit ensures the blocking accept()/recv() threads cannot keep
        # the process alive after the GUI is gone.
        os._exit(1)
if __name__ == "__main__":
    # --- build the server GUI ---
    top = tkinter.Tk()  # top is the main or root window
    top.title("Shared Box Server")
    messages_frame = tkinter.Frame(top)
    my_msg = tkinter.StringVar()  # For the messages to be sent.
    my_msg.set("")
    scrollbar = tkinter.Scrollbar(messages_frame)  # To navigate through past messages.
    msg_list = tkinter.Listbox(messages_frame, height=15, width=45, yscrollcommand=scrollbar.set)
    scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
    msg_list.pack(side=tkinter.LEFT, fill=tkinter.X)
    msg_list.pack()
    messages_frame.pack()
    exit_button = tkinter.Button(top, text="EXIT", command=on_closing)
    exit_button.pack()
    top.protocol("WM_DELETE_WINDOW", on_closing)
    # check and create the server directory used to store uploaded files
    try:
        os.makedirs("server_directory")
    except OSError as exception:
        # ignore "already exists"; re-raise anything else (e.g. permissions)
        if exception.errno != errno.EEXIST:
            raise
    SERVER.listen(5)  # Listens for 5 connections at max.
    print("Waiting for connection...")
    msg_list.insert(tkinter.END, "Waiting for clients to connect")
    ACCEPT_THREAD = Thread(target=accept_incoming_connections)
    ACCEPT_THREAD.start()  # Starts the accept loop in the background.
    tkinter.mainloop()  # Starts GUI execution (blocks until window closes).
    ACCEPT_THREAD.join()  # wait for thread to complete
    SERVER.close()
|
pub_proxy.py | # -*- coding: utf-8 -*-
'''
Listener worker process
'''
from __future__ import absolute_import
# Import pythond stdlib
import os
import signal
import logging
import threading
# Import third party libs
import zmq
# Import napalm-logs pkgs
from napalm_logs.config import PUB_IPC_URL
from napalm_logs.config import PUB_PX_IPC_URL
from napalm_logs.proc import NapalmLogsProc
# exceptions
from napalm_logs.exceptions import NapalmLogsExit
log = logging.getLogger(__name__)
class NapalmLogsPublisherProxy(NapalmLogsProc):
    '''
    Internal IPC proxy sub-process class.

    SUBscribes to everything published on PUB_PX_IPC_URL and re-PUBlishes
    it on PUB_IPC_URL via a zmq proxy, applying the configured high-water
    mark on both sockets.
    '''
    def __init__(self, hwm):
        self.hwm = hwm  # high-water mark applied to both proxy sockets
        self.__up = False

    def _exit_gracefully(self, signum, _):
        '''SIGTERM handler: delegate to stop().'''
        log.debug('Caught signal in the internal proxy process')
        self.stop()

    def _setup_ipc(self):
        '''
        Setup the IPC PUB and SUB sockets for the proxy.
        '''
        log.debug('Setting up the internal IPC proxy')
        self.ctx = zmq.Context()
        # Frontend
        self.sub = self.ctx.socket(zmq.SUB)
        self.sub.bind(PUB_PX_IPC_URL)
        self.sub.setsockopt(zmq.SUBSCRIBE, b'')
        log.debug('Setting HWM for the proxy frontend: %d', self.hwm)
        try:
            self.sub.setsockopt(zmq.HWM, self.hwm)
            # zmq 2
        except AttributeError:
            # zmq 3: a SUB socket only receives, so the receive-side HWM is
            # the one that matters here (the original set SNDHWM, which is
            # a no-op on a SUB socket).
            self.sub.setsockopt(zmq.RCVHWM, self.hwm)
        # Backend
        self.pub = self.ctx.socket(zmq.PUB)
        self.pub.bind(PUB_IPC_URL)
        log.debug('Setting HWM for the proxy backend: %d', self.hwm)
        try:
            self.pub.setsockopt(zmq.HWM, self.hwm)
            # zmq 2
        except AttributeError:
            # zmq 3
            self.pub.setsockopt(zmq.SNDHWM, self.hwm)

    def start(self):
        '''
        Listen to messages and publish them.
        '''
        self._setup_ipc()
        # Start suicide polling thread
        thread = threading.Thread(target=self._suicide_when_without_parent, args=(os.getppid(),))
        thread.start()
        signal.signal(signal.SIGTERM, self._exit_gracefully)
        # Mark the proxy as running. Without this flag flip (missing in the
        # original), stop() could never be distinguished from a genuine
        # ZMQError below, and every error was logged as a clean shutdown.
        self.__up = True
        try:
            zmq.proxy(self.sub, self.pub)
        except zmq.ZMQError as error:
            if self.__up is False:
                # stop() closed the sockets out from under zmq.proxy()
                log.info('Exiting on process shutdown')
                return
            else:
                log.error(error, exc_info=True)
                raise NapalmLogsExit(error)

    def stop(self):
        '''Close both sockets and terminate the zmq context.'''
        log.info('Stopping the internal IPC proxy')
        self.__up = False
        self.sub.close()
        self.pub.close()
        self.ctx.term()
|
vm_util.py | # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of utility functions for working with virtual machines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import logging
import os
import platform
import random
import re
import string
import subprocess
import tempfile
import threading
import time
from absl import flags
import jinja2
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import temp_dir
from six.moves import range
FLAGS = flags.FLAGS
# Filenames (within the run's temp dir) of the generated SSH key pair.
PRIVATE_KEYFILE = 'perfkitbenchmarker_keyfile'
PUBLIC_KEYFILE = 'perfkitbenchmarker_keyfile.pub'
# The temporary directory on VMs. We cannot reuse GetTempDir()
# because run_uri will not be available at time of module load and we need
# to use this directory as a base for other module level constants.
VM_TMP_DIR = '/tmp/pkb'
# Default timeout for issuing a command.
DEFAULT_TIMEOUT = 300
# Defaults for retrying commands.
POLL_INTERVAL = 30
TIMEOUT = 1200
FUZZ = .5
MAX_RETRIES = -1  # -1: retry until the timeout is reached
# Platform sentinels: os.name on Windows, platform.system() on macOS.
WINDOWS = 'nt'
DARWIN = 'Darwin'
PASSWORD_LENGTH = 15
# Indices into the (stdout, stderr, exit_code) tuple returned by IssueCommand.
OUTPUT_STDOUT = 0
OUTPUT_STDERR = 1
OUTPUT_EXIT_CODE = 2
# Released by StartSimulatedMaintenance to unblock SetupSimulatedMaintenance.
_SIMULATE_MAINTENANCE_SEMAPHORE = threading.Semaphore(0)
# Command-line flags controlling retry timeouts, background load generation,
# and SSH connection behavior.
flags.DEFINE_integer('default_timeout', TIMEOUT, 'The default timeout for '
                     'retryable commands in seconds.')
flags.DEFINE_integer('burn_cpu_seconds', 0,
                     'Amount of time in seconds to burn cpu on vm before '
                     'starting benchmark')
flags.DEFINE_integer('burn_cpu_threads', 1, 'Number of threads to use to '
                     'burn cpu before starting benchmark.')
flags.DEFINE_integer('background_cpu_threads', None,
                     'Number of threads of background cpu usage while '
                     'running a benchmark')
flags.DEFINE_integer('background_network_mbits_per_sec', None,
                     'Number of megabits per second of background '
                     'network traffic to generate during the run phase '
                     'of the benchmark')
flags.DEFINE_boolean('simulate_maintenance', False,
                     'Whether to simulate VM maintenance during the benchmark. '
                     'This requires both benchmark and provider support.')
flags.DEFINE_integer('simulate_maintenance_delay', 0,
                     'The number of seconds to wait to start simulating '
                     'maintenance.')
flags.DEFINE_boolean('ssh_reuse_connections', True,
                     'Whether to reuse SSH connections rather than '
                     'reestablishing a connection for each remote command.')
# We set this to the short value of 5 seconds so that the cluster boot benchmark
# can measure a fast connection when bringing up a VM. This avoids retries that
# may not be as quick as every 5 seconds when specifying a larger value.
flags.DEFINE_integer('ssh_connect_timeout', 5, 'timeout for SSH connection.',
                     lower_bound=0)
flags.DEFINE_string('ssh_control_path', None,
                    'Overrides the default ControlPath setting for ssh '
                    'connections if --ssh_reuse_connections is set. This can '
                    'be helpful on systems whose default temporary directory '
                    'path is too long (sockets have a max path length) or a '
                    'version of ssh that doesn\'t support the %h token. See '
                    'ssh documentation on the ControlPath setting for more '
                    'detailed information.')
flags.DEFINE_string('ssh_control_persist', '30m',
                    'Setting applied to ssh connections if '
                    '--ssh_reuse_connections is set. Sets how long the '
                    'connections persist before they are removed. '
                    'See ssh documentation about the ControlPersist setting '
                    'for more detailed information.')
flags.DEFINE_integer('ssh_server_alive_interval', 30,
                     'Value for ssh -o ServerAliveInterval. Use with '
                     '--ssh_server_alive_count_max to configure how long to '
                     'wait for unresponsive servers.')
flags.DEFINE_integer('ssh_server_alive_count_max', 10,
                     'Value for ssh -o ServerAliveCountMax. Use with '
                     '--ssh_server_alive_interval to configure how long to '
                     'wait for unresponsive servers.')
class IpAddressSubset(object):
  """Enum of options for --ip_addresses."""
  REACHABLE = 'REACHABLE'
  BOTH = 'BOTH'
  INTERNAL = 'INTERNAL'
  EXTERNAL = 'EXTERNAL'
  # Every valid flag value, used as the DEFINE_enum choices below.
  ALL = (REACHABLE, BOTH, INTERNAL, EXTERNAL)
flags.DEFINE_enum('ip_addresses', IpAddressSubset.REACHABLE,
                  IpAddressSubset.ALL,
                  'For networking tests: use both internal and external '
                  'IP addresses (BOTH), internal and external only if '
                  'the receiving VM is reachable by internal IP (REACHABLE), '
                  'external IP only (EXTERNAL) or internal IP only (INTERNAL)')
flags.DEFINE_enum('background_network_ip_type', IpAddressSubset.EXTERNAL,
                  (IpAddressSubset.INTERNAL, IpAddressSubset.EXTERNAL),
                  'IP address type to use when generating background network '
                  'traffic')
class IpAddressMetadata(object):
  # Lower-case labels used in sample metadata (distinct from IpAddressSubset
  # flag values, which are upper-case).
  INTERNAL = 'internal'
  EXTERNAL = 'external'
def GetTempDir():
  """Returns the tmp dir of the current run."""
  return temp_dir.GetRunDirPath()
def PrependTempDir(file_name):
  """Returns the file name prepended with the tmp dir of the current run."""
  return os.path.join(GetTempDir(), file_name)
def GenTempDir():
  """Creates the tmp dir for the current run if it does not already exist."""
  temp_dir.CreateTemporaryDirectories()
def SSHKeyGen():
  """Create PerfKitBenchmarker SSH keys in the tmp dir of the current run.

  No-op when the private key already exists, so this is safe to call
  multiple times per run.
  """
  if not os.path.isdir(GetTempDir()):
    GenTempDir()
  if not os.path.isfile(GetPrivateKeyPath()):
    create_cmd = ['ssh-keygen',
                  '-t', 'rsa',
                  '-N', '',        # empty passphrase: key is used unattended
                  '-m', 'PEM',     # PEM format for compatibility with older tools
                  '-q',
                  '-f', PrependTempDir(PRIVATE_KEYFILE)]
    IssueCommand(create_cmd)
def GetPrivateKeyPath():
  """Returns the path of the run's generated SSH private key."""
  return PrependTempDir(PRIVATE_KEYFILE)
def GetPublicKeyPath():
  """Returns the path of the run's generated SSH public key."""
  return PrependTempDir(PUBLIC_KEYFILE)
def GetSshOptions(ssh_key_filename, connect_timeout=None):
  """Return common set of SSH and SCP options.

  Args:
    ssh_key_filename: path of the private key to authenticate with.
    connect_timeout: optional override (seconds) for --ssh_connect_timeout.

  Returns:
    A list of command-line arguments suitable for ssh/scp invocations.
  """
  options = [
      '-2',
      '-o', 'UserKnownHostsFile=/dev/null',
      '-o', 'StrictHostKeyChecking=no',
      '-o', 'IdentitiesOnly=yes',
      '-o', 'PreferredAuthentications=publickey',
      '-o', 'PasswordAuthentication=no',
      '-o', 'ConnectTimeout=%d' % (
          connect_timeout or FLAGS.ssh_connect_timeout),
      '-o', 'GSSAPIAuthentication=no',
      '-o', 'ServerAliveInterval=%d' % FLAGS.ssh_server_alive_interval,
      '-o', 'ServerAliveCountMax=%d' % FLAGS.ssh_server_alive_count_max,
      '-i', ssh_key_filename
  ]
  if FLAGS.use_ipv6:
    options.append('-6')
  if FLAGS.ssh_reuse_connections:
    # Multiplex commands over one master connection; %h expands to the host.
    control_path = (FLAGS.ssh_control_path or
                    os.path.join(temp_dir.GetSshConnectionsDir(), '%h'))
    options.extend([
        '-o', 'ControlPath="%s"' % control_path,
        '-o', 'ControlMaster=auto',
        '-o', 'ControlPersist=%s' % FLAGS.ssh_control_persist
    ])
  options.extend(FLAGS.ssh_options)
  return options
# TODO(skschneider): Remove at least RunParallelProcesses and RunParallelThreads
# from this file (update references to call directly into background_tasks).
# These module-level aliases exist for backwards compatibility with callers
# that still import the helpers from vm_util.
RunParallelProcesses = background_tasks.RunParallelProcesses
RunParallelThreads = background_tasks.RunParallelThreads
RunThreaded = background_tasks.RunThreaded
def Retry(poll_interval=POLL_INTERVAL, max_retries=MAX_RETRIES,
          timeout=None, fuzz=FUZZ, log_errors=True,
          retryable_exceptions=None):
  """A function decorator that will retry when exceptions are thrown.
  Args:
    poll_interval: The time between tries in seconds. This is the maximum poll
        interval when fuzz is specified.
    max_retries: The maximum number of retries before giving up. If -1, this
        means continue until the timeout is reached. The function will stop
        retrying when either max_retries is met or timeout is reached.
    timeout: The timeout for all tries in seconds. If -1, this means continue
        until max_retries is met. If None, --default_timeout applies.
    fuzz: The amount of randomness in the sleep time. This is used to
        keep threads from all retrying at the same time. At 0, this
        means sleep exactly poll_interval seconds. At 1, this means
        sleep anywhere from 0 to poll_interval seconds.
    log_errors: A boolean describing whether errors should be logged.
    retryable_exceptions: A tuple of exceptions that should be retried. By
        default, this is None, which indicates that all exceptions should
        be retried.
  Returns:
    A function that wraps functions in retry logic. It can be
    used as a decorator.
  """
  caught = Exception if retryable_exceptions is None else retryable_exceptions
  def Wrap(f):
    """Attaches the retry behavior to the supplied function."""
    def WrappedFunction(*args, **kwargs):
      """Calls f until it succeeds, times out, or exhausts max_retries."""
      effective_timeout = (FLAGS.default_timeout if timeout is None
                           else timeout)
      if effective_timeout >= 0:
        deadline = time.time() + effective_timeout
      else:
        deadline = float('inf')
      attempt = 0
      while True:
        attempt += 1
        try:
          return f(*args, **kwargs)
        except caught as e:
          # Randomize the sleep within [poll_interval * (1 - fuzz),
          # poll_interval] so concurrent retries spread out.
          sleep_time = poll_interval * (1 - fuzz + random.random() * fuzz)
          out_of_time = (time.time() + sleep_time) >= deadline
          out_of_tries = max_retries >= 0 and attempt > max_retries
          if out_of_time or out_of_tries:
            raise
          if log_errors:
            logging.info('Retrying exception running %s: %s', f.__name__, e)
          time.sleep(sleep_time)
    return WrappedFunction
  return Wrap
class _BoxedObject(object):
"""Box a value in a reference so it is modifiable inside an inner function.
In python3 the nonlocal keyword could be used instead - but for python2
there is no support for modifying an external scoped variable value.
"""
def __init__(self, initial_value):
self.value = initial_value
def _ReadIssueCommandOutput(tf_out, tf_err):
"""Reads IssueCommand Output from stdout and stderr."""
tf_out.seek(0)
stdout = tf_out.read().decode('ascii', 'ignore')
tf_err.seek(0)
stderr = tf_err.read().decode('ascii', 'ignore')
return stdout, stderr
def IssueCommand(cmd, force_info_log=False, suppress_warning=False,
                 env=None, timeout=DEFAULT_TIMEOUT, cwd=None,
                 raise_on_failure=True, suppress_failure=None,
                 raise_on_timeout=True):
  """Tries running the provided command once.
  Args:
    cmd: A list of strings such as is given to the subprocess.Popen()
        constructor.
    force_info_log: A boolean indicating whether the command result should
        always be logged at the info level. Command results will always be
        logged at the debug level if they aren't logged at another level.
    suppress_warning: A boolean indicating whether the results should
        not be logged at the info level in the event of a non-zero
        return code. When force_info_log is True, the output is logged
        regardless of suppress_warning's value.
    env: A dict of key/value strings, such as is given to the subprocess.Popen()
        constructor, that contains environment variables to be injected.
    timeout: Timeout for the command in seconds. If the command has not finished
        before the timeout is reached, it will be killed. Set timeout to None to
        let the command run indefinitely. If the subprocess is killed, the
        return code will indicate an error, and stdout and stderr will
        contain what had already been written to them before the process was
        killed.
    cwd: Directory in which to execute the command.
    raise_on_failure: A boolean indicating if non-zero return codes should raise
        IssueCommandError.
    suppress_failure: A function passed (stdout, stderr, ret_code) for non-zero
        return codes to determine if the failure should be suppressed e.g. a
        delete command which fails because the item to be deleted does not
        exist.
    raise_on_timeout: A boolean indicating if killing the process due to the
        timeout being hit should raise a IssueCommandTimeoutError
  Returns:
    A tuple of stdout, stderr, and retcode from running the provided command.
  Raises:
    IssueCommandError: When raise_on_failure=True and retcode is non-zero.
    IssueCommandTimeoutError: When raise_on_timeout=True and
        command duration exceeds timeout
  """
  if env:
    logging.debug('Environment variables: %s', env)
  # Force conversion to string so you get a nice log statement before hitting a
  # type error or NPE. subprocess will still catch it.
  full_cmd = ' '.join(str(w) for w in cmd)
  logging.info('Running: %s', full_cmd)
  time_file_path = '/usr/bin/time'
  running_on_windows = RunningOnWindows()
  running_on_darwin = RunningOnDarwin()
  # /usr/bin/time wrapping (for wall/CPU/memory stats) is only attempted on
  # Linux-like controllers where the binary exists.
  should_time = (not (running_on_windows or running_on_darwin) and
                 os.path.isfile(time_file_path) and FLAGS.time_commands)
  shell_value = running_on_windows
  # stdout/stderr go to unnamed temp files instead of PIPE to avoid pipe
  # buffer deadlocks on large output; tf_timing captures /usr/bin/time stats.
  with tempfile.TemporaryFile() as tf_out, \
      tempfile.TemporaryFile() as tf_err, \
      tempfile.NamedTemporaryFile(mode='r') as tf_timing:
    cmd_to_use = cmd
    if should_time:
      cmd_to_use = [time_file_path,
                    '-o', tf_timing.name,
                    '--quiet',
                    '-f', ',  WallTime:%Es,  CPU:%Us,  MaxMemory:%Mkb '] + cmd
    process = subprocess.Popen(cmd_to_use, env=env, shell=shell_value,
                               stdin=subprocess.PIPE, stdout=tf_out,
                               stderr=tf_err, cwd=cwd)
    # Boxed flags let the timer callback report back to this scope (py2 has
    # no nonlocal).
    did_timeout = _BoxedObject(False)
    was_killed = _BoxedObject(False)

    def _KillProcess():
      # Timer callback: record that the timeout fired, then kill the child.
      did_timeout.value = True
      if not raise_on_timeout:
        logging.warning('IssueCommand timed out after %d seconds. '
                        'Killing command "%s".', timeout, full_cmd)
      process.kill()
      was_killed.value = True

    timer = threading.Timer(timeout, _KillProcess)
    timer.start()
    try:
      process.wait()
    finally:
      # Always cancel the timer so it cannot fire after a normal exit.
      timer.cancel()
    stdout, stderr = _ReadIssueCommandOutput(tf_out, tf_err)
    timing_output = ''
    if should_time:
      timing_output = tf_timing.read().rstrip('\n')
  debug_text = ('Ran: {%s}\nReturnCode:%s%s\nSTDOUT: %s\nSTDERR: %s' %
                (full_cmd, process.returncode, timing_output, stdout, stderr))
  if force_info_log or (process.returncode and not suppress_warning):
    logging.info(debug_text)
  else:
    logging.debug(debug_text)
  # Raise timeout error regardless of raise_on_failure - as the intended
  # semantics is to ignore expected errors caused by invoking the command
  # not errors from PKB infrastructure.
  if did_timeout.value and raise_on_timeout:
    debug_text = (
        '{0}\nIssueCommand timed out after {1} seconds.  '
        '{2} by perfkitbenchmarker.'.format(
            debug_text, timeout,
            'Process was killed' if was_killed.value else
            'Process may have been killed'))
    raise errors.VmUtil.IssueCommandTimeoutError(debug_text)
  elif process.returncode and (raise_on_failure or suppress_failure):
    if (suppress_failure and
        suppress_failure(stdout, stderr, process.returncode)):
      # failure is suppressible, rewrite the stderr and return code as passing
      # since some callers assume either is a failure e.g.
      # perfkitbenchmarker.providers.aws.util.IssueRetryableCommand()
      return stdout, '', 0
    raise errors.VmUtil.IssueCommandError(debug_text)
  return stdout, stderr, process.returncode
def IssueBackgroundCommand(cmd, stdout_path, stderr_path, env=None):
  """Runs the provided command once in the background.

  Args:
    cmd: Command to be run, as expected by subprocess.Popen.
    stdout_path: Redirect stdout here. Overwritten.
    stderr_path: Redirect stderr here. Overwritten.
    env: A dict of key/value strings, such as is given to the subprocess.Popen()
        constructor, that contains environment variables to be injected.
  """
  logging.debug('Environment variables: %s', env)
  full_cmd = ' '.join(cmd)
  logging.info('Spawning: %s', full_cmd)
  shell_value = RunningOnWindows()
  # Popen duplicates the descriptors into the child, so the parent can (and
  # should) close its own handles immediately; the original leaked both file
  # objects for the life of the process.
  with open(stdout_path, 'w') as outfile, open(stderr_path, 'w') as errfile:
    subprocess.Popen(cmd, env=env, shell=shell_value,
                     stdout=outfile, stderr=errfile, close_fds=True)
@Retry()
def IssueRetryableCommand(cmd, env=None):
  """Tries running the provided command until it succeeds or times out.

  Decorated with the default Retry() policy, so a raised
  CalledProcessException triggers another attempt.

  Args:
    cmd: A list of strings such as is given to the subprocess.Popen()
        constructor.
    env: An alternate environment to pass to the Popen command.
  Returns:
    A tuple of stdout and stderr from running the provided command.
  Raises:
    errors.VmUtil.CalledProcessException: on a non-zero exit code (after
        retries are exhausted).
  """
  stdout, stderr, retcode = IssueCommand(cmd, env=env, raise_on_failure=False)
  if retcode:
    debug_text = ('Ran: {%s}\nReturnCode:%s\nSTDOUT: %s\nSTDERR: %s' %
                  (' '.join(cmd), retcode, stdout, stderr))
    raise errors.VmUtil.CalledProcessException(
        'Command returned a non-zero exit code:\n{}'.format(debug_text))
  return stdout, stderr
def ParseTimeCommandResult(command_result):
  """Parse command result and get time elapsed.

  Note this parses the output of bash's time builtin, not /usr/bin/time or
  other implementations. You may need to run something like
  bash -c "time ./command" to produce parseable output.

  Args:
    command_result: The result after executing a remote time command,
        containing a line like 'real\t1m30.500s'.

  Returns:
    Time taken for the command, in seconds (float).

  Raises:
    IndexError: If no 'real <M>m<S.S>' line is present in command_result.
  """
  # The dot is escaped so only a literal decimal point matches; the original
  # pattern's bare '.' matched any character between the digit groups.
  time_data = re.findall(r'real\s+(\d+)m(\d+\.\d+)', command_result)
  time_in_seconds = 60 * float(time_data[0][0]) + float(time_data[0][1])
  return time_in_seconds
def ShouldRunOnExternalIpAddress(ip_type=None):
  """Returns whether a test should be run on an instance's external IP.

  Args:
    ip_type: optional ip_type to use instead of the --ip_addresses flag.
  """
  effective_type = ip_type or FLAGS.ip_addresses
  return effective_type in {IpAddressSubset.EXTERNAL,
                            IpAddressSubset.BOTH,
                            IpAddressSubset.REACHABLE}
def ShouldRunOnInternalIpAddress(sending_vm, receiving_vm, ip_type=None):
  """Returns whether a test should be run on an instance's internal IP.

  Based on the command line flag --ip_addresses. Internal IP addresses are
  used when:
  * --ip_addresses=BOTH or --ip-addresses=INTERNAL
  * --ip_addresses=REACHABLE and 'sending_vm' can ping 'receiving_vm' on its
    internal IP.

  Args:
    sending_vm: VirtualMachine. The client.
    receiving_vm: VirtualMachine. The server.
    ip_type: optional ip_type to use instead of what is set in the FLAGS

  Returns:
    Whether a test should be run on an instance's internal IP.
  """
  effective_type = ip_type or FLAGS.ip_addresses
  if effective_type in (IpAddressSubset.BOTH, IpAddressSubset.INTERNAL):
    return True
  # Only probe reachability when the flag actually requires it.
  return (effective_type == IpAddressSubset.REACHABLE and
          sending_vm.IsReachable(receiving_vm))
def GetLastRunUri():
  """Returns the last run_uri used (or None if it can't be determined)."""
  runs_dir_path = temp_dir.GetAllRunsDirPath()
  try:
    # os.walk yields nothing when the runs directory does not exist.
    _, run_dir_names, _ = next(os.walk(runs_dir_path))
  except StopIteration:
    return None
  if not run_dir_names:
    # No run subdirectories were found in the runs directory.
    return None

  def _ModTime(name):
    return os.path.getmtime(os.path.join(runs_dir_path, name))

  # The most recently modified subdirectory is the last run.
  return max(run_dir_names, key=_ModTime)
@contextlib.contextmanager
def NamedTemporaryFile(mode='w+b', prefix='tmp', suffix='', dir=None,
                       delete=True):
  """Behaves like tempfile.NamedTemporaryFile.

  The existing tempfile.NamedTemporaryFile has the annoying property on
  Windows that it cannot be opened a second time while it is already open.
  This makes it impossible to use it with a "with" statement in a cross
  platform compatible way. This serves a similar role, but allows the file
  to be closed within a "with" statement without causing the file to be
  unlinked until the context exits.

  Args:
    mode: see mode in tempfile.NamedTemporaryFile.
    prefix: see prefix in tempfile.NamedTemporaryFile.
    suffix: see suffix in tempfile.NamedTemporaryFile.
    dir: see dir in tempfile.NamedTemporaryFile.
    delete: see delete in NamedTemporaryFile.

  Yields:
    A cross platform file-like object which is "with" compatible.
  """
  # delete=False here: deletion is handled explicitly below, after the
  # caller's context has exited (and possibly reopened the file).
  handle = tempfile.NamedTemporaryFile(mode=mode, prefix=prefix,
                                       suffix=suffix, dir=dir, delete=False)
  try:
    yield handle
  finally:
    if not handle.closed:
      handle.close()
    if delete:
      os.unlink(handle.name)
def GenerateSSHConfig(vms, vm_groups):
  """Generates an SSH config file to simplify connecting to the specified VMs.

  Writes a file to GetTempDir()/ssh_config with an SSH configuration for each
  VM provided in the arguments. Users can then SSH with any of the following:

      ssh -F <ssh_config_path> <vm_name>
      ssh -F <ssh_config_path> vm<vm_index>
      ssh -F <ssh_config_path> <group_name>-<index>

  Args:
    vms: list of BaseVirtualMachines.
    vm_groups: dict mapping VM group name string to list of
        BaseVirtualMachines.
  """
  target_file = os.path.join(GetTempDir(), 'ssh_config')
  template_path = data.ResourcePath('ssh_config.j2')
  # StrictUndefined makes the render fail loudly on any missing template var.
  environment = jinja2.Environment(undefined=jinja2.StrictUndefined)
  with open(template_path) as fp:
    template = environment.from_string(fp.read())
  with open(target_file, 'w') as ofp:
    ofp.write(template.render({'vms': vms, 'vm_groups': vm_groups}))
  # Log ready-to-copy example invocations for each addressing style.
  ssh_options = ['  ssh -F {0} {1}'.format(target_file, pattern)
                 for pattern in ('<vm_name>', 'vm<index>',
                                 '<group_name>-<index>')]
  logging.info('ssh to VMs in this benchmark by name with:\n%s',
               '\n'.join(ssh_options))
def RunningOnWindows():
  """Returns True if PKB is running on Windows."""
  return os.name == WINDOWS
def RunningOnDarwin():
  """Returns True if PKB is running on a Darwin OS machine."""
  return os.name != WINDOWS and platform.system() == DARWIN
def ExecutableOnPath(executable_name):
  """Return True if the given executable can be found on the path."""
  # 'where' is the Windows analogue of 'which'; both exit non-zero when the
  # executable is not found.
  locator = 'where' if RunningOnWindows() else 'which'
  process = subprocess.Popen([locator, executable_name],
                             shell=RunningOnWindows(),
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
  process.communicate()
  return process.returncode == 0
def GenerateRandomWindowsPassword(password_length=PASSWORD_LENGTH):
  """Generates a password that meets Windows complexity requirements."""
  # The special characters have to be recognized by the Azure CLI as
  # special characters. This greatly limits the set of characters
  # that we can safely use. See
  # https://github.com/Azure/azure-xplat-cli/blob/master/lib/commands/arm/vm/vmOsProfile._js#L145
  special_chars = '*!@#$%+='
  # One character of each of the 4 required classes first, starting with
  # letters so the password never begins with a character (e.g. '@') that is
  # problematic on the command line.
  required = [random.choice(string.ascii_lowercase),
              random.choice(string.ascii_uppercase),
              random.choice(string.digits),
              random.choice(special_chars)]
  alphabet = string.ascii_letters + string.digits + special_chars
  filler = [random.choice(alphabet)
            for _ in range(password_length - 4)]
  return ''.join(required + filler)
def StartSimulatedMaintenance():
  """Initiates the simulated maintenance event.

  Releases the module semaphore so the thread started by
  SetupSimulatedMaintenance can proceed. No-op unless --simulate_maintenance.
  """
  if FLAGS.simulate_maintenance:
    _SIMULATE_MAINTENANCE_SEMAPHORE.release()
def SetupSimulatedMaintenance(vm):
  """Readies a VM for simulated maintenance.

  Starts a daemon thread that blocks on the module semaphore until
  StartSimulatedMaintenance fires, then (after the configured delay)
  triggers the VM's maintenance event. No-op unless --simulate_maintenance.
  """
  if FLAGS.simulate_maintenance:
    def _SimulateMaintenance():
      _SIMULATE_MAINTENANCE_SEMAPHORE.acquire()
      time.sleep(FLAGS.simulate_maintenance_delay)
      vm.SimulateMaintenanceEvent()
    t = threading.Thread(target=_SimulateMaintenance)
    # daemon: never keep the process alive waiting for a maintenance trigger
    t.daemon = True
    t.start()
def CopyFileBetweenVms(filename, src_vm, src_path, dest_vm, dest_path):
  """Copies a file from the src_vm to the dest_vm.

  Stages the file through a local temporary file: download from src_vm,
  then upload to dest_vm.

  NOTE(review): reusing the open NamedTemporaryFile's path assumes a
  POSIX controller (on Windows an open temp file cannot be reopened) --
  confirm if Windows controllers are supported for this path.
  """
  with tempfile.NamedTemporaryFile() as tf:
    temp_path = tf.name
    src_vm.RemoteCopy(
        temp_path, os.path.join(src_path, filename), copy_to=False)
    dest_vm.RemoteCopy(
        temp_path, os.path.join(dest_path, filename), copy_to=True)
def ReplaceText(vm, current_value, new_value, file_name, regex_char='/'):
  """Replaces text <current_value> with <new_value> in remote <file_name>.

  regex_char is the sed substitution delimiter; pass an alternative when the
  values themselves contain '/'.
  """
  sed_command = 'sed -i -r "s{0}{1}{0}{2}{0}" {3}'.format(
      regex_char, current_value, new_value, file_name)
  vm.RemoteCommand(sed_command)
def DictonaryToEnvString(dictionary):
  """Convert a dictionary to a space-separated 'key=value' string.

  Keys are emitted in sorted order and every entry -- including the first --
  is preceded by one space, preserving the historical output format.

  Args:
    dictionary: the key-value dictionary to convert.

  Returns:
    A string such as ' a=1 b=2' ('' for an empty dictionary).
  """
  # join instead of repeated += keeps this linear in the number of entries.
  return ''.join(
      ' {key}={value}'.format(key=key, value=value)
      for key, value in sorted(dictionary.items()))
|
2021-11-21-cmind-drawingcode.py | import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import re
import socket,threading
import pyautogui
from worrd import gud
def recvMsg(soc): #receive coordinates from the peer
    """Reader loop: receive fixed-size "x1,y1,x2,y2" frames and draw them.

    Runs on a background thread. Relies on the module-level `ex` (the MyApp
    window created in the __main__ block) existing when a frame arrives.
    """
    while True:
        data = soc.recv(15) #one frame: four 3-digit zero-padded numbers + 3 commas = 15 bytes
        msg = data.decode()
        a=msg.split(',')
        ex.sok(a[0],a[1],a[2],a[3])
    soc.close()  # NOTE(review): unreachable -- the loop above never exits
class Client:
    """TCP client that connects to the drawing server and reads peer strokes."""
    ip = 'localhost'
    port = 4444

    def __init__(self):
        # Socket is created lazily in conn(); None until then.
        self.client_soc = None

    def conn(self):
        """Open the TCP connection to (Client.ip, Client.port)."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((Client.ip, Client.port))
        self.client_soc = sock

    def run(self):
        """Connect, then start a background thread that receives coordinates."""
        self.conn()
        reader = threading.Thread(target=recvMsg, args=(self.client_soc,))
        reader.start()
def main():
    """Create the network client and start receiving remote draw events."""
    Client().run()
# Module-level socket used by MyApp.mouseMoveEvent to broadcast local strokes.
# The connection is opened at import time.
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(("localhost", 4444))
class MyApp(QMainWindow): #main drawing window
    """Shared-whiteboard window.

    Local strokes are painted onto an off-screen QImage and also serialized
    as fixed-width "x1,y1,x2,y2" frames sent over the module-level
    client_socket; remote strokes arrive through sok().
    """
    xy=[]  # last parsed window geometry digits [x, y, w, h] (strings)
    def __init__(self):
        super().__init__()
        # 400x400 white canvas that persists between paint events.
        self.image = QImage(QSize(400, 400), QImage.Format_RGB32)
        self.image.fill(Qt.white)
        self.brush_size = 5
        self.brush_color = Qt.black
        self.initUI()
        self.show()
    def print_label(self, vbutton, vlabel):
        # Copy a button's caption into a label.
        vlabel.setText(vbutton.text())
    def initUI(self): #window setup
        self.setWindowTitle('Simple Painter')
        self.setGeometry(300, 200, 400, 400)
        self.intialL1 = 'First Label'
        a=gud()  # word list from the worrd module; first entry captions the button
        button = QPushButton(a[0], self)
        button.move(150, 0)
    def paintEvent(self, e):
        # Blit the off-screen canvas, then repaint the masking strips on top.
        canvas = QPainter(self)
        canvas.drawImage(self.rect(), self.image, self.image.rect())
        qp = QPainter()
        qp.begin(self)
        self.draw_line(qp)
        qp.end()
    def mousePressEvent(self, e):
        if e.button() == Qt.LeftButton: #stroke start
            # NOTE(review): self.drawing is only ever created here; a move
            # event before any left-click would raise AttributeError below.
            self.drawing = True
            self.last_point = e.pos() #mouse position at stroke start
    def mouseMoveEvent(self, e):
        if (e.buttons() & Qt.LeftButton) & self.drawing: #every move after the first press
            painter = QPainter(self.image)
            painter.setPen(QPen(self.brush_color, self.brush_size, Qt.SolidLine, Qt.RoundCap))
            # Concatenated reprs "PyQt5.QtCore.QPoint(x1, y1)PyQt5.QtCore.QPoint(x2, y2)"
            # are later mined for digits with a regex.
            a=str(self.last_point)+str(e.pos())
            painter.drawLine(self.last_point, e.pos()) # (previous, current) drawLine(x1, y1, x2, y2)
            self.last_point = e.pos() #current mouse cursor position
            b = re.findall("\d+", a)
            # Drop the two '5's captured from the "PyQt5" module name in each repr,
            # leaving exactly [x1, y1, x2, y2].
            del b[0]
            del b[2]
            c = ''
            for a in b:
                a=str(a)
                k=a.zfill(3) #pad each coordinate to 3 digits so every frame is 15 bytes
                c = c + str(k) + ','
            c=c[:-1] #the "x1,y1,x2,y2" frame without the trailing comma
            b = []
            # Parse window geometry digits out of the QRect repr (again dropping
            # the '5' of "PyQt5").
            u=re.findall("\d+", str(self.geometry()))
            del u[0]
            self.xy=u
            i = re.findall("\d+", str(pyautogui.position()))
            if int(i[0]) >= int(u[0]) and int(i[0]) <= int(u[0])+int(u[2]) and int(i[1]) >= int(u[1])+30 and int(i[1]) <= int(u[1])+int(u[3])-40:#only send while the cursor is inside the drawable area
                client_socket.send(c.encode()) #broadcast the stroke to the peer
    def sok(self, q,w,e,r):
        # Draw one remote stroke segment received by the socket reader thread.
        painter = QPainter(self.image)
        painter.setPen(QPen(self.brush_color, self.brush_size, Qt.SolidLine, Qt.RoundCap))
        painter.drawLine(int(q), int(w),int(e),int(r)) # (previous, current) drawLine(x1, y1, x2, y2)
        self.update()
    def draw_line(self, qp):
        # Paint white strips across the top and bottom so strokes stay inside
        # the visible drawing area.
        qp.setPen(QPen(Qt.white, 40))
        qp.drawLine(0, 380, 400, 380)
        qp.setPen(QPen(Qt.white, 37))
        qp.drawLine(0, 10, 400, 10)
if __name__ == '__main__':
    main()
    app = QApplication(sys.argv)
    ex = MyApp()  # main window; recvMsg() resolves this global to paint remote strokes
    exit_code = app.exec_()
    # Bug fix: close the broadcast socket before exiting. The original called
    # close() after sys.exit(), so it never executed.
    client_socket.close()
    sys.exit(exit_code)
|
bellamylib.py | '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
The main library for running the IRC bot. The IRCBot class will be the bot
itself. The settings can be done manually or read from a config file. The
example config file shows some of the options. A simple implementation
of the bot would be:
myBot = IRCBot()
myBot.setInfoFromConfig('config')
myBot.start()
From there the bot will read the configuration from the config file and
do all the connections.
Use incoming() to receive text which will return an IRCMessage instance.
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
import time
import socket
import random
import threading
# Class for parsing the IRC incoming messages.
# Not every message will populate every attribute.
class IRCMessage:
    """One parsed incoming IRC line.

    Attributes:
        message: the raw line with the leading ':' removed.
        prefix: source prefix ("nick!user@server").
        nick: nick portion of the prefix.
        host: user/ident portion (between '!' and '@').
        serv: server/host portion (after '@').
        IRCcmd: IRC command or numeric reply (e.g. 'PRIVMSG', '353').
        IRCparams: middle parameters, up to (not including) the trailing part.
        body: trailing text after the second ':'.
        command: first word of body (a bot command such as '!roll').
        argument: remainder of body after the command word.
    """

    def __init__(self, text):
        # Per-instance defaults. (The original kept these as class attributes,
        # which made IRCparams a mutable list shared by unparsed messages.)
        self.message = ''
        self.prefix = ''
        self.nick = ''
        self.host = ''
        self.serv = ''
        self.IRCcmd = ''
        self.IRCparams = []
        self.body = ''
        self.command = ''
        self.argument = ''
        if text.startswith(':'):
            if len(text.split(' ')) < 2:
                return
            self.message = text.partition(':')[2]
            words = self.message.split()
            if len(words) < 2:
                return  # guard: a prefix with no command would crash below
            self.prefix = words[0]
            if self.prefix.find('!') != -1 and self.prefix.find('@') != -1:
                parts = self.prefix.split('!')
                self.nick = parts[0]
                userhost = parts[1]
                self.host = userhost.split('@')[0]
                # Bug fix: the server is the part *after* '@'; the original
                # reused index 0 and duplicated the host into serv.
                self.serv = userhost.split('@')[1]
            self.IRCcmd = words[1]
            for token in self.message.split(' ')[2:]:
                if token.startswith(':'):
                    break
                self.IRCparams.append(token)
        if len(text.split(':')) > 2:
            self.body = ':'.join(text.split(':')[2:])
            self.command = self.body.split(' ')[0]
            if len(self.body.split(' ')) > 1:
                self.argument = self.body.partition(' ')[2]
# General needed information about the bot's state and other small strings.
# This is where the data from the config file goes. An instance of _BotInfo
# is made inside the IRCBot class; there is no need to make one separately.
class _BotInfo:
    """Holds connection settings, identity strings, and feature toggles."""

    def __init__(self):
        # Connection settings (required; filled by parseConfig or setters).
        self.server = None
        self.channel = None
        self.nick = None
        self.password = None
        # Identity / metadata defaults.
        self.mbid = "9c9f1380-2516-4fc9-a3e6-f9f61941d090"
        self.sourceCode = "http://waa.ai/4m8N"
        self.version = "5.2.0"
        self.green = False
        self.greenNick = None
        # Feature toggles.
        self.joinmsg = False
        self.games = True
        self.state = False

    def parseConfig(self, filename):
        """Populate settings from a "key = value" config file.

        Lines starting with '#' are comments; lines without '=' and unknown
        keys are ignored.

        Raises:
            IOError: the file could not be opened.
        """
        try:
            with open(filename, 'r') as f:
                configLines = f.read().splitlines()
        except IOError:
            raise IOError("Error opening " + filename)
        for line in configLines:
            if line.startswith("#"):
                continue
            # Bug fix: split on the first '=' only, so values (e.g. passwords)
            # may themselves contain '=' characters.
            splitLine = line.split("=", 1)
            if len(splitLine) < 2:
                continue
            arg = splitLine[0].strip()
            value = splitLine[1].strip()
            if arg == "server":
                self.server = value
            elif arg == "channel":
                self.channel = value
            elif arg == "nick":
                self.nick = value
            elif arg == "password":
                self.password = value
            elif arg == "mbid":
                self.mbid = value
            elif arg == "source":
                self.sourceCode = value
            elif arg == "joinmsg":
                if value == "on":
                    self.joinmsg = True
                elif value == "off":
                    self.joinmsg = False
            elif arg == "gamemode":
                if value == "on":
                    self.games = True
                elif value == "off":
                    self.games = False
            elif arg == "state":
                if value == "on":
                    self.state = True
                elif value == "off":
                    self.state = False

    def verifyConfig(self):
        """Raise if a required connection setting is missing; warn on no password."""
        if self.server is None or self.nick is None or self.channel is None:
            raise Exception("Server, nick, and channel required in config.")
        if self.password is None:
            print("Warning: No password entered.")
# A timer that stores a time delay (minutes or seconds) that can be checked
# until the local time has reached the target. Limits are the next highest
# time division (i.e. 1 minute or 1 hour).
class _Timer:
    """One-shot or looping clock-face timer polled via check()."""

    def __init__(self, ttype, delay, rand=False, rrange=(), loop=False):
        """Create a timer.

        Args:
            ttype: "sec" or "min" -- which clock field the delay applies to.
            delay: delay amount; ignored when rand is True.
            rand: pick the delay randomly from rrange instead of using delay.
            rrange: (low, high) inclusive bounds for the random delay.
            loop: re-arm automatically each time the timer fires.

        Raises:
            ValueError: bad ttype, or rand without a two-element rrange.
        """
        self.rawDelay = delay
        # Bug fix: the original used `is not`, which compares identity rather
        # than string equality and is unreliable (and a SyntaxWarning).
        if ttype != "sec" and ttype != "min":
            raise ValueError("type needs to be \"sec\" or \"min\"")
        self.timerType = ttype
        self.random = rand
        self.randRange = rrange
        self.loop = loop
        self.__setDelay(self.rawDelay)
        self.active = True

    # If the timer uses a randomly generated range, the raw delay is ignored.
    def __setDelay(self, delay):
        if self.random:
            if len(self.randRange) != 2:
                raise ValueError("Timer random range needs two specified values.")
            delay = random.randint(self.randRange[0], self.randRange[1])
        # Bug fix: wrap on the clock face with modulo 60. The original
        # subtracted 61 (seconds) or 59 (minutes) only when the sum exceeded
        # the limit, which drifted the target and could produce a target of
        # 60/61 that the clock never reaches.
        if self.timerType == "sec":
            self.timeDelay = (time.localtime()[5] + delay) % 60
        elif self.timerType == "min":
            self.timeDelay = (time.localtime()[4] + delay) % 60

    def check(self):
        """Return True once the clock reaches the target; re-arm or deactivate."""
        if not self.active:
            return False
        if self.timerType == "sec":
            fired = time.localtime()[5] == self.timeDelay
        else:  # "min" -- __init__ guarantees one of the two types
            fired = time.localtime()[4] == self.timeDelay
        if fired:
            if self.loop:
                self.__setDelay(self.rawDelay)
            else:
                self.active = False
        return fired
# The main loop for bot timers. This runs in a separate thread, in parallel
# with the main bot.
class _BotTimers:
    """Owns the timer list and the background polling loop.

    These attributes are accessed from outside to change behavior:
        initialized -- controls the main loop (False stops the thread).
        active      -- controls whether timer callbacks will run.
    """

    def __init__(self):
        # Bug fix: per-instance list. The original declared __timerList as a
        # class attribute, so every _BotTimers instance shared one timer list.
        self.__timerList = []
        self.active = False
        self.thread = threading.Thread(target=self.__loop, args=())
        self.initialized = True

    def begin(self):
        """Enable callbacks and start the background polling thread."""
        self.active = True
        self.thread.start()

    # Creates a timer that, when it fires, invokes callback(args).
    def addTimer(self, ttype, delay, callback,
                 args, _rand=False, _rrange=(), _loop=False):
        timer = _Timer(ttype, delay, rand=_rand, rrange=_rrange, loop=_loop)
        self.__timerList.append(
            {'timer': timer, 'callback': callback, 'args': args})

    def __loop(self):
        # Poll once a second; expired (non-looping) timers are removed after
        # the sweep so the list is never mutated while being iterated.
        while self.initialized:
            expired = []
            for entry in self.__timerList:
                if entry['timer'].check() and self.active:
                    entry['callback'](entry['args'])
                if not entry['timer'].active:
                    expired.append(entry)
            for entry in expired:
                self.__timerList.remove(entry)
            time.sleep(1)
# General class for the IRC connection. Contains all join and messaging commands.
class IRCBot:
    """IRC bot connection: socket handling, channel rosters, and messaging."""

    def __init__(self):
        self.__chat = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # Socket connection.
        self.__info = _BotInfo()  # Settings / state store.
        # Bug fix: per-instance rosters. The original declared these as class
        # attributes, so every IRCBot instance shared the same lists.
        self.userlist = []  # All the users in the channel.
        self.modlist = []   # All the elevated users, hop and higher.
        self.owners = []    # All the owners (will typically be just one if any).

    def setInfoFromConfig(self, filename):
        """Load connection settings from the given config file."""
        self.__info.parseConfig(filename)

    # The 'getter' functions for interfacing with the info instance.
    def currentServer(self):
        return self.__info.server

    def channel(self):
        return self.__info.channel

    def nick(self):
        return self.__info.nick

    def mbid(self):
        return self.__info.mbid

    def source(self):
        return self.__info.sourceCode

    def joinmsg(self):
        return self.__info.joinmsg

    def greenActive(self):
        return self.__info.green

    def checkGreen(self, nick):
        return self.__info.greenNick == nick

    def version(self):
        return self.__info.version

    def isAwake(self):
        return self.__info.state

    def gamesActive(self):
        return self.__info.games

    # 'Setter' functions for the info instance.
    def greenNick(self, nick):
        self.__info.greenNick = nick

    def greenOn(self):
        self.__info.green = True

    def greenOff(self):
        self.__info.green = False

    def wake(self):
        self.__info.state = True

    def sleep(self):
        self.__info.state = False

    def activateGames(self):
        self.__info.games = True

    def deactivateGames(self):
        self.__info.games = False

    def activateJoinMsg(self):
        self.__info.joinmsg = True

    def deactivateJoinMsg(self):
        self.__info.joinmsg = False

    # Timer controls.
    def initializeTimers(self):
        self.__timers = _BotTimers()

    def timersInitialized(self):
        # NOTE(review): raises AttributeError (rather than returning False)
        # when initializeTimers() was never called -- preserved as-is.
        return self.__timers is not None

    def startTimers(self):
        self.__timers.begin()

    def pauseTimers(self):
        self.__timers.active = False

    def resumeTimers(self):
        self.__timers.active = True

    def killTimers(self):
        self.__timers.initialized = False

    def addTimer(self, ttype, delay, callback,
                 args, rand=False, rrange=(), loop=False):
        self.__timers.addTimer(ttype, delay, callback, args,
                               _rand=rand, _rrange=rrange, _loop=loop)

    def start(self):
        """Connect to the server, register the nick, and join the channel."""
        self.__info.verifyConfig()
        print("Connecting to %s." % self.__info.server)
        self.__connectServer(self.__info.server)
        self.__identify(self.__info.password)
        self.__connectNick(self.__info.nick)
        self.__connectUser(self.__info.nick, "Hello")
        # Here we wait until the server prompts to enter the password.
        unregistered = True
        while unregistered:
            text = ''
            try:
                text = self.__getText()
                try:
                    print(text)
                except UnicodeEncodeError:
                    pass
            except socket.timeout:
                pass
            if text.lower().find('identify') != -1:
                unregistered = False
                self.__identify(self.__info.password)
        print("Bot nick set as %s." % self.__info.nick)
        print("Joining channel %s." % self.__info.channel)
        self.__join(self.__info.channel)

    # Channel setup.
    def setUser(self, nick):
        self.__info.nick = nick

    def setChannel(self, chan):
        self.__info.channel = chan

    def setServer(self, serv):
        self.__info.server = serv

    def setPassword(self, password):
        self.__info.password = password

    # Internal connection initializations.
    def __connectServer(self, server):
        self.__chat.connect((server, 6667))

    def __connectUser(self, name, message):
        self.__chat.send(("USER %s botnick botnick :%s\r\n" % (name, message)).encode('utf-8'))

    def __connectNick(self, nick):
        self.__chat.send(("NICK %s\r\n" % nick).encode('utf-8'))

    def __identify(self, password):
        self.__chat.send(("PRIVMSG nickserv :identify %s\r\n" % password).encode('utf-8'))

    def __join(self, channel):
        self.__chat.send(("JOIN %s\r\n" % channel).encode('utf-8'))

    def __getText(self):
        return self.__chat.recv(2048).decode('utf-8')

    # Formats and prepares an IRCMessage instance to return.
    # Also updates the userlists if applicable.
    def incoming(self):
        text = self.__getText().strip()
        if text.find("PING") != -1:
            self.__chat.send(("PONG %s\r\n" % (text.split()[1])).encode('utf-8'))
        else:
            try:
                print(text)
            except UnicodeEncodeError:
                # Bug fix: the original caught the nonexistent name
                # 'UnicodeDecoreError', which itself raised NameError whenever
                # print() failed to encode.
                pass
        for line in text.split('\n'):
            message = IRCMessage(line)
            if message.IRCcmd == '353':
                self.__setUserList(message)
            elif message.IRCcmd == 'PART' or message.IRCcmd == 'QUIT':
                self.__removeUser(message.nick)
            elif message.IRCcmd == 'JOIN':
                self.__appendUser(message.nick)
            elif message.IRCcmd == 'MODE':
                self.__updateUser(message.IRCparams)
        # Only returns the last message for checking.
        return message

    # Bot interaction commands.
    def msg(self, message):
        self.__chat.send(("PRIVMSG %s :%s\r\n" % (self.__info.channel, message)).encode('utf-8'))

    def kick(self, nick, message):
        self.__chat.send(("KICK %s %s :%s\r\n" % (self.__info.channel, nick, message)).encode('utf-8'))

    def quitirc(self, message):
        self.__chat.send(("QUIT :Quit %s\r\n" % message).encode('utf-8'))

    def memo(self, nick, message):
        self.__chat.send(("PRIVMSG memoserv :send %s %s\r\n"
                          % (nick, message)).encode('utf-8'))

    def action(self, message):
        self.msg("\x01ACTION %s" % message)

    # Userlist commands.
    def __appendUser(self, nick):
        """Add a (possibly mode-prefixed) nick to the appropriate rosters."""
        # Strip before the emptiness check so whitespace-only tokens cannot
        # crash on nick[0] (the original checked the length first).
        nick = nick.strip()
        if not nick:
            return
        if nick[0] in ('%', '@', '&', '~'):
            prefix = nick[0]
            nick = nick[1:]
            if not nick:
                return  # token was only a mode prefix
            # Bug fix: check the rosters independently. The original's
            # combined "or" test could append a nick to modlist twice.
            if nick not in self.modlist:
                self.modlist.append(nick)
            if prefix == '~' and nick not in self.owners:
                self.owners.append(nick)
        if nick not in self.userlist:
            self.userlist.append(nick)

    def __removeUser(self, nick):
        """Drop a nick from every roster it appears in."""
        nick = nick.strip()
        if nick in self.userlist:
            self.userlist.remove(nick)
        if nick in self.modlist:
            self.modlist.remove(nick)
        if nick in self.owners:
            self.owners.remove(nick)

    def __updateUser(self, IRCparams):
        """Apply a MODE change (+/- o, a, h, q) to the mod/owner rosters."""
        if len(IRCparams) < 3:
            return
        modeset = IRCparams[1]
        nick = IRCparams[2].strip()
        adding = modeset.startswith('+')
        removing = modeset.startswith('-')
        if not adding and not removing:
            return
        if 'o' in modeset or 'a' in modeset or 'h' in modeset:
            if adding and nick not in self.modlist:
                self.modlist.append(nick)
            elif removing and nick in self.modlist:
                self.modlist.remove(nick)
        if 'q' in modeset:
            # Bug fix: checked independently of o/a/h; the original's 'elif'
            # skipped owner removal on a combined change such as '-oq'.
            if adding and nick not in self.owners:
                self.owners.append(nick)
            elif removing and nick in self.owners:
                self.owners.remove(nick)

    def __setUserList(self, message):
        """Seed the rosters from a 353 (name-list reply) message body."""
        for nick in message.body.split(' '):
            self.__appendUser(nick)
|
test_bz2.py | from test import support
from test.support import bigmemtest, _4G
import unittest
from io import BytesIO, DEFAULT_BUFFER_SIZE
import os
import pickle
import glob
import pathlib
import random
import shutil
import subprocess
import threading
from test.support import unlink
import _compression
import sys
# Skip tests if the bz2 module doesn't exist.
bz2 = support.import_module('bz2')
from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor
# Lazily-discovered flag: None until the first call probes for bunzip2.
has_cmdline_bunzip2 = None

def ext_decompress(data):
    """Decompress *data*, preferring an external bunzip2 binary when present.

    Using the command-line tool (when installed) gives an independent check
    of the library's output; otherwise fall back to bz2.decompress.
    """
    global has_cmdline_bunzip2
    if has_cmdline_bunzip2 is None:
        has_cmdline_bunzip2 = shutil.which('bunzip2') is not None
    if not has_cmdline_bunzip2:
        return bz2.decompress(data)
    return subprocess.check_output(['bunzip2'], input=data)
class BaseTest(unittest.TestCase):
    "Base for other testcases."
    # One /etc/passwd-style record per line; concatenated into TEXT below so
    # line-oriented tests (readline/readlines/iteration) have known contents.
    TEXT_LINES = [
        b'root:x:0:0:root:/root:/bin/bash\n',
        b'bin:x:1:1:bin:/bin:\n',
        b'daemon:x:2:2:daemon:/sbin:\n',
        b'adm:x:3:4:adm:/var/adm:\n',
        b'lp:x:4:7:lp:/var/spool/lpd:\n',
        b'sync:x:5:0:sync:/sbin:/bin/sync\n',
        b'shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\n',
        b'halt:x:7:0:halt:/sbin:/sbin/halt\n',
        b'mail:x:8:12:mail:/var/spool/mail:\n',
        b'news:x:9:13:news:/var/spool/news:\n',
        b'uucp:x:10:14:uucp:/var/spool/uucp:\n',
        b'operator:x:11:0:operator:/root:\n',
        b'games:x:12:100:games:/usr/games:\n',
        b'gopher:x:13:30:gopher:/usr/lib/gopher-data:\n',
        b'ftp:x:14:50:FTP User:/var/ftp:/bin/bash\n',
        b'nobody:x:65534:65534:Nobody:/home:\n',
        b'postfix:x:100:101:postfix:/var/spool/postfix:\n',
        b'niemeyer:x:500:500::/home/niemeyer:/bin/bash\n',
        b'postgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\n',
        b'mysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\n',
        b'www:x:103:104::/var/www:/bin/false\n',
    ]
    TEXT = b''.join(TEXT_LINES)
    # DATA is TEXT pre-compressed as a single bzip2 stream (a fixed byte
    # literal, so decompression tests do not depend on compression working).
    DATA = b'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
    # A valid bzip2 stream whose decompressed content is empty.
    EMPTY_DATA = b'BZh9\x17rE8P\x90\x00\x00\x00\x00'
    # Deliberately invalid input for error-path tests.
    BAD_DATA = b'this is not a valid bzip2 file'
    # Some tests need more than one block of uncompressed data. Since one block
    # is at least 100,000 bytes, we gather some data dynamically and compress it.
    # Note that this assumes that compression works correctly, so we cannot
    # simply use the bigger test data for all tests.
    test_size = 0
    BIG_TEXT = bytearray(128*1024)
    # Fill BIG_TEXT from the .py files next to this test (runs once, at class
    # creation time); any unfilled tail stays zero bytes.
    for fname in glob.glob(os.path.join(os.path.dirname(__file__), '*.py')):
        with open(fname, 'rb') as fh:
            test_size += fh.readinto(memoryview(BIG_TEXT)[test_size:])
        if test_size > 128*1024:
            break
    BIG_DATA = bz2.compress(BIG_TEXT, compresslevel=1)
    def setUp(self):
        # Every test operates on a fresh scratch file path.
        self.filename = support.TESTFN
    def tearDown(self):
        # Remove the scratch file if the test created it.
        if os.path.isfile(self.filename):
            os.unlink(self.filename)
class BZ2FileTest(BaseTest):
"Test the BZ2File class."
def createTempFile(self, streams=1, suffix=b""):
with open(self.filename, "wb") as f:
f.write(self.DATA * streams)
f.write(suffix)
def testBadArgs(self):
self.assertRaises(TypeError, BZ2File, 123.456)
self.assertRaises(ValueError, BZ2File, os.devnull, "z")
self.assertRaises(ValueError, BZ2File, os.devnull, "rx")
self.assertRaises(ValueError, BZ2File, os.devnull, "rbt")
self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=0)
self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=10)
def testRead(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT)
def testReadBadFile(self):
self.createTempFile(streams=0, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertRaises(OSError, bz2f.read)
def testReadMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testReadMonkeyMultiStream(self):
# Test BZ2File.read() on a multi-stream archive where a stream
# boundary coincides with the end of the raw read buffer.
buffer_size = _compression.BUFFER_SIZE
_compression.BUFFER_SIZE = len(self.DATA)
try:
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT * 5)
finally:
_compression.BUFFER_SIZE = buffer_size
def testReadTrailingJunk(self):
self.createTempFile(suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT)
def testReadMultiStreamTrailingJunk(self):
self.createTempFile(streams=5, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testRead0(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(0), b"")
def testReadChunk10(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT)
def testReadChunk10MultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT * 5)
def testRead100(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(100), self.TEXT[:100])
def testPeek(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testReadInto(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
n = 128
b = bytearray(n)
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b, self.TEXT[:n])
n = len(self.TEXT) - n
b = bytearray(len(self.TEXT))
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b[:n], self.TEXT[-n:])
def testReadLine(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES:
self.assertEqual(bz2f.readline(), line)
def testReadLineMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES * 5:
self.assertEqual(bz2f.readline(), line)
def testReadLines(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES)
def testReadLinesMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES * 5)
def testIterator(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES)
def testIteratorMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES * 5)
def testClosedIteratorDeadlock(self):
# Issue #3309: Iteration on a closed BZ2File should release the lock.
self.createTempFile()
bz2f = BZ2File(self.filename)
bz2f.close()
self.assertRaises(ValueError, next, bz2f)
# This call will deadlock if the above call failed to release the lock.
self.assertRaises(ValueError, bz2f.readlines)
def testWrite(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteChunks10(self):
with BZ2File(self.filename, "w") as bz2f:
n = 0
while True:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
bz2f.write(str)
n += 1
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteNonDefaultCompressLevel(self):
expected = bz2.compress(self.TEXT, compresslevel=5)
with BZ2File(self.filename, "w", compresslevel=5) as bz2f:
bz2f.write(self.TEXT)
with open(self.filename, "rb") as f:
self.assertEqual(f.read(), expected)
def testWriteLines(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.writelines)
bz2f.writelines(self.TEXT_LINES)
# Issue #1535500: Calling writelines() on a closed BZ2File
# should raise an exception.
self.assertRaises(ValueError, bz2f.writelines, ["a"])
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteMethodsOnReadOnlyFile(self):
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(b"abc")
with BZ2File(self.filename, "r") as bz2f:
self.assertRaises(OSError, bz2f.write, b"a")
self.assertRaises(OSError, bz2f.writelines, [b"a"])
def testAppend(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with BZ2File(self.filename, "a") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT * 2)
def testSeekForward(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekForwardAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(len(self.TEXT) + 150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwards(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def testSeekBackwardsAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
readto = len(self.TEXT) + 100
while readto > 0:
readto -= len(bz2f.read(readto))
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[100-150:] + self.TEXT)
def testSeekBackwardsFromEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150, 2)
self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:])
def testSeekBackwardsFromEndAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-1000, 2)
self.assertEqual(bz2f.read(), (self.TEXT * 2)[-1000:])
def testSeekPostEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwice(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwiceMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPreStart(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT)
def testSeekPreStartMultiStream(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT * 2)
def testFileno(self):
self.createTempFile()
with open(self.filename, 'rb') as rawf:
bz2f = BZ2File(rawf)
try:
self.assertEqual(bz2f.fileno(), rawf.fileno())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.fileno)
def testSeekable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.seekable())
bz2f.read()
self.assertTrue(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
src = BytesIO(self.DATA)
src.seekable = lambda: False
bz2f = BZ2File(src)
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
def testReadable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.readable())
bz2f.read()
self.assertTrue(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
def testWritable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertFalse(bz2f.writable())
bz2f.read()
self.assertFalse(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertTrue(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
def testOpenDel(self):
self.createTempFile()
for i in range(10000):
o = BZ2File(self.filename)
del o
def testOpenNonexistent(self):
self.assertRaises(OSError, BZ2File, "/non/existent")
def testReadlinesNoNewline(self):
# Issue #1191043: readlines() fails on a file containing no newline.
data = b'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t'
with open(self.filename, "wb") as f:
f.write(data)
with BZ2File(self.filename) as bz2f:
lines = bz2f.readlines()
self.assertEqual(lines, [b'Test'])
with BZ2File(self.filename) as bz2f:
xlines = list(bz2f.readlines())
self.assertEqual(xlines, [b'Test'])
def testContextProtocol(self):
f = None
with BZ2File(self.filename, "wb") as f:
f.write(b"xxx")
f = BZ2File(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with BZ2File(self.filename, "wb") as f:
1/0
except ZeroDivisionError:
pass
else:
self.fail("1/0 didn't raise an exception")
def testThreading(self):
# Issue #7205: Using a BZ2File from several threads shouldn't deadlock.
data = b"1" * 2**20
nthreads = 10
with BZ2File(self.filename, 'wb') as f:
def comp():
for i in range(5):
f.write(data)
threads = [threading.Thread(target=comp) for i in range(nthreads)]
with support.start_threads(threads):
pass
def testMixedIterationAndReads(self):
self.createTempFile()
linelen = len(self.TEXT_LINES[0])
halflen = linelen // 2
with BZ2File(self.filename) as bz2f:
bz2f.read(halflen)
self.assertEqual(next(bz2f), self.TEXT_LINES[0][halflen:])
self.assertEqual(bz2f.read(), self.TEXT[linelen:])
with BZ2File(self.filename) as bz2f:
bz2f.readline()
self.assertEqual(next(bz2f), self.TEXT_LINES[1])
self.assertEqual(bz2f.readline(), self.TEXT_LINES[2])
with BZ2File(self.filename) as bz2f:
bz2f.readlines()
self.assertRaises(StopIteration, next, bz2f)
self.assertEqual(bz2f.readlines(), [])
def testMultiStreamOrdering(self):
# Test the ordering of streams when reading a multi-stream archive.
data1 = b"foo" * 1000
data2 = b"bar" * 1000
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(data1)
with BZ2File(self.filename, "a") as bz2f:
bz2f.write(data2)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), data1 + data2)
def testOpenBytesFilename(self):
    # BZ2File must accept a bytes filename as well as str.
    str_filename = self.filename
    try:
        bytes_filename = str_filename.encode("ascii")
    except UnicodeEncodeError:
        self.skipTest("Temporary file name needs to be ASCII")
    with BZ2File(bytes_filename, "wb") as f:
        f.write(self.DATA)
    with BZ2File(bytes_filename, "rb") as f:
        self.assertEqual(f.read(), self.DATA)
    # Sanity check that we are actually operating on the right file.
    with BZ2File(str_filename, "rb") as f:
        self.assertEqual(f.read(), self.DATA)
def testOpenPathLikeFilename(self):
    # BZ2File must accept os.PathLike objects, not just plain strings.
    path = pathlib.Path(self.filename)
    with BZ2File(path, "wb") as writer:
        writer.write(self.DATA)
    with BZ2File(path, "rb") as reader:
        self.assertEqual(reader.read(), self.DATA)
def testDecompressLimited(self):
    """Decompressed data buffering should be limited"""
    # A tiny compressed "bomb" expands to ~2 MB; reading one byte must
    # not decompress far beyond the buffered reader's chunk size.
    bomb = bz2.compress(b'\0' * int(2e6), compresslevel=9)
    self.assertLess(len(bomb), _compression.BUFFER_SIZE)
    decomp = BZ2File(BytesIO(bomb))
    self.assertEqual(decomp.read(1), b'\0')
    max_decomp = 1 + DEFAULT_BUFFER_SIZE
    # NOTE: reaches into the private _buffer to observe how much raw
    # compressed input was actually consumed.
    self.assertLessEqual(decomp._buffer.raw.tell(), max_decomp,
                         "Excessive amount of data was decompressed")
# Tests for a BZ2File wrapping another file object:

def testReadBytesIO(self):
    # Reading through BZ2File layered over BytesIO must yield the plain
    # text and must not close the underlying buffer.
    with BytesIO(self.DATA) as raw:
        with BZ2File(raw) as compressed:
            self.assertRaises(TypeError, compressed.read, float())
            self.assertEqual(compressed.read(), self.TEXT)
        self.assertFalse(raw.closed)
def testPeekBytesIO(self):
    # peek() must return some upcoming data without advancing the
    # logical read position.
    with BytesIO(self.DATA) as raw:
        with BZ2File(raw) as compressed:
            previewed = compressed.peek()
            self.assertNotEqual(len(previewed), 0)
            self.assertTrue(self.TEXT.startswith(previewed))
            self.assertEqual(compressed.read(), self.TEXT)
def testWriteBytesIO(self):
    # Writing through BZ2File into BytesIO must produce valid bz2 data
    # and leave the underlying buffer open afterwards.
    with BytesIO() as raw:
        with BZ2File(raw, "w") as compressed:
            self.assertRaises(TypeError, compressed.write)
            compressed.write(self.TEXT)
        self.assertEqual(ext_decompress(raw.getvalue()), self.TEXT)
        self.assertFalse(raw.closed)
def testSeekForwardBytesIO(self):
    # A forward seek on a file-object-backed BZ2File skips decompressed
    # bytes.
    with BytesIO(self.DATA) as raw:
        with BZ2File(raw) as compressed:
            self.assertRaises(TypeError, compressed.seek)
            compressed.seek(150)
            self.assertEqual(compressed.read(), self.TEXT[150:])
def testSeekBackwardsBytesIO(self):
    # A relative backward seek (whence=1) must rewind within the
    # decompressed stream.
    with BytesIO(self.DATA) as raw:
        with BZ2File(raw) as compressed:
            compressed.read(500)
            compressed.seek(-150, 1)
            self.assertEqual(compressed.read(), self.TEXT[500 - 150:])
def test_read_truncated(self):
    # Truncated input must raise EOFError once the valid data runs out.
    # Drop the eos_magic field (6 bytes) and CRC (4 bytes).
    truncated = self.DATA[:-10]
    with BZ2File(BytesIO(truncated)) as f:
        self.assertRaises(EOFError, f.read)
    with BZ2File(BytesIO(truncated)) as f:
        # Everything before the truncation point is still readable.
        self.assertEqual(f.read(len(self.TEXT)), self.TEXT)
        self.assertRaises(EOFError, f.read, 1)
    # Incomplete 4-byte file header, and block header of at least 146 bits.
    for i in range(22):
        with BZ2File(BytesIO(truncated[:i])) as f:
            self.assertRaises(EOFError, f.read, 1)
class BZ2CompressorTest(BaseTest):
    """Tests for the incremental BZ2Compressor object."""

    def testCompress(self):
        # One-shot compress + flush must round-trip through an external
        # decompressor.
        bz2c = BZ2Compressor()
        self.assertRaises(TypeError, bz2c.compress)
        data = bz2c.compress(self.TEXT)
        data += bz2c.flush()
        self.assertEqual(ext_decompress(data), self.TEXT)

    def testCompressEmptyString(self):
        bz2c = BZ2Compressor()
        data = bz2c.compress(b'')
        data += bz2c.flush()
        self.assertEqual(data, self.EMPTY_DATA)

    def testCompressChunks10(self):
        # Feeding the input in 10-byte chunks must produce output that
        # decompresses back to the full text.
        bz2c = BZ2Compressor()
        n = 0
        data = b''
        while True:
            str = self.TEXT[n*10:(n+1)*10]
            if not str:
                break
            data += bz2c.compress(str)
            n += 1
        data += bz2c.flush()
        self.assertEqual(ext_decompress(data), self.TEXT)

    @bigmemtest(size=_4G + 100, memuse=2)
    def testCompress4G(self, size):
        # "Test BZ2Compressor.compress()/flush() with >4GiB input"
        bz2c = BZ2Compressor()
        data = b"x" * size
        try:
            compressed = bz2c.compress(data)
            compressed += bz2c.flush()
        finally:
            data = None  # Release memory
        data = bz2.decompress(compressed)
        try:
            self.assertEqual(len(data), size)
            self.assertEqual(len(data.strip(b"x")), 0)
        finally:
            data = None

    def testPickle(self):
        # Compressor objects hold C-level state and must refuse pickling
        # under every protocol.
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.assertRaises(TypeError):
                pickle.dumps(BZ2Compressor(), proto)
class BZ2DecompressorTest(BaseTest):
    """Tests for the incremental BZ2Decompressor object."""

    def test_Constructor(self):
        self.assertRaises(TypeError, BZ2Decompressor, 42)

    def testDecompress(self):
        bz2d = BZ2Decompressor()
        self.assertRaises(TypeError, bz2d.decompress)
        text = bz2d.decompress(self.DATA)
        self.assertEqual(text, self.TEXT)

    def testDecompressChunks10(self):
        # Feed the compressed stream in 10-byte slices.
        bz2d = BZ2Decompressor()
        text = b''
        n = 0
        while True:
            str = self.DATA[n*10:(n+1)*10]
            if not str:
                break
            text += bz2d.decompress(str)
            n += 1
        self.assertEqual(text, self.TEXT)

    def testDecompressUnusedData(self):
        # Bytes past the end of stream are exposed via unused_data.
        bz2d = BZ2Decompressor()
        unused_data = b"this is unused data"
        text = bz2d.decompress(self.DATA+unused_data)
        self.assertEqual(text, self.TEXT)
        self.assertEqual(bz2d.unused_data, unused_data)

    def testEOFError(self):
        # Feeding any data after end-of-stream raises EOFError.
        bz2d = BZ2Decompressor()
        text = bz2d.decompress(self.DATA)
        self.assertRaises(EOFError, bz2d.decompress, b"anything")
        self.assertRaises(EOFError, bz2d.decompress, b"")

    @bigmemtest(size=_4G + 100, memuse=3.3)
    def testDecompress4G(self, size):
        # "Test BZ2Decompressor.decompress() with >4GiB input"
        blocksize = 10 * 1024 * 1024
        block = random.getrandbits(blocksize * 8).to_bytes(blocksize, 'little')
        try:
            data = block * (size // blocksize + 1)
            compressed = bz2.compress(data)
            bz2d = BZ2Decompressor()
            decompressed = bz2d.decompress(compressed)
            self.assertTrue(decompressed == data)
        finally:
            # Release the multi-GiB buffers promptly.
            data = None
            compressed = None
            decompressed = None

    def testPickle(self):
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.assertRaises(TypeError):
                pickle.dumps(BZ2Decompressor(), proto)

    def testDecompressorChunksMaxsize(self):
        # max_length must cap each decompress() result; buffered output
        # is drained by later calls even without new input.
        bzd = BZ2Decompressor()
        max_length = 100
        out = []

        # Feed some input
        len_ = len(self.BIG_DATA) - 64
        out.append(bzd.decompress(self.BIG_DATA[:len_],
                                  max_length=max_length))
        self.assertFalse(bzd.needs_input)
        self.assertEqual(len(out[-1]), max_length)

        # Retrieve more data without providing more input
        out.append(bzd.decompress(b'', max_length=max_length))
        self.assertFalse(bzd.needs_input)
        self.assertEqual(len(out[-1]), max_length)

        # Retrieve more data while providing more input
        out.append(bzd.decompress(self.BIG_DATA[len_:],
                                  max_length=max_length))
        self.assertLessEqual(len(out[-1]), max_length)

        # Retrieve remaining uncompressed data
        while not bzd.eof:
            out.append(bzd.decompress(b'', max_length=max_length))
            self.assertLessEqual(len(out[-1]), max_length)

        out = b"".join(out)
        self.assertEqual(out, self.BIG_TEXT)
        self.assertEqual(bzd.unused_data, b"")

    def test_decompressor_inputbuf_1(self):
        # Test reusing input buffer after moving existing
        # contents to beginning
        bzd = BZ2Decompressor()
        out = []

        # Create input buffer and fill it
        self.assertEqual(bzd.decompress(self.DATA[:100],
                                        max_length=0), b'')

        # Retrieve some results, freeing capacity at beginning
        # of input buffer
        out.append(bzd.decompress(b'', 2))

        # Add more data that fits into input buffer after
        # moving existing data to beginning
        out.append(bzd.decompress(self.DATA[100:105], 15))

        # Decompress rest of data
        out.append(bzd.decompress(self.DATA[105:]))
        self.assertEqual(b''.join(out), self.TEXT)

    def test_decompressor_inputbuf_2(self):
        # Test reusing input buffer by appending data at the
        # end right away
        bzd = BZ2Decompressor()
        out = []

        # Create input buffer and empty it
        self.assertEqual(bzd.decompress(self.DATA[:200],
                                        max_length=0), b'')
        out.append(bzd.decompress(b''))

        # Fill buffer with new data
        out.append(bzd.decompress(self.DATA[200:280], 2))

        # Append some more data, not enough to require resize
        out.append(bzd.decompress(self.DATA[280:300], 2))

        # Decompress rest of data
        out.append(bzd.decompress(self.DATA[300:]))
        self.assertEqual(b''.join(out), self.TEXT)

    def test_decompressor_inputbuf_3(self):
        # Test reusing input buffer after extending it
        bzd = BZ2Decompressor()
        out = []

        # Create almost full input buffer
        out.append(bzd.decompress(self.DATA[:200], 5))

        # Add even more data to it, requiring resize
        out.append(bzd.decompress(self.DATA[200:300], 5))

        # Decompress rest of data
        out.append(bzd.decompress(self.DATA[300:]))
        self.assertEqual(b''.join(out), self.TEXT)

    def test_failure(self):
        bzd = BZ2Decompressor()
        self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)
        # Previously, a second call could crash due to internal inconsistency
        self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)

    @support.refcount_test
    def test_refleaks_in___init__(self):
        # Re-running __init__ must not leak references (needs a debug
        # build exposing sys.gettotalrefcount).
        gettotalrefcount = support.get_attribute(sys, 'gettotalrefcount')
        bzd = BZ2Decompressor()
        refs_before = gettotalrefcount()
        for i in range(100):
            bzd.__init__()
        self.assertAlmostEqual(gettotalrefcount() - refs_before, 0, delta=10)
class CompressDecompressTest(BaseTest):
    """Tests for the one-shot bz2.compress()/bz2.decompress() functions."""

    def testCompress(self):
        data = bz2.compress(self.TEXT)
        self.assertEqual(ext_decompress(data), self.TEXT)

    def testCompressEmptyString(self):
        text = bz2.compress(b'')
        self.assertEqual(text, self.EMPTY_DATA)

    def testDecompress(self):
        text = bz2.decompress(self.DATA)
        self.assertEqual(text, self.TEXT)

    def testDecompressEmpty(self):
        text = bz2.decompress(b"")
        self.assertEqual(text, b"")

    def testDecompressToEmptyString(self):
        text = bz2.decompress(self.EMPTY_DATA)
        self.assertEqual(text, b'')

    def testDecompressIncomplete(self):
        # A truncated stream raises ValueError from the one-shot API.
        self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])

    def testDecompressBadData(self):
        self.assertRaises(OSError, bz2.decompress, self.BAD_DATA)

    def testDecompressMultiStream(self):
        # Concatenated streams are decompressed back-to-back.
        text = bz2.decompress(self.DATA * 5)
        self.assertEqual(text, self.TEXT * 5)

    def testDecompressTrailingJunk(self):
        # Undecodable trailing bytes after a valid stream are ignored.
        text = bz2.decompress(self.DATA + self.BAD_DATA)
        self.assertEqual(text, self.TEXT)

    def testDecompressMultiStreamTrailingJunk(self):
        text = bz2.decompress(self.DATA * 5 + self.BAD_DATA)
        self.assertEqual(text, self.TEXT * 5)
class OpenTest(BaseTest):
    "Test the open function."

    def open(self, *args, **kwargs):
        # Indirection point so a subclass could exercise another wrapper.
        return bz2.open(*args, **kwargs)

    def test_binary_modes(self):
        # "wb" truncates, "xb" requires the file to be absent, "ab" appends.
        for mode in ("wb", "xb"):
            if mode == "xb":
                unlink(self.filename)
            with self.open(self.filename, mode) as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read())
                self.assertEqual(file_data, self.TEXT)
            with self.open(self.filename, "rb") as f:
                self.assertEqual(f.read(), self.TEXT)
            with self.open(self.filename, "ab") as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read())
                self.assertEqual(file_data, self.TEXT * 2)

    def test_implicit_binary_modes(self):
        # Test implicit binary modes (no "b" or "t" in mode string).
        for mode in ("w", "x"):
            if mode == "x":
                unlink(self.filename)
            with self.open(self.filename, mode) as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read())
                self.assertEqual(file_data, self.TEXT)
            with self.open(self.filename, "r") as f:
                self.assertEqual(f.read(), self.TEXT)
            with self.open(self.filename, "a") as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read())
                self.assertEqual(file_data, self.TEXT * 2)

    def test_text_modes(self):
        # Text modes encode to ASCII and translate "\n" to os.linesep.
        text = self.TEXT.decode("ascii")
        text_native_eol = text.replace("\n", os.linesep)
        for mode in ("wt", "xt"):
            if mode == "xt":
                unlink(self.filename)
            with self.open(self.filename, mode) as f:
                f.write(text)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read()).decode("ascii")
                self.assertEqual(file_data, text_native_eol)
            with self.open(self.filename, "rt") as f:
                self.assertEqual(f.read(), text)
            with self.open(self.filename, "at") as f:
                f.write(text)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read()).decode("ascii")
                self.assertEqual(file_data, text_native_eol * 2)

    def test_x_mode(self):
        # Exclusive-creation modes must fail when the file already exists.
        for mode in ("x", "xb", "xt"):
            unlink(self.filename)
            with self.open(self.filename, mode) as f:
                pass
            with self.assertRaises(FileExistsError):
                with self.open(self.filename, mode) as f:
                    pass

    def test_fileobj(self):
        # bz2.open also accepts an existing file object instead of a name.
        with self.open(BytesIO(self.DATA), "r") as f:
            self.assertEqual(f.read(), self.TEXT)
        with self.open(BytesIO(self.DATA), "rb") as f:
            self.assertEqual(f.read(), self.TEXT)
        text = self.TEXT.decode("ascii")
        with self.open(BytesIO(self.DATA), "rt") as f:
            self.assertEqual(f.read(), text)

    def test_bad_params(self):
        # Test invalid parameter combinations.
        self.assertRaises(ValueError,
                          self.open, self.filename, "wbt")
        self.assertRaises(ValueError,
                          self.open, self.filename, "xbt")
        self.assertRaises(ValueError,
                          self.open, self.filename, "rb", encoding="utf-8")
        self.assertRaises(ValueError,
                          self.open, self.filename, "rb", errors="ignore")
        self.assertRaises(ValueError,
                          self.open, self.filename, "rb", newline="\n")

    def test_encoding(self):
        # Test non-default encoding.
        text = self.TEXT.decode("ascii")
        text_native_eol = text.replace("\n", os.linesep)
        with self.open(self.filename, "wt", encoding="utf-16-le") as f:
            f.write(text)
        with open(self.filename, "rb") as f:
            file_data = ext_decompress(f.read()).decode("utf-16-le")
            self.assertEqual(file_data, text_native_eol)
        with self.open(self.filename, "rt", encoding="utf-16-le") as f:
            self.assertEqual(f.read(), text)

    def test_encoding_error_handler(self):
        # Test with non-default encoding error handler.
        with self.open(self.filename, "wb") as f:
            f.write(b"foo\xffbar")
        with self.open(self.filename, "rt", encoding="ascii", errors="ignore") \
                as f:
            self.assertEqual(f.read(), "foobar")

    def test_newline(self):
        # Test with explicit newline (universal newline mode disabled).
        text = self.TEXT.decode("ascii")
        with self.open(self.filename, "wt", newline="\n") as f:
            f.write(text)
        with self.open(self.filename, "rt", newline="\r") as f:
            self.assertEqual(f.readlines(), [text])
def test_main():
    # Aggregate runner used by CPython's regression-test framework.
    support.run_unittest(
        BZ2FileTest,
        BZ2CompressorTest,
        BZ2DecompressorTest,
        CompressDecompressTest,
        OpenTest,
    )
    # Ensure no child processes/threads outlive the test run.
    support.reap_children()

if __name__ == '__main__':
    test_main()
|
color_picker.py | import math
import time
import win32api
import win32gui
from threading import Thread
from PySide2.QtCore import QEvent, Signal
from PySide2.QtWidgets import QHBoxLayout, QLabel, QVBoxLayout, QWidget
from desktopmagic.screengrab_win32 import getRectAsImage
class ColorModel:
    """Plain value object holding one pixel's RGB and HSV components."""

    def __init__(self, r, g, b, h, s, v):
        self.red = r
        self.green = g
        self.blue = b
        self.hue = h
        self.saturation = s
        self.value = v

    def update(self, type, value):
        # Assign a component by attribute name, e.g. update("red", 255).
        setattr(self, type, value)

    def get_by_type(self, type):
        # Read a component back by attribute name.
        return getattr(self, type)
class ColorSys:
    """Conversions between 8-bit RGB and (degrees, 0-1, 0-1) HSV."""

    @staticmethod
    def hsv_to_rgb(h, s, v):
        # Standard sextant-based HSV -> RGB conversion; returns 0-255 ints.
        h, s, v = float(h), float(s), float(v)
        sextant = h / 60.0
        base = math.floor(sextant)
        hi = int(base) % 6
        frac = sextant - base
        p = v * (1 - s)
        q = v * (1 - frac * s)
        t = v * (1 - (1 - frac) * s)
        # One (r, g, b) channel ordering per sextant of the hue circle.
        r, g, b = ((v, t, p), (q, v, p), (p, v, t),
                   (p, q, v), (t, p, v), (v, p, q))[hi]
        return int(r * 255), int(g * 255), int(b * 255)

    @staticmethod
    def rgb_to_hsv(r, g, b):
        # Normalize channels to 0-1 and derive hue from the dominant one.
        r, g, b = r / 255.0, g / 255.0, b / 255.0
        high = max(r, g, b)
        low = min(r, g, b)
        spread = high - low
        if high == low:
            h = 0  # achromatic (gray): hue is undefined, use 0
        elif high == r:
            h = (60 * ((g - b) / spread) + 360) % 360
        elif high == g:
            h = (60 * ((b - r) / spread) + 120) % 360
        else:  # high == b
            h = (60 * ((r - g) / spread) + 240) % 360
        s = 0 if high == 0 else spread / high
        return h, s, high
class ColorPicker:
    """Polls the mouse via win32 APIs and emits pixel colors around it.

    Runs on a worker thread (see ColorMouseWidget.get_color) and talks to
    the GUI exclusively through the Qt signals passed to __init__.
    """

    def __init__(self, color_signal, mouse_signal, close_signal, size):
        # `size` is the side length (pixels) of the square sampled at the
        # cursor; the signals are owned by the widget that created us.
        self.COLOR_SIGNAL = color_signal
        self.MOUSE_SIGNAL = mouse_signal
        self.CLOSE_SIGNAL = close_signal
        self.size = size

    def screen_shot(self, rect=None):
        # Grab the (left, top, right, bottom) screen rect as an image;
        # full screen when rect is None.
        return getRectAsImage(rect)

    def get_rgb(self, im, x, y):
        # Sample one pixel and package both RGB and HSV in a ColorModel.
        rgb_im = im.convert('RGB')
        r, g, b = rgb_im.getpixel((x, y))
        h, s, v = ColorSys.rgb_to_hsv(r, g, b)
        return ColorModel(r, g, b, h, s, v)

    def get_position(self):
        # Current cursor position in screen coordinates.
        flags, hcursor, x_y = win32gui.GetCursorInfo()
        return x_y[0], x_y[1]

    def get_screen_rgb(self):
        # Emit the cursor position, then the size*size grid of colors at
        # and to the lower-right of the cursor, in row-major order.
        x, y = self.get_position()
        self.MOUSE_SIGNAL.emit(x, y)
        rect = (x, y, x + self.size, y + self.size)
        im = self.screen_shot(rect)
        color_selection = []
        for y in range(0, self.size):
            for x in range(0, self.size):
                color_selection.append(self.get_rgb(im, x, y))
        self.COLOR_SIGNAL.emit(color_selection)

    def start(self):
        # Poll mouse buttons until one is clicked; left = confirm (True),
        # right = cancel (False), then notify the widget via CLOSE_SIGNAL.
        state_left = win32api.GetKeyState(0x01)  # Left button down = 0 or 1. Button up = -127 or -128
        state_right = win32api.GetKeyState(0x02)  # Right button down = 0 or 1. Button up = -127 or -128
        state = None
        while True:
            a = win32api.GetKeyState(0x01)
            b = win32api.GetKeyState(0x02)
            if state_right == b and a == state_left:
                # No button change: keep streaming colors under the cursor.
                self.get_screen_rgb()
            else:
                # NOTE(review): if a button state changed to a value other
                # than -127/-128, we break with state still None and emit
                # CLOSE_SIGNAL(None) — confirm against GetKeyState docs.
                if a == -127 or a == -128:
                    self.get_screen_rgb()
                    state = True
                elif b == -127 or b == -128:
                    state = False
                break
            time.sleep(0.005)
        self.CLOSE_SIGNAL.emit(state)
        # NOTE(review): purpose of this trailing 5 s sleep is unclear —
        # presumably keeps the daemon thread alive while the widget closes.
        time.sleep(5)
class ColorMouseWidget(QWidget):
    """Always-on-top widget that follows the cursor and previews the
    size*size pixel grid beneath it while the user picks a color.
    """

    # Signals emitted from the background ColorPicker thread.
    COLOR_SIGNAL = Signal(list)        # list of ColorModel for the grid
    CLOSE_SIGNAL = Signal(bool)        # True = picked, False = canceled
    MOUSE_POSITION = Signal(int, int)  # cursor x, y

    def __init__(self, size=5, paint_signal=None):
        QWidget.__init__(self)
        self.setFixedSize(75, 75)
        # NOTE(review): Qt is imported at the bottom of this module; the
        # name resolves at call time, so this works only if the module
        # finished importing before a widget is constructed.
        self.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
        self.label_list = []
        self.setLayout(QVBoxLayout())
        self.layout().setSpacing(0)
        self.layout().setMargin(0)
        self.setup_ui(size)
        self.PAINT_FATHER_SIGNAL = paint_signal
        self.setup_signals()
        self.setMouseTracking(True)
        # Kick off the polling thread immediately.
        self.get_color(size)
        # self.setColor()

    def setup_ui(self, size):
        # Build a size x size grid of QLabel cells, one per sampled pixel.
        optionContentLayout = QHBoxLayout()
        optionContentLayout.setMargin(0)
        optionContentLayout.setSpacing(0)
        colorLabelLayout = QVBoxLayout()
        colorLabelLayout.setMargin(0)
        colorLabelLayout.setSpacing(0)
        for y in range(size):
            layout = QHBoxLayout()
            layout.setMargin(0)
            layout.setSpacing(0)
            for x in range(size):
                self.label_list.append(QLabel(''))
                layout.addWidget(self.label_list[len(self.label_list) - 1])
            colorLabelLayout.addLayout(layout)
        optionContentLayout.addLayout(colorLabelLayout)
        self.layout().addLayout(optionContentLayout)

    def reset(self):
        # NOTE(review): this widget defines no `slider` or `default_value`
        # attribute — calling reset() raises AttributeError; looks like a
        # leftover copied from another widget. Confirm before removing.
        self.slider.setValue(int(self.default_value))

    def setup_signals(self):
        # Wire the picker thread's signals to GUI-thread slots.
        self.MOUSE_POSITION.connect(self.location_on_the_screen)
        self.COLOR_SIGNAL.connect(self.colorize)
        self.CLOSE_SIGNAL.connect(self.pick_color)

    def eventFilter(self, source, event):
        # Swallow wheel events so scrolling doesn't disturb the preview.
        if event.type() == QEvent.Wheel:
            return True
        return super(ColorMouseWidget, self).eventFilter(source, event)

    def colorize(self, colors):
        # Paint each grid cell; the center cell gets a white border so
        # the user can see exactly which pixel will be picked.
        i = 0
        self.colors = colors
        for color in colors:
            self.label_list[i].setStyleSheet(
                f'margin:-1px;background:rgb({color.red},{color.green},{color.blue});{"border:3px solid white;" if int((len(colors)-1)/2) == i else "border:2px solid black"}')
            i += 1

    def pick_color(self, btn_clicked):
        # Called when the picker thread finishes; True means a color was
        # chosen, False means the user canceled.
        if btn_clicked:
            # return colors for the father widget
            # self.PAINT_FATHER_SIGNAL.emit(self.colors)
            print('Color was selected with success.')
        else:
            print('Color picker was canceled')
        self.close()

    def get_color(self, size):
        # Run the win32 polling loop on a daemon thread so the GUI
        # event loop stays responsive.
        Thread(target=lambda: ColorPicker(self.COLOR_SIGNAL, self.MOUSE_POSITION, self.CLOSE_SIGNAL, size).start(),
               daemon=True).start()

    def location_on_the_screen(self, x, y):
        # Keep the preview window just below/right of the cursor.
        self.move(x + 10, y + 10)
# NOTE(review): these imports live at the bottom of the module; they still
# execute at import time (before any widget is constructed), so the Qt
# reference inside ColorMouseWidget.__init__ resolves — but moving them to
# the top would be the conventional fix.
import sys
from PySide2.QtCore import Qt
from PySide2.QtWidgets import QApplication

if __name__ == '__main__':
    # Minimal manual demo: show the picker widget until a click closes it.
    app = QApplication(sys.argv)
    dialog_1 = ColorMouseWidget()
    dialog_1.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
    dialog_1.show()
    app.exec_()
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    # Minimal JSON-RPC 1.1 client over HTTP basic auth.
    # Python 2 code throughout this file (httplib, print statements).
    OBJID = 1

    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # strict=False, 30 second timeout.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC call; return result, server error dict, or None."""
        # NOTE(review): `self.OBJID += 1` creates an instance attribute
        # shadowing the class counter, so request ids are per-connection.
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
                          { 'Authorization' : self.authhdr,
                            'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            # Server-reported errors are returned to the caller, not raised.
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        # With data=None this fetches new work; with a solution list it
        # submits a share.
        return self.rpc('getwork', data)
def uint32(x):
    # Truncate to the low 32 bits (Python 2 long arithmetic).
    return x & 0xffffffffL
def bytereverse(x):
    # Swap the byte order of one 32-bit word (endianness flip).
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
                    (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
    # Byte-swap every 32-bit word of the buffer, keeping word order.
    # Python 2 str-as-bytes; buffer length must be a multiple of 4.
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    # Reverse the order of the 32-bit words in the buffer (bytes within
    # each word are untouched).
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    return ''.join(out_words)
class Miner:
    # One mining worker: repeatedly fetches work via getwork, scans a
    # nonce range, and submits any solution found (Python 2 code).

    def __init__(self, id):
        self.id = id
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces; return (hashes_done, winning_nonce_bin or None)."""
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        # NOTE(review): if self.max_nonce is ever 0 the loop body never
        # runs and `nonce` below is unbound (NameError) — confirm
        # max_nonce is always > 0 (iterate() clamps it but can compute 0).
        for nonce in xrange(self.max_nonce):

            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        # Splice the winning nonce back into the original hex work data
        # (chars 152-160 hold the nonce field) and submit it upstream.
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        # One getwork round: fetch work, scan, retune max_nonce so one
        # scan takes about settings['scantime'] seconds, submit solution.
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start

        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        # Connect once, then mine forever.
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    # Entry point for each worker process: mine until killed.
    miner = Miner(id)
    miner.loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse the key=value config file, skipping comment lines.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Fill in defaults for any missing settings; credentials are required.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 29663
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Values read from the config file arrive as strings; normalize.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # One OS process per mining "thread" (despite the naming).
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
main.py | # -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""The Evaluate Service of the service."""
import os
import logging
try:
import flask
import flask_restful
import flask_limiter
import werkzeug
import gevent
except Exception:
logging.warning(
"The dependencies [Flask==1.1.2,Flask-RESTful==0.3.8, Werkzeug==1.0.1 ] have not been install, \
and will install it automatically, if failed, please install it manually.")
os.system("pip3 install Flask==1.1.2")
os.system("pip3 install Flask-RESTful==0.3.8")
os.system("pip3 install Flask-Limiter==1.4")
os.system("pip3 install Werkzeug==1.0.1")
os.system("pip3 install gevent")
from flask import abort, Flask, request, Response
from flask_restful import Resource, Api
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
try:
from werkzeug import secure_filename
except Exception:
from werkzeug.utils import secure_filename
import glob
import multiprocessing
import time
import shutil
from evaluate_service.class_factory import ClassFactory
from .hardwares import * # noqa F401
import datetime
import traceback
import argparse
from .run_flask import run_flask, get_white_list, get_request_frequency_limit
app = Flask(__name__)
api = Api(app)
limiter = Limiter(
app,
key_func=get_remote_address,
default_limits=["100/minute"]
)
MAX_EVAL_EPOCHS = 10000
@app.before_request
def limit_remote_addr():
    """Set limit remote address."""
    # Reject any request whose source IP is not on the configured white
    # list (HTTP 403). An empty/absent white list disables the check.
    client_ip = str(request.remote_addr)
    white_list = get_white_list()
    if white_list and client_ip not in white_list:
        abort(403)
class Evaluate(Resource):
    """Evaluate Service for service.

    Flask-RESTful resource: accepts an uploaded model (plus optional
    weight and input data), converts it for the target hardware, runs
    inference a number of times and reports the average latency and the
    inference output.
    """

    decorators = [limiter.limit(get_request_frequency_limit)]

    def __init__(self):
        # Fresh result skeleton per request (flask-restful instantiates
        # the Resource for every request).
        self.result = {"latency": "9999", "out_data": [], "status": "sucess", "timestamp": "", "error_message": ""}

    @classmethod
    def _add_params(cls, work_path, optional_params):
        # Injected once at service start-up (see run()).
        cls.current_path = work_path
        cls.optional_params = optional_params

    def post(self):
        """Interface to response to the post request of the client."""
        try:
            self.parse_paras()
            self.upload_files()
            self.hardware_instance = ClassFactory.get_cls(self.hardware)(self.optional_params)
        except Exception:
            self.result["status"] = "Params error."
            self.result["error_message"] = traceback.format_exc()
            logging.error("[ERROR] Params error!")
            traceback.print_exc()
            return self.result
        if self.reuse_model == "True":
            logging.warning("Reuse the model, no need to convert the model.")
        else:
            try:
                self.hardware_instance.convert_model(backend=self.backend, model=self.model, weight=self.weight,
                                                     save_dir=self.share_dir, input_shape=self.input_shape,
                                                     out_nodes=self.out_nodes, precision=self.precision)
            except Exception:
                self.result["status"] = "Model convert failed."
                self.result["error_message"] = traceback.format_exc()
                logging.error("[ERROR] Model convert failed!")
                traceback.print_exc()
                return self.result
        try:
            latency_sum = 0
            # Cap the measurement loop at 10 runs to bound request time.
            num_runs = min(self.repeat_times, 10)
            for _ in range(num_runs):
                latency, output = self.hardware_instance.inference(converted_model=self.share_dir,
                                                                   input_data=self.input_data)
                latency_sum += float(latency)
            # Bug fix: average over the runs actually performed. The old
            # code divided by self.repeat_times although the loop is
            # capped at 10 iterations, under-reporting the latency
            # whenever repeat_times > 10.
            self.result["latency"] = latency_sum / num_runs
            self.result["out_data"] = output
        except Exception:
            self.result["status"] = "Inference failed."
            self.result["error_message"] = traceback.format_exc()
            logging.error("[ERROR] Inference failed! ")
            traceback.print_exc()
            return self.result
        # Bug fix: the success path previously fell off the end of post()
        # and returned None; always return the populated result.
        return self.result

    def parse_paras(self):
        """Parse the parameters in the request from the client."""
        self.backend = request.form["backend"]
        self.hardware = request.form["hardware"]
        self.reuse_model = request.form["reuse_model"]
        self.job_id = self._check_get_job_id(request.form["job_id"])
        self.input_shape = request.form.get("input_shape", type=str, default="")
        self.out_nodes = request.form.get("out_nodes", type=str, default="")
        self.repeat_times = self._check_get_repeat_times(request.form.get("repeat_times"))
        self.precision = request.form.get("precision", type=str, default="FP32")

    @staticmethod
    def _check_get_repeat_times(repeat_times):
        """Check validation of input repeat_times."""
        _repeat_times = repeat_times
        try:
            _repeat_times = int(_repeat_times)
        except ValueError:
            logging.warning("repeat_times {} is not a valid integer".format(_repeat_times))
            abort(400, "repeat_times {} is not a valid integer".format(_repeat_times))
        if not 0 < _repeat_times <= MAX_EVAL_EPOCHS:
            logging.warning("repeat_times {} is not in valid range (1-{})".format(_repeat_times, MAX_EVAL_EPOCHS))
            abort(400, "repeat_times {} is not in valid range (1-{})".format(_repeat_times, MAX_EVAL_EPOCHS))
        return _repeat_times

    @staticmethod
    def _check_get_job_id(job_id):
        """Check validation of params."""
        # job_id is later used to build a filesystem path (share_dir), so
        # restrict it to [_A-Za-z0-9] to prevent path traversal.
        import re
        if len(re.compile("[^_A-Za-z0-9]").findall(job_id)) > 0:
            logging.warning("job_id {} contains invalid characters".format(job_id))
            abort(400, "job_id {} contains invalid characters".format(job_id))
        return job_id

    def upload_files(self):
        """Upload the files from the client to the service."""
        self.now_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
        self.result["timestamp"] = self.now_time
        logging.warning("The timestamp is {}.".format(self.now_time))
        # Uploads land in a per-request folder; converted models share a
        # per-job folder so reuse_model can find them again.
        self.upload_file_path = os.path.join(self.current_path, "out", self.now_time)
        self.share_dir = os.path.join(self.current_path, "out", self.job_id)
        os.makedirs(self.upload_file_path)

        model_file = request.files.get("model_file")
        if model_file is not None:
            self.model = self.upload_file_path + "/" + secure_filename(model_file.filename)
            model_file.save(self.model)

        data_file = request.files.get("data_file")
        if data_file is not None:
            self.input_data = self.upload_file_path + "/" + secure_filename(data_file.filename)
            data_file.save(self.input_data)

        weight_file = request.files.get("weight_file")
        if weight_file is not None:
            self.weight = self.upload_file_path + "/" + secure_filename(weight_file.filename)
            weight_file.save(self.weight)
        else:
            self.weight = ""
        logging.warning("upload file sucess!")
def _clean_data_path(clean_interval, work_path):
    """Forever loop: hourly, delete out/ folders older than clean_interval seconds."""
    while True:
        cutoff = time.time() - clean_interval
        candidates = glob.glob("{}/out/*".format(work_path))
        for candidate in candidates:
            if not os.path.isdir(candidate):
                continue
            if os.path.getctime(candidate) >= cutoff:
                continue
            logging.warning("remove old folder: {}".format(candidate))
            try:
                shutil.rmtree(candidate)
            except Exception:
                logging.warning("failed to remove {}".format(candidate))
        time.sleep(3600)
def _parse_args(argv=None):
    """Parse command line options for the evaluate service.

    :param argv: optional list of argument strings; defaults to
        ``sys.argv[1:]`` (accepting an explicit list keeps the parser
        unit-testable without touching the real process arguments —
        backward compatible with the old zero-argument call).
    :return: the parsed :class:`argparse.Namespace`.
    """
    parser = argparse.ArgumentParser(description="Evaluate service")
    parser.add_argument("-i", "--host_ip", type=str, required=True,
                        help="the ip of the evaluate service machine")
    parser.add_argument("-p", "--port", type=int, required=False, default=8888,
                        help="the listening port")
    parser.add_argument("-w", "--work_path", type=str, required=True,
                        help="the work dir to save the file")
    parser.add_argument("-t", "--davinci_environment_type", type=str, required=False, default="ATLAS300",
                        help="the type the davinci hardwares")
    parser.add_argument("-c", "--clean_interval", type=int, required=False, default=1 * 6 * 3600,
                        help="the time interval to clean the temp folder")
    parser.add_argument("-u", "--ddk_user_name", type=str, required=False, default="user",
                        help="the user to acess ATLAS200200 DK")
    parser.add_argument("-atlas_host_ip", "--atlas_host_ip", type=str, required=False, default=None,
                        help="the ip of ATLAS200200 DK")
    args = parser.parse_args(argv)
    return args
def run():
    """Run the evaluate service.

    Parses CLI options, starts a daemon process that periodically removes
    stale upload folders, registers the ``Evaluate`` resource on the Flask
    API root and blocks serving requests.
    """
    # Restrict permissions on everything the service creates (no world access).
    os.umask(0o027)
    args = _parse_args()
    ip_address = args.host_ip
    listen_port = args.port
    clean_interval = args.clean_interval
    work_path = args.work_path
    optional_params = {"davinci_environment_type": args.davinci_environment_type,
                       "ddk_user_name": args.ddk_user_name,
                       "atlas_host_ip": args.atlas_host_ip
                       }
    # daemon=True ties the cleaner's lifetime to the main server process.
    p = multiprocessing.Process(target=_clean_data_path, args=(clean_interval, work_path), daemon=True)
    p.start()
    Evaluate._add_params(work_path, optional_params)
    api.add_resource(Evaluate, '/')
    run_flask(app, host=ip_address, port=listen_port)
|
test_multiprocessing.py | import contextlib
import multiprocessing
import pytest
import redis
from redis.connection import Connection, ConnectionPool
from redis.exceptions import ConnectionError
from .conftest import _get_client
@contextlib.contextmanager
def exit_callback(callback, *args):
    """Context manager that runs ``callback(*args)`` on exit.

    The callback fires whether the managed block completed normally or
    raised — equivalent to a ``try``/``finally`` around the body.
    """
    try:
        yield
    finally:
        callback(*args)
class TestMultiprocessing:
    """Test connection sharing between forks (see redis-py issue #1085).

    Uses a multi-connection client, as that's the only client type that is
    actually fork/process-safe.
    """

    @pytest.fixture()
    def r(self, request):
        # single_connection_client=False -> pooled client, safe across forks.
        return _get_client(redis.Redis, request=request, single_connection_client=False)

    def test_close_connection_in_child(self, master_host):
        """
        A connection owned by a parent and closed by a child doesn't
        destroy the file descriptors so a parent can still use it.
        """
        conn = Connection(host=master_host[0], port=master_host[1])
        conn.send_command("ping")
        assert conn.read_response() == b"PONG"

        def target(conn):
            # Runs in the forked child: the inherited socket still works here.
            conn.send_command("ping")
            assert conn.read_response() == b"PONG"
            conn.disconnect()

        proc = multiprocessing.Process(target=target, args=(conn,))
        proc.start()
        proc.join(3)
        assert proc.exitcode == 0

        # The connection was created in the parent but disconnected in the
        # child. The child called socket.close() but did not call
        # socket.shutdown() because it wasn't the "owning" process.
        # Therefore the connection still works in the parent.
        conn.send_command("ping")
        assert conn.read_response() == b"PONG"

    def test_close_connection_in_parent(self, master_host):
        """
        A connection owned by a parent is unusable by a child if the parent
        (the owning process) closes the connection.
        """
        conn = Connection(host=master_host[0], port=master_host[1])
        conn.send_command("ping")
        assert conn.read_response() == b"PONG"

        def target(conn, ev):
            ev.wait()
            # the parent closed the connection. because it also created the
            # connection, the connection is shutdown and the child
            # cannot use it.
            with pytest.raises(ConnectionError):
                conn.send_command("ping")

        ev = multiprocessing.Event()
        proc = multiprocessing.Process(target=target, args=(conn, ev))
        proc.start()

        # Disconnect before releasing the child via the event.
        conn.disconnect()
        ev.set()

        proc.join(3)
        assert proc.exitcode == 0

    @pytest.mark.parametrize("max_connections", [1, 2, None])
    def test_pool(self, max_connections, master_host):
        """
        A child will create its own connections when using a pool created
        by a parent.
        """
        pool = ConnectionPool.from_url(
            f"redis://{master_host[0]}:{master_host[1]}",
            max_connections=max_connections,
        )

        conn = pool.get_connection("ping")
        main_conn_pid = conn.pid
        with exit_callback(pool.release, conn):
            conn.send_command("ping")
            assert conn.read_response() == b"PONG"

        def target(pool):
            with exit_callback(pool.disconnect):
                conn = pool.get_connection("ping")
                # A fresh connection: the pool noticed the pid change.
                assert conn.pid != main_conn_pid
                with exit_callback(pool.release, conn):
                    assert conn.send_command("ping") is None
                    assert conn.read_response() == b"PONG"

        proc = multiprocessing.Process(target=target, args=(pool,))
        proc.start()
        proc.join(3)
        assert proc.exitcode == 0

        # Check that connection is still alive after fork process has exited
        # and disconnected the connections in its pool
        conn = pool.get_connection("ping")
        with exit_callback(pool.release, conn):
            assert conn.send_command("ping") is None
            assert conn.read_response() == b"PONG"

    @pytest.mark.parametrize("max_connections", [1, 2, None])
    def test_close_pool_in_main(self, max_connections, master_host):
        """
        A child process that uses the same pool as its parent isn't affected
        when the parent disconnects all connections within the pool.
        """
        pool = ConnectionPool.from_url(
            f"redis://{master_host[0]}:{master_host[1]}",
            max_connections=max_connections,
        )

        conn = pool.get_connection("ping")
        assert conn.send_command("ping") is None
        assert conn.read_response() == b"PONG"

        def target(pool, disconnect_event):
            conn = pool.get_connection("ping")
            with exit_callback(pool.release, conn):
                assert conn.send_command("ping") is None
                assert conn.read_response() == b"PONG"
                disconnect_event.wait()
                # The parent's pool.disconnect() must not break this conn.
                assert conn.send_command("ping") is None
                assert conn.read_response() == b"PONG"

        ev = multiprocessing.Event()
        proc = multiprocessing.Process(target=target, args=(pool, ev))
        proc.start()

        pool.disconnect()
        ev.set()
        proc.join(3)
        assert proc.exitcode == 0

    def test_redis_client(self, r):
        """A redis client created in a parent can also be used in a child."""
        assert r.ping() is True

        def target(client):
            assert client.ping() is True
            del client

        proc = multiprocessing.Process(target=target, args=(r,))
        proc.start()
        proc.join(3)
        assert proc.exitcode == 0

        # Client must still be usable in the parent after the child exits.
        assert r.ping() is True
|
main.py | #! python3
# main.py - anata gui
# GUI automation
import pyautogui
# file path and control
import os
# determine exe or script
import sys
# play sound file
#import playsound
# GUI
import tkinter
# image
from PIL import ImageTk
from PIL import Image
# itertools
import itertools
# multiprocess
import multiprocessing
#anata.py
import anata
#util.py
import util
# skill icon class
class SkillIcon():
    """A clickable canvas icon that toggles a background skill process.

    Clicking starts ``anata.multi`` in a separate process and draws a red
    border while it runs; clicking again terminates it.  Hovering shows a
    tooltip after a short delay.
    """

    def __init__(self, widget, skilltext, tooltip):
        """Wire click and hover bindings for one icon.

        widget: the tkinter Canvas displaying this icon.
        skilltext: skill text file name passed to anata.multi.
        tooltip: text shown in the hover tooltip.
        """
        # tkinter widget
        self.widget = widget
        # child process handle; the int 0 means "never started"
        self.proc = 0
        # toggle state: 0 = idle, 1 = running
        self.onoff = 0
        # skill text file name
        self.skill = skilltext
        # skill tooltip text
        self.tooltip = tooltip
        # pending `after` timer id used for tooltip scheduling
        self.id = None
        # the tooltip Toplevel window while visible, else None
        self.tw = None
        # bind eventfunction to click event
        self.widget.bind('<Button-1>', self.click)
        # bind eventfunction to canvas enter & leave event
        self.widget.bind('<Enter>', self.enter)
        self.widget.bind('<Leave>', self.leave)

    # icon click event
    def click(self, event):
        """Toggle the skill: start the worker process or terminate it."""
        # module-level config shared with main()
        global iconsiz
        global sndpath
        global afttime
        # if onofflist is 0, onofflist set 1 and create red rectangle
        # else, onofflist set 0 and delete red rectangle
        if self.onoff == 0:
            self.onoff = 1
            lowrx = -1 + iconsiz
            lowry = -1 + iconsiz
            # create red rectangle marking the icon as active
            self.widget.create_rectangle(1, 1, lowrx, lowry, width=2, outline='red', tags='tangle')
            #playsound.playsound(sndpath+'start.wav')
            # exec skill unsynchronize (run the skill in its own process)
            self.proc = multiprocessing.Process(target=anata.multi, args=(self.skill,))
            self.proc.start()
            # poll the process state again after `afttime` ms
            self.widget.after(afttime, self.repeat)
        else:
            self.onoff = 0
            # delete red rectangle
            self.widget.delete('tangle')
            # kill process
            self.proc.terminate()

    # after function
    def repeat(self):
        """Poll the worker; clear the red border once it has finished."""
        global afttime
        # worker exited on its own -> reset the visual state
        if self.proc.is_alive() == False:
            self.onoff = 0
            # delete red rectangle
            self.widget.delete('tangle')
            return
        # continue after function (still running: keep polling)
        if self.onoff == 1:
            self.widget.after(afttime, self.repeat)

    # click icon function
    def enter(self, event):
        """Mouse entered the icon: schedule the tooltip."""
        self.schedule()

    # leave from icon function
    def leave(self, event):
        """Mouse left the icon: cancel scheduling, destroy tooltip in 1 s."""
        self.unschedule()
        self.id = self.widget.after(1000, self.deltooltip)

    def schedule(self):
        """Arrange for the tooltip to appear after 1.5 s of hovering."""
        if self.tw:
            # tooltip already visible - nothing to schedule
            return
        self.unschedule()
        self.id = self.widget.after(1500, self.disptooltip)

    def unschedule(self):
        """Cancel any pending tooltip timer."""
        id = self.id
        self.id = None
        if id:
            self.widget.after_cancel(id)

    # display tool tip
    def disptooltip(self):
        """Create the borderless tooltip window next to the mouse pointer."""
        id = self.id
        self.id = None
        if id:
            self.widget.after_cancel(id)
        x, y = self.widget.winfo_pointerxy()
        # undecorated, always-on-top window just below/right of the cursor
        self.tw = tkinter.Toplevel(self.widget)
        self.tw.wm_overrideredirect(True)
        self.tw.geometry(f'+{x+10}+{y+10}')
        self.tw.attributes('-topmost', True)
        label = tkinter.Label(self.tw, text=self.tooltip, background="lightyellow", relief="solid", borderwidth=1, justify="left")
        label.pack(ipadx=10)

    # delete tool tip
    def deltooltip(self):
        """Destroy the tooltip window if one is showing."""
        tw = self.tw
        self.tw = None
        if tw:
            tw.destroy()
# closing event
def onclosing():
    """Window-close handler: terminate every running skill process, then
    destroy the root window (even if terminating raises)."""
    global root
    global canvaslist
    try:
        # proc is initialised to the int 0; anything else is a live Process.
        for icon in canvaslist:
            if type(icon.proc) is not int:
                icon.proc.terminate()
    finally:
        # exit the tkinter main loop
        root.destroy()
def main():
    """Build the icon-grid window and run the tkinter main loop.

    Reads layout/config values from ``util``, creates one canvas +
    ``SkillIcon`` per grid cell, positions the window near the bottom-right
    of the screen, and blocks in ``mainloop`` until closed.
    """
    # module-level values shared with SkillIcon and onclosing()
    global iconsiz
    global sndpath
    global root
    global afttime
    global canvaslist
    # determine if application is a script file or frozen exe
    if getattr(sys, 'frozen', False):
        applicationpath = os.path.dirname(sys.executable)
    else:
        applicationpath = os.path.dirname(__file__)
    # __file__ : [absolute path + file name]; dirname() yields the folder
    if os.path.dirname(applicationpath) != '':
        # default file path change to exec file path for process
        os.chdir(applicationpath)
    # get config parameters
    rootttl = util.rootttl
    iconsiz = util.iconsiz
    iconrow = util.iconrow
    iconcol = util.iconcol
    titlicn = util.titlicn
    deficon = util.deficon
    topmost = util.topmost
    afttime = util.afttime
    winxpos = util.winxpos
    winypos = util.winypos
    icnpath = util.icnpath
    sndpath = util.sndpath
    txtpath = util.txtpath
    # create window
    root = tkinter.Tk()
    root.title(rootttl)
    # keep the window in front of other windows if configured
    root.attributes('-topmost', topmost)
    root.iconbitmap(default=titlicn)
    imgwidth = iconsiz * iconrow
    imgheight = iconsiz * iconcol
    # initialize lists (imglist keeps PhotoImage refs alive for tkinter)
    imglist = []
    canvaslist = []
    # create one image canvas per grid cell, placed at its x,y position
    for j, i in itertools.product(range(iconcol), range(iconrow)):
        canvas = tkinter.Canvas(bg="black", width=iconsiz, height=iconsiz)
        canvaswidth = i * iconsiz
        canvasheight = j * iconsiz
        canvas.place(x=canvaswidth, y=canvasheight)
        idx = i + (j * iconrow)
        icnimg = icnpath + str(idx + 1) + '.png'
        # fall back to the default icon when the image file is missing
        if os.path.isfile(icnimg) == False:
            icnimg = icnpath + deficon
        img = Image.open(icnimg)
        img = img.resize((iconsiz, iconsiz))
        imglist.append(img)
        imglist[idx] = ImageTk.PhotoImage(imglist[idx])
        # draw the icon image onto the canvas
        canvas.create_image(0, 0, image=imglist[idx], anchor=tkinter.NW)
        # BUG FIX: readdata was only assigned inside the try block, so a
        # missing skill file raised NameError on the first icon or silently
        # reused the previous icon's tooltip text. Default it per iteration.
        readdata = ''
        try:
            with open(txtpath + 'skill' + str(idx + 1) + '.txt', mode='r', encoding='utf-8') as f:
                readdata = f.readlines()[0].replace('\n', '')
        except FileNotFoundError:
            pass
        canvaslist.append(SkillIcon(canvas, 'skill' + str(idx + 1) + '.txt', readdata))
    # bind window close event
    root.protocol("WM_DELETE_WINDOW", onclosing)
    screenwidth, screenheight = pyautogui.size()
    windowxposition = screenwidth - imgwidth - winxpos
    windowyposition = screenheight - imgheight - winypos
    windowheight = imgheight
    # set window size and position (bottom-right, offset by winxpos/winypos)
    root.geometry(str(imgwidth) + 'x' + str(windowheight) + '+' + str(windowxposition) + '+' + str(windowyposition))
    # window size fixed
    root.resizable(width=False, height=False)
    root.mainloop()
if __name__ == '__main__':
    # freeze_support() is required for multiprocessing in a frozen (exe)
    # build; without it each spawned child would re-run the GUI startup.
    multiprocessing.freeze_support()
    main()
|
sauce.py | import csv
import os
import subprocess
import threading
# Gather the packages to test.
PREFIX = './packages/node_modules/'
CISCOSPARK = os.path.join(PREFIX, '@ciscospark')
WEBEX = os.path.join(PREFIX, '@webex')
PROD_ENV_VARS = {
'CONVERSATION_SERVICE': 'https://conv-a.wbx2.com/conversation/api/v1',
'ENCRYPTION_SERVICE_URL': 'https://encryption-a.wbx2.com',
'IDBROKER_BASE_URL': 'https://idbroker.webex.com',
'IDENTITY_BASE_URL': 'https://identity.webex.com',
'U2C_SERVICE_URL': 'https://u2c.wbx2.com/u2c/api/v1',
'WDM_SERVICE_URL': 'https://wdm-a.wbx2.com/wdm/api/v1',
# Logging
'ENABLE_VERBOSE_NETWORK_LOGGING': 'true',
# Enable CI for Sauce Labs
'CI': 'true'
}
INT_ENV_VARS = {
# Environments
'ATLAS_SERVICE_URL': 'https://atlas-intb.ciscospark.com/admin/api/v1',
'CONVERSATION_SERVICE': 'https://conversation-intb.ciscospark.com/conversation/api/v1',
'ENCRYPTION_SERVICE_URL': 'https://encryption-intb.ciscospark.com/encryption/api/v1',
# Do not use 'https://hydra-intb.ciscospark.com/v1' for Hydra. CI expects 'apialpha'.
'HYDRA_SERVICE_URL': 'https://apialpha.ciscospark.com/v1/',
'IDBROKER_BASE_URL': 'https://idbrokerbts.webex.com',
'IDENTITY_BASE_URL': 'https://identitybts.webex.com',
'U2C_SERVICE_URL': 'https://u2c-intb.ciscospark.com/u2c/api/v1',
'WDM_SERVICE_URL': 'https://wdm-intb.ciscospark.com/wdm/api/v1',
'WHISTLER_API_SERVICE_URL': 'https://whistler.onint.ciscospark.com/api/v1',
# Logging
'ENABLE_VERBOSE_NETWORK_LOGGING': 'true',
# Enable CI for Sauce Labs
'CI': 'true'
}
OUTPUT_DIR = 'output'
OUTPUT_FILE_PATH = os.path.join(OUTPUT_DIR, 'test-comparison.csv')
TEST_COMMAND = 'npm run sauce:run -- npm test -- --packages %s'
# Packages excluded from the run because they ship no test suites.
# BUG FIX: the original list was missing the comma after the first entry,
# so Python's implicit string concatenation merged the first two names into
# one bogus entry ('@webex/test-helper-server@webex/internal-plugin-calendar')
# and neither package was actually skipped.
SKIP_PACKAGES = [
    '@webex/test-helper-server',  # no tests
    '@webex/internal-plugin-calendar',  # no tests
    '@webex/plugin-webhooks'  # no tests
]
def should_include_package(path_name, name):
    """Return True when *name* is a real package directory under
    *path_name* and its scoped name is not on the skip list."""
    if not os.path.isdir(os.path.join(path_name, name)):
        return False
    scoped_name = os.path.join(os.path.basename(path_name), name)
    return scoped_name not in SKIP_PACKAGES
def get_package_names(path_name):
    """Return '<scope>/<package>' names for every testable package
    directory found under *path_name*."""
    namespace = path_name.replace(PREFIX, '')
    names = []
    for entry in os.listdir(path_name):
        if should_include_package(path_name, entry):
            names.append(os.path.join(namespace, entry))
    return names
def run_subprocess(bash_command, env_vars):
    """Run *bash_command* with *env_vars* overlaid on the environment.

    Blocks until the command finishes; stdout is captured (and currently
    discarded).  Returns the process exit code.
    """
    env = dict(os.environ)
    env.update(env_vars)
    process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE, env=env)
    process.communicate()
    return process.returncode
class bcolors:
    """ANSI escape sequences used to colour terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset to default styling
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def print_result(return_code, prefix='Tests are a...'):
    """Print a coloured success/failure line for a test exit code."""
    if return_code == 0:
        colour, outcome = bcolors.OKGREEN, 'success.'
    else:
        colour, outcome = bcolors.FAIL, 'failure.'
    print(colour + prefix + outcome + bcolors.ENDC)
def run_test(package, environment):
    """Run one package's test suite against an environment.

    :param package: scoped package name, e.g. '@webex/plugin-teams'.
    :param environment: 'integration' selects INT_ENV_VARS; anything else
        (normally 'production') selects PROD_ENV_VARS.
    :return: the test process exit code (0 on success).
    """
    # BUG FIX: the original compared `environment is 'integration'`, i.e.
    # string *identity*, which is implementation-dependent (CPython warns
    # with SyntaxWarning). Use equality for a reliable comparison.
    env_vars = INT_ENV_VARS if environment == 'integration' else PROD_ENV_VARS
    print(bcolors.OKBLUE + 'Testing `%s` on %s...' % (package, environment) + bcolors.ENDC)
    bash_command = TEST_COMMAND % package
    return_code = run_subprocess(bash_command, env_vars)
    print_result(return_code, prefix='Testing `%s` on %s...' % (package, environment))
    return return_code
def run_env_tests(package, writer, csv_file):
    """Test *package* in both environments and record the exit codes.

    Flushes after each row so partial results survive an interrupted run.
    """
    results = [run_test(package, env) for env in ('production', 'integration')]
    writer.writerow([package] + results)
    csv_file.flush()
def run_tests_in_sequence(packages, writer, csv_file):
    """Run each package's environment tests one after another."""
    for pkg in packages:
        run_env_tests(pkg, writer, csv_file)
def run_tests_in_parallel(packages, writer, csv_file):
    """Run every package's environment tests on its own thread.

    All threads are started before any is joined. NOTE(review): the shared
    csv writer is not lock-protected — rows may interleave; confirm callers
    accept that before using this instead of run_tests_in_sequence.
    """
    workers = [
        threading.Thread(target=run_env_tests, args=(pkg, writer, csv_file))
        for pkg in packages
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def main():
    """Test every @ciscospark/@webex package in both environments and
    write a CSV comparison of the exit codes to OUTPUT_FILE_PATH."""
    ciscospark_packages = get_package_names(CISCOSPARK)
    webex_packages = get_package_names(WEBEX)
    packages = ciscospark_packages + webex_packages
    print('Skipping %d packages: %s' % (len(SKIP_PACKAGES), ', '.join(SKIP_PACKAGES)))
    print('Testing %d packages...' % len(packages))
    try:
        os.mkdir(OUTPUT_DIR)
    except OSError:
        # Output directory already exists - that's fine.
        pass
    # BUG FIX: csv.writer requires a text-mode file in Python 3; opening
    # with 'wb' made every writerow() raise TypeError. newline='' is the
    # csv-module-documented way to avoid blank interleaved rows on Windows.
    # (Also dropped the unused `threads = []` local.)
    with open(OUTPUT_FILE_PATH, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file, quoting=csv.QUOTE_MINIMAL)
        writer.writerow(['Package', 'Production exit code', 'Integration exit code'])
        run_tests_in_sequence(packages, writer, csv_file)
    print('Wrote output to: %s' % OUTPUT_FILE_PATH)
    print('Done.')
if __name__ == "__main__":
    # Entry point when run directly: python sauce.py
    main()
|
core.py | """
Core components of Home Assistant.
Home Assistant is a Home Automation framework for observing the state
of entities and react to changes.
"""
import enum
import functools as ft
import logging
import os
import signal
import threading
import time
from types import MappingProxyType
import voluptuous as vol
import homeassistant.helpers.temperature as temp_helper
import homeassistant.util as util
import homeassistant.util.dt as dt_util
import homeassistant.util.location as location
from homeassistant.config import get_default_config_dir
from homeassistant.const import (
ATTR_DOMAIN, ATTR_FRIENDLY_NAME, ATTR_NOW, ATTR_SERVICE,
ATTR_SERVICE_CALL_ID, ATTR_SERVICE_DATA, EVENT_CALL_SERVICE,
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
EVENT_SERVICE_EXECUTED, EVENT_SERVICE_REGISTERED, EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED, MATCH_ALL, RESTART_EXIT_CODE,
SERVICE_HOMEASSISTANT_RESTART, SERVICE_HOMEASSISTANT_STOP, TEMP_CELSIUS,
TEMP_FAHRENHEIT, __version__)
from homeassistant.exceptions import (
HomeAssistantError, InvalidEntityFormatError)
from homeassistant.helpers.entity import split_entity_id, valid_entity_id
DOMAIN = "homeassistant"
# How often time_changed event should fire
TIMER_INTERVAL = 1 # seconds
# How long we wait for the result of a service call
SERVICE_CALL_LIMIT = 10 # seconds
# Define number of MINIMUM worker threads.
# During bootstrap of HA (see bootstrap._setup_component()) worker threads
# will be added for each component that polls devices.
MIN_WORKER_THREAD = 2
_LOGGER = logging.getLogger(__name__)
class HomeAssistant(object):
    """Root object of the Home Assistant home automation."""

    def __init__(self):
        """Initialize new Home Assistant object."""
        self.pool = pool = create_worker_pool()
        self.bus = EventBus(pool)
        self.services = ServiceRegistry(self.bus, pool)
        self.states = StateMachine(self.bus)
        self.config = Config()

    def start(self):
        """Start home assistant: begin the timer and fire the start event."""
        _LOGGER.info(
            "Starting Home Assistant (%d threads)", self.pool.worker_count)

        create_timer(self)
        self.bus.fire(EVENT_HOMEASSISTANT_START)

    def block_till_stopped(self):
        """Register service homeassistant/stop and will block until called.

        Also registers homeassistant/restart and SIGTERM/SIGHUP handlers
        where the platform allows binding them.

        :return: RESTART_EXIT_CODE if a restart was requested, else 0.
        """
        request_shutdown = threading.Event()
        request_restart = threading.Event()

        def stop_homeassistant(*args):
            """Stop Home Assistant."""
            request_shutdown.set()

        def restart_homeassistant(*args):
            """Reset Home Assistant."""
            _LOGGER.warning('Home Assistant requested a restart.')
            request_restart.set()
            request_shutdown.set()

        self.services.register(
            DOMAIN, SERVICE_HOMEASSISTANT_STOP, stop_homeassistant)
        self.services.register(
            DOMAIN, SERVICE_HOMEASSISTANT_RESTART, restart_homeassistant)

        try:
            signal.signal(signal.SIGTERM, stop_homeassistant)
        except ValueError:
            _LOGGER.warning(
                'Could not bind to SIGTERM. Are you running in a thread?')
        try:
            signal.signal(signal.SIGHUP, restart_homeassistant)
        except ValueError:
            _LOGGER.warning(
                'Could not bind to SIGHUP. Are you running in a thread?')
        except AttributeError:
            # signal.SIGHUP does not exist on Windows.
            pass

        try:
            # FIX: Event.isSet() is the deprecated camelCase alias of
            # is_set() (deprecated since Python 3.10) - use the real name.
            while not request_shutdown.is_set():
                time.sleep(1)
        except KeyboardInterrupt:
            pass
        finally:
            self.stop()

        return RESTART_EXIT_CODE if request_restart.is_set() else 0

    def stop(self):
        """Stop Home Assistant and shuts down all threads."""
        _LOGGER.info("Stopping")

        self.bus.fire(EVENT_HOMEASSISTANT_STOP)

        self.pool.stop()
class JobPriority(util.OrderedEnum):
    """Provides job priorities for event bus jobs.

    Lower value means higher priority in the worker pool.
    """

    # Ordered from most to least urgent.
    EVENT_CALLBACK = 0
    EVENT_SERVICE = 1
    EVENT_STATE = 2
    EVENT_TIME = 3
    EVENT_DEFAULT = 4

    @staticmethod
    def from_event_type(event_type):
        """Return a priority based on event type."""
        if event_type == EVENT_TIME_CHANGED:
            return JobPriority.EVENT_TIME
        elif event_type == EVENT_STATE_CHANGED:
            return JobPriority.EVENT_STATE
        elif event_type == EVENT_CALL_SERVICE:
            return JobPriority.EVENT_SERVICE
        elif event_type == EVENT_SERVICE_EXECUTED:
            return JobPriority.EVENT_CALLBACK
        else:
            # Any unrecognised event type gets the lowest priority.
            return JobPriority.EVENT_DEFAULT
class EventOrigin(enum.Enum):
    """Origin of an event: produced locally or received from a remote API."""

    local = "LOCAL"
    remote = "REMOTE"

    def __str__(self):
        """Render the origin as its raw wire value."""
        return self.value
class Event(object):
    # pylint: disable=too-few-public-methods
    """Represents an event within the Bus."""

    # __slots__ keeps per-instance memory low; events are created constantly.
    __slots__ = ['event_type', 'data', 'origin', 'time_fired']

    def __init__(self, event_type, data=None, origin=EventOrigin.local,
                 time_fired=None):
        """Initialize a new event.

        event_type: the bus event name, e.g. EVENT_STATE_CHANGED.
        data: optional payload dict (defaults to empty dict).
        origin: EventOrigin.local or EventOrigin.remote.
        time_fired: timestamp; defaults to the current UTC time.
        """
        self.event_type = event_type
        self.data = data or {}
        self.origin = origin
        self.time_fired = time_fired or dt_util.utcnow()

    def as_dict(self):
        """Create a dict representation of this Event."""
        return {
            'event_type': self.event_type,
            'data': dict(self.data),
            'origin': str(self.origin),
            'time_fired': self.time_fired,
        }

    def __repr__(self):
        """Return the representation."""
        # pylint: disable=maybe-no-member
        if self.data:
            return "<Event {}[{}]: {}>".format(
                self.event_type, str(self.origin)[0],
                util.repr_helper(self.data))
        else:
            return "<Event {}[{}]>".format(self.event_type,
                                           str(self.origin)[0])

    def __eq__(self, other):
        """Return the comparison."""
        # NOTE(review): no matching __hash__ is visible in this chunk —
        # confirm instances are not used as dict keys / set members.
        return (self.__class__ == other.__class__ and
                self.event_type == other.event_type and
                self.data == other.data and
                self.origin == other.origin and
                self.time_fired == other.time_fired)
class EventBus(object):
    """Allows firing of and listening for events."""

    def __init__(self, pool=None):
        """Initialize a new event bus.

        pool: worker pool used to run listener jobs; a fresh pool is
        created when none is supplied.
        """
        # event_type -> list of listener callables
        self._listeners = {}
        self._lock = threading.Lock()
        self._pool = pool or create_worker_pool()

    @property
    def listeners(self):
        """Dict with events and the number of listeners."""
        with self._lock:
            return {key: len(self._listeners[key])
                    for key in self._listeners}

    def fire(self, event_type, event_data=None, origin=EventOrigin.local):
        """Fire an event, queuing each listener on the worker pool.

        Raises HomeAssistantError when the pool has been stopped.
        """
        if not self._pool.running:
            raise HomeAssistantError('Home Assistant has shut down.')

        with self._lock:
            # Copy the list of the current listeners because some listeners
            # remove themselves as a listener while being executed which
            # causes the iterator to be confused.
            get = self._listeners.get
            listeners = get(MATCH_ALL, []) + get(event_type, [])

            event = Event(event_type, event_data, origin)

            # Time-changed events fire every second; logging them would flood.
            if event_type != EVENT_TIME_CHANGED:
                _LOGGER.info("Bus:Handling %s", event)

            if not listeners:
                return

            job_priority = JobPriority.from_event_type(event_type)

            for func in listeners:
                self._pool.add_job(job_priority, (func, event))

    def listen(self, event_type, listener):
        """Listen for all events or events of a specific type.

        To listen to all events specify the constant ``MATCH_ALL``
        as event_type.
        """
        with self._lock:
            if event_type in self._listeners:
                self._listeners[event_type].append(listener)
            else:
                self._listeners[event_type] = [listener]

    def listen_once(self, event_type, listener):
        """Listen once for event of a specific type.

        To listen to all events specify the constant ``MATCH_ALL``
        as event_type.

        Returns registered listener that can be used with remove_listener.
        """
        @ft.wraps(listener)
        def onetime_listener(event):
            """Remove listener from eventbus and then fire listener."""
            if hasattr(onetime_listener, 'run'):
                return
            # Set variable so that we will never run twice.
            # Because the event bus might have to wait till a thread comes
            # available to execute this listener it might occur that the
            # listener gets lined up twice to be executed.
            # This will make sure the second time it does nothing.
            onetime_listener.run = True
            self.remove_listener(event_type, onetime_listener)
            listener(event)

        self.listen(event_type, onetime_listener)

        return onetime_listener

    def remove_listener(self, event_type, listener):
        """Remove a listener of a specific event_type."""
        with self._lock:
            try:
                self._listeners[event_type].remove(listener)

                # delete event_type list if empty
                if not self._listeners[event_type]:
                    self._listeners.pop(event_type)
            except (KeyError, ValueError):
                # KeyError is key event_type listener did not exist
                # ValueError if listener did not exist within event_type
                pass
class State(object):
    """Object to represent a state within the state machine.

    entity_id: the entity that is represented.
    state: the state of the entity
    attributes: extra information on entity and state
    last_changed: last time the state was changed, not the attributes.
    last_updated: last time this object was updated.
    """

    __slots__ = ['entity_id', 'state', 'attributes',
                 'last_changed', 'last_updated']

    # pylint: disable=too-many-arguments
    def __init__(self, entity_id, state, attributes=None, last_changed=None,
                 last_updated=None):
        """Initialize a new state.

        Raises InvalidEntityFormatError when entity_id is not of the form
        <domain>.<object_id>.  The state value is coerced to str and the
        attributes are wrapped in a read-only MappingProxyType view.
        """
        if not valid_entity_id(entity_id):
            raise InvalidEntityFormatError((
                "Invalid entity id encountered: {}. "
                "Format should be <domain>.<object_id>").format(entity_id))

        self.entity_id = entity_id.lower()
        self.state = str(state)
        self.attributes = MappingProxyType(attributes or {})
        self.last_updated = last_updated or dt_util.utcnow()
        # Unless told otherwise, the state "changed" when it was last updated.
        self.last_changed = last_changed or self.last_updated

    @property
    def domain(self):
        """Domain of this state."""
        return split_entity_id(self.entity_id)[0]

    @property
    def object_id(self):
        """Object id of this state."""
        return split_entity_id(self.entity_id)[1]

    @property
    def name(self):
        """Name of this state: friendly name, or prettified object id."""
        return (
            self.attributes.get(ATTR_FRIENDLY_NAME) or
            self.object_id.replace('_', ' '))

    def as_dict(self):
        """Return a dict representation of the State.

        To be used for JSON serialization.
        Ensures: state == State.from_dict(state.as_dict())
        """
        return {'entity_id': self.entity_id,
                'state': self.state,
                'attributes': dict(self.attributes),
                'last_changed': self.last_changed,
                'last_updated': self.last_updated}

    @classmethod
    def from_dict(cls, json_dict):
        """Initialize a state from a dict.

        Ensures: state == State.from_json_dict(state.to_json_dict())

        Returns None when the dict is missing entity_id or state.
        """
        if not (json_dict and 'entity_id' in json_dict and
                'state' in json_dict):
            return None

        # Timestamps may arrive as ISO strings (e.g. from JSON); parse them.
        last_changed = json_dict.get('last_changed')

        if isinstance(last_changed, str):
            last_changed = dt_util.parse_datetime(last_changed)

        last_updated = json_dict.get('last_updated')

        if isinstance(last_updated, str):
            last_updated = dt_util.parse_datetime(last_updated)

        return cls(json_dict['entity_id'], json_dict['state'],
                   json_dict.get('attributes'), last_changed, last_updated)

    def __eq__(self, other):
        """Return the comparison of the state."""
        # NOTE(review): last_changed/last_updated are deliberately excluded
        # from equality here — only identity, value and attributes compare.
        return (self.__class__ == other.__class__ and
                self.entity_id == other.entity_id and
                self.state == other.state and
                self.attributes == other.attributes)

    def __repr__(self):
        """Return the representation of the states."""
        attr = "; {}".format(util.repr_helper(self.attributes)) \
            if self.attributes else ""

        return "<state {}={}{} @ {}>".format(
            self.entity_id, self.state, attr,
            dt_util.as_local(self.last_changed).isoformat())
class StateMachine(object):
    """Helper class that tracks the state of different entities."""

    def __init__(self, bus):
        """Initialize state machine.

        bus: the EventBus on which EVENT_STATE_CHANGED events are fired.
        """
        # entity_id (lower-cased) -> State
        self._states = {}
        self._bus = bus
        self._lock = threading.Lock()

    def entity_ids(self, domain_filter=None):
        """List of entity ids that are being tracked.

        domain_filter: when given, only ids whose domain matches are
        returned (case-insensitive).
        """
        # NOTE(review): the unfiltered branch reads _states without taking
        # the lock while the filtered branch locks — confirm intended.
        if domain_filter is None:
            return list(self._states.keys())

        domain_filter = domain_filter.lower()

        with self._lock:
            return [state.entity_id for state in self._states.values()
                    if state.domain == domain_filter]

    def all(self):
        """Create a list of all states."""
        with self._lock:
            return list(self._states.values())

    def get(self, entity_id):
        """Retrieve state of entity_id or None if not found."""
        return self._states.get(entity_id.lower())

    def is_state(self, entity_id, state):
        """Test if entity exists and is specified state."""
        entity_id = entity_id.lower()

        return (entity_id in self._states and
                self._states[entity_id].state == state)

    def is_state_attr(self, entity_id, name, value):
        """Test if entity exists and has a state attribute set to value."""
        entity_id = entity_id.lower()

        return (entity_id in self._states and
                self._states[entity_id].attributes.get(name, None) == value)

    def remove(self, entity_id):
        """Remove the state of an entity.

        Returns boolean to indicate if an entity was removed.
        Fires EVENT_STATE_CHANGED with new_state=None on success.
        """
        entity_id = entity_id.lower()

        with self._lock:
            old_state = self._states.pop(entity_id, None)

            if old_state is None:
                return False

            event_data = {
                'entity_id': entity_id,
                'old_state': old_state,
                'new_state': None,
            }

            self._bus.fire(EVENT_STATE_CHANGED, event_data)

            return True

    def set(self, entity_id, new_state, attributes=None):
        """Set the state of an entity, add entity if it does not exist.

        Attributes is an optional dict to specify attributes of this state.

        If you just update the attributes and not the state, last changed will
        not be affected.
        """
        entity_id = entity_id.lower()
        new_state = str(new_state)
        attributes = attributes or {}

        with self._lock:
            old_state = self._states.get(entity_id)

            is_existing = old_state is not None
            same_state = is_existing and old_state.state == new_state
            same_attr = is_existing and old_state.attributes == attributes

            # Nothing changed at all: skip the state and the event entirely.
            if same_state and same_attr:
                return

            # If state did not exist or is different, set it
            # (carrying last_changed forward when only attributes changed).
            last_changed = old_state.last_changed if same_state else None

            state = State(entity_id, new_state, attributes, last_changed)
            self._states[entity_id] = state

            event_data = {
                'entity_id': entity_id,
                'old_state': old_state,
                'new_state': state,
            }

            self._bus.fire(EVENT_STATE_CHANGED, event_data)
# pylint: disable=too-few-public-methods
class Service(object):
    """Represents a callable service."""

    __slots__ = ['func', 'description', 'fields', 'schema']

    def __init__(self, func, description, fields, schema):
        """Initialize a service.

        func: the callable invoked with a ServiceCall.
        description: human-readable description (may be None).
        fields: dict describing the accepted service-data fields.
        schema: optional voluptuous schema used to validate/coerce data.
        """
        self.func = func
        self.description = description or ''
        self.fields = fields or {}
        self.schema = schema

    def as_dict(self):
        """Return dictionary representation of this service."""
        return {
            'description': self.description,
            'fields': self.fields,
        }

    def __call__(self, call):
        """Execute the service, validating data against the schema first."""
        try:
            if self.schema:
                call.data = self.schema(call.data)

            self.func(call)
        except vol.MultipleInvalid as ex:
            # Invalid service data is logged, not propagated to the caller.
            _LOGGER.error('Invalid service data for %s.%s: %s',
                          call.domain, call.service, ex)
# pylint: disable=too-few-public-methods
class ServiceCall(object):
    """Represents a call to a service."""

    __slots__ = ['domain', 'service', 'data', 'call_id']

    def __init__(self, domain, service, data=None, call_id=None):
        """Initialize a service call with an optional payload and call id."""
        self.domain = domain
        self.service = service
        self.data = data if data else {}
        self.call_id = call_id

    def __repr__(self):
        """Return the representation of this service call."""
        if not self.data:
            return "<ServiceCall {}.{}>".format(self.domain, self.service)
        return "<ServiceCall {}.{}: {}>".format(
            self.domain, self.service, util.repr_helper(self.data))
class ServiceRegistry(object):
    """Offers services over the eventbus."""

    def __init__(self, bus, pool=None):
        """Initialize a service registry."""
        # Maps domain -> {service name: Service object}.
        self._services = {}
        self._lock = threading.Lock()
        self._pool = pool or create_worker_pool()
        self._bus = bus
        # Monotonic counter used to build per-registry unique call ids.
        self._cur_id = 0
        # Every CALL_SERVICE event on the bus is routed back through this
        # registry, so calls fired remotely also reach local handlers.
        bus.listen(EVENT_CALL_SERVICE, self._event_to_service_call)

    @property
    def services(self):
        """Dict with per domain a list of available services."""
        # Build a fresh dict under the lock so callers cannot mutate
        # internal registry state.
        with self._lock:
            return {domain: {key: value.as_dict() for key, value
                             in self._services[domain].items()}
                    for domain in self._services}

    def has_service(self, domain, service):
        """Test if specified service exists."""
        return service in self._services.get(domain, [])

    # pylint: disable=too-many-arguments
    def register(self, domain, service, service_func, description=None,
                 schema=None):
        """
        Register a service.

        Description is a dict containing key 'description' to describe
        the service and a key 'fields' to describe the fields.

        Schema is called to coerce and validate the service data.
        """
        description = description or {}
        service_obj = Service(service_func, description.get('description'),
                              description.get('fields', {}), schema)
        with self._lock:
            if domain in self._services:
                self._services[domain][service] = service_obj
            else:
                self._services[domain] = {service: service_obj}
            # Announce the registration while still holding the lock so
            # listeners never observe a partially-registered service.
            self._bus.fire(
                EVENT_SERVICE_REGISTERED,
                {ATTR_DOMAIN: domain, ATTR_SERVICE: service})

    def call(self, domain, service, service_data=None, blocking=False):
        """
        Call a service.

        Specify blocking=True to wait till service is executed.
        Waits a maximum of SERVICE_CALL_LIMIT.

        If blocking = True, will return boolean if service executed
        succesfully within SERVICE_CALL_LIMIT.

        This method will fire an event to call the service.
        This event will be picked up by this ServiceRegistry and any
        other ServiceRegistry that is listening on the EventBus.

        Because the service is sent as an event you are not allowed to use
        the keys ATTR_DOMAIN and ATTR_SERVICE in your service_data.
        """
        call_id = self._generate_unique_id()
        event_data = {
            ATTR_DOMAIN: domain,
            ATTR_SERVICE: service,
            ATTR_SERVICE_DATA: service_data,
            ATTR_SERVICE_CALL_ID: call_id,
        }
        if blocking:
            # Register the completion listener BEFORE firing, so the
            # executed event cannot be missed in a race.
            executed_event = threading.Event()

            def service_executed(call):
                """Callback method that is called when service is executed."""
                if call.data[ATTR_SERVICE_CALL_ID] == call_id:
                    executed_event.set()

            self._bus.listen(EVENT_SERVICE_EXECUTED, service_executed)
        self._bus.fire(EVENT_CALL_SERVICE, event_data)
        if blocking:
            # Returns False when the wait timed out (service not confirmed).
            success = executed_event.wait(SERVICE_CALL_LIMIT)
            self._bus.remove_listener(
                EVENT_SERVICE_EXECUTED, service_executed)
            return success

    def _event_to_service_call(self, event):
        """Callback for SERVICE_CALLED events from the event bus."""
        service_data = event.data.get(ATTR_SERVICE_DATA)
        domain = event.data.get(ATTR_DOMAIN)
        service = event.data.get(ATTR_SERVICE)
        call_id = event.data.get(ATTR_SERVICE_CALL_ID)
        # Silently ignore calls for services we do not know about; another
        # registry on the same bus may handle them.
        if not self.has_service(domain, service):
            return
        service_handler = self._services[domain][service]
        service_call = ServiceCall(domain, service, service_data, call_id)
        # Add a job to the pool that calls _execute_service
        self._pool.add_job(JobPriority.EVENT_SERVICE,
                           (self._execute_service,
                            (service_handler, service_call)))

    def _execute_service(self, service_and_call):
        """Execute a service and fires a SERVICE_EXECUTED event."""
        service, call = service_and_call
        service(call)
        # Only confirm execution when the caller supplied a call id
        # (i.e. it may be blocking on the result).
        if call.call_id is not None:
            self._bus.fire(
                EVENT_SERVICE_EXECUTED, {ATTR_SERVICE_CALL_ID: call.call_id})

    def _generate_unique_id(self):
        """Generate a unique service call id."""
        # id(self) disambiguates ids across registries in one process.
        self._cur_id += 1
        return "{}-{}".format(id(self), self._cur_id)
class Config(object):
    """Configuration settings for Home Assistant."""

    # pylint: disable=too-many-instance-attributes
    def __init__(self):
        """Initialize a new config object."""
        self.latitude = None
        self.longitude = None
        self.temperature_unit = None
        self.location_name = None
        self.time_zone = None
        # If True, pip install is skipped for requirements on startup
        self.skip_pip = False
        # List of loaded components
        self.components = []
        # Remote.API object pointing at local API
        self.api = None
        # Directory that holds the configuration
        self.config_dir = get_default_config_dir()

    def distance(self, lat, lon):
        """Calculate distance from Home Assistant in meters."""
        return location.distance(self.latitude, self.longitude, lat, lon)

    def path(self, *path):
        """Generate path to the file within the config dir."""
        return os.path.join(self.config_dir, *path)

    def temperature(self, value, unit):
        """Convert temperature to user preferred unit if set.

        Returns a (value, unit) tuple; the input is passed through
        unchanged when no conversion applies or value is not numeric.
        """
        # Convert only when the unit is known, a preference is set, and
        # the value is not already in the preferred unit.
        if not (unit in (TEMP_CELSIUS, TEMP_FAHRENHEIT) and
                self.temperature_unit and unit != self.temperature_unit):
            return value, unit
        try:
            temp = float(value)
        except ValueError:  # Could not convert value to float
            return value, unit
        return (
            round(temp_helper.convert(temp, unit, self.temperature_unit), 1),
            self.temperature_unit)

    def as_dict(self):
        """Create a dict representation of this dict."""
        # Fall back to UTC so 'time_zone' is always serializable.
        time_zone = self.time_zone or dt_util.UTC
        return {
            'latitude': self.latitude,
            'longitude': self.longitude,
            'temperature_unit': self.temperature_unit,
            'location_name': self.location_name,
            'time_zone': time_zone.zone,
            'components': self.components,
            'version': __version__
        }
def create_timer(hass, interval=TIMER_INTERVAL):
    """Create a timer that will start on HOMEASSISTANT_START.

    The timer fires EVENT_TIME_CHANGED roughly every `interval` seconds,
    aligned to wall-clock second boundaries.
    """
    # We want to be able to fire every time a minute starts (seconds=0).
    # We want this so other modules can use that to make sure they fire
    # every minute.
    assert 60 % interval == 0, "60 % TIMER_INTERVAL should be 0!"

    def timer():
        """Send an EVENT_TIME_CHANGED on interval."""
        stop_event = threading.Event()

        def stop_timer(event):
            """Stop the timer."""
            stop_event.set()

        hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_timer)
        _LOGGER.info("Timer:starting")
        # -1 guarantees the very first tick is never suppressed.
        last_fired_on_second = -1
        calc_now = dt_util.utcnow
        while not stop_event.isSet():
            now = calc_now()
            # First check checks if we are not on a second matching the
            # timer interval. Second check checks if we did not already fire
            # this interval.
            if now.second % interval or \
               now.second == last_fired_on_second:
                # Sleep till it is the next time that we have to fire an event.
                # Aim for halfway through the second that fits TIMER_INTERVAL.
                # If TIMER_INTERVAL is 10 fire at .5, 10.5, 20.5, etc seconds.
                # This will yield the best results because time.sleep() is not
                # 100% accurate because of non-realtime OS's
                slp_seconds = interval - now.second % interval + \
                    .5 - now.microsecond/1000000.0
                time.sleep(slp_seconds)
                now = calc_now()
            last_fired_on_second = now.second
            # Event might have been set while sleeping
            if not stop_event.isSet():
                try:
                    hass.bus.fire(EVENT_TIME_CHANGED, {ATTR_NOW: now})
                except HomeAssistantError:
                    # HA raises error if firing event after it has shut down
                    break

    def start_timer(event):
        """Start the timer."""
        # Daemon thread: never blocks interpreter shutdown.
        thread = threading.Thread(target=timer, name='Timer')
        thread.daemon = True
        thread.start()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_timer)
def create_worker_pool(worker_count=None):
    """Create a worker pool.

    Returns a util.ThreadPool whose workers swallow (but log) any
    exception raised by a job, so one bad handler cannot kill the pool.
    """
    if worker_count is None:
        worker_count = MIN_WORKER_THREAD

    def job_handler(job):
        """Called whenever a job is available to do."""
        try:
            # A job is a (callable, argument) pair.
            func, arg = job
            func(arg)
        except Exception:  # pylint: disable=broad-except
            # Catch any exception our service/event_listener might throw
            # We do not want to crash our ThreadPool
            _LOGGER.exception("BusHandler:Exception doing job")

    def busy_callback(worker_count, current_jobs, pending_jobs_count):
        """Callback to be called when the pool queue gets too big."""
        _LOGGER.warning(
            "WorkerPool:All %d threads are busy and %d jobs pending",
            worker_count, pending_jobs_count)
        # Dump each in-flight job with its start time to aid debugging.
        for start, job in current_jobs:
            _LOGGER.warning("WorkerPool:Current job from %s: %s",
                            dt_util.as_local(start).isoformat(), job)

    return util.ThreadPool(job_handler, worker_count, busy_callback)
|
test_pubsub.py | import multiprocessing as mp
import secrets
import time
import reth
import perwez
ENV_NAME = "QbertNoFrameskip-v4"
WORKER_BATCH_SIZE = 64
WORKER_CNT = 8
def _trainer(server_url):
    """Consume worker batches forever and periodically broadcast weights."""
    data_recv = perwez.RecvSocket(server_url, "data", broadcast=False)
    weight_send = perwez.SendSocket(server_url, "weight", broadcast=True)
    cnt = 0
    while True:
        latest_res = data_recv.recv()
        shapes = [item.shape for item in latest_res]
        cnt += 1
        # Log every 5th batch to keep output readable.
        if cnt % 5 == 0:
            print(f"recv data: {cnt}, shape: {shapes}")
        # Broadcast a 50 MiB dummy weight blob to all workers.
        weight_send.send(secrets.token_bytes(50 * 1024 * 1024))
        time.sleep(0.333)
def _worker(idx, server_url):
    """Roll out env steps, ship full batches, and stop after 10 weight recvs."""
    weight_recv = perwez.RecvSocket(server_url, "weight", broadcast=True)
    data_send = perwez.SendSocket(server_url, "data", broadcast=False)
    env = reth.env.make(ENV_NAME)
    buffer = reth.buffer.NumpyBuffer(WORKER_BATCH_SIZE, circular=False)
    w_cnt = 0
    d_cnt = 0
    s0 = env.reset().astype("f4")
    while w_cnt < 10:
        # Drain at most one pending weight update per step.
        if not weight_recv.empty():
            weight_recv.recv()
            w_cnt += 1
        s1, r, done, _ = env.step(env.action_space.sample())
        s1 = s1.astype("f4")
        buffer.append((s0, r, done, s1))
        if buffer.size == buffer.capacity:
            # Full batch: ship it and start over.
            data_send.send(buffer.data)
            d_cnt += 1
            buffer.clear()
            print(f"worker{idx}: recv weights {w_cnt}, send data {d_cnt}")
        s0 = env.reset().astype("f4") if done else s1
def test_pubsub_full():
    """End-to-end pubsub test: one trainer, WORKER_CNT workers, one server.

    Waits for the workers to finish, then tears everything down.
    """
    # Pre-initialize so the finally block cannot hit a NameError when
    # perwez.start_server() (or any later start) raises.
    server_proc = None
    trainer_proc = None
    procs = []
    try:
        server_proc, config = perwez.start_server()
        trainer_proc = mp.Process(target=_trainer, args=(config["url"],))
        trainer_proc.start()
        for idx in range(WORKER_CNT):
            proc = mp.Process(target=_worker, args=(idx, config["url"]))
            proc.start()
            procs.append(proc)
        # Workers exit on their own (after 10 weight updates).
        for p in procs:
            p.join()
    finally:
        # Trainer and server loop forever; terminate them explicitly.
        if trainer_proc is not None:
            trainer_proc.terminate()
            trainer_proc.join()
        for p in procs:
            p.terminate()
            p.join()
        if server_proc is not None:
            server_proc.terminate()
            server_proc.join()
|
color-control.py | import numpy as np
import cv2 as cv
import paho.mqtt.client as mqtt
import socket
import sys
import cv2 as cv
import pickle
import numpy as np
import struct
import zlib
import multiprocessing
from multiprocessing import Queue
import queue
import math
import time
######################## DEFINICIONES PARA MQTT ####################
host = "192.168.1.11"
topic = "Control/orden"
puerto = 1883
distancia = Queue(10)
def on_connect(client, userdata, flags, rc):
    """MQTT connect callback: subscribe to the distance topic on success."""
    if rc != 0:
        print("Connection failed")
        return
    print("Connected to broker")
    client.subscribe("Control/distancia")
def on_message(client, userdata, msg):
    """MQTT message callback: queue decoded payloads, dropping blanks.

    ``userdata`` is the bounded distance queue; messages arriving while it
    is full are silently discarded, as are bare CRLF keep-alives.
    """
    text = str(msg.payload.decode("utf-8"))
    if not userdata.full() and text != '\r\n':
        userdata.put(text)
def on_publish(client, userdata, result):
    """MQTT publish callback: intentionally a no-op."""
    return None
def cliente(cola_distancia, cola_objetivo):
    """MQTT control loop: turn target detections + distance readings into
    movement commands published on Control/orden.

    :param cola_distancia: queue of distance strings from the robot.
    :param cola_objetivo: queue of [color, x, y, area, tipo] targets.
    """
    client = mqtt.Client()
    client.on_connect = on_connect
    client.on_message = on_message
    client.on_publish = on_publish
    # The distance queue doubles as MQTT userdata for on_message.
    client.user_data_set(cola_distancia)
    client.connect(host, puerto)
    client.loop_start()
    objetivo = ""
    # Countdown used to alternate escape direction when too close.
    contadorREV = 10
    rotarREV = False
    distancia = 0
    while True:
        if cola_objetivo.empty():
            objetivo = "NA"
        else:
            objetivo = cola_objetivo.get()
            area = objetivo[3]
            print(area)
        # Normalize the distance reading; empty/absent readings become 0.
        if cola_distancia.empty():
            distancia = 0
        else:
            distancia = cola_distancia.get()
            if distancia == '':
                distancia = 0
            else:
                distancia = int(distancia)
        if distancia == 0:
            # No reading this cycle: keep the previous command.
            pass
        elif distancia == 100:
            # presumably a sentinel value, not centimeters — TODO confirm
            client.publish(topic, "on")
        elif distancia < 30:
            # Obstacle too close: back up for 10 cycles, then rotate for
            # 10 cycles, alternating (REVE = reverse, DERE = right).
            if not rotarREV:
                orden = "REVE"
                client.publish(topic, orden)
                contadorREV -= 1
                if contadorREV == 0:
                    rotarREV = True
                    contadorREV = 10
            else:
                orden = "DERE"
                client.publish(topic, orden)
                contadorREV -= 1
                if contadorREV == 0:
                    rotarREV = False
                    contadorREV = 10
        else:
            if objetivo == "NA":
                orden = "STOP"
                client.publish(topic, orden)
            else:
                color = objetivo[0]
                x = objetivo[1]
                area = objetivo[3]
                tipo = objetivo[4]
                if color == 'R':
                    if tipo == 'C':
                        # Chase the red square.
                        centro = 300
                        margen = 50
                        distanciamin = 100000
                        if x < (centro - margen):
                            # Target is off-center (left of the band).
                            orden = "IZQU"
                            client.publish(topic, orden)
                        elif x > (centro + margen):
                            # Target is off-center (right of the band).
                            orden = "DERE"
                            client.publish(topic, orden)
                        else:
                            # Target centered: advance until it looks big
                            # enough (area used as a distance proxy).
                            if area < distanciamin:
                                orden = "AVAN"
                                client.publish(topic, orden)
                            else:
                                orden = "STOP"
                                client.publish(topic, orden)
        time.sleep(0.015)
#################### SOCKET RECEPCION VIDEO #############
con = False
def videoRec(cola_framesR, cola_framesG, cola_framesB, cola_framesF):
    """Receive a pickled JPEG video stream over TCP and fan frames out to
    the per-color detector queues plus the controller queue.

    Wire format per frame: a big-endian uint32 length prefix followed by a
    pickled, cv2-encoded image buffer.
    """
    HOST = ''
    PORT = 8485
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print('Socket de video creado')
    s.bind((HOST, PORT))
    print('Socket de video bind completado')
    s.listen(10)
    print('Socket de video en espera')
    conn, addr = s.accept()
    print("Conexion de video establecida con %s:%s" % (addr[0], addr[1]))
    data = b""
    # 4-byte big-endian length prefix.
    payload_size = struct.calcsize(">L")
    print("payload_size: {}".format(payload_size))
    while True:
        # Accumulate until the length prefix is complete.
        while len(data) < payload_size:
            # print("Recv: {}".format(len(data)))
            data += conn.recv(4096)
            # print("Done Recv: {}".format(len(data)))
        packed_msg_size = data[:payload_size]
        data = data[payload_size:]
        msg_size = struct.unpack(">L", packed_msg_size)[0]
        # print("msg_size: {}".format(msg_size))
        # Accumulate until the whole frame payload has arrived.
        while len(data) < msg_size:
            data += conn.recv(4096)
        frame_data = data[:msg_size]
        data = data[msg_size:]
        # SECURITY NOTE: pickle.loads on network data executes arbitrary
        # code if the peer is untrusted — only use on a trusted LAN.
        frame = pickle.loads(frame_data, fix_imports=True, encoding="bytes")
        frame = cv.imdecode(frame, cv.IMREAD_COLOR)
        # The camera is mounted upside-down: flip both axes.
        frame = cv.flip(frame, 0)
        frame = cv.flip(frame, 1)
        cola_framesR.put(frame)
        cola_framesG.put(frame)
        cola_framesB.put(frame)
        cola_framesF.put(frame)
#################### OPENCV ####################
def nothing(x):
    """No-op callback; cv.createTrackbar requires a callable."""
    return None
font = cv.FONT_HERSHEY_COMPLEX_SMALL
def buscador(cola_frames, val_ini, color, cola_obj):
    """HSV-threshold detector for one color channel.

    Reads frames from ``cola_frames``, segments them with trackbar-tunable
    HSV bounds seeded from ``val_ini``, classifies contours as triangles
    ("T", bases) or 4-8 sided shapes ("C", targets), and reports
    [color, area, tipo, cx, cy] (or [color, "NA"]) on ``cola_obj``.
    """
    cv.namedWindow("Barras " + color)
    # Trackbars let the operator tune the HSV window live.
    cv.createTrackbar('L-H', "Barras " + color, val_ini.l_h, 180, nothing)
    cv.createTrackbar('U-H', "Barras " + color, val_ini.u_h, 180, nothing)
    cv.createTrackbar('L-S', "Barras " + color, val_ini.l_s, 255, nothing)
    cv.createTrackbar('U-S', "Barras " + color, val_ini.u_s, 255, nothing)
    cv.createTrackbar('L-V', "Barras " + color, val_ini.l_v, 255, nothing)
    cv.createTrackbar('U-V', "Barras " + color, val_ini.u_v, 255, nothing)
    while True:
        if(cola_frames.empty()):
            # No frame available: idle this iteration.
            nothing(1)
        else:
            frame = cola_frames.get()
            hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
            l_h = cv.getTrackbarPos('L-H', "Barras " + color)
            l_s = cv.getTrackbarPos('L-S', "Barras " + color)
            l_v = cv.getTrackbarPos('L-V', "Barras " + color)
            u_h = cv.getTrackbarPos('U-H', "Barras " + color)
            u_s = cv.getTrackbarPos('U-S', "Barras " + color)
            u_v = cv.getTrackbarPos('U-V', "Barras " + color)
            color_min = np.array([l_h, l_s, l_v])
            color_max = np.array([u_h, u_s, u_v])
            mask = cv.inRange(hsv, color_min, color_max)
            # Erode to drop speckle noise before contour extraction.
            kernel = np.ones((5, 5), np.uint8)
            mask = cv.erode(mask, kernel)
            contours, _ = cv.findContours(
                mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
            if(len(contours) < 1):
                # Nothing detected: report "NA" unless the queue is full.
                if cola_obj.full():
                    pass
                else:
                    cola_obj.put([color, "NA"])
            else:
                areamax = 0
                for contour in contours:
                    area = cv.contourArea(contour)
                    # Polygon approximation; vertex count classifies shape.
                    approx = cv.approxPolyDP(
                        contour, 0.02*cv.arcLength(contour, True), True)
                    x = approx.ravel()[0]
                    y = approx.ravel()[1]
                    if(area > 400):  # ignore tiny blobs
                        # Centroid from image moments.
                        momentos = cv.moments(contour)
                        cx = int(momentos['m10']/momentos['m00'])
                        cy = int(momentos['m01']/momentos['m00'])
                        if len(approx) == 3:
                            # Triangle -> "Base" marker, tag "T".
                            cv.drawContours(
                                frame, [approx], 0, (val_ini.B, val_ini.G, val_ini.R), 2)
                            cv.circle(frame, (cx, cy), 3,
                                      (val_ini.B, val_ini.G, val_ini.R), -1)
                            if cola_obj.full():
                                pass
                            else:
                                cola_obj.put([color, area, "T", cx, cy])
                            cv.putText(frame, "Base", (x, y),
                                       font, 1, (255, 255, 255))
                        elif len(approx) >= 4 and len(approx) <= 8:
                            # 4-8 vertices -> candidate target, tag "C";
                            # only the largest such contour is reported.
                            if area > areamax:
                                areamax = area
                                cv.drawContours(
                                    frame, [approx], 0, (val_ini.B, val_ini.G, val_ini.R), 2)
                                cv.circle(frame, (cx, cy), 3,
                                          (val_ini.B, val_ini.G, val_ini.R), -1)
                                if cola_obj.full():
                                    pass
                                else:
                                    cola_obj.put([color, area, "C", cx, cy])
                                cv.putText(frame, "Objetivo", (x, y),
                                           font, 1, (255, 255, 255))
            cv.imshow("Mask " + color, mask)
            cv.imshow("Siguiendo " + color, frame)
            if cv.waitKey(1) == ord('q'):
                break
    cv.waitKey(1)
class valores_ini:
    """Initial HSV threshold window and draw color for one detector."""
    # Lower/upper HSV bounds (hue 0-180, sat/val 0-255 in OpenCV).
    l_h = 0
    l_s = 0
    l_v = 0
    u_h = 0
    u_s = 0
    u_v = 0
    # BGR components of the annotation color drawn on matches.
    R = 0
    G = 0
    B = 0
"""val_R = valores_ini()
val_R.l_h = 0
val_R.l_s = 0
val_R.l_v = 0
val_R.u_h = 0
val_R.u_s = 0
val_R.u_v = 0
val_R.R = 0"""
val_R = valores_ini()
val_R.l_h = 140
val_R.l_s = 55
val_R.l_v = 0
val_R.u_h = 180
val_R.u_s = 229
val_R.u_v = 255
val_R.R = 255
"""val_G = valores_ini()
val_G.l_h = 45
val_G.l_s = 100
val_G.l_v = 40
val_G.u_h = 70
val_G.u_s = 255
val_G.u_v = 255
val_G.G = 255"""
val_G = valores_ini()
val_G.l_h = 0
val_G.l_s = 0
val_G.l_v = 0
val_G.u_h = 0
val_G.u_s = 0
val_G.u_v = 0
val_G.G = 0
"""val_B = valores_ini()
val_B.l_h = 80
val_B.l_s = 80
val_B.l_v = 40
val_B.u_h = 100
val_B.u_s = 255
val_B.u_v = 255
val_B.B = 255"""
val_B = valores_ini()
val_B.l_h = 0
val_B.l_s = 0
val_B.l_v = 0
val_B.u_h = 0
val_B.u_s = 0
val_B.u_v = 0
val_B.B = 0
########### CONTROL PRINCIPAL ################
class objets:
    """Record describing one detected object (defaults mean 'nothing')."""
    # Detector channel: 'R', 'G' or 'B'.
    color = ""
    # Shape tag from buscador: "T" (triangle/base) or "C" (target).
    tipo = ""
    # Contour area in pixels; 0 means no detection.
    area = 0
    # Centroid coordinates in the frame.
    x = 0
    y = 0
def control(cola_frames, cola_objR, cola_objG, cola_objB, cola_objetivo):
    """Pick the largest detection across the R/G/B detector queues and
    publish it on ``cola_objetivo`` as [color, x, y, area, tipo].

    Red wins ties with green/blue, green wins over blue; nothing is
    published when no detector reports a positive area.
    """
    objR = objets()
    objG = objets()
    objB = objets()
    objetivo = ""
    while True:
        # Pre-seed "no detection" sentinels: the original handler assigned
        # recR[1] on an EMPTY list inside `except`, which itself raised
        # IndexError. Index 1 == "NA" mirrors buscador's empty report.
        recR = [None, "NA"]
        recG = [None, "NA"]
        recB = [None, "NA"]
        try:
            recR = cola_objR.get()
            recG = cola_objG.get()
            recB = cola_objB.get()
        except Exception:  # keep the sentinels on any queue failure
            pass
        if recR[1] == "NA":
            objR = objets()
        else:
            objR.area = recR[1]
            objR.color = 'R'
            objR.x = recR[3]
            objR.y = recR[4]
            objR.tipo = recR[2]
        if recG[1] == "NA":
            objG = objets()
        else:
            objG.area = recG[1]
            objG.color = 'G'
            objG.x = recG[3]
            objG.y = recG[4]
            objG.tipo = recG[2]
        if recB[1] == "NA":
            objB = objets()
        else:
            objB.area = recB[1]
            objB.color = 'B'
            objB.x = recB[3]
            objB.y = recB[4]
            objB.tipo = recB[2]
        # Largest area wins; skip publishing when the target queue is full.
        if objR.area > objG.area and objR.area > objB.area and objR.area > 0:
            objetivo = "R"
            if not cola_objetivo.full():
                cola_objetivo.put(
                    [objetivo, objR.x, objR.y, objR.area, objR.tipo])
        elif objG.area > objB.area and objG.area > 0:
            objetivo = "G"
            if not cola_objetivo.full():
                cola_objetivo.put(
                    [objetivo, objG.x, objG.y, objG.area, objG.tipo])
        elif objB.area > 0:
            objetivo = "B"
            if not cola_objetivo.full():
                cola_objetivo.put(
                    [objetivo, objB.x, objB.y, objB.area, objB.tipo])
        else:
            objetivo = ""
########### MAIN ######################
if __name__ == '__main__':
    # Unbounded frame queues: one per color detector plus the controller.
    framesR = Queue()
    framesG = Queue()
    framesB = Queue()
    framesF = Queue()
    # Bounded queues carry detections and the chosen target.
    objetosR = Queue(10)
    objetosG = Queue(10)
    objetosB = Queue(10)
    objetivo = Queue(10)
    cliente_thread = multiprocessing.Process(
        target=cliente, args=(distancia, objetivo))
    videoRec_thread = multiprocessing.Process(
        target=videoRec, args=(framesR, framesG, framesB, framesF))
    R_thread = multiprocessing.Process(
        target=buscador, args=(framesR, val_R, "R", objetosR))
    G_thread = multiprocessing.Process(
        target=buscador, args=(framesG, val_G, "G", objetosG))
    B_thread = multiprocessing.Process(
        target=buscador, args=(framesB, val_B, "B", objetosB))
    control_thread = multiprocessing.Process(
        target=control, args=(framesF, objetosR, objetosG, objetosB, objetivo))
    cliente_thread.start()
    videoRec_thread.start()
    R_thread.start()
    G_thread.start()
    B_thread.start()
    control_thread.start()
    # All workers loop forever; these joins block until killed externally.
    cliente_thread.join()
    videoRec_thread.join()
    R_thread.join()
    G_thread.join()
    B_thread.join()
    control_thread.join()
    exit()
|
timing.py | """Time related utilities."""
import pytz
import sys
import os
import re
import collections
import functools
import time
import datetime
import threading
import traceback
class TimeoutException(Exception):
    """Raised when an operation exceeds its allotted time."""
class TimeoutExceptionInfo(object):
    """
    Holds timeout exception information.
    """

    def __init__(self, start_time=None):
        """
        Mark the time waiting started (defaults to now).
        """
        self.started = time.time() if start_time is None else start_time

    def msg(self):
        """
        Return a message to be used by TimeoutException containing
        timing information.
        """
        ended = time.time()
        fmt = "%Y-%m-%d %H:%M:%S"
        begin_str = datetime.datetime.fromtimestamp(self.started).strftime(fmt)
        end_str = datetime.datetime.fromtimestamp(ended).strftime(fmt)
        elapsed = round(ended - self.started, 2)
        return "Info[started at {}, raised at {} after {}s]".format(
            begin_str, end_str, elapsed
        )
class KThread(threading.Thread):
    """
    A subclass of threading.Thread, with a kill() method.

    Works by installing a settrace hook; after kill() the thread raises
    SystemExit at its next traced Python line. A thread blocked in C code
    (e.g. a long sleep or I/O call) will not die until it returns to
    Python-level execution.
    """
    def __init__(self, *args, **kwargs):
        threading.Thread.__init__(self, *args, **kwargs)
        # Flag polled by the trace function.
        self._will_kill = False
    def start(self):
        """Start the thread."""
        # Swap run() for a wrapper that installs the trace before the
        # real target executes.
        self.__run_backup = self.run
        self.run = self.__run  # Force the Thread to install the trace
        threading.Thread.start(self)
    def __run(self):
        """Hacked run function, which installs the trace."""
        sys.settrace(self.globaltrace)
        self.__run_backup()
        self.run = self.__run_backup
    def globaltrace(self, frame, event, arg):
        # Only request local (per-line) tracing inside function calls.
        return self.localtrace if event == "call" else None
    def localtrace(self, frame, event, arg):
        # Abort the thread at the next executed line once kill() was called.
        if self._will_kill and event == "line":
            raise SystemExit()
        return self.localtrace
    def kill(self):
        """Request that the thread exit at its next traced line."""
        self._will_kill = True
def timeout(seconds, err_msg="Timeout after {} seconds."):
    """
    Decorator for a normal function to limit its execution time.

    :param seconds: Time limit for task execution.
    :type seconds: ``int``
    :param err_msg: Error message on timeout.
    :type err_msg: ``str``
    :return: Decorated function.
    :rtype: ``callable``
    """
    def timeout_decorator(func):
        """Wrap ``func`` so it runs on a killable worker thread."""
        def _new_func(result, old_func, old_func_args, old_func_kwargs):
            # Runs in the worker thread: records the return value, or marks
            # failure and stores the formatted traceback.
            try:
                result.append(old_func(*old_func_args, **old_func_kwargs))
            except Exception:
                result[0] = False
                result.append(traceback.format_exc())
        def wrapper(*args, **kwargs):
            # result[0] is a success flag; result[1] (when present) is the
            # wrapped function's return value or a traceback string.
            result = [True]
            new_kwargs = {
                "result": result,
                "old_func": func,
                "old_func_args": args,
                "old_func_kwargs": kwargs,
            }
            thd = KThread(target=_new_func, args=(), kwargs=new_kwargs)
            thd.start()
            thd.join(seconds)
            if thd.is_alive():
                # Still running past the deadline: kill it and raise.
                thd.kill()
                thd.join()
                raise TimeoutException(err_msg.format(seconds))
            else:
                # NOTE(review): returns the [flag, value] list itself, not
                # the wrapped function's return value — callers must unpack.
                return result
        return functools.wraps(func)(wrapper)
    return timeout_decorator
def wait(predicate, timeout, interval=0.05, raise_on_timeout=True):
    """
    Wait until a predicate evaluates to True.

    :param predicate: Input predicate.
    :type predicate: ``callable``
    :param timeout: Timeout duration.
    :type timeout: ``int``
    :param interval: Sleep interval for predicate check.
    :type interval: ``float``
    :param raise_on_timeout: Raise exception if hits timeout, defaults to True.
    :type raise_on_timeout: ``bool``
    :return: Predicate result.
    :rtype: ``bool``
    """
    deadline = time.time() + timeout
    while True:
        res = predicate()
        # A falsy result may still carry diagnostic detail.
        error_msg = getattr(res, "error_msg", "")
        if res is True:
            return res
        if time.time() >= deadline:
            if not raise_on_timeout:
                return res
            msg = "Timeout after {} seconds.".format(timeout)
            if error_msg:
                msg = "{}{}{}".format(msg, os.linesep, error_msg)
            raise TimeoutException(msg)
        # no timeout yet
        time.sleep(interval)
def wait_until_predicate(predicate, timeout, interval=1.0):
    """
    Inverting wait() method behavior to raise if predicate() is True
    instead of raising on timeout.

    :param predicate: any callable object
    :type predicate: ``callable``
    :param timeout: timeout in seconds
    :type timeout: ``float``
    :param interval: interval at which to check the predicate in seconds
    :type interval: ``float``
    :raises:
        :exc:`RuntimeError` if the predicate is True.
    """
    try:
        outcome = wait(predicate, timeout, interval, raise_on_timeout=True)
    except TimeoutException:
        # Timing out is the expected, successful path here.
        return
    raise RuntimeError(
        "Early finish of wait(), predicate: {}.".format(outcome)
    )
def retry_until_timeout(
    exception,
    item,
    timeout,
    args=None,
    kwargs=None,
    interval=0.05,
    raise_on_timeout=True,
):
    """
    Retry calling an item until timeout duration while ignoring exceptions.

    :param exception: Exception class to catch.
    :type exception: ``type``
    :param item: Function to call.
    :type item: ``callable``
    :param timeout: Timeout duration in seconds.
    :type timeout: ``number``
    :param args: Positional args to pass to ``item``
    :type args: ``Optional[Iterable[Any]]``
    :param kwargs: Keyword args to pass to ``item``
    :type kwargs: ``Optional[Dict[str, Any]]``
    :param interval: time to wait between successive call attempts, in seconds.
    :type interval: ``int``
    :param raise_on_timeout: Whether to raise a TimeoutException on timeout,
        defaults to True.
    :return: Result of item, or None on a non-raising timeout.
    :rtype: ``Any``
    """
    timeout_info = TimeoutExceptionInfo()
    end_time = timeout_info.started + timeout
    while True:
        try:
            res = item(*args or tuple(), **kwargs or {})
        except exception as exc:
            if time.time() < end_time:
                # no timeout yet
                time.sleep(interval)
            else:
                if raise_on_timeout:
                    # Include timing info and the last caught exception.
                    raise TimeoutException(
                        "Timeout waiting for {0}"
                        " to return without {1}. {2}. {3}".format(
                            item.__name__,
                            exception.__name__,
                            timeout_info.msg(),
                            str(exc),
                        )
                    )
                else:
                    return None
        else:
            # First successful call ends the retry loop.
            return res
def utcnow():
    """Timezone aware UTC now."""
    # datetime.utcnow() is deprecated (it returns a naive datetime that then
    # needs tzinfo patched in); build the aware value directly instead.
    return datetime.datetime.now(tz=pytz.UTC)
_Interval = collections.namedtuple("_Interval", "start end")


class Interval(_Interval):
    """Class that represents a block of time."""

    @property
    def elapsed(self):
        """Return duration in seconds, or None when either end is unset."""
        if not (self.start and self.end):
            return None
        return (self.end - self.start).total_seconds()
class TimerCtxManager(object):
    """
    Context manager for storing durations.
    Uses tz aware utc timestamps.
    """
    def __init__(self, timer, key):
        # Refuse to clobber an existing measurement for the same key.
        if key in timer:
            raise ValueError("Cannot overwrite `Interval` for: {}".format(key))
        self.timer = timer
        self.key = key
        # Set on __enter__.
        self.start_ts = None
    def __enter__(self):
        self.start_ts = utcnow()
    def __exit__(self, exc_type, exc_value, _):
        # Record the completed Interval even when the body raised.
        self.timer[self.key] = Interval(start=self.start_ts, end=utcnow())
class Timer(dict):
    """Dict wrapper with a method for recording durations.

    Keys map to Interval values; use record() for scoped timing or the
    start()/end() pair for manual timing.
    """
    def record(self, key):
        """
        Records duration for the given `key`.

        .. code-block:: python

            >>> timer = Timer()
            >>> with timer.record('my-key'):
            >>>    ... custom code ...
            >>> ... custom code ...
            >>> timer['my-key'].elapsed
            21.5
        """
        return TimerCtxManager(timer=self, key=key)
    def start(self, key):
        """Record the start timestamp for the given key."""
        if key in self:
            raise ValueError(
                "`start` already recorded for key: `{}`".format(key)
            )
        # Open-ended interval until end() is called.
        self[key] = Interval(utcnow(), None)
    def end(self, key):
        """
        Record the end timestamp for the given key.
        Can be called multiple times with the same key, which will keep
        overwriting the previous `end` timestamp.
        """
        if key not in self:
            raise KeyError(
                "`start` missing for {}, cannot record end.".format(key)
            )
        self[key] = Interval(self[key].start, utcnow())
DURATION_REGEX = re.compile(
r"((?P<hours>\d+)[H|h])?\s*"
r"((?P<minutes>\d+)[M|m])?\s*?"
r"((?P<seconds>\d+)[S|s])?"
)
DURATION_MSG = (
"Invalid duration pattern: {pattern}."
" Please use the format <hours>h <minutes>m <seconds>s"
" (e.g. `2h 30m`, `15m`, `3m 15s`, `10s`) with nonzero values."
)
def parse_duration(duration):
    """
    Parse given duration string and return duration value in seconds.

    :param duration: Duration value in format `<hours>H <minutes>M <seconds>S`
    :type duration: ``str``
    :return: Duration in seconds
    :rtype: ``int``
    :raises ValueError: if the pattern does not match or totals zero.
    """
    def _get_value(match_obj, group_name):
        # Absent parts (e.g. no "h" component) contribute zero.
        val = match_obj.group(group_name)
        return int(val) if val is not None else 0

    match = DURATION_REGEX.match(duration)
    err_msg = DURATION_MSG.format(pattern=duration)
    if not match:
        raise ValueError(err_msg)
    hours = _get_value(match, "hours")
    minutes = _get_value(match, "minutes")
    seconds = _get_value(match, "seconds")
    result = (hours * 3600) + (minutes * 60) + seconds
    # The regex matches the empty string, so garbage / all-zero input is
    # only rejected here.
    if result <= 0:
        raise ValueError(err_msg)
    # Return the cached sum (was needlessly recomputed on return before).
    return result
def format_duration(duration):
    """
    Format seconds in hours / minutes / seconds in readable format.

    >>> format_duration(3730)
    '1 hours 2 minutes 10 seconds'

    :param duration: Total duration in seconds
    :type duration: ``number``
    :return: Duration in readable format.
    :rtype: ``str``
    """
    assert duration > 0, "`duration` must be nonzero number."
    # Floor division for hours: the previous `duration / 3600` (true
    # division) rendered e.g. "1.0361111111111112 hours" for 3730 instead
    # of the "1 hours" promised by the docstring example.
    hours = duration // 3600
    minutes = duration // 60 % 60
    seconds = duration % 60
    result = []
    if hours >= 1:
        result.append("{} hours".format(hours))
    if minutes >= 1:
        result.append("{} minutes".format(minutes))
    if seconds:
        result.append("{} seconds".format(seconds))
    return " ".join(result)
def exponential_interval(
    initial=0.1, multiplier=2, maximum=None, minimum=None
):
    """
    Generator that returns exponentially increasing/decreasing values,
    can be used for generating values for `time.sleep` for periodic checks.

    :param initial: Initial value for the sequence.
    :type initial: ``number``
    :param multiplier: Multiplier for generating new values in the sequence.
        Each new value will be generated by multiplication of
        the multiplier and the last generated value of the sequence.
    :type multiplier: ``number``
    :param minimum: Optional minimum value for generated numbers.
    :type minimum: ``number``
    :param maximum: Optional maximum value for generated numbers.
    :type maximum: ``number``
    :return: Sequence of values
    :rtype: ``generator`` of ``number``
    """
    val = initial
    while True:
        if minimum is not None and val < minimum:
            # Clamp from below but keep advancing the underlying sequence.
            # The previous independent `if` fell through and yielded BOTH
            # `minimum` and `val` on the same cycle.
            yield minimum
            val *= multiplier
        elif maximum is not None and val > maximum:
            # Saturate at maximum without growing `val` any further.
            yield maximum
        else:
            yield val
            val *= multiplier
def get_sleeper(
    interval, timeout=10, raise_timeout_with_msg=None, timeout_info=False
):
    """
    Generator that implements sleep steps for replacing
    *while True: do task; time.sleep()* code blocks. Depending on the interval
    argument, it can sleeps with constant interval or start with min_interval
    and then doubles the interval in each iteration up to max_interval.
    It yields True until timeout is reached where it then yields False or
    raises a TimeoutException based on input arguments.

    :param interval: Sleep time between each yield in seconds.
    :type interval: ``float`` or tuple of ``float`` as
        (min_interval, max_interval)
    :param timeout: Timeout in seconds
    :type timeout: ``float``
    :param raise_timeout_with_msg: Message or Function to be used for raising
        an optional TimeoutException.
    :type raise_timeout_with_msg: ``NoneType`` or ``str`` or ``callable``
    :param timeout_info: Include timeout exception timing information in
        exception message raised.
    :type timeout_info: ``bool``
    """
    start = time.time()
    timeout_info_obj = TimeoutExceptionInfo(start)
    end = start + timeout
    incr_interval = False
    # A tuple interval means exponential backoff between min and max.
    if isinstance(interval, tuple):
        interval, max_interval = interval
        incr_interval = True
    while True:
        yield True
        # Sleep happens BEFORE the deadline check, so the caller always
        # gets at least one True even for a zero timeout.
        time.sleep(interval)
        if time.time() > end:
            if raise_timeout_with_msg:
                # Message may be a plain string or a lazy callable.
                if callable(raise_timeout_with_msg):
                    msg = raise_timeout_with_msg()
                else:
                    msg = raise_timeout_with_msg
                if timeout_info:
                    msg = "{}. {}".format(msg, timeout_info_obj.msg())
                raise TimeoutException(msg)
            break
        if incr_interval:
            # Double the sleep each round, capped at max_interval.
            interval = min(interval * 2, max_interval)
    # Reached only on a non-raising timeout: signal "stop looping" once.
    yield False
|
starter.py | import sys
import time
from threading import Thread
from crawler4py.dispatch.dispatch import Dispatch
from crawler4py.dispatch.monitor import Monitor
from crawler4py.download.downloader import Downloader
from crawler4py.extractor.extractor import Extractor
from crawler4py.log import Logger
from crawler4py.storage_dup import BaseStorageDup
from crawler4py.util.running_params import task_q
'''启动器'''
class Starter(object):
    """Crawler bootstrapper: wires up dispatch, download, extract and
    storage/dedup components and starts monitoring threads."""
    # Singleton instance; only populated via get_instance().
    __instance = None
    def __init__(self, url=None, **setting):
        if not Starter.__instance:
            # NOTE: "simple mode" (driving a single start url) is
            # effectively deprecated and slated for removal.
            super(Starter, self).__init__()
            self.url = url
            self.setting = setting
            # crawler_mode falsy (0) == simple mode, which needs a url.
            self.crawler_mode = setting.get("crawler_mode") if setting.get("crawler_mode") else 0
            assert (url or self.crawler_mode), "简单模式下,url和爬虫类型不能同时为false"
            dispatch_thread_size = setting.get("dispatch_thread_size") if setting.get(
                "dispatch_thread_size") is not None else 1
            # A list value is [size, sub-flag, sub-flag, ...]; the tail
            # configures the dispatcher's sub-threads.
            if isinstance(dispatch_thread_size, list):
                self.dispatch_sub = dispatch_thread_size[1:]
                dispatch_thread_size = dispatch_thread_size[0]
            else:
                self.dispatch_sub = [True, True, True]
            self.dispatch_thread_size = dispatch_thread_size
            self.downloader_thread_size = setting.get("downloader_thread_size") if setting.get(
                "downloader_thread_size") is not None else 1
            self.extractor_thread_size = setting.get("extractor_thread_size") if setting.get(
                "extractor_thread_size") is not None else 1
            self.storage_dup_thread_size = setting.get("storage_dup_thread_size") if setting.get(
                "storage_dup_thread_size") is not None else 1
    @classmethod
    def get_instance(cls, url=None, **setting):
        # NOTE(review): __init__ never assigns __instance, so the
        # singleton is only enforced when constructed via get_instance().
        if not Starter.__instance:
            cls.__instance = Starter(url=url, **setting)
        return cls.__instance
    def start(self):
        """
        Start the crawler.
        :return:
        """
        Logger.get_instance(**self.setting)  # create the logger singleton
        crawler = Dispatch(**self.setting)
        try:
            sys.path.append(self.setting.get("base_dir"))
        except AttributeError:
            pass
        if not self.crawler_mode:
            # Simple mode: seed the task queue with the single start url.
            task_q.put(self.url)
        self.install(crawler)
        crawler.start()
        if self.crawler_mode:
            time.sleep(5)
            self.monitor()
    def install(self, crawler: Dispatch):
        """
        Install each worker component onto the dispatcher.
        :param crawler: root Dispatch instance receiving the components.
        :return:
        """
        for i in range(self.dispatch_thread_size):
            crawler.installed(Dispatch(**self.setting), crawler.dispatch)
        for i in range(self.downloader_thread_size):
            crawler.installed(Downloader(**self.setting), crawler.downloader)
        for i in range(self.extractor_thread_size):
            crawler.installed(Extractor(**self.setting), crawler.extractor)
        for i in range(self.storage_dup_thread_size):
            crawler.installed(BaseStorageDup(**self.setting), crawler.storage_dup)
    def monitor(self):
        """
        Start the thread and system monitors.
        :return:
        """
        Thread(target=Monitor.get_instance().thread_monitor,
               args=(self.downloader_thread_size, self.extractor_thread_size, self.storage_dup_thread_size,
                     self.dispatch_thread_size, self.dispatch_sub), name="thread_monitor").start()
        Thread(target=Monitor.get_instance().sys_monitor, name="sys-monitor").start()
|
udp_streaming.py | import base64
import pickle
import socket
import struct
import wave
import cv2, imutils, time
import queue as pyqueue
import os
import threading
import pyaudio
fullpath = "/home/yagorezende/VSCodeProjects/TCC00314-Streaming/streaming_side/videos/"  # os.path.abspath(os.getcwd()).replace("testing", "videos/")
# Frame queue. TODO: switch to one queue per client.
queue = pyqueue.Queue(maxsize=30)
video_name = "KimiNoNaWa_240p.mp4"
filename = fullpath + video_name

# Extract the audio track to temp.wav (160 kbps, stereo, 44.1 kHz, no video).
# BUG FIX: the f-string had no placeholder, so ffmpeg received a literal
# bogus input path; interpolate the actual video file.
# NOTE(review): os.system goes through the shell - a filename containing a
# quote would break/inject. Prefer subprocess.run([...]) if paths ever
# become untrusted.
command = f"ffmpeg -i '{filename}' -ab 160k -ac 2 -ar 44100 -vn temp.wav"
os.system(command)

# [ SERVER CONFIG ] - UDP server socket setup.
BUFF_SIZE = 65536
server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, BUFF_SIZE)
host_name = socket.gethostname()
host_ip = 'localhost'  # socket.gethostbyname(host_name)
print(host_ip)
port = 9699
socket_address = (host_ip, port)
server_socket.bind(socket_address)
print('Listening at:', socket_address)
# [ ENDOF SERVER CONFIG ]

# [ VIDEO METADATA ]
vid = cv2.VideoCapture(filename)  # loaded video file
FPS = vid.get(cv2.CAP_PROP_FPS)  # target frames per second
print("FPS:", FPS)
TS = 1 / FPS  # seconds per frame; drives audio/video pacing below
print("TS:", TS, "seconds")
BREAK = False  # end-of-stream flag (appears unused - TODO confirm)
print('FPS:', FPS, TS)
totalNoFrames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))  # total frame count
durationInSeconds = float(totalNoFrames) / float(FPS)
d = vid.get(cv2.CAP_PROP_POS_MSEC)  # presumably current position in ms - confirm
print(durationInSeconds, d)
# [ ENDOF VIDEO METADATA ]
def video_stream_gen():
    """Decode frames from the global `vid` capture and push them onto `queue`.

    Runs until the video is exhausted (or decoding fails), then sets the
    module-level BREAK flag and releases the capture.

    :return: None
    """
    global BREAK  # BUG FIX: without this, BREAK = True created a dead local
    while vid.isOpened():
        ok, frame = vid.read()
        if not ok:
            # BUG FIX: on end-of-stream read() returns (False, None); the
            # original queued None, which would crash the consumer.
            break
        # frame = imutils.resize(frame, width=420)
        queue.put(frame)
    print('Player closed')
    BREAK = True
    vid.release()
def audio_stream():
    """Serve the extracted audio (temp.wav) over TCP on (port - 1).

    Accepts a single client, then streams pickled CHUNK-sized blocks of
    audio frames, each prefixed with its length packed as "Q"
    (unsigned long long).

    :return: None
    """
    s = socket.socket()
    s.bind((host_ip, (port - 1)))
    s.listen(5)
    CHUNK = 1024 * 4
    wf = wave.open("temp.wav", 'rb')
    print('server listening at', (host_ip, (port - 1)))
    # NOTE: the original also opened an unused pyaudio *input* stream,
    # which only grabbed the audio device; removed.
    client_socket, addr = s.accept()
    try:
        while True:
            data = wf.readframes(CHUNK)
            if not data:
                # BUG FIX: readframes returns b"" at end-of-file; the
                # original looped forever sending empty payloads.
                break
            payload = pickle.dumps(data)
            message = struct.pack("Q", len(payload)) + payload
            client_socket.sendall(message)
    finally:
        client_socket.close()
        wf.close()
        s.close()
# variáveis para manter a taxa de frames atual/corrente
fps, st, frames_to_count, cnt = (0, 0, 20, 0)
# thread para gerar os frames
t1 = threading.Thread(target=video_stream_gen)
t1.start()
# thread para gerar os audios
t2 = threading.Thread(target=audio_stream)
t2.start()
while True:
msg, client_addr = server_socket.recvfrom(BUFF_SIZE)
print('GOT connection from ', client_addr)
WIDTH = 420
# display video on server side
while True:
frame = queue.get()
# envia pelo socket
encoded, buffer = cv2.imencode('.jpeg', frame, [cv2.IMWRITE_JPEG_QUALITY, 80])
message = base64.b64encode(buffer)
server_socket.sendto(message, client_addr)
frame = cv2.putText(frame, f"FPS: {round(fps, 1)}", (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
# print(fps) # variação do fps
if cnt == frames_to_count:
try:
fps = (frames_to_count / (time.time() - st))
st = time.time() # guardando referencia para fazer o delta time
cnt = 0
# Pulo do gato, controle do frame rate
if fps > FPS:
TS += 0.001 # acrescentando um delay de 1 millisec
elif fps < FPS:
TS -= 0.001 # reduzindo o delay em 1 millisec
else:
pass
except:
pass
cnt += 1
cv2.imshow(video_name, frame)
# tempo que o frame fica na tela
# Pulo do gato, delay para apresentar no frame rate correto
key = cv2.waitKey(int(TS * 1000)) & 0xFF
if key == ord("q"):
TS = False
os._exit(1)
break
|
test_util.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
import random
import re
import tempfile
import threading
import numpy as np
import six
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import versions
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util.protobuf import compare
def assert_ops_in_graph(expected_ops, graph):
  """Assert all expected operations are found.
  Args:
    expected_ops: `dict<string, string>` of op name to op type.
    graph: Graph to check.
  Returns:
    `dict<string, node>` of node name to node.
  Raises:
    ValueError: If the expected ops are not present in the graph.
  """
  found = {}
  graph_def = graph.as_graph_def()
  for node in graph_def.node:
    expected_type = expected_ops.get(node.name)
    if expected_type is None:
      continue
    # Node is expected: its op type must match exactly.
    if node.op != expected_type:
      raise ValueError(
          "Expected op for node %s is different. %s vs %s" % (
              node.name, expected_type, node.op))
    found[node.name] = node
  # Every expected node must have been encountered.
  if set(expected_ops.keys()) != set(found.keys()):
    raise ValueError(
        "Not all expected ops are present. Expected %s, found %s" % (
            expected_ops.keys(), found.keys()))
  return found
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
  """Asserts that two `GraphDef`s are (mostly) the same.
  Compares two `GraphDef` protos for equality, ignoring versions and ordering of
  nodes, attrs, and control inputs. Node names are used to match up nodes
  between the graphs, so the naming of nodes must be consistent.
  Args:
    actual: The `GraphDef` we have.
    expected: The `GraphDef` we expected.
    checkpoint_v2: boolean determining whether to ignore randomized attribute
      values that appear in V2 checkpoints.
  Raises:
    AssertionError: If the `GraphDef`s do not match.
    TypeError: If either argument is not a `GraphDef`.
  """
  # Validate both arguments up front with identical error wording.
  for label, graph in (("actual", actual), ("expected", expected)):
    if not isinstance(graph, graph_pb2.GraphDef):
      raise TypeError("Expected tf.GraphDef for %s, got %s" %
                      (label, type(graph).__name__))
  if checkpoint_v2:
    # Strip per-run randomized sharded-save attrs before comparing.
    _strip_checkpoint_v2_randomized(actual)
    _strip_checkpoint_v2_randomized(expected)
  diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
                                                expected.SerializeToString())
  if diff:
    raise AssertionError(compat.as_str(diff))
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, attr_tensor_string_value)):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
  # Thin wrapper over the native pywrap_tensorflow binding; truthiness of
  # the result is decided on the C++ side of the build.
  return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
  # Thin wrapper over the native pywrap_tensorflow binding.
  return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
class TensorFlowTestCase(googletest.TestCase):
  """Base class for tests that need to test TensorFlow.
  """

  def __init__(self, methodName="runTest"):
    super(TensorFlowTestCase, self).__init__(methodName)
    self._threads = []           # _CheckedThreads verified in tearDown
    self._tempdir = None         # lazily created per-test temp dir
    self._cached_session = None  # session reused across tests (test_session)
def setUp(self):
  """Reset graph state and seed all RNGs so tests are deterministic."""
  self._ClearCachedSession()
  random.seed(random_seed.DEFAULT_GRAPH_SEED)
  np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
  ops.reset_default_graph()
  ops.get_default_graph().seed = random_seed.DEFAULT_GRAPH_SEED
def tearDown(self):
  """Verify all checked threads terminated, then drop the cached session."""
  for thread in self._threads:
    self.assertFalse(thread.is_alive(), "A checkedThread did not terminate")
  self._ClearCachedSession()
def _ClearCachedSession(self):
  # Close and forget the shared session so the next test gets a fresh one.
  if self._cached_session is not None:
    self._cached_session.close()
    self._cached_session = None
def get_temp_dir(self):
  """Returns a unique temporary directory for the test to use.
  Across different test runs, this method will return a different folder.
  This will ensure that across different runs tests will not be able to
  pollute each others environment.
  Returns:
    string, the path to the unique temporary directory created for this test.
  """
  # Created lazily and cached so repeated calls in one test share the dir.
  if not self._tempdir:
    self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
  return self._tempdir
def _AssertProtoEquals(self, a, b):
  """Asserts that a and b are the same proto.
  Uses ProtoEq() first, as it returns correct results
  for floating point attributes, and then use assertProtoEqual()
  in case of failure as it provides good error messages.
  Args:
    a: a proto.
    b: another proto.
  """
  # Only fall through to the message-producing comparison when the fast,
  # float-safe equality check reports a difference.
  if not compare.ProtoEq(a, b):
    compare.assertProtoEqual(self, a, b, normalize_numbers=True)
def assertProtoEquals(self, expected_message_maybe_ascii, message):
  """Asserts that message is same as parsed expected_message_ascii.
  Creates another prototype of message, reads the ascii message into it and
  then compares them using self._AssertProtoEqual().
  Args:
    expected_message_maybe_ascii: proto message in original or ascii form
    message: the message to validate
  """
  # NOTE(review): exact type() comparison, not isinstance - a proto
  # subclass instance would fall through to the failure branch. Confirm
  # this strictness is intended.
  if type(expected_message_maybe_ascii) == type(message):
    expected_message = expected_message_maybe_ascii
    self._AssertProtoEquals(expected_message, message)
  elif isinstance(expected_message_maybe_ascii, str):
    # Parse the ascii text into a fresh message of the same type.
    expected_message = type(message)()
    text_format.Merge(expected_message_maybe_ascii, expected_message)
    self._AssertProtoEquals(expected_message, message)
  else:
    assert False, ("Can't compare protos of type %s and %s" %
                   (type(expected_message_maybe_ascii), type(message)))
def assertProtoEqualsVersion(
    self, expected, actual, producer=versions.GRAPH_DEF_VERSION,
    min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
  """Like assertProtoEquals, prepending a versions block to `expected`."""
  expected = "versions { producer: %d min_consumer: %d };\n%s" % (
      producer, min_consumer, expected)
  self.assertProtoEquals(expected, actual)
def assertStartsWith(self, actual, expected_start, msg=None):
  """Assert that actual.startswith(expected_start) is True.
  Args:
    actual: str
    expected_start: str
    msg: Optional message to report on failure.
  """
  # Guard clause: nothing to do on success.
  if actual.startswith(expected_start):
    return
  failure = "%r does not start with %r" % (actual, expected_start)
  if msg:
    failure += " : %r" % (msg)
  self.fail(failure)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
                 graph=None,
                 config=None,
                 use_gpu=False,
                 force_gpu=False):
  """Returns a TensorFlow Session for use in executing tests.
  This method should be used for all functional tests.
  This method behaves different than session.Session: for performance reasons
  `test_session` will by default (if `graph` is None) reuse the same session
  across tests. This means you may want to either call the function
  `reset_default_graph()` before tests, or if creating an explicit new graph,
  pass it here (simply setting it with `as_default()` won't do it), which will
  trigger the creation of a new session.
  Use the `use_gpu` and `force_gpu` options to control where ops are run. If
  `force_gpu` is True, all ops are pinned to `/gpu:0`. Otherwise, if `use_gpu`
  is True, TensorFlow tries to run as many ops on the GPU as possible. If both
  `force_gpu and `use_gpu` are False, all ops are pinned to the CPU.
  Example:
    class MyOperatorTest(test_util.TensorFlowTestCase):
      def testMyOperator(self):
        with self.test_session(use_gpu=True):
          valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
          result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
          invalid_input = [-1.0, 2.0, 7.0]
          with self.assertRaisesOpError("negative input not supported"):
            MyOperator(invalid_input).eval()
  Args:
    graph: Optional graph to use during the returned session.
    config: An optional config_pb2.ConfigProto to use to configure the
      session.
    use_gpu: If True, attempt to run as many ops as possible on GPU.
    force_gpu: If True, pin all ops to `/gpu:0`.
  Returns:
    A Session object that should be used as a context manager to surround
    the graph building and execution code in a test case.
  """
  if self.id().endswith(".test_session"):
    self.skipTest("Not a test.")

  def prepare_config(config):
    # Build (or adjust) the session config. Soft placement is disabled
    # when the caller forces GPU, so misplaced ops fail loudly.
    if config is None:
      config = config_pb2.ConfigProto()
      config.allow_soft_placement = not force_gpu
      config.gpu_options.per_process_gpu_memory_fraction = 0.3
    elif force_gpu and config.allow_soft_placement:
      # BUG FIX: Message.CopyFrom() returns None, so the original
      # `config = ConfigProto().CopyFrom(config)` left config as None and
      # crashed on the next attribute assignment. Copy, then mutate.
      new_config = config_pb2.ConfigProto()
      new_config.CopyFrom(config)
      new_config.allow_soft_placement = False
      config = new_config
    # Don't perform optimizations for tests so we don't inadvertently run
    # gpu ops on cpu
    config.graph_options.optimizer_options.opt_level = -1
    return config

  if graph is None:
    # Lazily create and then reuse the cached session for speed.
    if self._cached_session is None:
      self._cached_session = session.Session(graph=None,
                                             config=prepare_config(config))
    sess = self._cached_session
    with sess.graph.as_default(), sess.as_default():
      if force_gpu:
        with sess.graph.device("/gpu:0"):
          yield sess
      elif use_gpu:
        yield sess
      else:
        with sess.graph.device("/cpu:0"):
          yield sess
  else:
    # Explicit graph: always a fresh session, closed on exit.
    with session.Session(graph=graph, config=prepare_config(config)) as sess:
      if force_gpu:
        with sess.graph.device("/gpu:0"):
          yield sess
      elif use_gpu:
        yield sess
      else:
        with sess.graph.device("/cpu:0"):
          yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
  """A wrapper class for Thread that asserts successful completion.
  This class should be created using the TensorFlowTestCase.checkedThread()
  method.
  """

  def __init__(self, testcase, target, args=None, kwargs=None):
    """Constructs a new instance of _CheckedThread.
    Args:
      testcase: The TensorFlowTestCase for which this thread is being created.
      target: A callable object representing the code to be executed in the
        thread.
      args: A tuple of positional arguments that will be passed to target.
      kwargs: A dictionary of keyword arguments that will be passed to target.
    """
    self._testcase = testcase
    self._target = target
    self._args = () if args is None else args
    self._kwargs = {} if kwargs is None else kwargs
    self._thread = threading.Thread(target=self._protected_run)
    self._exception = None  # exception raised by target, reported in join()

  def _protected_run(self):
    """Target for the wrapper thread. Sets self._exception on failure."""
    try:
      self._target(*self._args, **self._kwargs)
    except Exception as e:  # pylint: disable=broad-except
      # Deliberately broad: any failure must be surfaced by join().
      self._exception = e

  def start(self):
    """Starts the thread's activity.
    This must be called at most once per _CheckedThread object. It arranges
    for the object's target to be invoked in a separate thread of control.
    """
    self._thread.start()

  def join(self):
    """Blocks until the thread terminates.
    Raises:
      self._testcase.failureException: If the thread terminates with due to
        an exception.
    """
    self._thread.join()
    if self._exception is not None:
      self._testcase.fail(
          "Error in checkedThread: %s" % str(self._exception))

  def is_alive(self):
    """Returns whether the thread is alive.
    This method returns True just before the run() method starts
    until just after the run() method terminates.
    Returns:
      True if the thread is alive, otherwise False.
    """
    return self._thread.is_alive()
def checkedThread(self, target, args=None, kwargs=None):
  """Returns a Thread wrapper that asserts 'target' completes successfully.
  This method should be used to create all threads in test cases, as
  otherwise there is a risk that a thread will silently fail, and/or
  assertions made in the thread will not be respected.
  Args:
    target: A callable object to be executed in the thread.
    args: The argument tuple for the target invocation. Defaults to ().
    kwargs: A dictionary of keyword arguments for the target invocation.
      Defaults to {}.
  Returns:
    A wrapper for threading.Thread that supports start() and join() methods.
  """
  ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
  # Track the thread so tearDown can assert that it terminated.
  self._threads.append(ret)
  return ret
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err, msg=None):
  """Asserts that two floats are near each other.
  Checks that |f1 - f2| <= err and asserts a test failure
  if not.
  Args:
    f1: A float value.
    f2: A float value.
    err: A float value.
    msg: An optional string message to append to the failure message.
  """
  # Inclusive comparison (<=), so err == 0 still accepts exact equality.
  self.assertTrue(math.fabs(f1 - f2) <= err,
                  "%f != %f +/- %f%s" % (
                      f1, f2, err, " (%s)" % msg if msg is not None else ""))
def assertArrayNear(self, farray1, farray2, err):
  """Asserts that two float arrays are near each other.
  Checks that for all elements of farray1 and farray2
  |f1 - f2| < err. Asserts a test failure if not.
  Args:
    farray1: a list of float values.
    farray2: a list of float values.
    err: a float value.
  """
  # Lengths must match before element-wise comparison.
  self.assertEqual(len(farray1), len(farray2))
  for f1, f2 in zip(farray1, farray2):
    self.assertNear(float(f1), float(f2), err)
def _NDArrayNear(self, ndarray1, ndarray2, err):
  # True when the L2 norm of the element-wise difference is below err.
  delta = np.linalg.norm(ndarray1 - ndarray2)
  return delta < err
def assertNDArrayNear(self, ndarray1, ndarray2, err):
  """Asserts that two numpy arrays have near values.
  Args:
    ndarray1: a numpy ndarray.
    ndarray2: a numpy ndarray.
    err: a float. The maximum absolute difference allowed.
  """
  # NOTE(review): despite the doc, the check is on the L2 norm of the
  # difference (see _NDArrayNear), not per-element - confirm intended.
  self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err))
def _GetNdArray(self, a):
  # Pass ndarrays through untouched; coerce anything else via np.array.
  if isinstance(a, np.ndarray):
    return a
  return np.array(a)
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6):
  """Asserts that two numpy arrays have near values.
  Args:
    a: a numpy ndarray or anything can be converted to one.
    b: a numpy ndarray or anything can be converted to one.
    rtol: relative tolerance
    atol: absolute tolerance
  """
  a = self._GetNdArray(a)
  b = self._GetNdArray(b)
  self.assertEqual(
      a.shape, b.shape,
      "Shape mismatch: expected %s, got %s." % (a.shape, b.shape))
  if not np.allclose(a, b, rtol=rtol, atol=atol):
    # Prints more details than np.testing.assert_allclose.
    #
    # NOTE: numpy.allclose (and numpy.testing.assert_allclose)
    # checks whether two arrays are element-wise equal within a
    # tolerance. The relative difference (rtol * abs(b)) and the
    # absolute difference atol are added together to compare against
    # the absolute difference between a and b. Here, we want to
    # print out which elements violate such conditions.
    cond = np.logical_or(
        np.abs(a - b) > atol + rtol * np.abs(b), np.isnan(a) != np.isnan(b))
    if a.ndim:
      x = a[np.where(cond)]
      y = b[np.where(cond)]
      print("not close where = ", np.where(cond))
    else:
      # np.where is broken for scalars
      x, y = a, b
    print("not close lhs = ", x)
    print("not close rhs = ", y)
    print("not close dif = ", np.abs(x - y))
    print("not close tol = ", atol + rtol * np.abs(y))
    print("dtype = %s, shape = %s" % (a.dtype, a.shape))
    # Delegate the actual failure (and message) to numpy.
    np.testing.assert_allclose(a, b, rtol=rtol, atol=atol)
def assertAllCloseAccordingToType(self, a, b, rtol=1e-6, atol=1e-6):
  """Like assertAllClose, but also suitable for comparing fp16 arrays.
  In particular, the tolerance is reduced to 1e-3 if at least
  one of the arguments is of type float16.
  Args:
    a: a numpy ndarray or anything can be converted to one.
    b: a numpy ndarray or anything can be converted to one.
    rtol: relative tolerance
    atol: absolute tolerance
  """
  a = self._GetNdArray(a)
  b = self._GetNdArray(b)
  # fp16 has ~3 decimal digits of precision; widen tolerances accordingly
  # (only ever loosen, never tighten, the caller's tolerances).
  if a.dtype == np.float16 or b.dtype == np.float16:
    rtol = max(rtol, 1e-3)
    atol = max(atol, 1e-3)
  self.assertAllClose(a, b, rtol=rtol, atol=atol)
def assertAllEqual(self, a, b):
  """Asserts that two numpy arrays have the same values.
  Args:
    a: a numpy ndarray or anything can be converted to one.
    b: a numpy ndarray or anything can be converted to one.
  """
  a = self._GetNdArray(a)
  b = self._GetNdArray(b)
  self.assertEqual(
      a.shape, b.shape,
      "Shape mismatch: expected %s, got %s." % (a.shape, b.shape))
  same = (a == b)
  # For float arrays, treat NaN == NaN as equal (unlike IEEE semantics).
  if a.dtype == np.float32 or a.dtype == np.float64:
    same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
  if not np.all(same):
    # Prints more details than np.testing.assert_array_equal.
    diff = np.logical_not(same)
    if a.ndim:
      x = a[np.where(diff)]
      y = b[np.where(diff)]
      print("not equal where = ", np.where(diff))
    else:
      # np.where is broken for scalars
      x, y = a, b
    print("not equal lhs = ", x)
    print("not equal rhs = ", y)
    # Delegate the actual failure (and message) to numpy.
    np.testing.assert_array_equal(a, b)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
                                   expected_err_re_or_predicate):
  """Returns a context manager to enclose code expected to raise an exception.
  If the exception is an OpError, the op stack is also included in the message
  predicate search.
  Args:
    exception_type: The expected type of exception that should be raised.
    expected_err_re_or_predicate: If this is callable, it should be a function
      of one argument that inspects the passed-in exception and
      returns True (success) or False (please fail the test). Otherwise, the
      error message is expected to match this regular expression partially.
  Returns:
    A context manager to surround code that is expected to raise an
    exception.
  """
  if callable(expected_err_re_or_predicate):
    predicate = expected_err_re_or_predicate
  else:
    def predicate(e):
      # NOTE(review): `e.message` is a Py2-era OpError attribute -
      # confirm it still exists on the OpError in use here.
      err_str = e.message if isinstance(e, errors.OpError) else str(e)
      op = e.op if isinstance(e, errors.OpError) else None
      # Walk the chain of original ops so the regex can match any frame.
      while op is not None:
        err_str += "\nCaused by: " + op.name
        op = op._original_op
      logging.info("Searching within error strings: '%s' within '%s'",
                   expected_err_re_or_predicate, err_str)
      return re.search(expected_err_re_or_predicate, err_str)
  try:
    yield
    # Reaching here means the body raised nothing: fail.
    self.fail(exception_type.__name__ + " not raised")
  except Exception as e:  # pylint: disable=broad-except
    # Wrong type or predicate mismatch -> convert to an assertion failure.
    if not isinstance(e, exception_type) or not predicate(e):
      raise AssertionError("Exception of type %s: %s" %
                           (str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
  """Shorthand for assertRaisesWithPredicateMatch on errors.OpError."""
  return self.assertRaisesWithPredicateMatch(errors.OpError,
                                             expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor):
  """Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
  Args:
    np_array: A Numpy ndarray or Numpy scalar.
    tf_tensor: A Tensor.
  Raises:
    TypeError: If the arguments have the wrong type.
  """
  if not isinstance(np_array, (np.ndarray, np.generic)):
    raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
  if not isinstance(tf_tensor, ops.Tensor):
    raise TypeError("tf_tensor must be a Tensor")
  # Compare against the tensor's *static* shape, as a plain Python list.
  self.assertAllEqual(np_array.shape, tf_tensor.get_shape().as_list())
def assertDeviceEqual(self, device1, device2):
  """Asserts that the two given devices are the same.
  Args:
    device1: A string device name or TensorFlow `DeviceSpec` object.
    device2: A string device name or TensorFlow `DeviceSpec` object.
  """
  # Canonicalize both sides so equivalent spellings compare equal.
  device1 = pydev.canonical_name(device1)
  device2 = pydev.canonical_name(device2)
  self.assertEqual(device1, device2,
                   "Devices %s and %s are not equal" % (device1, device2))
# Fix Python 3 compatibility issues: class-level aliases mapping removed
# Py2 unittest method names onto their Py3 equivalents.
if six.PY3:
  # Silence a deprecation warning
  assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
  # assertItemsEqual is assertCountEqual as of 3.2.
  assertItemsEqual = googletest.TestCase.assertCountEqual
|
params.py | #!/usr/bin/env python3
"""ROS has a parameter server, we have files.
The parameter store is a persistent key value store, implemented as a directory with a writer lock.
On Android, we store params under params_dir = /data/params. The writer lock is a file
"<params_dir>/.lock" taken using flock(), and data is stored in a directory symlinked to by
"<params_dir>/d".
Each key, value pair is stored as a file with named <key> with contents <value>, located in
<params_dir>/d/<key>
Readers of a single key can just open("<params_dir>/d/<key>") and read the file contents.
Readers who want a consistent snapshot of multiple keys should take the lock.
Writers should take the lock before modifying anything. Writers should also leave the DB in a
consistent state after a crash. The implementation below does this by copying all params to a temp
directory <params_dir>/<tmp>, then atomically symlinking <params_dir>/<d> to <params_dir>/<tmp>
before deleting the old <params_dir>/<d> directory.
Writers that only modify a single key can simply take the lock, then swap the corresponding value
file in place without messing with <params_dir>/d.
"""
import time
import os
import errno
import sys
import shutil
import fcntl
import tempfile
import threading
from enum import Enum
from common.basedir import PARAMS
def mkdirs_exists_ok(path):
    """Create `path` (and any missing parents); a pre-existing dir is fine.

    Raises OSError when creation fails or when `path` exists but is not a
    directory - the same contract as the old try/except implementation.
    """
    # Python 3 idiom: exist_ok=True swallows exactly the
    # "already-a-directory" case that the old except/isdir dance handled.
    os.makedirs(path, exist_ok=True)
class TxType(Enum):
    """Lifecycle tag controlling when a param's value is cleared."""
    PERSISTENT = 1                 # never cleared automatically
    CLEAR_ON_MANAGER_START = 2     # wiped by Params.manager_start()
    CLEAR_ON_PANDA_DISCONNECT = 3  # wiped by Params.panda_disconnect()
class UnknownKeyName(Exception):
    """Raised when a param key is not declared in the `keys` registry."""
    pass
# Registry of every valid param key, mapped to the TxType tags that decide
# when the system clears it. Params.get() raises UnknownKeyName for keys
# not declared here.
keys = {
    "AccessToken": [TxType.PERSISTENT],
    "AthenadPid": [TxType.PERSISTENT],
    "CalibrationParams": [TxType.PERSISTENT],
    "CarAvoidanceEnabled": [TxType.PERSISTENT],
    "CarParams": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
    "CarParamsCache": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
    "CarVin": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
    "CommunityFeaturesToggle": [TxType.PERSISTENT],
    "CompletedTrainingVersion": [TxType.PERSISTENT],
    "ControlsParams": [TxType.PERSISTENT],
    "DoUninstall": [TxType.CLEAR_ON_MANAGER_START],
    "DongleId": [TxType.PERSISTENT],
    "GitBranch": [TxType.PERSISTENT],
    "GitCommit": [TxType.PERSISTENT],
    "GitRemote": [TxType.PERSISTENT],
    "GithubSshKeys": [TxType.PERSISTENT],
    "HasAcceptedTerms": [TxType.PERSISTENT],
    "HasCompletedSetup": [TxType.PERSISTENT],
    "IsLdwEnabled": [TxType.PERSISTENT],
    "IsGeofenceEnabled": [TxType.PERSISTENT],
    "IsMetric": [TxType.PERSISTENT],
    "IsOffroad": [TxType.CLEAR_ON_MANAGER_START],
    "IsRHD": [TxType.PERSISTENT],
    "IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
    "IsUpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
    "IsUploadRawEnabled": [TxType.PERSISTENT],
    "LastUpdateTime": [TxType.PERSISTENT],
    "LimitSetSpeed": [TxType.PERSISTENT],
    "LimitSetSpeedNeural": [TxType.PERSISTENT],
    "LiveParameters": [TxType.PERSISTENT],
    "LongitudinalControl": [TxType.PERSISTENT],
    "OpenpilotEnabledToggle": [TxType.PERSISTENT],
    "LaneChangeEnabled": [TxType.PERSISTENT],
    "PandaFirmware": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
    "PandaFirmwareHex": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
    "PandaDongleId": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
    "Passive": [TxType.PERSISTENT],
    "RecordFront": [TxType.PERSISTENT],
    "ReleaseNotes": [TxType.PERSISTENT],
    "ShouldDoUpdate": [TxType.CLEAR_ON_MANAGER_START],
    "SpeedControlEnabled": [TxType.PERSISTENT],
    "SpeedLimitOffset": [TxType.PERSISTENT],
    "SubscriberInfo": [TxType.PERSISTENT],
    "TermsVersion": [TxType.PERSISTENT],
    "TrainingVersion": [TxType.PERSISTENT],
    "UpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
    "UpdateFailedCount": [TxType.CLEAR_ON_MANAGER_START],
    "Version": [TxType.PERSISTENT],
    "Offroad_ChargeDisabled": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
    "Offroad_ConnectivityNeeded": [TxType.CLEAR_ON_MANAGER_START],
    "Offroad_ConnectivityNeededPrompt": [TxType.CLEAR_ON_MANAGER_START],
    "Offroad_TemperatureTooHigh": [TxType.CLEAR_ON_MANAGER_START],
    "Offroad_PandaFirmwareMismatch": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
    "Offroad_InvalidTime": [TxType.CLEAR_ON_MANAGER_START],
    "Offroad_IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
}
def fsync_dir(path):
    """Flush a directory's metadata (entries, renames) to stable storage."""
    dir_fd = os.open(path, os.O_RDONLY)
    try:
        os.fsync(dir_fd)
    finally:
        # Always close the fd, even when fsync fails.
        os.close(dir_fd)
class FileLock():
    """Exclusive advisory lock backed by flock() on a lock file."""

    def __init__(self, path, create):
        self._path = path
        self._create = create
        self._fd = None  # open fd while the lock is held, else None

    def acquire(self):
        # With create=False the flags are 0 (plain O_RDONLY open of an
        # existing file); with create=True the lock file may be created.
        flags = os.O_CREAT if self._create else 0
        self._fd = os.open(self._path, flags)
        fcntl.flock(self._fd, fcntl.LOCK_EX)

    def release(self):
        # Idempotent: a second release is a no-op.
        if self._fd is None:
            return
        os.close(self._fd)
        self._fd = None
class DBAccessor():
    """Base for DB readers/writers: path handling, locking, and an
    in-memory snapshot (`_vals`) populated by subclasses in __enter__."""

    def __init__(self, path):
        self._path = path
        self._vals = None  # key -> bytes snapshot; None until __enter__

    def keys(self):
        self._check_entered()
        return self._vals.keys()

    def get(self, key):
        """Return the snapshot's value for `key`, or None if absent."""
        self._check_entered()
        try:
            return self._vals[key]
        except KeyError:
            return None

    def _get_lock(self, create):
        # Acquire and return the exclusive flock on <path>/.lock.
        lock = FileLock(os.path.join(self._path, ".lock"), create)
        lock.acquire()
        return lock

    def _read_values_locked(self):
        """Callers should hold a lock while calling this method."""
        vals = {}
        try:
            data_path = self._data_path()
            keys = os.listdir(data_path)
            for key in keys:
                with open(os.path.join(data_path, key), "rb") as f:
                    vals[key] = f.read()
        except (OSError, IOError) as e:
            # Either the DB hasn't been created yet, or somebody wrote a bug and left the DB in an
            # inconsistent state. Either way, return empty.
            if e.errno == errno.ENOENT:
                return {}
            # NOTE(review): non-ENOENT errors fall through and return the
            # partially-read `vals` instead of raising - confirm intended.
        return vals

    def _data_path(self):
        # "d" is the symlink to the current data directory snapshot.
        return os.path.join(self._path, "d")

    def _check_entered(self):
        if self._vals is None:
            raise Exception("Must call __enter__ before using DB")
class DBReader(DBAccessor):
    """Read-only snapshot view of the params DB.

    __enter__ takes the lock just long enough to read a consistent
    snapshot of all key/value pairs; subsequent reads hit the cached dict.
    """

    def __enter__(self):
        try:
            lock = self._get_lock(False)
        except OSError as e:
            # Do not create lock if it does not exist.
            if e.errno == errno.ENOENT:
                self._vals = {}
                return self
            # BUG FIX: any other error used to fall through and hit the
            # unbound `lock` below (UnboundLocalError); propagate instead.
            raise
        try:
            # Read everything.
            self._vals = self._read_values_locked()
            return self
        finally:
            lock.release()

    def __exit__(self, type, value, traceback):
        pass
class DBWriter(DBAccessor):
    """Read-write transaction: collects puts/deletes in memory, then commits
    them atomically on __exit__ via a directory symlink swap."""

    def __init__(self, path):
        super(DBWriter, self).__init__(path)
        self._lock = None        # held flock for the whole transaction
        self._prev_umask = None  # umask saved in __enter__, restored on exit

    def put(self, key, value):
        # Staged in memory; written to disk on __exit__.
        self._vals[key] = value

    def delete(self, key):
        # Staged in memory; no-op if the key is absent.
        self._vals.pop(key, None)

    def __enter__(self):
        mkdirs_exists_ok(self._path)
        # Make sure we can write and that permissions are correct.
        self._prev_umask = os.umask(0)
        try:
            os.chmod(self._path, 0o777)
            self._lock = self._get_lock(True)
            self._vals = self._read_values_locked()
        except:
            # NOTE(review): bare except is deliberate - restore the umask
            # on any failure, then re-raise.
            os.umask(self._prev_umask)
            self._prev_umask = None
            raise
        return self

    def __exit__(self, type, value, traceback):
        self._check_entered()
        try:
            # data_path refers to the externally used path to the params. It is a symlink.
            # old_data_path is the path currently pointed to by data_path.
            # tempdir_path is a path where the new params will go, which the new data path will point to.
            # new_data_path is a temporary symlink that will atomically overwrite data_path.
            #
            # The current situation is:
            #   data_path -> old_data_path
            # We're going to write params data to tempdir_path
            #   tempdir_path -> params data
            # Then point new_data_path to tempdir_path
            #   new_data_path -> tempdir_path
            # Then atomically overwrite data_path with new_data_path
            #   data_path -> tempdir_path
            old_data_path = None
            new_data_path = None
            tempdir_path = tempfile.mkdtemp(prefix=".tmp", dir=self._path)
            try:
                # Write back all keys.
                os.chmod(tempdir_path, 0o777)
                for k, v in self._vals.items():
                    with open(os.path.join(tempdir_path, k), "wb") as f:
                        f.write(v)
                        f.flush()
                        os.fsync(f.fileno())
                fsync_dir(tempdir_path)
                data_path = self._data_path()
                try:
                    old_data_path = os.path.join(self._path, os.readlink(data_path))
                except (OSError, IOError):
                    # NOTE(mgraczyk): If other DB implementations have bugs, this could cause
                    # copies to be left behind, but we still want to overwrite.
                    pass
                new_data_path = "{}.link".format(tempdir_path)
                os.symlink(os.path.basename(tempdir_path), new_data_path)
                # rename over the existing symlink is the atomic commit point.
                os.rename(new_data_path, data_path)
                fsync_dir(self._path)
            finally:
                # If the rename worked, we can delete the old data. Otherwise delete the new one.
                success = new_data_path is not None and os.path.exists(data_path) and (
                    os.readlink(data_path) == os.path.basename(tempdir_path))
                if success:
                    if old_data_path is not None:
                        shutil.rmtree(old_data_path)
                else:
                    shutil.rmtree(tempdir_path)
                # Regardless of what happened above, there should be no link at new_data_path.
                if new_data_path is not None and os.path.islink(new_data_path):
                    os.remove(new_data_path)
        finally:
            os.umask(self._prev_umask)
            self._prev_umask = None
            # Always release the lock.
            self._lock.release()
            self._lock = None
def read_db(params_path, key):
    """Return the raw bytes stored for *key* under *params_path*, or None if absent."""
    key_path = params_path + "/d/" + key
    try:
        # EAFP: attempting the read and catching the failure avoids a
        # check-then-read race with concurrent writers.
        with open(key_path, "rb") as fh:
            return fh.read()
    except IOError:
        return None
def write_db(params_path, key, value):
    """Atomically and durably write *value* for *key* under *params_path*.

    Serializes writers via the database lock, writes the value to a temp
    file, fsyncs it, then renames it into place so readers never observe a
    partially written value. str values are encoded as UTF-8.
    """
    if isinstance(value, str):
        value = value.encode('utf8')

    prev_umask = os.umask(0)
    lock = FileLock(params_path + "/.lock", True)
    lock.acquire()

    try:
        # mkstemp (unlike the deprecated, racy tempfile.mktemp) creates the
        # file atomically, so no other process can claim the same name first.
        fd, tmp_path = tempfile.mkstemp(prefix=".tmp", dir=params_path)
        # mkstemp always creates with mode 0600; restore the world-writable
        # mode the original open()-under-umask(0) produced.
        os.chmod(tmp_path, 0o666)
        with os.fdopen(fd, "wb") as f:
            f.write(value)
            f.flush()
            os.fsync(f.fileno())

        path = "%s/d/%s" % (params_path, key)
        # rename is atomic on POSIX; fsync the directory so the rename itself
        # is durable across a crash.
        os.rename(tmp_path, path)
        fsync_dir(os.path.dirname(path))
    finally:
        os.umask(prev_umask)
        lock.release()
class Params():
    """Filesystem-backed key/value parameter store.

    Values live as individual files under ``<db>/d/``; writes go through
    DBWriter transactions so readers always see a consistent snapshot.
    """

    def __init__(self, db=PARAMS):
        self.db = db
        # Bootstrap the on-disk layout on first use: an empty write
        # transaction creates the directory structure.
        if not os.path.exists(self.db + "/d"):
            with self.transaction(write=True):
                pass

    def clear_all(self):
        """Delete the entire database, then recreate an empty one."""
        shutil.rmtree(self.db, ignore_errors=True)
        with self.transaction(write=True):
            pass

    def transaction(self, write=False):
        """Return a write (exclusive) or read transaction context manager."""
        return DBWriter(self.db) if write else DBReader(self.db)

    def _clear_keys_with_type(self, tx_type):
        # Delete every known key tagged with the given lifetime type.
        with self.transaction(write=True) as txn:
            for key in keys:
                if tx_type in keys[key]:
                    txn.delete(key)

    def manager_start(self):
        self._clear_keys_with_type(TxType.CLEAR_ON_MANAGER_START)

    def panda_disconnect(self):
        self._clear_keys_with_type(TxType.CLEAR_ON_PANDA_DISCONNECT)

    def delete(self, key):
        with self.transaction(write=True) as txn:
            txn.delete(key)

    def get(self, key, block=False, encoding=None):
        """Return the value for *key* (bytes, or str if *encoding* given).

        With block=True, polls until the key exists.
        """
        if key not in keys:
            raise UnknownKeyName(key)

        while True:
            value = read_db(self.db, key)
            if value is not None or not block:
                break
            # is polling really the best we can do?
            time.sleep(0.05)

        if value is not None and encoding is not None:
            value = value.decode(encoding)
        return value

    def put(self, key, dat):
        """
        Warning: This function blocks until the param is written to disk!
        In very rare cases this can take over a second, and your code will hang.

        Use the put_nonblocking helper function in time sensitive code, but
        in general try to avoid writing params as much as possible.
        """
        if key not in keys:
            raise UnknownKeyName(key)
        write_db(self.db, key, dat)
def put_nonblocking(key, val):
    """Write *key* on a background thread so the caller never blocks on disk I/O.

    Returns the started Thread so callers may join it if they care about
    completion.
    """
    def _worker(key, val):
        Params().put(key, val)

    writer = threading.Thread(target=_worker, args=(key, val))
    writer.start()
    return writer
if __name__ == "__main__":
    # CLI: `params.py KEY VALUE` writes a param; with no args, dumps all keys.
    params = Params()
    if len(sys.argv) > 2:
        params.put(sys.argv[1], sys.argv[2])
    else:
        for k in keys:
            pp = params.get(k)
            if pp is None:
                print("%s is None" % k)
            # get() returns bytes; iterate byte values (ints), not ord(c),
            # which is Python-2-only behavior.
            elif all(32 <= b < 128 for b in pp):
                print("%s = %s" % (k, pp))
            else:
                # bytes.encode("hex") no longer exists in Python 3; bytes.hex()
                # is the equivalent for non-printable values.
                print("%s = %s" % (k, pp.hex()))

    # Test multiprocess:
    # seq 0 100000 | xargs -P20 -I{} python common/params.py DongleId {} && sleep 0.05
    # while python common/params.py DongleId; do sleep 0.05; done
|
traybar.py | import os
from .win32_adapter import *
import threading
import uuid
class SysTrayIcon(object):
    """Windows system-tray icon with a popup menu, run on its own message loop.

    menu_options: tuple of tuples (menu text, menu icon path or None, function name)

    menu text and tray hover text should be Unicode
    hover_text length is limited to 128; longer text will be truncated

    Can be used as context manager to enable automatic termination of tray
    if parent thread is closed:

        with SysTrayIcon(icon, hover_text) as systray:
            for item in ['item1', 'item2', 'item3']:
                systray.update(hover_text=item)
                do_something(item)
    """
    # Sentinel action meaning "destroy the tray window and quit".
    QUIT = 'QUIT'
    SPECIAL_ACTIONS = [QUIT]

    # Menu item ids handed to Windows start here; one id per menu entry.
    FIRST_ID = 1023

    def __init__(self,
                 icon,
                 hover_text,
                 menu_options=None,
                 on_quit=None,
                 default_menu_index=None,
                 window_class_name=None):
        self._icon = icon
        # True when self._hicon is the shared system icon (must NOT be destroyed).
        self._icon_shared = False
        self._hover_text = hover_text
        self._on_quit = on_quit

        # A 'Quit' entry is always appended as the last menu option.
        menu_options = menu_options or []
        menu_options = menu_options + [['Quit', None, SysTrayIcon.QUIT, None]]
        self._next_action_id = SysTrayIcon.FIRST_ID
        # Built as a set of (id, action) pairs, then frozen into a dict below.
        self._menu_actions_by_id = set()
        self._menu_options = self._add_ids_to_menu_options(list(menu_options))
        self._menu_actions_by_id = dict(self._menu_actions_by_id)

        # Unique class name so multiple tray instances can coexist.
        window_class_name = window_class_name or ("SysTrayIconPy-%s" % (str(uuid.uuid4())))

        self._default_menu_index = (default_menu_index or 0)
        self._window_class_name = encode_for_locale(window_class_name)
        # Dispatch table for WndProc; TaskbarCreated re-adds the icon after
        # an explorer.exe restart.
        self._message_dict = {RegisterWindowMessage("TaskbarCreated"): self._restart,
                              WM_DESTROY: self._destroy,
                              WM_CLOSE: self._destroy,
                              WM_COMMAND: self._command,
                              WM_USER+20: self._notify}
        self._notify_id = None
        self._message_loop_thread = None
        self._hwnd = None
        self._hicon = 0
        self._hinst = None
        self._window_class = None
        self._menu = None
        self._register_class()

    def __enter__(self):
        """Context manager so SysTray can automatically close"""
        self.start()
        return self

    def __exit__(self, *args):
        """Context manager so SysTray can automatically close"""
        self.shutdown()

    def WndProc(self, hwnd, msg, wparam, lparam):
        # Window procedure: route known messages to handlers, then fall
        # through to the default window procedure.
        hwnd = HANDLE(hwnd)
        wparam = WPARAM(wparam)
        lparam = LPARAM(lparam)
        if msg in self._message_dict:
            self._message_dict[msg](hwnd, msg, wparam.value, lparam.value)
        return DefWindowProc(hwnd, msg, wparam, lparam)

    def _register_class(self):
        # Register the Window class.
        self._window_class = WNDCLASS()
        self._hinst = self._window_class.hInstance = GetModuleHandle(None)
        self._window_class.lpszClassName = self._window_class_name
        self._window_class.style = CS_VREDRAW | CS_HREDRAW
        self._window_class.hCursor = LoadCursor(0, IDC_ARROW)
        self._window_class.hbrBackground = COLOR_WINDOW
        # Keep the LPFN_WNDPROC wrapper on the class so it is not GC'd while
        # Windows still holds the callback pointer.
        self._window_class.lpfnWndProc = LPFN_WNDPROC(self.WndProc)
        RegisterClass(ctypes.byref(self._window_class))

    def _create_window(self):
        # Create the hidden message-only style window that owns the tray icon.
        style = WS_OVERLAPPED | WS_SYSMENU
        self._hwnd = CreateWindowEx(0, self._window_class_name,
                                    self._window_class_name,
                                    style,
                                    0,
                                    0,
                                    CW_USEDEFAULT,
                                    CW_USEDEFAULT,
                                    0,
                                    0,
                                    self._hinst,
                                    None)
        UpdateWindow(self._hwnd)
        self._refresh_icon()

    def _message_loop_func(self):
        # Runs on the background thread: create the window, then pump
        # messages until PostQuitMessage.
        self._create_window()
        PumpMessages()

    def start(self):
        """Start the tray icon's message loop on a background thread (idempotent)."""
        if self._hwnd:
            return      # already started
        self._message_loop_thread = threading.Thread(target=self._message_loop_func)
        self._message_loop_thread.start()

    def shutdown(self):
        """Close the tray window and join the message-loop thread (idempotent)."""
        if not self._hwnd:
            return      # not started
        PostMessage(self._hwnd, WM_CLOSE, 0, 0)
        self._message_loop_thread.join()

    def update(self, icon=None, hover_text=None, menu_options=None):
        """ update icon image and/or hover text and/or menu options"""
        if icon:
            self._icon = icon
            self._load_icon()
        if hover_text:
            self._hover_text = hover_text
        # "if menu_options" added to be allow the update of the menu options
        if menu_options:
            # Rebuild ids from scratch, same as in __init__.
            menu_options = menu_options + [['Quit', None, SysTrayIcon.QUIT, None]]
            self._next_action_id = SysTrayIcon.FIRST_ID
            self._menu_actions_by_id = set()
            self._menu_options = self._add_ids_to_menu_options(list(menu_options))
            self._menu_actions_by_id = dict(self._menu_actions_by_id)
            self._menu = None  # detroy the old menu created by right clicking the icon
        self._refresh_icon()

    def _add_ids_to_menu_options(self, menu_options):
        # Walk the (text, icon, action, state) options, assigning each a
        # unique Windows command id and recording id -> action.
        result = []
        for menu_option in menu_options:
            option_text, option_icon, option_action, option_state = menu_option
            if callable(option_action) or option_action in SysTrayIcon.SPECIAL_ACTIONS:
                self._menu_actions_by_id.add((self._next_action_id, option_action))
                result.append(menu_option + [self._next_action_id])
            elif option_action == 'separator':
                result.append((option_text,
                               option_icon,
                               option_action,
                               option_state,
                               self._next_action_id))
            elif non_string_iterable(option_action):
                # NOTE(review): here the submenu list lands in the 4th slot and
                # option_state in the 3rd, but _create_menu unpacks the 3rd slot
                # as option_action and tests it with isinstance(..., tuple) —
                # so nested submenus appear never to be detected. The order
                # looks swapped relative to the other branches; confirm intent.
                result.append((option_text,
                               option_icon,
                               option_state,
                               self._add_ids_to_menu_options(option_action),
                               self._next_action_id))
            else:
                raise Exception('Unknown item', option_text, option_icon, option_action)
            self._next_action_id += 1
        return result

    def _load_icon(self):
        # release previous icon, if a custom one was loaded
        # note: it's important *not* to release the icon if we loaded the default system icon (with
        # the LoadIcon function) - this is why we assign self._hicon only if it was loaded using LoadImage
        if not self._icon_shared and self._hicon != 0:
            DestroyIcon(self._hicon)
            self._hicon = 0

        # Try and find a custom icon
        hicon = 0
        if self._icon is not None and os.path.isfile(self._icon):
            ico_x = GetSystemMetrics(SM_CXSMICON)
            ico_y = GetSystemMetrics(SM_CYSMICON)
            icon = encode_for_locale(self._icon)
            hicon = self._hicon = LoadImage(0, icon, IMAGE_ICON, ico_x, ico_y, LR_LOADFROMFILE)
            self._icon_shared = False

        # Can't find icon file - using default shared icon
        if hicon == 0:
            self._hicon = LoadIcon(0, IDI_APPLICATION)
            self._icon_shared = True
            self._icon = None

    def _refresh_icon(self):
        # Push the current icon/hover text to the shell; NIM_ADD the first
        # time, NIM_MODIFY afterwards.
        if self._hwnd is None:
            return
        if self._hicon == 0:
            self._load_icon()
        if self._notify_id:
            message = NIM_MODIFY
        else:
            message = NIM_ADD
        self._notify_id = NotifyData(self._hwnd,
                                     0,
                                     NIF_ICON | NIF_MESSAGE | NIF_TIP,
                                     WM_USER+20,
                                     self._hicon,
                                     self._hover_text)
        Shell_NotifyIcon(message, ctypes.byref(self._notify_id))

    def _restart(self, hwnd, msg, wparam, lparam):
        # Explorer restarted (TaskbarCreated): re-add our icon.
        self._refresh_icon()

    def _destroy(self, hwnd, msg, wparam, lparam):
        # WM_CLOSE/WM_DESTROY handler: notify the owner, remove the tray
        # icon, and stop the message loop.
        if self._on_quit:
            self._on_quit(self)
        nid = NotifyData(self._hwnd, 0)
        Shell_NotifyIcon(NIM_DELETE, ctypes.byref(nid))
        PostQuitMessage(0)  # Terminate the app.
        # TODO * release self._menu with DestroyMenu and reset the memeber
        #      * release self._hicon with DestoryIcon and reset the member
        #      * release loaded menu icons (loaded in _load_menu_icon) with DeleteObject
        #        (we don't keep those objects anywhere now)
        self._hwnd = None
        self._notify_id = None

    def _notify(self, hwnd, msg, wparam, lparam):
        # Tray mouse events: double-click runs the default option,
        # right-click opens the popup menu.
        if lparam == WM_LBUTTONDBLCLK:
            self._execute_menu_option(self._default_menu_index + SysTrayIcon.FIRST_ID)
        elif lparam == WM_RBUTTONUP:
            self._show_menu()
        elif lparam == WM_LBUTTONUP:
            pass
        return True

    def _show_menu(self):
        # Lazily build the popup menu, then track it at the cursor position.
        if self._menu is None:
            self._menu = CreatePopupMenu()
            self._create_menu(self._menu, self._menu_options)
            # SetMenuDefaultItem(self._menu, 1000, 0)

        pos = POINT()
        GetCursorPos(ctypes.byref(pos))
        # See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winui/menus_0hdi.asp
        SetForegroundWindow(self._hwnd)
        TrackPopupMenu(self._menu,
                       TPM_LEFTALIGN,
                       pos.x,
                       pos.y,
                       0,
                       self._hwnd,
                       None)
        PostMessage(self._hwnd, WM_NULL, 0, 0)

    def _create_menu(self, menu, menu_options):
        # Insert in reverse so the first option ends up at the top.
        for option_text, option_icon, option_action, option_state, option_id in menu_options[::-1]:
            if option_icon:
                option_icon = self._prep_menu_icon(option_icon)

            mi_fstate = 0
            mi_ftype = 0
            if option_state == 'default':
                mi_fstate = mi_fstate | MFS_DEFAULT
            if option_state == 'highlight':
                mi_fstate = mi_fstate | MFS_HILITE
            if option_state == 'disabled':
                mi_fstate = mi_fstate | MFS_DISABLED
            if option_action == 'separator':
                mi_ftype = mi_ftype | MFT_SEPARATOR

            if isinstance(option_action, tuple):
                # Nested submenu (see NOTE in _add_ids_to_menu_options about
                # the tuple slot ordering).
                submenu = CreatePopupMenu()
                self._create_menu(submenu, option_action)
                item = PackMENUITEMINFO(text=option_text,
                                        hbmpItem=option_icon,
                                        hSubMenu=submenu)
                InsertMenuItem(menu, 0, 1, ctypes.byref(item))
            else:
                item = PackMENUITEMINFO(text=option_text,
                                        hbmpItem=option_icon,
                                        wID=option_id,
                                        fState=mi_fstate,
                                        fType=mi_ftype)
                InsertMenuItem(menu, 0, 1, ctypes.byref(item))

    def _prep_menu_icon(self, icon):
        # Convert an .ico file into a bitmap suitable for a menu item.
        icon = encode_for_locale(icon)
        # First load the icon.
        ico_x = GetSystemMetrics(SM_CXSMICON)
        ico_y = GetSystemMetrics(SM_CYSMICON)
        hicon = LoadImage(0, icon, IMAGE_ICON, ico_x, ico_y, LR_LOADFROMFILE)

        hdcBitmap = CreateCompatibleDC(None)
        hdcScreen = GetDC(None)
        hbm = CreateCompatibleBitmap(hdcScreen, ico_x, ico_y)
        hbmOld = SelectObject(hdcBitmap, hbm)
        # Fill the background.
        brush = GetSysColorBrush(COLOR_MENU)
        FillRect(hdcBitmap, ctypes.byref(RECT(0, 0, 16, 16)), brush)
        # draw the icon
        DrawIconEx(hdcBitmap, 0, 0, hicon, ico_x, ico_y, 0, 0, DI_NORMAL)
        SelectObject(hdcBitmap, hbmOld)
        # No need to free the brush
        DeleteDC(hdcBitmap)
        DestroyIcon(hicon)

        return hbm

    def _command(self, hwnd, msg, wparam, lparam):
        # WM_COMMAND: low word of wparam carries the menu item id.
        id = LOWORD(wparam)
        self._execute_menu_option(id)

    def _execute_menu_option(self, id):
        # Run the action registered for this id; QUIT destroys the window,
        # which ends the message loop via WM_DESTROY.
        menu_action = self._menu_actions_by_id[id]
        if menu_action == SysTrayIcon.QUIT:
            DestroyWindow(self._hwnd)
        else:
            menu_action(self)
def non_string_iterable(obj):
    """Return True for iterables that are not plain strings (e.g. submenu lists)."""
    if isinstance(obj, str):
        return False
    try:
        iter(obj)
    except TypeError:
        return False
    return True
|
dataset-stat.py | """Analyze a text file"""
from tqdm import *
import nltk, sys
import numpy as np
from multiprocessing import Process, Manager, cpu_count
import itertools
from nltk.tokenize import ToktokTokenizer
toktok = ToktokTokenizer().tokenize
def get_stat(in_queue, out_list):
    """Worker loop: consume (line_no, line) pairs from *in_queue* and append
    per-line statistics to *out_list*.

    A pair whose line is None is the shutdown sentinel. Each result tuple is
    (sent_count, token_count, sent_token_count, char_count, char_per_word_count).
    """
    while True:
        _, line = in_queue.get()
        if line is None:
            return  # sentinel: no more work

        line = line.strip()
        if args.sentence_separator is not None:
            sents = line.split(args.sentence_separator)
        else:
            sents = nltk.sent_tokenize(line)

        words = line.split(" ")
        out_list.append((
            len(sents),
            len(words),
            [len(toktok(s)) for s in sents],
            len(line),
            np.array([len(w) for w in words]),
        ))
import argparse


def _str2bool(v):
    """argparse-safe bool parser.

    With type=bool, argparse passes the raw string through bool(), so any
    non-empty value — including "False" — becomes True. Treat the usual
    negative spellings (and empty string) as False instead.
    """
    return str(v).lower() not in ("false", "0", "no", "")


parser = argparse.ArgumentParser(description="Analyze an input text file, computing statistics.")
parser.add_argument('-target_file', help='The text file to process', required=True)
parser.add_argument('-limit', help='Limit processing to this number of lines.', default=None, type=int)
parser.add_argument('-simple', help='', default=True, type=_str2bool)
parser.add_argument('-sentence_separator', help='The sentence separator to use. By default, use NLTK.', default=None)
args = parser.parse_args()
if __name__ == '__main__':
    # Fan work out to worker processes via a managed queue; workers append
    # per-line stat tuples to a shared managed list.
    manager = Manager()
    results = manager.list()
    # Use half of available CPUs
    num_workers = int(cpu_count()/2)
    # Bounded queue: the producer blocks once num_workers items are pending.
    work = manager.Queue(num_workers)
    if args.sentence_separator is not None:
        args.sentence_separator = str(args.sentence_separator)

    pool = []
    for i in range(num_workers):
        p = Process(target=get_stat, args=(work, results))
        p.start()
        pool.append(p)

    print("Computing stat on {}".format(args.target_file))
    with tqdm(desc="Counting total sentences") as pbar:
        with open(args.target_file) as f:
            if args.limit is not None:
                file_iter = itertools.islice(f, args.limit)
            else:
                file_iter = iter(f)
            # Append one None sentinel per worker so every worker terminates.
            iters = itertools.chain(file_iter, (None,) * num_workers)
            # enumerate supplies the (line_no, line) pairs get_stat expects.
            for id_pair in enumerate(iters):
                work.put(id_pair)
                pbar.update()

    for p in pool:
        p.join()

    # Unpack result tuples:
    # (sent_count, token_count, sent_token_count, char_count, char_per_word_count)
    sent_counts = [r[0] for r in results]
    unique, counts = np.unique(sent_counts, return_counts=True)
    sentences_per_line = dict(zip(unique, counts))

    tok_counts = [r[1] for r in results]

    sent_tok_counts = []
    for r in results:
        sent_tok_counts += r[2]

    char_counts = [r[3] for r in results]
    # char_per_word_counts = np.array([r[5] for r in results]).flatten()
    # Flatten the per-line arrays of word lengths into one flat list.
    char_per_word_counts = [r[4] for r in results]
    char_per_word_counts = [c for s in char_per_word_counts for c in s]

    total_lines = len(results)
    if args.simple:
        # Abbreviated report: mean (stddev) of the headline statistics.
        print("Sentences per doc: {} ({})".format(np.round(np.mean(sent_counts), 3), np.round(np.std(sent_counts), 3)))
        print("Mean token count per sentence: {} ({})".format(np.round(np.mean(sent_tok_counts), 2),
                                                              np.round(np.std(sent_tok_counts), 2)))
        print("Mean tokens per doc: {} ({})".format(np.round(np.mean(tok_counts),2), np.round(np.std(tok_counts), 2)))
        print("{} ({})\t{} ({})\t{} ({})\t".format(
            np.round(np.mean(sent_counts), 3), np.round(np.std(sent_counts), 3),
            np.round(np.mean(sent_tok_counts), 2), np.round(np.std(sent_tok_counts), 2),
            np.round(np.mean(tok_counts), 2), np.round(np.std(tok_counts), 2)
        ))
    else:
        # Full report.
        print("-"*30)
        print("Total lines: {}".format(total_lines))
        print("-"*30)
        print("Total sentences in {}: {} ".format(args.target_file, np.sum(sent_counts)))
        print("Sentences per doc: {} ({})".format(np.round(np.mean(sent_counts), 3), np.round(np.std(sent_counts), 3)))
        print("Sentence counts: {}".format(sentences_per_line))
        # count_1 = sentences_per_line[1]
        # del sentences_per_line[1]
        # count_multi = sum(sentences_per_line.values())
        # print("Multi >1 {}%".format(np.round((count_multi/total_lines) * 100, 3)))
        # del sentences_per_line[2]
        # count_multi = sum(sentences_per_line.values())
        # print("Multi >2 {}%".format(np.round((count_multi / total_lines) * 100, 3)))
        print("-"*30)
        print("Total tokens in {}: {} ".format(args.target_file, np.sum(tok_counts)))
        print("Mean tokens per doc: {} ({})".format(np.round(np.mean(tok_counts),2), np.round(np.std(tok_counts), 2)))
        print("-"*30)
        print("Mean characters per doc: {} ({})".format(np.mean(char_counts), np.std(char_counts)))
        print("Mean characters per word: {} ({})".format(np.mean(char_per_word_counts), np.std(char_per_word_counts)))
        print("-"*30)
        print("Mean token count per sentence: {} ({})".format(np.round(np.mean(sent_tok_counts), 2),
                                                              np.round(np.std(sent_tok_counts), 2)))
        print("-"*30)
lights.py | from matrix_lite import led
from time import sleep
from math import pi, sin
import threading
def lightsOff():
    """Turn every everloop LED off."""
    led.set(['black'] * led.length)
def flashLights(color, times, intensity):
    """Blink the whole everloop *times* times in *color* at *intensity*."""
    # Start from all-off.
    led.set(['black'] * led.length)
    for _ in range(times):
        # On phase...
        led.set([{color: intensity} for _ in range(led.length)])
        sleep(0.2)
        # ...then off phase.
        led.set(['black'] * led.length)
        sleep(0.2)
#l = loopFlicker()
#t = threading.Thread(target = l.flicker, args=('b',1,35,30))
#t.start()
#l.isRunning = False
class loopFlicker(object):
    """Pulses the everloop brightness between minHue and maxHue until stopped."""

    def __init__(self):
        # Cooperative stop flag: set to False from another thread to end flicker().
        self.isRunning = True

    def flicker(self, color, minHue, maxHue, speed):
        """Ramp *color* up and down between *minHue* and *maxHue*,
        updating *speed* times per second, while isRunning is True."""
        everloop = ['black'] * led.length
        level = 0
        rising = True
        while self.isRunning:
            # Bounce between the two bounds, reversing direction at each end.
            if rising:
                if level != maxHue:
                    level += 1
                else:
                    level -= 1
                    rising = False
            else:
                if level != minHue:
                    level -= 1
                else:
                    level += 1
                    rising = True
            for i in range(len(everloop)):
                everloop[i] = {color: level}
            led.set(everloop)
            sleep(1 / speed)
|
local.py | import threading
from random import randint
local = threading.local()
def run(local, barrier):
    """Store a random value in thread-local storage, sync on *barrier*, and
    print the value before and after the barrier to show it is per-thread."""
    local.my_value = randint(0, 10**2)
    current = threading.current_thread()
    print(f'Thread {current.name} has value {local.my_value}')
    barrier.wait()
    print(f'Thread {current.name} still has value {local.my_value}')
count = 3
# All threads must reach the barrier before any proceeds past it.
barrier = threading.Barrier(count)

threads = []
for name in range(count):
    threads.append(
        threading.Thread(target=run, name=f'T{name}', args=(local, barrier))
    )
for t in threads:
    t.start()
"""
$ python local.py
Thread T0 has value 61
Thread T1 has value 52
Thread T2 has value 38
Thread T2 still has value 38
Thread T0 still has value 61
Thread T1 still has value 52
"""
|
tutorial_remotesensing.py | """
mss.tutorials.tutorial_remotesensing
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This python script generates an automatic demonstration of how to work with remote sensing tool in topview.
This file is part of mss.
:copyright: Copyright 2021 Hrithik Kumar Verma
:copyright: Copyright 2021-2022 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pyautogui as pag
import multiprocessing
import sys
from sys import platform
from pyscreeze import ImageNotFoundException
from tutorials import screenrecorder as sr
from mslib.msui import mss_pyui
def initial_ops():
    """
    Executes the initial operations such as closing all opened windows and showing the desktop.
    """
    pag.sleep(5)
    # Per-OS "show desktop" shortcut; alert on anything unrecognized.
    if platform in ("linux", "linux2"):
        pag.hotkey('winleft', 'd')
        print("\n INFO : Automation is running on Linux system..\n")
    elif platform == "darwin":
        pag.hotkey('option', 'command', 'm')
        print("\n INFO : Automation is running on Mac OS..\n")
    elif platform == "win32":
        pag.hotkey('win', 'd')
        print("\n INFO : Automation is running on Windows OS..\n")
    else:
        pag.alert(text="Sorry, no support on this platform!", title="Platform Exception", button='OK')
def call_recorder():
    """
    Calls the screen recorder class to start the recording of the automation.
    """
    width, height = pag.size()
    # Record a region offset (80, 20) covering two-thirds of the screen width.
    recorder = sr.ScreenRecorder(80, 20, int(width / 1.5), int(height))
    recorder.capture()
    recorder.stop_capture()
def call_mss():
    """
    Calls the main MSS GUI window since operations are to be performed on it only.
    """
    # Blocks until the GUI exits, which is why main() runs it in its own process.
    mss_pyui.main()
def automate_rs():
    """
    This is the main automating script of the MSS remote sensing tutorial which will be recorded and saved
    to a file having dateframe nomenclature with a .mp4 extension(codec).

    Drives the GUI purely via pyautogui screenshots/keystrokes, so every step
    depends on the previous one having visually completed.
    """
    # Giving time for loading of the MSS GUI.
    pag.sleep(10)

    # Platform specific things
    if platform == 'linux' or platform == 'linux2':
        enter = 'enter'
        wms_path = 'pictures/tutorial_wms/linux/'
        rs_path = 'pictures/remote_sensing/linux/'
        win = 'winleft'
        ctrl = 'ctrl'
    elif platform == 'win32':
        enter = 'enter'
        wms_path = 'pictures/tutorial_wms/win32/'
        rs_path = 'pictures/remote_sensing/win32/'
        win = 'win'
        ctrl = 'ctrl'
    elif platform == 'darwin':
        # NOTE(review): this branch never sets rs_path or win, so the later
        # f'{rs_path}...' lookups (and any use of win) would raise NameError
        # on macOS. Probably should mirror the linux paths — confirm.
        enter = 'return'
        wms_path = 'pictures/tutorial_wms/linux/'
        ctrl = 'command'

    # Maximizing the window
    try:
        pag.hotkey('ctrl', 'command', 'f') if platform == 'darwin' else pag.hotkey(win, 'up')
    except Exception:
        print("\nException : Enable Shortcuts for your system or try again!")
    pag.sleep(2)
    pag.hotkey('ctrl', 'h')
    pag.sleep(3)

    # Opening Remote Sensing dockwidget
    try:
        x, y = pag.locateCenterOnScreen(f'{wms_path}selecttoopencontrol.png')
        pag.click(x, y, interval=2)
        pag.sleep(1)
        pag.press('down', presses=3, interval=1)
        pag.sleep(1)
        pag.press(enter)
        pag.sleep(2)
    except (ImageNotFoundException, OSError, Exception):
        print("\nException :\'select to open control\' button/option not found on the screen.")

    # Adding waypoints for demonstrating remote sensing
    try:
        x, y = pag.locateCenterOnScreen('pictures/add_waypoint.PNG')
        pag.click(x, y, interval=2)
        pag.move(-50, 150, duration=1)
        pag.click(interval=2)
        pag.sleep(1)
        pag.move(65, 65, duration=1)
        pag.click(interval=2)
        pag.sleep(1)
        pag.move(-150, 30, duration=1)
        pag.click(interval=2)
        pag.sleep(1)
        pag.move(200, 150, duration=1)
        pag.click(interval=2)
        pag.sleep(2)
    except (ImageNotFoundException, OSError, Exception):
        print("\nException : Add waypoint button in topview not found on the screen.")

    # Showing Solar Angle Colors
    try:
        x, y = pag.locateCenterOnScreen(f'{rs_path}showangle.png')
        pag.sleep(1)
        pag.click(x, y, duration=2)
        pag.sleep(1)
        # Cycle through the combo boxes to the right of the checkbox.
        for _ in range(2):
            pag.click(x + 100, y, duration=1)
            pag.press('down', interval=1)
            pag.sleep(1)
            pag.press(enter, interval=1)
            pag.sleep(2)
        for _ in range(3):
            pag.click(x + 200, y, duration=1)
            pag.press('down', interval=1)
            pag.sleep(1)
            pag.press(enter, interval=1)
            pag.sleep(2)
        pag.click(x + 200, y, duration=1)
        pag.press('up', presses=3, interval=1)
        pag.sleep(1)
        pag.press(enter, interval=1)
        pag.sleep(2)
    except (ImageNotFoundException, OSError, Exception):
        print("\nException :\'Show angle\' checkbox not found on the screen.")

    # Changing azimuth angles
    try:
        x, y = pag.locateCenterOnScreen(f'{rs_path}azimuth.png')
        pag.click(x + 70, y, duration=1)
        # Remember the spinbox position; reused in the rotation section below.
        azimuth_x, azimuth_y = pag.position()
        pag.sleep(2)
        pag.hotkey(ctrl, 'a')
        pag.sleep(2)
        pag.typewrite('45', interval=1)
        pag.press(enter)
        pag.sleep(3)

        pag.click(duration=1)
        pag.hotkey(ctrl, 'a')
        pag.typewrite('90', interval=1)
        pag.press(enter)
        pag.sleep(3)
    except (ImageNotFoundException, OSError, Exception):
        print("\nException :\'Azimuth\' spinbox not found on the screen.")

    # Changing elevation angles
    try:
        x, y = pag.locateCenterOnScreen(f'{rs_path}elevation.png')
        pag.click(x + 70, y, duration=1)
        pag.sleep(2)
        pag.hotkey(ctrl, 'a')
        pag.sleep(2)
        pag.typewrite('-1', interval=1)
        pag.press(enter)
        pag.sleep(3)

        pag.click(duration=1)
        pag.hotkey(ctrl, 'a')
        pag.typewrite('-3', interval=1)
        pag.press(enter)
        pag.sleep(3)
    except (ImageNotFoundException, OSError, Exception):
        print("\nException :\'Elevation\' spinbox not found on the screen.")

    # Drawing tangents to the waypoints and path
    try:
        x, y = pag.locateCenterOnScreen(f'{rs_path}drawtangent.png')
        pag.click(x, y, duration=1)
        pag.sleep(2)

        # Changing color of tangents
        pag.click(x + 160, y, duration=1)
        pag.sleep(1)
        pag.press(enter)
        pag.sleep(1)

        # Changing Kilometers of the tangent distance
        pag.click(x + 250, y, duration=1)
        pag.sleep(1)
        pag.hotkey(ctrl, 'a')
        pag.sleep(1)
        pag.typewrite('20', interval=1)
        pag.press(enter)
        pag.sleep(3)

        # Zooming into the map
        try:
            x, y = pag.locateCenterOnScreen('pictures/zoom.PNG')
            pag.click(x, y, interval=2)
            pag.move(None, 150, duration=1)
            pag.dragRel(230, 150, duration=2)
            pag.sleep(5)
        except ImageNotFoundException:
            print("\n Exception : Zoom button could not be located on the screen")

        # Rotating the tangent through various angles
        try:
            # Uses the azimuth spinbox position captured earlier; if that
            # section failed, the UnboundLocalError below is the fallback.
            pag.click(azimuth_x, azimuth_y, duration=1)
            pag.sleep(1)
            pag.hotkey(ctrl, 'a')
            pag.sleep(1)
            pag.typewrite('120', interval=0.5)
            pag.sleep(2)
            for _ in range(10):
                pag.press('down')
                pag.sleep(2)
            pag.sleep(1)
            pag.click(azimuth_x + 500, y, duration=1)
            pag.sleep(1)
        except UnboundLocalError:
            print('Azimuth spinbox coordinates are not stored. Hence cannot change values.')
    except (ImageNotFoundException, OSError, Exception):
        print("\nException :\'Tangent\' checkbox not found on the screen.")

    print("\nAutomation is over for this tutorial. Watch next tutorial for other functions.")

    # Close Everything!
    try:
        if platform == 'linux' or platform == 'linux2':
            for _ in range(2):
                pag.hotkey('altleft', 'f4')
                pag.sleep(3)
                pag.press('left')
                pag.sleep(3)
                pag.press('enter')
                pag.sleep(2)
            pag.keyDown('altleft')
            pag.press('tab')
            pag.press('left')
            pag.keyUp('altleft')
            pag.press('q')
        if platform == 'win32':
            for _ in range(2):
                pag.hotkey('alt', 'f4')
                pag.sleep(3)
                pag.press('left')
                pag.sleep(3)
                pag.press('enter')
                pag.sleep(2)
            pag.hotkey('alt', 'tab')
            pag.press('q')
        elif platform == 'darwin':
            for _ in range(2):
                pag.hotkey('command', 'w')
                pag.sleep(3)
                pag.press('left')
                pag.sleep(3)
                pag.press('return')
                pag.sleep(2)
            pag.hotkey('command', 'tab')
            pag.press('q')
    except Exception:
        print("Cannot automate : Enable Shortcuts for your system or try again")
def main():
    """
    This function runs the above functions as different processes at the same time and can be
    controlled from here. (This is the main process.)
    """
    gui_proc = multiprocessing.Process(target=call_mss)
    automation_proc = multiprocessing.Process(target=automate_rs)
    recorder_proc = multiprocessing.Process(target=call_recorder)

    print("\nINFO : Starting Automation.....\n")
    # The recorder must be rolling before the GUI appears so nothing is missed.
    recorder_proc.start()
    pag.sleep(3)
    initial_ops()
    gui_proc.start()
    automation_proc.start()

    automation_proc.join()
    gui_proc.join()
    recorder_proc.join()
    print("\n\nINFO : Automation Completes Successfully!")
    sys.exit()
if __name__ == '__main__':
    # Entry point: spawns the GUI, automation, and recorder processes.
    main()
|
client_socket.py | # TODO documentation
from __future__ import print_function
import sys
import socket
import threading
import select
from fprime_gds.common.handlers import DataHandler
from fprime.constants import DATA_ENCODING
# Constants for public use
GUI_TAG = "GUI"
FSW_TAG = "FSW"
class ThreadedTCPSocketClient(DataHandler):
    '''
    Threaded TCP client that connects to the socket server that serves packets from the flight
    software
    '''

    def __init__(self, sock=None, dest=FSW_TAG):
        """
        Threaded client socket constructor

        Keyword Arguments:
            sock {Socket} -- A socket for the client to use. Created own if
                             None (default: {None})
            dest {str} -- default destination tag used by data_callback
                          (default: {FSW_TAG})
        """
        if sock is None:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        else:
            self.sock = sock
        # NOTE can't do this b/c EINPROGRESS: self.sock.setblocking(0)
        self.dest = dest
        self.__distributors = []
        # select() timeout (seconds) — bounds how long disconnect() may wait.
        self.__select_timeout = 1
        self.__data_recv_thread = threading.Thread(target=self.recv)
        self.stop_event = threading.Event()

    def get_data_bytes(self, string_data):
        '''
        Convert the data bytes from string to bytes

        :param string_data: data in string format
        :return: data in bytes format
        '''
        if sys.version_info >= (3, 0):
            return string_data.encode(DATA_ENCODING)
        return string_data

    def get_data_string(self, bytes_data):
        '''
        Convert the data bytes from bytes to string

        :param bytes_data: data in bytes format
        :return: data in string format
        '''
        if sys.version_info >= (3, 0):
            return bytes_data.decode(DATA_ENCODING)
        return bytes_data

    def register_distributor(self, distributor):
        """Registers a fprime.gds.distributor object with this socket

        Arguments:
            fprime.gds.distributor {Distributor} -- Distributor must implement data_callback
        """
        self.__distributors.append(distributor)

    def register_to_server(self, register_as):
        '''
        Registers the caller to the server as type register_as

        This function assumes the socket connects to an fprime TCP server

        Args:
            register_as (string): How to identify this process to the TCP server
                                  Can be either "FSW" or "GUI"
        '''
        data = "Register %s\n" % register_as
        self.sock.send(self.get_data_bytes(data))

    def connect(self, host, port):
        """Connect to host at given port and start the threaded recv method.

        Arguments:
            host {string} -- IP of the host server
            port {int} -- Port of the host server
        """
        try:
            self.sock.connect((host, port))
            self.__data_recv_thread.start()
        except OSError:
            # Narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; socket failures are OSError.
            print("There was a problem connecting to the TCP Server")
            sys.exit(-1)

    def disconnect(self):
        """Disconnect the socket client from the server and stop the internal thread.
        """
        self.stop_event.set()
        self.__data_recv_thread.join()
        self.sock.close()

    def data_callback(self, data, sender=None):
        """
        Handles incoming data by sending it to a socket.

        :param data: data to send to the client socket
        :param sender: sender source of the data
        """
        self.send(data, self.dest)

    def send(self, data, dest):
        """
        Send data to the server

        All necessary headers are added in this function.

        Arguments:
            data {binary} -- The data to send (What you want the destination
                             to receive)
            dest {String} -- Where to send the data to. Either "FSW" or "GUI"
        """
        self.sock.send(b"A5A5 %s %s" % (self.get_data_bytes(dest), data))

    def recv(self):
        """
        Method run constantly by the enclosing thread. Looks for data from the server.
        """
        while not self.stop_event.is_set():
            ready = select.select([self.sock], [], [], self.__select_timeout)
            if ready[0]:
                chunk = self.sock.recv(1024)
                if not chunk:
                    # An empty read means the server closed the connection;
                    # without this check select() stays "readable" forever and
                    # the loop spins hot distributing empty chunks.
                    break
                for d in self.__distributors:
                    d.on_recv(chunk)
|
preview_utils.py | import os
import re
import threading
import time
import traceback
import sublime
if sublime.platform() == 'windows':
    # Windows-only dependencies for registry access and Win32 calls.
    import winreg

    import ctypes
    from ctypes import wintypes

    # wrapper for GetSystemDirectoryW
    def get_system_root():
        """Return the Windows system directory via GetSystemDirectoryW."""
        buffer = ctypes.create_unicode_buffer(wintypes.MAX_PATH + 1)
        ctypes.windll.kernel32.GetSystemDirectoryW(buffer, len(buffer))
        return buffer.value
from ..latextools_utils import cache, get_setting
from ..latextools_utils.distro_utils import using_miktex
from ..latextools_utils.external_command import (
get_texpath, execute_command, check_output, __sentinel__
)
from ..latextools_utils.system import which
_lt_settings = {}
def _get_convert_command():
    """Locate the ImageMagick binary ('magick' preferred, else 'convert').

    The result is memoized on the function object. On Windows a found
    convert.exe living inside the system root is rejected — that is the OS
    filesystem-conversion tool, not ImageMagick.
    """
    if hasattr(_get_convert_command, "result"):
        return _get_convert_command.result

    texpath = get_texpath() or os.environ['PATH']
    result = (
        which('magick', path=texpath) or
        which('convert', path=texpath)
    )

    # DO NOT RUN THE CONVERT COMMAND IN THE SYSTEM ROOT ON WINDOWS
    # Guard against result being None (neither binary found) before calling
    # .lower() on it — previously this raised AttributeError.
    if (sublime.platform() == 'windows' and result is not None and
            result.lower().endswith('convert.exe')):
        system_root = get_system_root().lower()
        if result.lower().startswith(system_root):
            result = None

    _get_convert_command.result = result
    return _get_convert_command.result
def convert_installed():
    """Return whether ImageMagick/convert is available in the PATH."""
    # Relies on _get_convert_command()'s memoized lookup (and its Windows
    # system-root exclusion).
    return _get_convert_command() is not None
def run_convert_command(args):
    """Executes ImageMagick convert or magick command as appropriate with the
    given args"""
    if not isinstance(args, list):
        raise TypeError('args must be a list')

    convert_command = _get_convert_command()
    base_name = os.path.splitext(os.path.basename(convert_command))[0]
    if base_name == 'magick':
        # modern ImageMagick entry point: "magick convert <args>"
        args[:0] = [convert_command, 'convert']
    else:
        args.insert(0, convert_command)

    execute_command(args, shell=sublime.platform() == 'windows')
_GS_COMMAND = None
_GS_VERSION_LOCK = threading.Lock()
_GS_VERSION = None
_GS_VERSION_REGEX = re.compile(r'Ghostscript (?P<major>\d+)\.(?P<minor>\d{2})')
def _get_gs_command():
    """Return the Ghostscript executable path, resolving and caching it on
    first successful lookup."""
    global _GS_COMMAND
    if _GS_COMMAND is None:
        _GS_COMMAND = __get_gs_command()
        # load the GS version on a background thread
        version_thread = threading.Thread(target=_update_gs_version)
        version_thread.daemon = True
        version_thread.start()
    return _GS_COMMAND
def _update_gs_version():
    """Populate the module-level _GS_VERSION tuple (thread-safe, one-shot)."""
    global _GS_VERSION
    with _GS_VERSION_LOCK:
        if _GS_VERSION is not None:
            # another thread already resolved the version
            return
        try:
            raw_version = check_output([_GS_COMMAND, '-version'])
            version_match = _GS_VERSION_REGEX.search(raw_version)
            if version_match:
                _GS_VERSION = tuple(
                    int(part) for part in version_match.groups())
        except Exception:
            print('Error finding Ghostscript version for {0}'.format(
                _GS_COMMAND))
            traceback.print_exc()
# broken out to be called from system_check
def __get_gs_command():
    """Search for a Ghostscript executable; return its path or None."""
    texpath = get_texpath() or os.environ['PATH']
    if sublime.platform() != 'windows':
        return which('gs', path=texpath)
    # Windows: prefer a Ghostscript found on the texpath
    found = (
        which('gswin32c', path=texpath) or
        which('gswin64c', path=texpath) or
        which('mgs', path=texpath) or
        which('gs', path=texpath)
    )
    # TeX Live bundles its own Ghostscript (MiKTeX does not)
    if found is None and not using_miktex():
        found = _get_tl_gs_path(texpath)
    # finally, try to find Ghostscript from the registry
    if found is None:
        found = _get_gs_exe_from_registry()
    return found
def _get_tl_gs_path(texpath):
    """Tries to find the gs installed by TeXLive"""
    pdflatex = which('pdflatex', path=texpath)
    if pdflatex is None:
        return None
    # assumed structure: texlive/<year>/bin/<arch>/pdflatex with the
    # bundled Ghostscript under texlive/<year>/tlpkg/tlgs/bin
    bin_dir = os.path.dirname(pdflatex)
    tlgs_bin = os.path.normpath(
        os.path.join(bin_dir, '..', '..', 'tlpkg', 'tlgs', 'bin'))
    for gs_name in ('gswin32c', 'gswin64c'):
        gs_path = which(gs_name, path=tlgs_bin)
        if gs_path is not None:
            return gs_path
    return None
def _get_gs_exe_from_registry():
    """Locate a Ghostscript executable via the Windows registry.

    Scans HKLM\\SOFTWARE\\{GPL,AFPL} Ghostscript for the highest installed
    <major>.<minor> version key, reads that key's GS_DLL entry and looks
    for gswin32c/gswin64c next to the DLL.  Returns the executable path or
    ``None``.
    """
    result = None
    hndl = None
    product_family = None
    major_version = -1
    minor_version = -1
    # find the most recent version of Ghostscript installed
    for product in ["GPL Ghostscript", "AFPL Ghostscript"]:
        try:
            hndl = winreg.OpenKey(
                winreg.HKEY_LOCAL_MACHINE,
                'SOFTWARE\\{0}'.format(product)
            )
            try:
                for i in range(winreg.QueryInfoKey(hndl)[0]):
                    version = winreg.EnumKey(hndl, i)
                    try:
                        major, minor = map(int, version.split('.'))
                        if (
                            major > major_version or
                            (
                                major == major_version and
                                minor > minor_version
                            )
                        ):
                            major_version = major
                            minor_version = minor
                            product_family = product
                    except ValueError:
                        # subkey name is not a <major>.<minor> version
                        continue
            finally:
                winreg.CloseKey(hndl)
        except OSError:
            # this product family is not present in the registry
            continue
    if product_family is not None:
        try:
            hndl = winreg.OpenKey(
                winreg.HKEY_LOCAL_MACHINE,
                'SOFTWARE\\{product_family}\\{major_version}.'
                '{minor_version:02}'.format(**locals())
            )
            try:
                # NOTE(review): winreg.QueryValue reads the *default value*
                # of a subkey named GS_DLL; if Ghostscript stores GS_DLL as
                # a named value instead, QueryValueEx would be needed —
                # confirm on a Windows install.
                gs_path = os.path.dirname(
                    winreg.QueryValue(hndl, 'GS_DLL')
                )
                result = (
                    which('gswin32c', path=gs_path) or
                    which('gswin64c', path=gs_path)
                )
            finally:
                winreg.CloseKey(hndl)
        except OSError:
            print(
                'Could not find GS_DLL value for '
                '{product_family}'.format(**locals())
            )
    return result
def ghostscript_installed():
    """Return True when a Ghostscript executable could be located."""
    gs_command = _get_gs_command()
    return gs_command is not None
def get_ghostscript_version():
    """Return the detected Ghostscript version as a (major, minor) tuple.

    Returns ``(-1, -1)`` when the version could not be determined.

    Bug fix: the previous implementation acquired _GS_VERSION_LOCK and then
    called _update_gs_version(), which re-acquires the same non-reentrant
    ``threading.Lock`` — a guaranteed deadlock whenever _GS_VERSION was
    still unset.  _update_gs_version() does its own locking and re-check,
    so no outer lock is needed here.
    """
    if _GS_VERSION is None:
        _update_gs_version()
    return _GS_VERSION if _GS_VERSION is not None else (-1, -1)
def run_ghostscript_command(args, stdout=__sentinel__, stderr=__sentinel__):
    """Executes a Ghostscript command with the given args.

    Prepends the Ghostscript executable and the standard quiet/batch/safety
    flags to *args* and returns whatever execute_command returns.

    :param args: list of Ghostscript arguments
    :param stdout: passed through to execute_command
    :param stderr: passed through to execute_command
    :raises TypeError: if *args* is not a list

    Improvement: builds a fresh argument list instead of mutating the
    caller's *args* in place.  The flag order matches the original chain
    of ``insert(1, ...)`` calls exactly.
    """
    if not isinstance(args, list):
        raise TypeError('args must be a list')
    # run in quiet batch mode with file-access restrictions (-dSAFER)
    command = [
        _get_gs_command(),
        '-dSAFER', '-dBATCH', '-dNOPAUSE', '-dNOPROMPT', '-dQUIET', '-q',
    ] + args
    return execute_command(
        command, shell=sublime.platform() == 'windows',
        stdout=stdout, stderr=stderr
    )
class SettingsListener(object):
    """
    Required class attributes:
    - view: the view to listen to
    - attr_updates: these attributes are listened on the view and the
      lt settings
    - lt_attr_updates: these attributes are listened on the lt settings
    """

    def _init_list_add_on_change(self, key, view_attr, lt_attr):
        """Seed attributes from the settings and register change listeners.

        :param key: unique key used to register the on-change callbacks
        :param view_attr: mapping of attribute name ->
            {"setting": <name>, "call_after": <callable>}, resolved against
            the view settings
        :param lt_attr: same shape, resolved against the LT settings only
        """
        view = self.view
        # this can end up being called *before* plugin_loaded() because
        # ST creates the ViewEventListeners *before* calling plugin_loaded()
        global _lt_settings
        if not isinstance(_lt_settings, sublime.Settings):
            try:
                _lt_settings = sublime.load_settings(
                    "LaTeXTools.sublime-settings"
                )
            except Exception:
                traceback.print_exc()
        self.v_attr_updates = view_attr
        self.lt_attr_updates = lt_attr
        # seed attributes from the view settings first ...
        for attr_name, d in self.v_attr_updates.items():
            settings_name = d["setting"]
            self.__dict__[attr_name] = get_setting(settings_name, view=view)
        # ... then from the LT settings, never overwriting a view value
        for attr_name, d in self.lt_attr_updates.items():
            if attr_name in self.__dict__:
                continue
            settings_name = d["setting"]
            self.__dict__[attr_name] = _lt_settings.get(settings_name)
        _lt_settings.add_on_change(
            key, lambda: self._on_setting_change(False))
        self.view.settings().add_on_change(
            key, lambda: self._on_setting_change(True))

    def _on_setting_change(self, for_view):
        """Handle a change in the view settings (for_view=True) or the LT
        settings (for_view=False): update the first changed attribute and
        schedule its "call_after" callback asynchronously."""
        settings = self.view.settings() if for_view else _lt_settings
        attr_updates = (self.v_attr_updates if for_view
                        else self.lt_attr_updates)
        for attr_name in attr_updates.keys():
            attr = attr_updates[attr_name]
            settings_name = attr["setting"]
            value = settings.get(settings_name)
            # a removed view setting falls back to the LT settings; do not
            # clobber the attribute with None in that case
            if for_view and value is None:
                continue
            if self.__dict__[attr_name] == value:
                continue
            # a view-level setting overrides the global one, so ignore
            # global changes for settings the view defines itself
            if not for_view and self.view.settings().has(settings_name):
                continue
            # update the value and call the after function
            self.__dict__[attr_name] = value
            sublime.set_timeout_async(attr["call_after"])
            # NOTE(review): only the first changed attribute is processed
            # per callback — presumably intentional, confirm
            break
# per-session record of the last cleanup attempt, keyed by cache key
_last_delete_try = {}


def try_delete_temp_files(key, temp_path):
    """Spawn a background cleanup of *temp_path* when the configured delete
    period has elapsed since the last attempt.

    :param key: settings prefix used for the "<key>_temp_size" setting and
        the "<key>_temp_delete" cache entry
    :param temp_path: folder whose files may be deleted

    Bug fix: the bare ``except:`` around the cache read also swallowed
    ``KeyboardInterrupt``/``SystemExit``; narrowed to ``except Exception``.
    """
    try:
        last_try = _last_delete_try[key]
    except KeyError:
        try:
            last_try = cache.read_global(key + "_temp_delete")
        except Exception:
            # no persisted timestamp yet -> treat as "never tried"
            last_try = 0
    _last_delete_try[key] = time.time()
    cache_size = get_setting(key + "_temp_size", 50, view={})
    period = get_setting("preview_temp_delete_period", 24, view={})
    # if the period is negative don't clear automatically
    if period < 0:
        return
    # convert the units
    cache_size *= 10**6  # MB -> B
    period *= 60 * 60  # h -> s
    # the remaining size as tenth of the cache size
    max_remaining_size = cache_size / 10.
    if time.time() <= last_try + period:
        return
    # NOTE(review): this persists the *previous* attempt time rather than
    # the current one — verify whether time.time() was intended here
    cache.write_global(key + "_temp_delete", last_try)
    tr = threading.Thread(
        target=lambda: delete_temp_files(
            temp_path, cache_size, max_remaining_size))
    tr.start()
def _temp_folder_size(temp_path):
    """Return the total size in bytes of the plain files directly inside
    *temp_path*; subdirectories are not descended into."""
    paths = (
        os.path.join(temp_path, name) for name in os.listdir(temp_path)
    )
    return sum(
        os.path.getsize(path) for path in paths if os.path.isfile(path)
    )
def _modified_time(file_path):
    """Return the mtime of *file_path*, or 0 if it cannot be determined.

    Bug fix: the bare ``except:`` also swallowed KeyboardInterrupt and
    SystemExit; narrowed to OSError (missing file, permission error).
    """
    try:
        return os.path.getmtime(file_path)
    except OSError:
        return 0


def delete_temp_files(temp_path, cache_size, max_remaining_size,
                      total_size=None, delete_all=False):
    """Delete files in *temp_path*, oldest first.

    With ``delete_all=True`` every file is removed.  Otherwise nothing
    happens while the folder's total size is within *cache_size*; once it
    exceeds that, files are deleted until at most *max_remaining_size*
    bytes remain.

    :param total_size: current folder size in bytes; computed when None

    Bug fix: with ``delete_all=True`` and ``total_size=None`` the old code
    evaluated ``None <= cache_size`` (TypeError) and later subtracted file
    sizes from None.  The size bookkeeping is now skipped in that case.
    """
    if not delete_all:
        if total_size is None:
            total_size = _temp_folder_size(temp_path)
        if total_size <= cache_size:
            return
    del_files = [
        os.path.join(temp_path, file_name)
        for file_name in os.listdir(temp_path)
    ]
    # sort the delete files by their modification time, newest first,
    # so popping from the end yields the oldest remaining file
    del_files.sort(key=_modified_time, reverse=True)
    # delete the files until the max boundary is reached
    # oldest files first
    while del_files and (delete_all or total_size > max_remaining_size):
        file_path = del_files.pop()
        if os.path.isfile(file_path):
            if total_size is not None:
                total_size -= os.path.getsize(file_path)
            os.remove(file_path)
|
example_userdata_stream.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_userdata_stream.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
# https://docs.python.org/3/library/logging.html#logging-levels
# Log to "<this script>.log" using str.format-style ({...}) placeholders.
logging.basicConfig(filename=os.path.basename(__file__) + '.log',
                    format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
                    style="{")
# NOTE(review): the library presumably logs under its own module-based
# logger name; verify that configuring 'unicorn-log' captures its output.
logging.getLogger('unicorn-log').setLevel(logging.INFO)
# also echo records to the console
logging.getLogger('unicorn-log').addHandler(logging.StreamHandler())
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
    """Worker loop: drain the manager's stream_buffer and print each record
    until the manager starts shutting down.

    :param binance_websocket_api_manager: manager whose buffer is drained

    Bug fix: replaced ``exit(0)`` with ``return`` — ``exit`` is the
    site-module helper meant for the interactive interpreter, and simply
    returning is enough to end this worker thread.
    """
    while True:
        if binance_websocket_api_manager.is_manager_stopping():
            return
        oldest_stream_data_from_stream_buffer = \
            binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
        if oldest_stream_data_from_stream_buffer is False:
            # buffer empty -> back off briefly to avoid a busy loop
            time.sleep(0.01)
        else:
            print(oldest_stream_data_from_stream_buffer)
# configure api key and secret for binance.com
# NOTE(review): the keys below are intentionally blank placeholders — the
# userData streams cannot authenticate until real credentials are supplied.
binance_com_api_key = ""
binance_com_api_secret = ""
# configure api key and secret for binance.je
binance_je_api_key = ""
binance_je_api_secret = ""
# configure api key and secret for binance.us
binance_us_api_key = ""
binance_us_api_secret = ""

# create instances of BinanceWebSocketApiManager, one per exchange
binance_com_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.com")
binance_je_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.je")
binance_us_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.us")

# set api key and secret in api manager
binance_com_websocket_api_manager.set_private_api_config(binance_com_api_key, binance_com_api_secret)
binance_je_websocket_api_manager.set_private_api_config(binance_je_api_key, binance_je_api_secret)
binance_us_websocket_api_manager.set_private_api_config(binance_us_api_key, binance_us_api_secret)

# create the userData streams
binance_com_user_data_stream_id = binance_com_websocket_api_manager.create_stream('arr', '!userData')
binance_je_user_data_stream_id = binance_je_websocket_api_manager.create_stream('arr', '!userData')
binance_us_user_data_stream_id = binance_us_websocket_api_manager.create_stream('arr', '!userData')

# start a worker process to move the received stream_data from the stream_buffer to a print function
# NOTE(review): the same variable is rebound for each thread and the threads
# are not daemonized, so the process only ends when all managers stop.
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_com_websocket_api_manager,))
worker_thread.start()
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_je_websocket_api_manager,))
worker_thread.start()
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_us_websocket_api_manager,))
worker_thread.start()

# monitor the streams: print stream info and per-manager summaries forever
while True:
    binance_com_websocket_api_manager.print_stream_info(binance_com_user_data_stream_id)
    binance_com_websocket_api_manager.print_summary()
    binance_je_websocket_api_manager.print_summary()
    binance_us_websocket_api_manager.print_summary()
    time.sleep(1)
|
test_logging.py | # Copyright 2001-2019 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2019 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import copy
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok, assert_python_failure
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
from test.support.logging_helper import TestHandler
import textwrap
import threading
import time
import unittest
import warnings
import weakref
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
import asyncore
import smtpd
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):

    """Base class for logging tests."""

    log_format = "%(name)s -> %(levelname)s: %(message)s"
    expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
    message_num = 0

    def setUp(self):
        """Setup the default logging stream to an internal StringIO instance,
        so that we can examine log output as we want."""
        self._threading_key = threading_helper.threading_setup()

        # Snapshot the module-global logging state (handlers, level names,
        # registered loggers and their "disabled" flags) so tearDown can
        # restore everything, under the logging module lock.
        logger_dict = logging.getLogger().manager.loggerDict
        logging._acquireLock()
        try:
            self.saved_handlers = logging._handlers.copy()
            self.saved_handler_list = logging._handlerList[:]
            self.saved_loggers = saved_loggers = logger_dict.copy()
            self.saved_name_to_level = logging._nameToLevel.copy()
            self.saved_level_to_name = logging._levelToName.copy()
            self.logger_states = logger_states = {}
            for name in saved_loggers:
                logger_states[name] = getattr(saved_loggers[name],
                                              'disabled', None)
        finally:
            logging._releaseLock()

        # Set two unused loggers
        self.logger1 = logging.getLogger("\xab\xd7\xbb")
        self.logger2 = logging.getLogger("\u013f\u00d6\u0047")

        self.root_logger = logging.getLogger("")
        self.original_logging_level = self.root_logger.getEffectiveLevel()

        # Route all root-logger output into an in-memory stream.
        self.stream = io.StringIO()
        self.root_logger.setLevel(logging.DEBUG)
        self.root_hdlr = logging.StreamHandler(self.stream)
        self.root_formatter = logging.Formatter(self.log_format)
        self.root_hdlr.setFormatter(self.root_formatter)
        # A leftover handler would mean a previous test leaked state.
        if self.logger1.hasHandlers():
            hlist = self.logger1.handlers + self.root_logger.handlers
            raise AssertionError('Unexpected handlers: %s' % hlist)
        if self.logger2.hasHandlers():
            hlist = self.logger2.handlers + self.root_logger.handlers
            raise AssertionError('Unexpected handlers: %s' % hlist)
        self.root_logger.addHandler(self.root_hdlr)
        self.assertTrue(self.logger1.hasHandlers())
        self.assertTrue(self.logger2.hasHandlers())

    def tearDown(self):
        """Remove our logging stream, and restore the original logging
        level."""
        self.stream.close()
        self.root_logger.removeHandler(self.root_hdlr)
        while self.root_logger.handlers:
            h = self.root_logger.handlers[0]
            self.root_logger.removeHandler(h)
            h.close()
        self.root_logger.setLevel(self.original_logging_level)
        # Restore the logging state captured in setUp, under the module lock.
        logging._acquireLock()
        try:
            logging._levelToName.clear()
            logging._levelToName.update(self.saved_level_to_name)
            logging._nameToLevel.clear()
            logging._nameToLevel.update(self.saved_name_to_level)
            logging._handlers.clear()
            logging._handlers.update(self.saved_handlers)
            logging._handlerList[:] = self.saved_handler_list
            manager = logging.getLogger().manager
            manager.disable = 0
            loggerDict = manager.loggerDict
            loggerDict.clear()
            loggerDict.update(self.saved_loggers)
            logger_states = self.logger_states
            for name in self.logger_states:
                if logger_states[name] is not None:
                    self.saved_loggers[name].disabled = logger_states[name]
        finally:
            logging._releaseLock()
        self.doCleanups()
        threading_helper.threading_cleanup(*self._threading_key)

    def assert_log_lines(self, expected_values, stream=None, pat=None):
        """Match the collected log lines against the regular expression
        self.expected_log_pat, and compare the extracted group values to
        the expected_values list of tuples."""
        stream = stream or self.stream
        pat = re.compile(pat or self.expected_log_pat)
        actual_lines = stream.getvalue().splitlines()
        self.assertEqual(len(actual_lines), len(expected_values))
        for actual, expected in zip(actual_lines, expected_values):
            match = pat.search(actual)
            if not match:
                self.fail("Log line does not match expected pattern:\n" +
                          actual)
            self.assertEqual(tuple(match.groups()), expected)
        # The stream position sits at the end after the writes above, so
        # read() should yield nothing; anything left over is an error.
        s = stream.read()
        if s:
            self.fail("Remaining output at end of log stream:\n" + s)

    def next_message(self):
        """Generate a message consisting solely of an auto-incrementing
        integer."""
        self.message_num += 1
        return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):

    """Test builtin levels and their inheritance."""

    def test_flat(self):
        # Logging levels in a flat logger namespace.
        m = self.next_message

        # Three sibling loggers at ERROR, INFO (via a LoggerAdapter) and
        # DEBUG thresholds.
        ERR = logging.getLogger("ERR")
        ERR.setLevel(logging.ERROR)
        INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
        INF.setLevel(logging.INFO)
        DEB = logging.getLogger("DEB")
        DEB.setLevel(logging.DEBUG)

        # These should log.
        ERR.log(logging.CRITICAL, m())
        ERR.error(m())

        INF.log(logging.CRITICAL, m())
        INF.error(m())
        INF.warning(m())
        INF.info(m())

        DEB.log(logging.CRITICAL, m())
        DEB.error(m())
        DEB.warning(m())
        DEB.info(m())
        DEB.debug(m())

        # These should not log.
        ERR.warning(m())
        ERR.info(m())
        ERR.debug(m())

        INF.debug(m())

        self.assert_log_lines([
            ('ERR', 'CRITICAL', '1'),
            ('ERR', 'ERROR', '2'),
            ('INF', 'CRITICAL', '3'),
            ('INF', 'ERROR', '4'),
            ('INF', 'WARNING', '5'),
            ('INF', 'INFO', '6'),
            ('DEB', 'CRITICAL', '7'),
            ('DEB', 'ERROR', '8'),
            ('DEB', 'WARNING', '9'),
            ('DEB', 'INFO', '10'),
            ('DEB', 'DEBUG', '11'),
        ])

    def test_nested_explicit(self):
        # Logging levels in a nested namespace, all explicitly set.
        m = self.next_message

        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)

        # These should log.
        INF_ERR.log(logging.CRITICAL, m())
        INF_ERR.error(m())

        # These should not log.
        INF_ERR.warning(m())
        INF_ERR.info(m())
        INF_ERR.debug(m())

        self.assert_log_lines([
            ('INF.ERR', 'CRITICAL', '1'),
            ('INF.ERR', 'ERROR', '2'),
        ])

    def test_nested_inherited(self):
        # Logging levels in a nested namespace, inherited from parent loggers.
        m = self.next_message

        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)
        # These two have no explicit level and inherit their parents'.
        INF_UNDEF = logging.getLogger("INF.UNDEF")
        INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
        UNDEF = logging.getLogger("UNDEF")

        # These should log.
        INF_UNDEF.log(logging.CRITICAL, m())
        INF_UNDEF.error(m())
        INF_UNDEF.warning(m())
        INF_UNDEF.info(m())
        INF_ERR_UNDEF.log(logging.CRITICAL, m())
        INF_ERR_UNDEF.error(m())

        # These should not log.
        INF_UNDEF.debug(m())
        INF_ERR_UNDEF.warning(m())
        INF_ERR_UNDEF.info(m())
        INF_ERR_UNDEF.debug(m())

        self.assert_log_lines([
            ('INF.UNDEF', 'CRITICAL', '1'),
            ('INF.UNDEF', 'ERROR', '2'),
            ('INF.UNDEF', 'WARNING', '3'),
            ('INF.UNDEF', 'INFO', '4'),
            ('INF.ERR.UNDEF', 'CRITICAL', '5'),
            ('INF.ERR.UNDEF', 'ERROR', '6'),
        ])

    def test_nested_with_virtual_parent(self):
        # Logging levels when some parent does not exist yet.
        m = self.next_message

        # The grandchild is created before its parent, exercising the
        # placeholder mechanism in the logger manager.
        INF = logging.getLogger("INF")
        GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
        CHILD = logging.getLogger("INF.BADPARENT")
        INF.setLevel(logging.INFO)

        # These should log.
        GRANDCHILD.log(logging.FATAL, m())
        GRANDCHILD.info(m())
        CHILD.log(logging.FATAL, m())
        CHILD.info(m())

        # These should not log.
        GRANDCHILD.debug(m())
        CHILD.debug(m())

        self.assert_log_lines([
            ('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
            ('INF.BADPARENT.UNDEF', 'INFO', '2'),
            ('INF.BADPARENT', 'CRITICAL', '3'),
            ('INF.BADPARENT', 'INFO', '4'),
        ])

    def test_regression_22386(self):
        """See issue #22386 for more information."""
        self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
        self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')

    def test_issue27935(self):
        # 'FATAL' resolves to the same numeric level as CRITICAL.
        fatal = logging.getLevelName('FATAL')
        self.assertEqual(fatal, logging.FATAL)

    def test_regression_29220(self):
        """See issue #29220 for more information."""
        logging.addLevelName(logging.INFO, '')
        self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
        self.assertEqual(logging.getLevelName(logging.INFO), '')
        self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
        self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):

    """Test the bundled Filter class."""

    def test_filter(self):
        # Only messages satisfying the specified criteria pass through the
        # filter.
        filter_ = logging.Filter("spam.eggs")
        handler = self.root_logger.handlers[0]
        try:
            handler.addFilter(filter_)
            spam = logging.getLogger("spam")
            spam_eggs = logging.getLogger("spam.eggs")
            spam_eggs_fish = logging.getLogger("spam.eggs.fish")
            spam_bakedbeans = logging.getLogger("spam.bakedbeans")

            spam.info(self.next_message())
            spam_eggs.info(self.next_message())  # Good.
            spam_eggs_fish.info(self.next_message())  # Good.
            spam_bakedbeans.info(self.next_message())

            # Only "spam.eggs" and its descendants pass the name filter.
            self.assert_log_lines([
                ('spam.eggs', 'INFO', '2'),
                ('spam.eggs.fish', 'INFO', '3'),
            ])
        finally:
            handler.removeFilter(filter_)

    def test_callable_filter(self):
        # Only messages satisfying the specified criteria pass through the
        # filter.  Here the filter is a plain callable instead of a
        # logging.Filter instance.

        def filterfunc(record):
            parts = record.name.split('.')
            prefix = '.'.join(parts[:2])
            return prefix == 'spam.eggs'

        handler = self.root_logger.handlers[0]
        try:
            handler.addFilter(filterfunc)
            spam = logging.getLogger("spam")
            spam_eggs = logging.getLogger("spam.eggs")
            spam_eggs_fish = logging.getLogger("spam.eggs.fish")
            spam_bakedbeans = logging.getLogger("spam.bakedbeans")

            spam.info(self.next_message())
            spam_eggs.info(self.next_message())  # Good.
            spam_eggs_fish.info(self.next_message())  # Good.
            spam_bakedbeans.info(self.next_message())

            self.assert_log_lines([
                ('spam.eggs', 'INFO', '2'),
                ('spam.eggs.fish', 'INFO', '3'),
            ])
        finally:
            handler.removeFilter(filterfunc)

    def test_empty_filter(self):
        # A Filter with no name lets every record through.
        f = logging.Filter()
        r = logging.makeLogRecord({'name': 'spam.eggs'})
        self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
# Higher value = quieter: BORING (111) is the chattiest, SILENT (120) the
# most severe/quiet.
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
# Every custom level, BORING through SILENT inclusive.
LEVEL_RANGE = range(BORING, SILENT + 1)

#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
    SILENT: 'Silent',
    TACITURN: 'Taciturn',
    TERSE: 'Terse',
    EFFUSIVE: 'Effusive',
    SOCIABLE: 'Sociable',
    VERBOSE: 'Verbose',
    TALKATIVE: 'Talkative',
    GARRULOUS: 'Garrulous',
    CHATTERBOX: 'Chatterbox',
    BORING: 'Boring',
}
class GarrulousFilter(logging.Filter):

    """A filter which blocks garrulous messages."""

    def filter(self, record):
        # drop records logged at exactly the GARRULOUS level
        blocked = record.levelno == GARRULOUS
        return not blocked
class VerySpecificFilter(logging.Filter):

    """A filter which blocks sociable and taciturn messages."""

    def filter(self, record):
        # pass everything except the two targeted levels
        return record.levelno not in (SOCIABLE, TACITURN)
class CustomLevelsAndFiltersTest(BaseTest):

    """Test various filtering possibilities with custom logging levels."""

    # Skip the logger name group.
    expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"

    def setUp(self):
        BaseTest.setUp(self)
        # Register display names for all the custom levels.
        for k, v in my_logging_levels.items():
            logging.addLevelName(k, v)

    def log_at_all_levels(self, logger):
        # Emit one auto-numbered message at each custom level, BORING
        # (lowest) through SILENT (highest).
        for lvl in LEVEL_RANGE:
            logger.log(lvl, self.next_message())

    def test_logger_filter(self):
        # Filter at logger level.
        self.root_logger.setLevel(VERBOSE)
        # Levels >= 'Verbose' are good.
        self.log_at_all_levels(self.root_logger)
        self.assert_log_lines([
            ('Verbose', '5'),
            ('Sociable', '6'),
            ('Effusive', '7'),
            ('Terse', '8'),
            ('Taciturn', '9'),
            ('Silent', '10'),
        ])

    def test_handler_filter(self):
        # Filter at handler level.
        self.root_logger.handlers[0].setLevel(SOCIABLE)
        try:
            # Levels >= 'Sociable' are good.
            self.log_at_all_levels(self.root_logger)
            self.assert_log_lines([
                ('Sociable', '6'),
                ('Effusive', '7'),
                ('Terse', '8'),
                ('Taciturn', '9'),
                ('Silent', '10'),
            ])
        finally:
            self.root_logger.handlers[0].setLevel(logging.NOTSET)

    def test_specific_filters(self):
        # Set a specific filter object on the handler, and then add another
        # filter object on the logger itself.
        handler = self.root_logger.handlers[0]
        specific_filter = None
        garr = GarrulousFilter()
        handler.addFilter(garr)
        try:
            self.log_at_all_levels(self.root_logger)
            first_lines = [
                # Notice how 'Garrulous' is missing
                ('Boring', '1'),
                ('Chatterbox', '2'),
                ('Talkative', '4'),
                ('Verbose', '5'),
                ('Sociable', '6'),
                ('Effusive', '7'),
                ('Terse', '8'),
                ('Taciturn', '9'),
                ('Silent', '10'),
            ]
            self.assert_log_lines(first_lines)

            specific_filter = VerySpecificFilter()
            self.root_logger.addFilter(specific_filter)
            self.log_at_all_levels(self.root_logger)
            self.assert_log_lines(first_lines + [
                # Not only 'Garrulous' is still missing, but also 'Sociable'
                # and 'Taciturn'
                ('Boring', '11'),
                ('Chatterbox', '12'),
                ('Talkative', '14'),
                ('Verbose', '15'),
                ('Effusive', '17'),
                ('Terse', '18'),
                ('Silent', '20'),
            ])
        finally:
            if specific_filter:
                self.root_logger.removeFilter(specific_filter)
            handler.removeFilter(garr)
class HandlerTest(BaseTest):

    def test_name(self):
        # The "name" property is settable and readable on any handler.
        h = logging.Handler()
        h.name = 'generic'
        self.assertEqual(h.name, 'generic')
        h.name = 'anothergeneric'
        self.assertEqual(h.name, 'anothergeneric')
        # The base class does not implement emit().
        self.assertRaises(NotImplementedError, h.emit, None)

    def test_builtin_handlers(self):
        # We can't actually *use* too many handlers in the tests,
        # but we can try instantiating them with various options
        if sys.platform in ('linux', 'darwin'):
            for existing in (True, False):
                fd, fn = tempfile.mkstemp()
                os.close(fd)
                if not existing:
                    os.unlink(fn)
                h = logging.handlers.WatchedFileHandler(fn, encoding='utf-8', delay=True)
                if existing:
                    # With delay=True the file is not opened yet, so the
                    # watched (dev, inode) pair is still unset.
                    dev, ino = h.dev, h.ino
                    self.assertEqual(dev, -1)
                    self.assertEqual(ino, -1)
                    r = logging.makeLogRecord({'msg': 'Test'})
                    h.handle(r)
                    # Now remove the file.
                    os.unlink(fn)
                    self.assertFalse(os.path.exists(fn))
                    # The next call should recreate the file.
                    h.handle(r)
                    self.assertTrue(os.path.exists(fn))
                else:
                    self.assertEqual(h.dev, -1)
                    self.assertEqual(h.ino, -1)
                h.close()
                if existing:
                    os.unlink(fn)
            # Platform-dependent location of the syslog socket.
            if sys.platform == 'darwin':
                sockname = '/var/run/syslog'
            else:
                sockname = '/dev/log'
            try:
                h = logging.handlers.SysLogHandler(sockname)
                self.assertEqual(h.facility, h.LOG_USER)
                self.assertTrue(h.unixsocket)
                h.close()
            except OSError:  # syslogd might not be available
                pass
        for method in ('GET', 'POST', 'PUT'):
            # Only GET and POST are accepted by HTTPHandler.
            if method == 'PUT':
                self.assertRaises(ValueError, logging.handlers.HTTPHandler,
                                  'localhost', '/log', method)
            else:
                h = logging.handlers.HTTPHandler('localhost', '/log', method)
                h.close()
        # Capacity 0 -> every record triggers a flush; capacity 1 -> the
        # first record does not.
        h = logging.handlers.BufferingHandler(0)
        r = logging.makeLogRecord({})
        self.assertTrue(h.shouldFlush(r))
        h.close()
        h = logging.handlers.BufferingHandler(1)
        self.assertFalse(h.shouldFlush(r))
        h.close()

    def test_path_objects(self):
        """
        Test that Path objects are accepted as filename arguments to handlers.

        See Issue #27493.
        """
        fd, fn = tempfile.mkstemp()
        os.close(fd)
        os.unlink(fn)
        pfn = pathlib.Path(fn)
        cases = (
            (logging.FileHandler, (pfn, 'w')),
            (logging.handlers.RotatingFileHandler, (pfn, 'a')),
            (logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
        )
        if sys.platform in ('linux', 'darwin'):
            cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
        for cls, args in cases:
            h = cls(*args, encoding="utf-8")
            self.assertTrue(os.path.exists(fn))
            h.close()
            os.unlink(fn)

    @unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
    def test_race(self):
        # Issue #14632 refers.
        # Delete the log file repeatedly from another thread while the
        # handler keeps writing; WatchedFileHandler must recreate it
        # without raising.
        def remove_loop(fname, tries):
            for _ in range(tries):
                try:
                    os.unlink(fname)
                    self.deletion_time = time.time()
                except OSError:
                    pass
                time.sleep(0.004 * random.randint(0, 4))

        del_count = 500
        log_count = 500

        self.handle_time = None
        self.deletion_time = None

        for delay in (False, True):
            fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
            os.close(fd)
            remover = threading.Thread(target=remove_loop, args=(fn, del_count))
            remover.daemon = True
            remover.start()
            h = logging.handlers.WatchedFileHandler(fn, encoding='utf-8', delay=delay)
            f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
            h.setFormatter(f)
            try:
                for _ in range(log_count):
                    time.sleep(0.005)
                    r = logging.makeLogRecord({'msg': 'testing' })
                    try:
                        self.handle_time = time.time()
                        h.handle(r)
                    except Exception:
                        print('Deleted at %s, '
                              'opened at %s' % (self.deletion_time,
                                                self.handle_time))
                        raise
            finally:
                remover.join()
                h.close()
                if os.path.exists(fn):
                    os.unlink(fn)

    # The implementation relies on os.register_at_fork existing, but we test
    # based on os.fork existing because that is what users and this test use.
    # This helps ensure that when fork exists (the important concept) that the
    # register_at_fork mechanism is also present and used.
    @unittest.skipIf(not hasattr(os, 'fork'), 'Test requires os.fork().')
    def test_post_fork_child_no_deadlock(self):
        """Ensure child logging locks are not held; bpo-6721 & bpo-36533."""
        class _OurHandler(logging.Handler):
            def __init__(self):
                super().__init__()
                self.sub_handler = logging.StreamHandler(
                    stream=open('/dev/null', 'wt', encoding='utf-8'))

            def emit(self, record):
                self.sub_handler.acquire()
                try:
                    self.sub_handler.emit(record)
                finally:
                    self.sub_handler.release()

        self.assertEqual(len(logging._handlers), 0)
        refed_h = _OurHandler()
        self.addCleanup(refed_h.sub_handler.stream.close)
        refed_h.name = 'because we need at least one for this test'
        self.assertGreater(len(logging._handlers), 0)
        self.assertGreater(len(logging._at_fork_reinit_lock_weakset), 1)
        test_logger = logging.getLogger('test_post_fork_child_no_deadlock')
        test_logger.addHandler(refed_h)
        test_logger.setLevel(logging.DEBUG)

        locks_held__ready_to_fork = threading.Event()
        fork_happened__release_locks_and_end_thread = threading.Event()

        def lock_holder_thread_fn():
            # Hold both the module lock and a handler lock across the fork
            # to reproduce the pre-fix deadlock conditions.
            logging._acquireLock()
            try:
                refed_h.acquire()
                try:
                    # Tell the main thread to do the fork.
                    locks_held__ready_to_fork.set()

                    # If the deadlock bug exists, the fork will happen
                    # without dealing with the locks we hold, deadlocking
                    # the child.

                    # Wait for a successful fork or an unreasonable amount of
                    # time before releasing our locks.  To avoid a timing based
                    # test we'd need communication from os.fork() as to when it
                    # has actually happened.  Given this is a regression test
                    # for a fixed issue, potentially less reliably detecting
                    # regression via timing is acceptable for simplicity.
                    # The test will always take at least this long. :(
                    fork_happened__release_locks_and_end_thread.wait(0.5)
                finally:
                    refed_h.release()
            finally:
                logging._releaseLock()

        lock_holder_thread = threading.Thread(
            target=lock_holder_thread_fn,
            name='test_post_fork_child_no_deadlock lock holder')
        lock_holder_thread.start()

        locks_held__ready_to_fork.wait()
        pid = os.fork()
        if pid == 0:
            # Child process
            try:
                test_logger.info(r'Child process did not deadlock. \o/')
            finally:
                os._exit(0)
        else:
            # Parent process
            test_logger.info(r'Parent process returned from fork. \o/')
            fork_happened__release_locks_and_end_thread.set()
            lock_holder_thread.join()

            support.wait_process(pid, exitcode=0)
class BadStream(object):
    """File-like stand-in whose write() always fails.

    Used to drive StreamHandler's error-handling (handleError) paths.
    """

    def write(self, payload):
        # Every emit through this stream blows up on purpose.
        raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
    # StreamHandler subclass that records the record which triggered an
    # emit error (instead of reporting it), so tests can assert on it.
    def handleError(self, record):
        # The stock implementation would print a traceback to stderr;
        # here we just stash the failing record for inspection.
        self.error_record = record
class StreamWithIntName(object):
    # Minimal stream-like object whose ``name`` is an int rather than a
    # str, used to check StreamHandler.__repr__ copes with non-str names.
    level = logging.NOTSET
    name = 2
class StreamHandlerTest(BaseTest):
    def test_error_handling(self):
        # A handleError() override can capture the failing record, while
        # the stock StreamHandler reports to stderr only while
        # logging.raiseExceptions is true.
        h = TestStreamHandler(BadStream())
        r = logging.makeLogRecord({})
        old_raise = logging.raiseExceptions
        try:
            h.handle(r)
            self.assertIs(h.error_record, r)
            h = logging.StreamHandler(BadStream())
            with support.captured_stderr() as stderr:
                h.handle(r)
                msg = '\nRuntimeError: deliberate mistake\n'
                self.assertIn(msg, stderr.getvalue())
            logging.raiseExceptions = False
            with support.captured_stderr() as stderr:
                h.handle(r)
                self.assertEqual('', stderr.getvalue())
        finally:
            # Restore the module-global flag so later tests are unaffected.
            logging.raiseExceptions = old_raise

    def test_stream_setting(self):
        """
        Test setting the handler's stream
        """
        h = logging.StreamHandler()
        stream = io.StringIO()
        old = h.setStream(stream)
        # The handler's default stream is sys.stderr; setStream returns
        # the previous stream when it actually changes.
        self.assertIs(old, sys.stderr)
        actual = h.setStream(old)
        self.assertIs(actual, stream)
        # test that setting to existing value returns None
        actual = h.setStream(old)
        self.assertIsNone(actual)

    def test_can_represent_stream_with_int_name(self):
        # repr() must not assume the stream's name attribute is a string.
        h = logging.StreamHandler(StreamWithIntName())
        self.assertEqual(repr(h), '<StreamHandler 2 (NOTSET)>')
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
class TestSMTPServer(smtpd.SMTPServer):
    """
    A test SMTP server that runs on a background thread.

    :param addr: A (host, port) tuple the server listens on.  Pass port 0
                 to let the OS pick; the chosen port is then available as
                 the server's *port* attribute for client connections.
    :param handler: Callable invoked for each incoming message with the
                    client address tuple, the sender, the recipient list
                    and the message data.
    :param poll_interval: Interval, in seconds, for the underlying
                          :func:`select`/:func:`poll` call made by
                          :func:`asyncore.loop`.
    :param sockmap: Dict used to hold the :class:`asyncore.dispatcher`
                    instances, keeping :mod:`asyncore` global state clean.
    """

    def __init__(self, addr, handler, poll_interval, sockmap):
        smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
                                  decode_data=True)
        # Remember the port the OS actually assigned.
        self.port = self.socket.getsockname()[1]
        self.poll_interval = poll_interval
        self._handler = handler
        self._thread = None
        self._quit = False

    def process_message(self, peer, mailfrom, rcpttos, data):
        """Forward one received message to the registered handler.

        :param peer: The client (host, port) tuple.
        :param mailfrom: The address of the sender.
        :param rcpttos: The addresses of the recipients.
        :param data: The message.
        """
        self._handler(peer, mailfrom, rcpttos, data)

    def start(self):
        """Spin up the asyncore loop on a daemon thread."""
        worker = threading.Thread(target=self.serve_forever,
                                  args=(self.poll_interval,))
        worker.daemon = True
        self._thread = worker
        worker.start()

    def serve_forever(self, poll_interval):
        """Pump :func:`asyncore.loop` one pass at a time until stop().

        :param poll_interval: Interval, in seconds, for the underlying
                              :func:`select`/:func:`poll` call.
        """
        while not self._quit:
            asyncore.loop(poll_interval, map=self._map, count=1)

    def stop(self):
        """Signal the loop to exit, join its thread, and close sockets."""
        self._quit = True
        threading_helper.join_thread(self._thread)
        self._thread = None
        self.close()
        asyncore.close_all(map=self._map, ignore_all=True)
class ControlMixin(object):
    """
    Mixin that runs a server on a background thread with programmatic
    shutdown.  Request handling is simplified: instead of deriving a
    RequestHandler subclass, callers supply a callable that is passed
    each received request.

    :param handler: Callable invoked with a single argument -- the
                    request -- on the server thread, so requests are
                    processed serially.  Not Web scale ;-), but fine for
                    testing.
    :param poll_interval: The polling interval in seconds.
    """

    def __init__(self, handler, poll_interval):
        self._handler = handler
        self._thread = None
        self.poll_interval = poll_interval
        # Set once the server thread has entered its service loop.
        self.ready = threading.Event()

    def start(self):
        """Launch serve_forever() on a daemon thread."""
        worker = threading.Thread(target=self.serve_forever,
                                  args=(self.poll_interval,))
        worker.daemon = True
        self._thread = worker
        worker.start()

    def serve_forever(self, poll_interval):
        """Flag readiness, then delegate to the cooperating base class."""
        self.ready.set()
        super(ControlMixin, self).serve_forever(poll_interval)

    def stop(self):
        """Shut the server down and wait for its thread to terminate."""
        self.shutdown()
        if self._thread is not None:
            threading_helper.join_thread(self._thread)
            self._thread = None
        self.server_close()
        self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
    """
    An HTTP server which is controllable using :class:`ControlMixin`.
    :param addr: A tuple with the IP address and port to listen on.
    :param handler: A handler callable which will be called with a
                    single parameter - the request - in order to
                    process the request.
    :param poll_interval: The polling interval in seconds.
    :param log: Pass ``True`` to enable log messages.
    :param sslctx: Optional ``ssl.SSLContext``; when given, accepted
                   connections are wrapped for TLS in get_request().
    """
    def __init__(self, addr, handler, poll_interval=0.5,
                 log=False, sslctx=None):
        class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
            def __getattr__(self, name, default=None):
                # Route every do_GET/do_POST/... lookup to the single
                # process_request method so one callable serves all verbs.
                if name.startswith('do_'):
                    return self.process_request
                raise AttributeError(name)
            def process_request(self):
                # Delegate to the callable stored on the enclosing server.
                self.server._handler(self)
            def log_message(self, format, *args):
                # Stay quiet unless the server was created with log=True.
                if log:
                    super(DelegatingHTTPRequestHandler,
                          self).log_message(format, *args)
        HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
        ControlMixin.__init__(self, handler, poll_interval)
        self.sslctx = sslctx
    def get_request(self):
        try:
            sock, addr = self.socket.accept()
            if self.sslctx:
                sock = self.sslctx.wrap_socket(sock, server_side=True)
        except OSError as e:
            # socket errors are silenced by the caller, print them here
            sys.stderr.write("Got an error:\n%s\n" % e)
            raise
        return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
    """
    A TCP server controllable through :class:`ControlMixin`.

    :param addr: (IP address, port) tuple to listen on.
    :param handler: Callable invoked with each request to process it.
    :param poll_interval: The polling interval in seconds.
    :bind_and_activate: When True (the default) the socket is bound and
                        put into the listening state immediately.  When
                        False, call :meth:`server_bind` and
                        :meth:`server_activate` yourself at some later
                        point before :meth:`start`.
    """

    allow_reuse_address = True

    def __init__(self, addr, handler, poll_interval=0.5,
                 bind_and_activate=True):
        class DelegatingTCPRequestHandler(StreamRequestHandler):
            # Hand the raw request straight to the server's callable.
            def handle(self):
                self.server._handler(self)

        handler_cls = DelegatingTCPRequestHandler
        ThreadingTCPServer.__init__(self, addr, handler_cls,
                                    bind_and_activate)
        ControlMixin.__init__(self, handler, poll_interval)

    def server_bind(self):
        # Record the OS-assigned port (useful when binding to port 0).
        super().server_bind()
        self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
    """
    A UDP server controllable through :class:`ControlMixin`.

    :param addr: (IP address, port) tuple to listen on.
    :param handler: Callable invoked with each request to process it.
    :param poll_interval: Polling interval for shutdown requests, in
                          seconds.
    :bind_and_activate: When True (the default) the socket is bound and
                        put into the listening state immediately.  When
                        False, call :meth:`server_bind` and
                        :meth:`server_activate` yourself at some later
                        point before :meth:`start`.
    """

    def __init__(self, addr, handler, poll_interval=0.5,
                 bind_and_activate=True):
        class DelegatingUDPRequestHandler(DatagramRequestHandler):
            def handle(self):
                self.server._handler(self)

            def finish(self):
                reply = self.wfile.getvalue()
                if reply:
                    try:
                        super().finish()
                    except OSError:
                        # Sending a reply after server_close() is expected
                        # to fail; anything else is a real error.
                        if not self.server._closed:
                            raise

        ThreadingUDPServer.__init__(self, addr,
                                    DelegatingUDPRequestHandler,
                                    bind_and_activate)
        ControlMixin.__init__(self, handler, poll_interval)
        self._closed = False

    def server_bind(self):
        # Record the OS-assigned port (useful when binding to port 0).
        super().server_bind()
        self.port = self.socket.getsockname()[1]

    def server_close(self):
        super().server_close()
        self._closed = True
if hasattr(socket, "AF_UNIX"):
    # Unix-domain-socket variants of the stream/datagram test servers;
    # only defined on platforms that support AF_UNIX.
    class TestUnixStreamServer(TestTCPServer):
        address_family = socket.AF_UNIX
    class TestUnixDatagramServer(TestUDPServer):
        address_family = socket.AF_UNIX
# - end of server_helper section
class SMTPHandlerTest(BaseTest):
    # bpo-14314, bpo-19665, bpo-34092: don't wait forever
    TIMEOUT = support.LONG_TIMEOUT
    def test_basic(self):
        # End-to-end: a record handled by SMTPHandler must arrive at a
        # local test SMTP server as a well-formed message.
        sockmap = {}
        server = TestSMTPServer((socket_helper.HOST, 0), self.process_message, 0.001,
                                sockmap)
        server.start()
        addr = (socket_helper.HOST, server.port)
        h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
                                         timeout=self.TIMEOUT)
        self.assertEqual(h.toaddrs, ['you'])
        self.messages = []
        r = logging.makeLogRecord({'msg': 'Hello \u2713'})
        self.handled = threading.Event()
        h.handle(r)
        # Block until the server-side callback has run (or time out).
        self.handled.wait(self.TIMEOUT)
        server.stop()
        self.assertTrue(self.handled.is_set())
        self.assertEqual(len(self.messages), 1)
        peer, mailfrom, rcpttos, data = self.messages[0]
        self.assertEqual(mailfrom, 'me')
        self.assertEqual(rcpttos, ['you'])
        self.assertIn('\nSubject: Log\n', data)
        self.assertTrue(data.endswith('\n\nHello \u2713'))
        h.close()
    def process_message(self, *args):
        # Runs on the SMTP server thread for each delivered message.
        self.messages.append(args)
        self.handled.set()
class MemoryHandlerTest(BaseTest):
    """Tests for the MemoryHandler."""
    # Do not bother with a logger name group.
    expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
    def setUp(self):
        BaseTest.setUp(self)
        # Buffer up to 10 records; flush to the root handler when a
        # record of WARNING or above arrives (or the buffer fills).
        self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
                                                       self.root_hdlr)
        self.mem_logger = logging.getLogger('mem')
        self.mem_logger.propagate = 0
        self.mem_logger.addHandler(self.mem_hdlr)
    def tearDown(self):
        self.mem_hdlr.close()
        BaseTest.tearDown(self)
    def test_flush(self):
        # The memory handler flushes to its target handler based on specific
        # criteria (message count and message level).
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines([])
        self.mem_logger.info(self.next_message())
        self.assert_log_lines([])
        # This will flush because the level is >= logging.WARNING
        self.mem_logger.warning(self.next_message())
        lines = [
            ('DEBUG', '1'),
            ('INFO', '2'),
            ('WARNING', '3'),
        ]
        self.assert_log_lines(lines)
        for n in (4, 14):
            for i in range(9):
                self.mem_logger.debug(self.next_message())
            self.assert_log_lines(lines)
            # This will flush because it's the 10th message since the last
            # flush.
            self.mem_logger.debug(self.next_message())
            lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
            self.assert_log_lines(lines)
        # One more record below both thresholds must stay buffered.
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines(lines)
    def test_flush_on_close(self):
        """
        Test that the flush-on-close configuration works as expected.
        """
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines([])
        self.mem_logger.info(self.next_message())
        self.assert_log_lines([])
        self.mem_logger.removeHandler(self.mem_hdlr)
        # Default behaviour is to flush on close. Check that it happens.
        self.mem_hdlr.close()
        lines = [
            ('DEBUG', '1'),
            ('INFO', '2'),
        ]
        self.assert_log_lines(lines)
        # Now configure for flushing not to be done on close.
        self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
                                                       self.root_hdlr,
                                                       False)
        self.mem_logger.addHandler(self.mem_hdlr)
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines(lines)  # no change
        self.mem_logger.info(self.next_message())
        self.assert_log_lines(lines)  # no change
        self.mem_logger.removeHandler(self.mem_hdlr)
        self.mem_hdlr.close()
        # assert that no new lines have been added
        self.assert_log_lines(lines)  # no change
    def test_race_between_set_target_and_flush(self):
        # Regression test: clearing the target concurrently with records
        # being flushed must not raise.
        class MockRaceConditionHandler:
            def __init__(self, mem_hdlr):
                self.mem_hdlr = mem_hdlr
                self.threads = []
            def removeTarget(self):
                self.mem_hdlr.setTarget(None)
            def handle(self, msg):
                # Each handled record spawns a thread that races to drop
                # the memory handler's target.
                thread = threading.Thread(target=self.removeTarget)
                self.threads.append(thread)
                thread.start()
        target = MockRaceConditionHandler(self.mem_hdlr)
        try:
            self.mem_hdlr.setTarget(target)
            for _ in range(10):
                time.sleep(0.005)
                self.mem_logger.info("not flushed")
                self.mem_logger.warning("flushed")
        finally:
            for thread in target.threads:
                threading_helper.join_thread(thread)
class ExceptionFormatter(logging.Formatter):
    """Formatter that renders exceptions as ``Got a [<type name>]``."""

    def formatException(self, ei):
        # ei is a (type, value, traceback) triple; only the type matters.
        exc_type = ei[0]
        return "Got a [{}]".format(exc_type.__name__)
class ConfigFileTest(BaseTest):

    """Reading logging config from a .ini-style config file."""

    check_no_resource_warning = warnings_helper.check_no_resource_warning
    expected_log_pat = r"^(\w+) \+\+ (\w+)$"

    # config0 is a standard configuration.
    config0 = """
    [loggers]
    keys=root
    [handlers]
    keys=hand1
    [formatters]
    keys=form1
    [logger_root]
    level=WARNING
    handlers=hand1
    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)
    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=
    """

    # config1 adds a little to the standard configuration.
    config1 = """
    [loggers]
    keys=root,parser
    [handlers]
    keys=hand1
    [formatters]
    keys=form1
    [logger_root]
    level=WARNING
    handlers=
    [logger_parser]
    level=DEBUG
    handlers=hand1
    propagate=1
    qualname=compiler.parser
    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)
    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=
    """

    # config1a moves the handler to the root.
    config1a = """
    [loggers]
    keys=root,parser
    [handlers]
    keys=hand1
    [formatters]
    keys=form1
    [logger_root]
    level=WARNING
    handlers=hand1
    [logger_parser]
    level=DEBUG
    handlers=
    propagate=1
    qualname=compiler.parser
    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)
    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=
    """

    # config2 has a subtle configuration error that should be reported
    config2 = config1.replace("sys.stdout", "sys.stbout")

    # config3 has a less subtle configuration error
    config3 = config1.replace("formatter=form1", "formatter=misspelled_name")

    # config4 specifies a custom formatter class to be loaded
    config4 = """
    [loggers]
    keys=root
    [handlers]
    keys=hand1
    [formatters]
    keys=form1
    [logger_root]
    level=NOTSET
    handlers=hand1
    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)
    [formatter_form1]
    class=""" + __name__ + """.ExceptionFormatter
    format=%(levelname)s:%(name)s:%(message)s
    datefmt=
    """

    # config5 specifies a custom handler class to be loaded
    config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')

    # config6 uses ', ' delimiters in the handlers and formatters sections
    config6 = """
    [loggers]
    keys=root,parser
    [handlers]
    keys=hand1, hand2
    [formatters]
    keys=form1, form2
    [logger_root]
    level=WARNING
    handlers=
    [logger_parser]
    level=DEBUG
    handlers=hand1
    propagate=1
    qualname=compiler.parser
    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)
    [handler_hand2]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stderr,)
    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=
    [formatter_form2]
    format=%(message)s
    datefmt=
    """

    # config7 adds a compiler logger, and uses kwargs instead of args.
    config7 = """
    [loggers]
    keys=root,parser,compiler
    [handlers]
    keys=hand1
    [formatters]
    keys=form1
    [logger_root]
    level=WARNING
    handlers=hand1
    [logger_compiler]
    level=DEBUG
    handlers=
    propagate=1
    qualname=compiler
    [logger_parser]
    level=DEBUG
    handlers=
    propagate=1
    qualname=compiler.parser
    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    kwargs={'stream': sys.stdout,}
    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=
    """

    # config 8, check for resource warning
    config8 = r"""
    [loggers]
    keys=root
    [handlers]
    keys=file
    [formatters]
    keys=
    [logger_root]
    level=DEBUG
    handlers=file
    [handler_file]
    class=FileHandler
    level=DEBUG
    args=("{tempfile}",)
    kwargs={{"encoding": "utf-8"}}
    """

    disable_test = """
    [loggers]
    keys=root
    [handlers]
    keys=screen
    [formatters]
    keys=
    [logger_root]
    level=DEBUG
    handlers=screen
    [handler_screen]
    level=DEBUG
    class=StreamHandler
    args=(sys.stdout,)
    formatter=
    """

    def apply_config(self, conf, **kwargs):
        # Dedent so the class-level config strings above parse cleanly.
        file = io.StringIO(textwrap.dedent(conf))
        logging.config.fileConfig(file, encoding="utf-8", **kwargs)

    def test_config0_ok(self):
        # A simple config file which overrides the default settings.
        with support.captured_stdout() as output:
            self.apply_config(self.config0)
            logger = logging.getLogger()
            # Won't output anything
            logger.info(self.next_message())
            # Outputs a message
            logger.error(self.next_message())
            self.assert_log_lines([
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config0_using_cp_ok(self):
        # A simple config file which overrides the default settings.
        with support.captured_stdout() as output:
            file = io.StringIO(textwrap.dedent(self.config0))
            # fileConfig also accepts a pre-built ConfigParser instance.
            cp = configparser.ConfigParser()
            cp.read_file(file)
            logging.config.fileConfig(cp)
            logger = logging.getLogger()
            # Won't output anything
            logger.info(self.next_message())
            # Outputs a message
            logger.error(self.next_message())
            self.assert_log_lines([
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config1_ok(self, config=config1):
        # A config file defining a sub-parser as well.
        with support.captured_stdout() as output:
            self.apply_config(config)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config2_failure(self):
        # A simple config file which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config2)

    def test_config3_failure(self):
        # A simple config file which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config3)

    def test_config4_ok(self):
        # A config file specifying a custom formatter class.
        with support.captured_stdout() as output:
            self.apply_config(self.config4)
            logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([])

    def test_config5_ok(self):
        self.test_config1_ok(config=self.config5)

    def test_config6_ok(self):
        self.test_config1_ok(config=self.config6)

    def test_config7_ok(self):
        # First apply config1a (handler on root), then config7 on top of it.
        with support.captured_stdout() as output:
            self.apply_config(self.config1a)
            logger = logging.getLogger("compiler.parser")
            # See issue #11424. compiler-hyphenated sorts
            # between compiler and compiler.xyz and this
            # was preventing compiler.xyz from being included
            # in the child loggers of compiler because of an
            # overzealous loop termination condition.
            hyphenated = logging.getLogger('compiler-hyphenated')
            # All will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
                ('CRITICAL', '3'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with support.captured_stdout() as output:
            self.apply_config(self.config7)
            logger = logging.getLogger("compiler.parser")
            self.assertFalse(logger.disabled)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            # Will not appear
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '4'),
                ('ERROR', '5'),
                ('INFO', '6'),
                ('ERROR', '7'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config8_ok(self):
        # Re-applying a FileHandler config must not leak the old stream.
        def cleanup(h1, fn):
            h1.close()
            os.remove(fn)

        with self.check_no_resource_warning():
            fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
            os.close(fd)

            # Replace single backslash with double backslash in windows
            # to avoid unicode error during string formatting
            if os.name == "nt":
                fn = fn.replace("\\", "\\\\")

            config8 = self.config8.format(tempfile=fn)

            self.apply_config(config8)
            self.apply_config(config8)

        handler = logging.root.handlers[0]
        self.addCleanup(cleanup, handler, fn)

    def test_logger_disabling(self):
        self.apply_config(self.disable_test)
        logger = logging.getLogger('some_pristine_logger')
        self.assertFalse(logger.disabled)
        # Re-applying with default disable_existing_loggers disables it...
        self.apply_config(self.disable_test)
        self.assertTrue(logger.disabled)
        # ...and disable_existing_loggers=False re-enables it.
        self.apply_config(self.disable_test, disable_existing_loggers=False)
        self.assertFalse(logger.disabled)

    def test_config_set_handler_names(self):
        test_config = """
        [loggers]
        keys=root
        [handlers]
        keys=hand1
        [formatters]
        keys=form1
        [logger_root]
        handlers=hand1
        [handler_hand1]
        class=StreamHandler
        formatter=form1
        [formatter_form1]
        format=%(levelname)s ++ %(message)s
        """
        self.apply_config(test_config)
        # The section name after 'handler_' becomes the handler's .name.
        self.assertEqual(logging.getLogger().handlers[0].name, 'hand1')

    def test_defaults_do_no_interpolation(self):
        """bpo-33802 defaults should not get interpolated"""
        ini = textwrap.dedent("""
            [formatters]
            keys=default
            [formatter_default]
            [handlers]
            keys=console
            [handler_console]
            class=logging.StreamHandler
            args=tuple()
            [loggers]
            keys=root
            [logger_root]
            formatter=default
            handlers=console
            """).strip()
        fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.ini')
        try:
            os.write(fd, ini.encode('ascii'))
            os.close(fd)
            logging.config.fileConfig(
                fn,
                encoding="utf-8",
                defaults=dict(
                    version=1,
                    disable_existing_loggers=False,
                    formatters={
                        "generic": {
                            "format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
                            "datefmt": "[%Y-%m-%d %H:%M:%S %z]",
                            "class": "logging.Formatter"
                        },
                    },
                )
            )
        finally:
            os.unlink(fn)
class SocketHandlerTest(BaseTest):

    """Test for SocketHandler objects."""

    server_class = TestTCPServer
    address = ('localhost', 0)

    def setUp(self):
        """Set up a TCP server to receive log messages, and a SocketHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # Issue #29177: deal with errors that happen during setup
        self.server = self.sock_hdlr = self.server_exception = None
        try:
            self.server = server = self.server_class(self.address,
                                                     self.handle_socket, 0.01)
            server.start()
            # Uncomment next line to test error recovery in setUp()
            # raise OSError('dummy error raised')
        except OSError as e:
            self.server_exception = e
            return
        server.ready.wait()
        hcls = logging.handlers.SocketHandler
        if isinstance(server.server_address, tuple):
            self.sock_hdlr = hcls('localhost', server.port)
        else:
            # Unix-domain server: the address is a path, port is None.
            self.sock_hdlr = hcls(server.server_address, None)
        self.log_output = ''
        self.root_logger.removeHandler(self.root_logger.handlers[0])
        self.root_logger.addHandler(self.sock_hdlr)
        # Released once per record received, so tests can block until the
        # server has actually processed each message.
        self.handled = threading.Semaphore(0)

    def tearDown(self):
        """Shutdown the TCP server."""
        try:
            if self.sock_hdlr:
                self.root_logger.removeHandler(self.sock_hdlr)
                self.sock_hdlr.close()
            if self.server:
                self.server.stop()
        finally:
            BaseTest.tearDown(self)

    def handle_socket(self, request):
        # Wire format: a 4-byte big-endian length prefix followed by a
        # pickled dict of LogRecord attributes.
        conn = request.connection
        while True:
            chunk = conn.recv(4)
            if len(chunk) < 4:
                break
            slen = struct.unpack(">L", chunk)[0]
            chunk = conn.recv(slen)
            while len(chunk) < slen:
                chunk = chunk + conn.recv(slen - len(chunk))
            obj = pickle.loads(chunk)
            record = logging.makeLogRecord(obj)
            self.log_output += record.msg + '\n'
            self.handled.release()

    def test_output(self):
        # The log message sent to the SocketHandler is properly received.
        if self.server_exception:
            self.skipTest(self.server_exception)
        logger = logging.getLogger("tcp")
        logger.error("spam")
        self.handled.acquire()
        logger.debug("eggs")
        self.handled.acquire()
        self.assertEqual(self.log_output, "spam\neggs\n")

    def test_noserver(self):
        if self.server_exception:
            self.skipTest(self.server_exception)
        # Avoid timing-related failures due to SocketHandler's own hard-wired
        # one-second timeout on socket.create_connection() (issue #16264).
        self.sock_hdlr.retryStart = 2.5
        # Kill the server
        self.server.stop()
        # The logging call should try to connect, which should fail
        try:
            raise RuntimeError('Deliberate mistake')
        except RuntimeError:
            self.root_logger.exception('Never sent')
        self.root_logger.error('Never sent, either')
        now = time.time()
        self.assertGreater(self.sock_hdlr.retryTime, now)
        time.sleep(self.sock_hdlr.retryTime - now + 0.001)
        self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSocketHandlerTest(SocketHandlerTest):

    """Test for SocketHandler with unix sockets."""

    if hasattr(socket, "AF_UNIX"):
        # Only referenced where AF_UNIX exists; elsewhere the decorator
        # above skips the whole class.
        server_class = TestUnixStreamServer

    def setUp(self):
        # override the definition in the base class
        self.address = _get_temp_domain_socket()
        SocketHandlerTest.setUp(self)

    def tearDown(self):
        SocketHandlerTest.tearDown(self)
        # Remove the socket file created for this test run.
        os_helper.unlink(self.address)
class DatagramHandlerTest(BaseTest):

    """Test for DatagramHandler."""

    server_class = TestUDPServer
    address = ('localhost', 0)

    def setUp(self):
        """Set up a UDP server to receive log messages, and a DatagramHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # Issue #29177: deal with errors that happen during setup
        self.server = self.sock_hdlr = self.server_exception = None
        try:
            self.server = server = self.server_class(self.address,
                                                     self.handle_datagram, 0.01)
            server.start()
            # Uncomment next line to test error recovery in setUp()
            # raise OSError('dummy error raised')
        except OSError as e:
            self.server_exception = e
            return
        server.ready.wait()
        hcls = logging.handlers.DatagramHandler
        if isinstance(server.server_address, tuple):
            self.sock_hdlr = hcls('localhost', server.port)
        else:
            # Unix-domain server: the address is a path, port is None.
            self.sock_hdlr = hcls(server.server_address, None)
        self.log_output = ''
        self.root_logger.removeHandler(self.root_logger.handlers[0])
        self.root_logger.addHandler(self.sock_hdlr)
        self.handled = threading.Event()

    def tearDown(self):
        """Shutdown the UDP server."""
        try:
            if self.server:
                self.server.stop()
            if self.sock_hdlr:
                self.root_logger.removeHandler(self.sock_hdlr)
                self.sock_hdlr.close()
        finally:
            BaseTest.tearDown(self)

    def handle_datagram(self, request):
        # Datagrams carry the same length-prefixed pickle payload as the
        # stream handler; the 4-byte prefix is skipped, not validated.
        slen = struct.pack('>L', 0)  # length of prefix
        packet = request.packet[len(slen):]
        obj = pickle.loads(packet)
        record = logging.makeLogRecord(obj)
        self.log_output += record.msg + '\n'
        self.handled.set()

    def test_output(self):
        # The log message sent to the DatagramHandler is properly received.
        if self.server_exception:
            self.skipTest(self.server_exception)
        logger = logging.getLogger("udp")
        logger.error("spam")
        self.handled.wait()
        self.handled.clear()
        logger.error("eggs")
        self.handled.wait()
        self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):

    """Test for DatagramHandler using Unix sockets."""

    if hasattr(socket, "AF_UNIX"):
        # Only referenced where AF_UNIX exists; elsewhere the decorator
        # above skips the whole class.
        server_class = TestUnixDatagramServer

    def setUp(self):
        # override the definition in the base class
        self.address = _get_temp_domain_socket()
        DatagramHandlerTest.setUp(self)

    def tearDown(self):
        DatagramHandlerTest.tearDown(self)
        # Remove the socket file created for this test run.
        os_helper.unlink(self.address)
class SysLogHandlerTest(BaseTest):

    """Test for SysLogHandler using UDP."""

    server_class = TestUDPServer
    address = ('localhost', 0)

    def setUp(self):
        """Set up a UDP server to receive log messages, and a SysLogHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # Issue #29177: deal with errors that happen during setup
        self.server = self.sl_hdlr = self.server_exception = None
        try:
            self.server = server = self.server_class(self.address,
                                                     self.handle_datagram, 0.01)
            server.start()
            # Uncomment next line to test error recovery in setUp()
            # raise OSError('dummy error raised')
        except OSError as e:
            self.server_exception = e
            return
        server.ready.wait()
        hcls = logging.handlers.SysLogHandler
        if isinstance(server.server_address, tuple):
            self.sl_hdlr = hcls((server.server_address[0], server.port))
        else:
            # Unix-domain server: pass the socket path directly.
            self.sl_hdlr = hcls(server.server_address)
        self.log_output = ''
        self.root_logger.removeHandler(self.root_logger.handlers[0])
        self.root_logger.addHandler(self.sl_hdlr)
        self.handled = threading.Event()

    def tearDown(self):
        """Shutdown the server."""
        try:
            if self.server:
                self.server.stop()
            if self.sl_hdlr:
                self.root_logger.removeHandler(self.sl_hdlr)
                self.sl_hdlr.close()
        finally:
            BaseTest.tearDown(self)

    def handle_datagram(self, request):
        # Keep the raw packet; assertions below check the exact bytes.
        self.log_output = request.packet
        self.handled.set()

    def test_output(self):
        if self.server_exception:
            self.skipTest(self.server_exception)
        # The log message sent to the SysLogHandler is properly received.
        logger = logging.getLogger("slh")
        logger.error("sp\xe4m")
        self.handled.wait()
        # '<11>' is the priority prefix (presumably LOG_USER facility with
        # ERROR severity: 1*8 + 3 = 11 -- verify against the handler docs);
        # the message is UTF-8 encoded and NUL-terminated by default.
        self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
        self.handled.clear()
        # Disabling append_nul drops the trailing NUL byte.
        self.sl_hdlr.append_nul = False
        logger.error("sp\xe4m")
        self.handled.wait()
        self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
        self.handled.clear()
        # An ident string is prepended verbatim to the message.
        self.sl_hdlr.ident = "h\xe4m-"
        logger.error("sp\xe4m")
        self.handled.wait()
        self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSysLogHandlerTest(SysLogHandlerTest):

    """Test for SysLogHandler with Unix sockets."""

    if hasattr(socket, "AF_UNIX"):
        # Only referenced where AF_UNIX exists; elsewhere the decorator
        # above skips the whole class.
        server_class = TestUnixDatagramServer

    def setUp(self):
        # override the definition in the base class
        self.address = _get_temp_domain_socket()
        SysLogHandlerTest.setUp(self)

    def tearDown(self):
        SysLogHandlerTest.tearDown(self)
        # Remove the socket file created for this test run.
        os_helper.unlink(self.address)
@unittest.skipUnless(socket_helper.IPV6_ENABLED,
                     'IPv6 support required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):

    """Test for SysLogHandler with IPv6 host."""

    server_class = TestUDPServer
    address = ('::1', 0)

    def setUp(self):
        # Temporarily switch the shared server class to IPv6; restored in
        # tearDown so sibling tests keep using IPv4.
        self.server_class.address_family = socket.AF_INET6
        super(IPv6SysLogHandlerTest, self).setUp()

    def tearDown(self):
        self.server_class.address_family = socket.AF_INET
        super(IPv6SysLogHandlerTest, self).tearDown()
class HTTPHandlerTest(BaseTest):
    """Test for HTTPHandler."""

    def setUp(self):
        """Set up an HTTP server to receive log messages, and a HTTPHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        self.handled = threading.Event()

    def handle_request(self, request):
        # Runs on the HTTP server thread for each request; captures the
        # method, the parsed URL and (for POST) the body for assertions.
        self.command = request.command
        self.log_data = urlparse(request.path)
        if self.command == 'POST':
            try:
                rlen = int(request.headers['Content-Length'])
                self.post_data = request.rfile.read(rlen)
            except:
                self.post_data = None
        request.send_response(200)
        request.end_headers()
        self.handled.set()

    def test_output(self):
        # The log message sent to the HTTPHandler is properly received.
        logger = logging.getLogger("http")
        root_logger = self.root_logger
        root_logger.removeHandler(self.root_logger.handlers[0])
        # Exercise both plain HTTP and HTTPS (when ssl is available).
        for secure in (False, True):
            addr = ('localhost', 0)
            if secure:
                try:
                    import ssl
                except ImportError:
                    sslctx = None
                else:
                    here = os.path.dirname(__file__)
                    localhost_cert = os.path.join(here, "keycert.pem")
                    sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
                    sslctx.load_cert_chain(localhost_cert)
                    # Client context trusts the same self-signed cert.
                    context = ssl.create_default_context(cafile=localhost_cert)
            else:
                sslctx = None
                context = None
            self.server = server = TestHTTPServer(addr, self.handle_request,
                                                  0.01, sslctx=sslctx)
            server.start()
            server.ready.wait()
            host = 'localhost:%d' % server.server_port
            secure_client = secure and sslctx
            self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
                                                       secure=secure_client,
                                                       context=context,
                                                       credentials=('foo', 'bar'))
            self.log_data = None
            root_logger.addHandler(self.h_hdlr)

            for method in ('GET', 'POST'):
                self.h_hdlr.method = method
                self.handled.clear()
                msg = "sp\xe4m"
                logger.error(msg)
                self.handled.wait()
                self.assertEqual(self.log_data.path, '/frob')
                self.assertEqual(self.command, method)
                # GET encodes the record in the query string; POST in the
                # request body.
                if method == 'GET':
                    d = parse_qs(self.log_data.query)
                else:
                    d = parse_qs(self.post_data.decode('utf-8'))
                self.assertEqual(d['name'], ['http'])
                self.assertEqual(d['funcName'], ['test_output'])
                self.assertEqual(d['msg'], [msg])

            self.server.stop()
            self.root_logger.removeHandler(self.h_hdlr)
            self.h_hdlr.close()
class MemoryTest(BaseTest):

    """Test memory persistence of logger objects."""

    def setUp(self):
        """Create a dict to remember potentially destroyed objects."""
        BaseTest.setUp(self)
        self._survivors = {}

    def _watch_for_survival(self, *args):
        """Register weak references to the given objects so later checks
        can detect whether any of them were garbage-collected."""
        for obj in args:
            self._survivors[(id(obj), repr(obj))] = weakref.ref(obj)

    def _assertTruesurvival(self):
        """Assert that all objects watched for survival have survived."""
        # Trigger cycle breaking first, so only genuinely dead objects
        # are reported.
        gc.collect()
        dead = [repr_
                for (_, repr_), ref in self._survivors.items()
                if ref() is None]
        if dead:
            self.fail("%d objects should have survived "
                      "but have been destroyed: %s" % (len(dead), ", ".join(dead)))

    def test_persistent_loggers(self):
        # Logger objects are persistent and retain their configuration, even
        # if visible references are destroyed.
        self.root_logger.setLevel(logging.INFO)
        foo = logging.getLogger("foo")
        self._watch_for_survival(foo)
        foo.setLevel(logging.DEBUG)
        self.root_logger.debug(self.next_message())
        foo.debug(self.next_message())
        self.assert_log_lines([
            ('foo', 'DEBUG', '2'),
        ])
        del foo
        # foo has survived.
        self._assertTruesurvival()
        # foo has retained its settings.
        bar = logging.getLogger("foo")
        bar.debug(self.next_message())
        self.assert_log_lines([
            ('foo', 'DEBUG', '2'),
            ('foo', 'DEBUG', '3'),
        ])
class EncodingTest(BaseTest):

    def test_encoding_plain_file(self):
        # In Python 2.x, a plain file object is treated as having no encoding.
        log = logging.getLogger("test")
        fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
        os.close(fd)
        # the non-ascii data we write to the log.
        data = "foo\x80"
        try:
            handler = logging.FileHandler(fn, encoding="utf-8")
            log.addHandler(handler)
            try:
                # write non-ascii data to the log.
                log.warning(data)
            finally:
                log.removeHandler(handler)
                handler.close()
            # check we wrote exactly those bytes, ignoring trailing \n etc
            with open(fn, encoding="utf-8") as f:
                self.assertEqual(f.read().rstrip(), data)
        finally:
            if os.path.isfile(fn):
                os.remove(fn)

    def test_encoding_cyrillic_unicode(self):
        log = logging.getLogger("test")
        # Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
        message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
        # Ensure it's written in a Cyrillic encoding
        writer_class = codecs.getwriter('cp1251')
        writer_class.encoding = 'cp1251'
        stream = io.BytesIO()
        handler = logging.StreamHandler(writer_class(stream, 'strict'))
        log.addHandler(handler)
        try:
            log.warning(message)
        finally:
            log.removeHandler(handler)
            handler.close()
        # check we wrote exactly those bytes, ignoring trailing \n etc
        # Compare against what the data should be when encoded in CP-1251
        self.assertEqual(stream.getvalue(),
                         b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):

    def test_warnings(self):
        # captureWarnings(True) reroutes warnings through the
        # "py.warnings" logger; verify the warning text shows up on a
        # handler attached to that logger.
        with warnings.catch_warnings():
            logging.captureWarnings(True)
            self.addCleanup(logging.captureWarnings, False)
            warnings.filterwarnings("always", category=UserWarning)
            stream = io.StringIO()
            h = logging.StreamHandler(stream)
            logger = logging.getLogger("py.warnings")
            logger.addHandler(h)
            warnings.warn("I'm warning you...")
            logger.removeHandler(h)
            s = stream.getvalue()
            h.close()
            self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)

            # See if an explicit file uses the original implementation
            # (showwarning with an explicit file argument bypasses logging).
            a_file = io.StringIO()
            warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
                                 a_file, "Dummy line")
            s = a_file.getvalue()
            a_file.close()
            self.assertEqual(s,
                "dummy.py:42: UserWarning: Explicit\n Dummy line\n")

    def test_warnings_no_handlers(self):
        with warnings.catch_warnings():
            logging.captureWarnings(True)
            self.addCleanup(logging.captureWarnings, False)

            # confirm our assumption: no loggers are set
            logger = logging.getLogger("py.warnings")
            self.assertEqual(logger.handlers, [])

            # A captured warning with no handlers configured should get
            # exactly one NullHandler added automatically.
            warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
            self.assertEqual(len(logger.handlers), 1)
            self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
    """Formatter factory referenced via '()' in the dict-config tests."""
    return logging.Formatter(format, datefmt=datefmt)
class myCustomFormatter:
    # Deliberately NOT a logging.Formatter subclass: used by the
    # custom_formatter_class_validate3 config to check that dictConfig
    # rejects a formatter class of the wrong type.
    def __init__(self, fmt, datefmt=None):
        pass
def handlerFunc():
    """Handler factory referenced via '()' in the dict-config tests."""
    handler = logging.StreamHandler()
    return handler
class CustomHandler(logging.StreamHandler):
    # Used by config5/config6 to exercise loading a handler class by its
    # dotted module path; adds no behavior of its own.
    pass
class ConfigDictTest(BaseTest):

    """Reading logging config from a dictionary."""

    check_no_resource_warning = warnings_helper.check_no_resource_warning
    # Pattern matching the '%(levelname)s ++ %(message)s' format used by
    # most of the configs below.
    expected_log_pat = r"^(\w+) \+\+ (\w+)$"

    # config0 is a standard configuration.
    config0 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'root' : {
            'level' : 'WARNING',
            'handlers' : ['hand1'],
        },
    }

    # config1 adds a little to the standard configuration.
    config1 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # config1a moves the handler to the root. Used with config8a
    config1a = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
            },
        },
        'root' : {
            'level' : 'WARNING',
            'handlers' : ['hand1'],
        },
    }

    # config2 has a subtle configuration error that should be reported
    # ('stream' points at the nonexistent sys.stdbout).
    config2 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdbout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # As config1 but with a misspelt level on a handler
    config2a = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NTOSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # As config1 but with a misspelt level on a logger
    config2b = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WRANING',
        },
    }

    # config3 has a less subtle configuration error
    # (handler references an undefined formatter).
    config3 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'misspelled_name',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # config4 specifies a custom formatter class to be loaded
    config4 = {
        'version': 1,
        'formatters': {
            'form1' : {
                '()' : __name__ + '.ExceptionFormatter',
                'format' : '%(levelname)s:%(name)s:%(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'root' : {
            'level' : 'NOTSET',
            'handlers' : ['hand1'],
        },
    }

    # As config4 but using an actual callable rather than a string
    config4a = {
        'version': 1,
        'formatters': {
            'form1' : {
                '()' : ExceptionFormatter,
                'format' : '%(levelname)s:%(name)s:%(message)s',
            },
            'form2' : {
                '()' : __name__ + '.formatFunc',
                'format' : '%(levelname)s:%(name)s:%(message)s',
            },
            'form3' : {
                '()' : formatFunc,
                'format' : '%(levelname)s:%(name)s:%(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
            'hand2' : {
                '()' : handlerFunc,
            },
        },
        'root' : {
            'level' : 'NOTSET',
            'handlers' : ['hand1'],
        },
    }

    # config5 specifies a custom handler class to be loaded
    config5 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : __name__ + '.CustomHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # config6 specifies a custom handler class to be loaded
    # but has bad arguments
    config6 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : __name__ + '.CustomHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
                '9' : 'invalid parameter name',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # config 7 does not define compiler.parser but defines compiler.lexer
    # so compiler.parser should be disabled after applying it
    config7 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.lexer' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # config8 defines both compiler and compiler.lexer
    # so compiler.parser should not be disabled (since
    # compiler is defined)
    config8 = {
        'version': 1,
        'disable_existing_loggers' : False,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
            'compiler.lexer' : {
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # config8a disables existing loggers
    config8a = {
        'version': 1,
        'disable_existing_loggers' : True,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
            'compiler.lexer' : {
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # config9 sets WARNING thresholds on both the handler and the logger;
    # config9a/config9b below tweak them incrementally.
    config9 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'WARNING',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'WARNING',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'NOTSET',
        },
    }

    # Incremental update: lowers only the logger level.
    config9a = {
        'version': 1,
        'incremental' : True,
        'handlers' : {
            'hand1' : {
                'level' : 'WARNING',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'INFO',
            },
        },
    }

    # Incremental update: also lowers the handler level.
    config9b = {
        'version': 1,
        'incremental' : True,
        'handlers' : {
            'hand1' : {
                'level' : 'INFO',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'INFO',
            },
        },
    }

    # As config1 but with a filter added
    config10 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'filters' : {
            'filt1' : {
                'name' : 'compiler.parser',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
                'filters' : ['filt1'],
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'filters' : ['filt1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
            'handlers' : ['hand1'],
        },
    }

    # As config1 but using cfg:// references
    config11 = {
        'version': 1,
        'true_formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handler_configs': {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'formatters' : 'cfg://true_formatters',
        'handlers' : {
            'hand1' : 'cfg://handler_configs[hand1]',
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # As config11 but missing the version key
    config12 = {
        'true_formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handler_configs': {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'formatters' : 'cfg://true_formatters',
        'handlers' : {
            'hand1' : 'cfg://handler_configs[hand1]',
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # As config11 but using an unsupported version
    config13 = {
        'version': 2,
        'true_formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handler_configs': {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'formatters' : 'cfg://true_formatters',
        'handlers' : {
            'hand1' : 'cfg://handler_configs[hand1]',
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # As config0, but with properties
    config14 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
                '.': {
                    'foo': 'bar',
                    'terminator': '!\n',
                }
            },
        },
        'root' : {
            'level' : 'WARNING',
            'handlers' : ['hand1'],
        },
    }

    # A handler is defined after the handler that targets it; the "$"
    # style with a %-format string is also invalid and should be caught.
    out_of_order = {
        "version": 1,
        "formatters": {
            "mySimpleFormatter": {
                "format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
                "style": "$"
            }
        },
        "handlers": {
            "fileGlobal": {
                "class": "logging.StreamHandler",
                "level": "DEBUG",
                "formatter": "mySimpleFormatter"
            },
            "bufferGlobal": {
                "class": "logging.handlers.MemoryHandler",
                "capacity": 5,
                "formatter": "mySimpleFormatter",
                "target": "fileGlobal",
                "level": "DEBUG"
            }
        },
        "loggers": {
            "mymodule": {
                "level": "DEBUG",
                "handlers": ["bufferGlobal"],
                "propagate": "true"
            }
        }
    }

    # Configuration with custom logging.Formatter subclass as '()' key and 'validate' set to False
    custom_formatter_class_validate = {
        'version': 1,
        'formatters': {
            'form1': {
                '()': __name__ + '.ExceptionFormatter',
                'format': '%(levelname)s:%(name)s:%(message)s',
                'validate': False,
            },
        },
        'handlers' : {
            'hand1' : {
                'class': 'logging.StreamHandler',
                'formatter': 'form1',
                'level': 'NOTSET',
                'stream': 'ext://sys.stdout',
            },
        },
        "loggers": {
            "my_test_logger_custom_formatter": {
                "level": "DEBUG",
                "handlers": ["hand1"],
                "propagate": "true"
            }
        }
    }

    # Configuration with custom logging.Formatter subclass as 'class' key and 'validate' set to False
    custom_formatter_class_validate2 = {
        'version': 1,
        'formatters': {
            'form1': {
                'class': __name__ + '.ExceptionFormatter',
                'format': '%(levelname)s:%(name)s:%(message)s',
                'validate': False,
            },
        },
        'handlers' : {
            'hand1' : {
                'class': 'logging.StreamHandler',
                'formatter': 'form1',
                'level': 'NOTSET',
                'stream': 'ext://sys.stdout',
            },
        },
        "loggers": {
            "my_test_logger_custom_formatter": {
                "level": "DEBUG",
                "handlers": ["hand1"],
                "propagate": "true"
            }
        }
    }

    # Configuration with custom class that is not inherited from logging.Formatter
    custom_formatter_class_validate3 = {
        'version': 1,
        'formatters': {
            'form1': {
                'class': __name__ + '.myCustomFormatter',
                'format': '%(levelname)s:%(name)s:%(message)s',
                'validate': False,
            },
        },
        'handlers' : {
            'hand1' : {
                'class': 'logging.StreamHandler',
                'formatter': 'form1',
                'level': 'NOTSET',
                'stream': 'ext://sys.stdout',
            },
        },
        "loggers": {
            "my_test_logger_custom_formatter": {
                "level": "DEBUG",
                "handlers": ["hand1"],
                "propagate": "true"
            }
        }
    }

    # Configuration with custom function and 'validate' set to False
    custom_formatter_with_function = {
        'version': 1,
        'formatters': {
            'form1': {
                '()': formatFunc,
                'format': '%(levelname)s:%(name)s:%(message)s',
                'validate': False,
            },
        },
        'handlers' : {
            'hand1' : {
                'class': 'logging.StreamHandler',
                'formatter': 'form1',
                'level': 'NOTSET',
                'stream': 'ext://sys.stdout',
            },
        },
        "loggers": {
            "my_test_logger_custom_formatter": {
                "level": "DEBUG",
                "handlers": ["hand1"],
                "propagate": "true"
            }
        }
    }

    def apply_config(self, conf):
        """Apply *conf* through logging.config.dictConfig()."""
        logging.config.dictConfig(conf)

    def test_config0_ok(self):
        # A simple config which overrides the default settings.
        with support.captured_stdout() as output:
            self.apply_config(self.config0)
            logger = logging.getLogger()
            # Won't output anything
            logger.info(self.next_message())
            # Outputs a message
            logger.error(self.next_message())
            self.assert_log_lines([
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config1_ok(self, config=config1):
        # A config defining a sub-parser as well.
        with support.captured_stdout() as output:
            self.apply_config(config)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config2_failure(self):
        # A simple config which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config2)

    def test_config2a_failure(self):
        # A simple config which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config2a)

    def test_config2b_failure(self):
        # A simple config which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config2b)

    def test_config3_failure(self):
        # A simple config which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config3)

    def test_config4_ok(self):
        # A config specifying a custom formatter class.
        with support.captured_stdout() as output:
            self.apply_config(self.config4)
            #logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([])

    def test_config4a_ok(self):
        # A config specifying a custom formatter class.
        with support.captured_stdout() as output:
            self.apply_config(self.config4a)
            #logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([])

    def test_config5_ok(self):
        self.test_config1_ok(config=self.config5)

    def test_config6_failure(self):
        self.assertRaises(Exception, self.apply_config, self.config6)

    def test_config7_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config1)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with support.captured_stdout() as output:
            self.apply_config(self.config7)
            logger = logging.getLogger("compiler.parser")
            self.assertTrue(logger.disabled)
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '3'),
                ('ERROR', '4'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    # Same as test_config_7_ok but don't disable old loggers.
    def test_config_8_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config1)
            logger = logging.getLogger("compiler.parser")
            # All will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with support.captured_stdout() as output:
            self.apply_config(self.config8)
            logger = logging.getLogger("compiler.parser")
            self.assertFalse(logger.disabled)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '3'),
                ('ERROR', '4'),
                ('INFO', '5'),
                ('ERROR', '6'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config_8a_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config1a)
            logger = logging.getLogger("compiler.parser")
            # See issue #11424. compiler-hyphenated sorts
            # between compiler and compiler.xyz and this
            # was preventing compiler.xyz from being included
            # in the child loggers of compiler because of an
            # overzealous loop termination condition.
            hyphenated = logging.getLogger('compiler-hyphenated')
            # All will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
                ('CRITICAL', '3'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with support.captured_stdout() as output:
            self.apply_config(self.config8a)
            logger = logging.getLogger("compiler.parser")
            self.assertFalse(logger.disabled)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            # Will not appear
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '4'),
                ('ERROR', '5'),
                ('INFO', '6'),
                ('ERROR', '7'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config_9_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config9)
            logger = logging.getLogger("compiler.parser")
            # Nothing will be output since both handler and logger are set to WARNING
            logger.info(self.next_message())
            self.assert_log_lines([], stream=output)
            self.apply_config(self.config9a)
            # Nothing will be output since handler is still set to WARNING
            logger.info(self.next_message())
            self.assert_log_lines([], stream=output)
            self.apply_config(self.config9b)
            # Message should now be output
            logger.info(self.next_message())
            self.assert_log_lines([
                ('INFO', '3'),
            ], stream=output)

    def test_config_10_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config10)
            logger = logging.getLogger("compiler.parser")
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler')
            # Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler.lexer')
            # Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger("compiler.parser.codegen")
            # Output, as not filtered
            logger.error(self.next_message())
            self.assert_log_lines([
                ('WARNING', '1'),
                ('ERROR', '4'),
            ], stream=output)

    def test_config11_ok(self):
        self.test_config1_ok(self.config11)

    def test_config12_failure(self):
        self.assertRaises(Exception, self.apply_config, self.config12)

    def test_config13_failure(self):
        self.assertRaises(Exception, self.apply_config, self.config13)

    def test_config14_ok(self):
        # The '.' properties dict sets arbitrary attributes on the handler.
        with support.captured_stdout() as output:
            self.apply_config(self.config14)
            h = logging._handlers['hand1']
            self.assertEqual(h.foo, 'bar')
            self.assertEqual(h.terminator, '!\n')
            logging.warning('Exclamation')
            self.assertTrue(output.getvalue().endswith('Exclamation!\n'))

    def test_config15_ok(self):
        # Re-applying a FileHandler config must not leak the old stream.
        def cleanup(h1, fn):
            h1.close()
            os.remove(fn)

        with self.check_no_resource_warning():
            fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
            os.close(fd)

            config = {
                "version": 1,
                "handlers": {
                    "file": {
                        "class": "logging.FileHandler",
                        "filename": fn,
                        "encoding": "utf-8",
                    }
                },
                "root": {
                    "handlers": ["file"]
                }
            }

            self.apply_config(config)
            self.apply_config(config)

        handler = logging.root.handlers[0]
        self.addCleanup(cleanup, handler, fn)

    def setup_via_listener(self, text, verify=None):
        """Send *text* (a serialized config) to a config socket listener."""
        text = text.encode("utf-8")
        # Ask for a randomly assigned port (by using port 0)
        t = logging.config.listen(0, verify)
        t.start()
        t.ready.wait()
        # Now get the port allocated
        port = t.port
        t.ready.clear()
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(2.0)
            sock.connect(('localhost', port))

            # Wire format: 4-byte big-endian length prefix, then the payload.
            slen = struct.pack('>L', len(text))
            s = slen + text
            sentsofar = 0
            left = len(s)
            while left > 0:
                sent = sock.send(s[sentsofar:])
                sentsofar += sent
                left -= sent
            sock.close()
        finally:
            t.ready.wait(2.0)
            logging.config.stopListening()
            threading_helper.join_thread(t)

    def test_listen_config_10_ok(self):
        with support.captured_stdout() as output:
            self.setup_via_listener(json.dumps(self.config10))
            logger = logging.getLogger("compiler.parser")
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler')
            # Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler.lexer')
            # Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger("compiler.parser.codegen")
            # Output, as not filtered
            logger.error(self.next_message())
            self.assert_log_lines([
                ('WARNING', '1'),
                ('ERROR', '4'),
            ], stream=output)

    def test_listen_config_1_ok(self):
        with support.captured_stdout() as output:
            self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_listen_verify(self):

        def verify_fail(stuff):
            # Verification callback that rejects every config.
            return None

        def verify_reverse(stuff):
            # Verification callback that un-reverses the received bytes.
            return stuff[::-1]

        logger = logging.getLogger("compiler.parser")
        to_send = textwrap.dedent(ConfigFileTest.config1)

        # First, specify a verification function that will fail.
        # We expect to see no output, since our configuration
        # never took effect.
        with support.captured_stdout() as output:
            self.setup_via_listener(to_send, verify_fail)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
        self.assert_log_lines([], stream=output)
        # Original logger output has the stuff we logged.
        self.assert_log_lines([
            ('INFO', '1'),
            ('ERROR', '2'),
        ], pat=r"^[\w.]+ -> (\w+): (\d+)$")

        # Now, perform no verification. Our configuration
        # should take effect.

        with support.captured_stdout() as output:
            self.setup_via_listener(to_send)    # no verify callable specified
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
        self.assert_log_lines([
            ('INFO', '3'),
            ('ERROR', '4'),
        ], stream=output)
        # Original logger output still has the stuff we logged before.
        self.assert_log_lines([
            ('INFO', '1'),
            ('ERROR', '2'),
        ], pat=r"^[\w.]+ -> (\w+): (\d+)$")

        # Now, perform verification which transforms the bytes.

        with support.captured_stdout() as output:
            self.setup_via_listener(to_send[::-1], verify_reverse)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
        self.assert_log_lines([
            ('INFO', '5'),
            ('ERROR', '6'),
        ], stream=output)
        # Original logger output still has the stuff we logged before.
        self.assert_log_lines([
            ('INFO', '1'),
            ('ERROR', '2'),
        ], pat=r"^[\w.]+ -> (\w+): (\d+)$")

    def test_out_of_order(self):
        self.assertRaises(ValueError, self.apply_config, self.out_of_order)

    def test_out_of_order_with_dollar_style(self):
        # With a valid "$" format string the forward reference to
        # 'fileGlobal' is resolved and configuration succeeds.
        config = copy.deepcopy(self.out_of_order)
        config['formatters']['mySimpleFormatter']['format'] = "${asctime} (${name}) ${levelname}: ${message}"

        self.apply_config(config)
        handler = logging.getLogger('mymodule').handlers[0]
        self.assertIsInstance(handler.target, logging.Handler)
        self.assertIsInstance(handler.formatter._style,
                              logging.StringTemplateStyle)

    def test_custom_formatter_class_with_validate(self):
        self.apply_config(self.custom_formatter_class_validate)
        handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
        self.assertIsInstance(handler.formatter, ExceptionFormatter)

    def test_custom_formatter_class_with_validate2(self):
        self.apply_config(self.custom_formatter_class_validate2)
        handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
        self.assertIsInstance(handler.formatter, ExceptionFormatter)

    def test_custom_formatter_class_with_validate2_with_wrong_fmt(self):
        config = self.custom_formatter_class_validate.copy()
        config['formatters']['form1']['style'] = "$"

        # Exception should not be raise as we have configured 'validate' to False
        self.apply_config(config)
        handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
        self.assertIsInstance(handler.formatter, ExceptionFormatter)

    def test_custom_formatter_class_with_validate3(self):
        self.assertRaises(ValueError, self.apply_config, self.custom_formatter_class_validate3)

    def test_custom_formatter_function_with_validate(self):
        self.assertRaises(ValueError, self.apply_config, self.custom_formatter_with_function)

    def test_baseconfig(self):
        # Exercises BaseConfigurator's cfg:// reference resolution directly.
        d = {
            'atuple': (1, 2, 3),
            'alist': ['a', 'b', 'c'],
            'adict': {'d': 'e', 'f': 3 },
            'nest1': ('g', ('h', 'i'), 'j'),
            'nest2': ['k', ['l', 'm'], 'n'],
            'nest3': ['o', 'cfg://alist', 'p'],
        }
        bc = logging.config.BaseConfigurator(d)
        self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
        self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
        self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
        self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
        self.assertEqual(bc.convert('cfg://adict.d'), 'e')
        self.assertEqual(bc.convert('cfg://adict[f]'), 3)
        v = bc.convert('cfg://nest3')
        self.assertEqual(v.pop(1), ['a', 'b', 'c'])
        self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
        self.assertRaises(ValueError, bc.convert, 'cfg://!')
        self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')

    def test_namedtuple(self):
        # see bpo-39142
        from collections import namedtuple

        class MyHandler(logging.StreamHandler):
            def __init__(self, resource, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.resource: namedtuple = resource

            def emit(self, record):
                record.msg += f' {self.resource.type}'
                return super().emit(record)

        Resource = namedtuple('Resource', ['type', 'labels'])
        resource = Resource(type='my_type', labels=['a'])

        config = {
            'version': 1,
            'handlers': {
                'myhandler': {
                    '()': MyHandler,
                    'resource': resource
                }
            },
            'root':  {'level': 'INFO', 'handlers': ['myhandler']},
        }
        with support.captured_stderr() as stderr:
            self.apply_config(config)
            logging.info('some log')
        self.assertEqual(stderr.getvalue(), 'some log my_type\n')
class ManagerTest(BaseTest):
    """Tests for logging.Manager behaviour."""

    def test_manager_loggerclass(self):
        """A logger class set on a manager applies only to that manager."""
        captured = []

        class RecordingLogger(logging.Logger):
            def _log(self, level, msg, args, exc_info=None, extra=None):
                captured.append(msg)

        manager = logging.Manager(None)
        # Only Logger subclasses are accepted by setLoggerClass.
        self.assertRaises(TypeError, manager.setLoggerClass, int)
        manager.setLoggerClass(RecordingLogger)
        logger = manager.getLogger('test')
        logger.warning('should appear in logged')
        # The module-level root logger belongs to a different manager and
        # must not be affected.
        logging.warning('should not appear in logged')
        self.assertEqual(captured, ['should appear in logged'])

    def test_set_log_record_factory(self):
        """setLogRecordFactory stores the factory on the manager verbatim."""
        manager = logging.Manager(None)
        factory = object()
        manager.setLogRecordFactory(factory)
        self.assertEqual(manager.logRecordFactory, factory)
class ChildLoggerTest(BaseTest):
    """Tests for Logger.getChild."""

    def test_child_loggers(self):
        root = logging.getLogger()
        abc_logger = logging.getLogger('abc')
        # Registering 'def.ghi' up front mirrors the original scenario.
        logging.getLogger('def.ghi')
        # Children of the root logger are plain top-level loggers.
        self.assertIs(root.getChild('xyz'), logging.getLogger('xyz'))
        self.assertIs(root.getChild('uvw.xyz'), logging.getLogger('uvw.xyz'))
        # Children of a named logger get dotted names under the parent, and
        # a multi-level suffix resolves to the same logger as chained calls.
        child = abc_logger.getChild('def')
        grandchild = child.getChild('ghi')
        self.assertIs(child, logging.getLogger('abc.def'))
        self.assertIs(grandchild, logging.getLogger('abc.def.ghi'))
        self.assertIs(grandchild, abc_logger.getChild('def.ghi'))
class DerivedLogRecord(logging.LogRecord):
    """LogRecord subclass used to verify custom log-record factories."""
class LogRecordFactoryTest(BaseTest):
    """Verify that logging.setLogRecordFactory controls the record type."""

    def setUp(self):
        class CheckingFilter(logging.Filter):
            # Filter that raises TypeError unless each record is exactly
            # the expected LogRecord class.
            def __init__(self, cls):
                self.cls = cls
            def filter(self, record):
                t = type(record)
                if t is not self.cls:
                    msg = 'Unexpected LogRecord type %s, expected %s' % (t,
                            self.cls)
                    raise TypeError(msg)
                return True
        BaseTest.setUp(self)
        self.filter = CheckingFilter(DerivedLogRecord)
        self.root_logger.addFilter(self.filter)
        # Remember the process-global factory so tearDown can restore it.
        self.orig_factory = logging.getLogRecordFactory()
    def tearDown(self):
        self.root_logger.removeFilter(self.filter)
        BaseTest.tearDown(self)
        logging.setLogRecordFactory(self.orig_factory)
    def test_logrecord_class(self):
        # With the default factory the filter sees a plain LogRecord and
        # raises; after installing DerivedLogRecord the record passes.
        self.assertRaises(TypeError, self.root_logger.warning,
                          self.next_message())
        logging.setLogRecordFactory(DerivedLogRecord)
        self.root_logger.error(self.next_message())
        # '2' because next_message() was consumed once by the failed call.
        self.assert_log_lines([
            ('root', 'ERROR', '2'),
        ])
class QueueHandlerTest(BaseTest):
    """Tests for logging.handlers.QueueHandler and QueueListener."""

    # Do not bother with a logger name group.
    expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
    def setUp(self):
        BaseTest.setUp(self)
        # 'que' logger feeds records into self.queue via a QueueHandler.
        self.queue = queue.Queue(-1)
        self.que_hdlr = logging.handlers.QueueHandler(self.queue)
        self.name = 'que'
        self.que_logger = logging.getLogger('que')
        self.que_logger.propagate = False
        self.que_logger.setLevel(logging.WARNING)
        self.que_logger.addHandler(self.que_hdlr)
    def tearDown(self):
        self.que_hdlr.close()
        BaseTest.tearDown(self)
    def test_queue_handler(self):
        # Below-threshold messages must not reach the queue.
        self.que_logger.debug(self.next_message())
        self.assertRaises(queue.Empty, self.queue.get_nowait)
        self.que_logger.info(self.next_message())
        self.assertRaises(queue.Empty, self.queue.get_nowait)
        msg = self.next_message()
        self.que_logger.warning(msg)
        data = self.queue.get_nowait()
        self.assertTrue(isinstance(data, logging.LogRecord))
        self.assertEqual(data.name, self.que_logger.name)
        # QueueHandler.prepare() merges args into msg, leaving args None.
        self.assertEqual((data.msg, data.args), (msg, None))
    def test_formatting(self):
        msg = self.next_message()
        levelname = logging.getLevelName(logging.WARNING)
        log_format_str = '{name} -> {levelname}: {message}'
        formatted_msg = log_format_str.format(name=self.name,
                                              levelname=levelname, message=msg)
        formatter = logging.Formatter(self.log_format)
        self.que_hdlr.setFormatter(formatter)
        self.que_logger.warning(msg)
        log_record = self.queue.get_nowait()
        # The handler's formatter is applied before enqueueing, so both
        # msg and message carry the fully formatted text.
        self.assertEqual(formatted_msg, log_record.msg)
        self.assertEqual(formatted_msg, log_record.message)
    @unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
                         'logging.handlers.QueueListener required for this test')
    def test_queue_listener(self):
        handler = TestHandler(support.Matcher())
        listener = logging.handlers.QueueListener(self.queue, handler)
        listener.start()
        try:
            self.que_logger.warning(self.next_message())
            self.que_logger.error(self.next_message())
            self.que_logger.critical(self.next_message())
        finally:
            listener.stop()
        self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
        self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
        self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
        handler.close()

        # Now test with respect_handler_level set: only records at or above
        # the handler's own level should be handled.
        handler = TestHandler(support.Matcher())
        handler.setLevel(logging.CRITICAL)
        listener = logging.handlers.QueueListener(self.queue, handler,
                                                  respect_handler_level=True)
        listener.start()
        try:
            self.que_logger.warning(self.next_message())
            self.que_logger.error(self.next_message())
            self.que_logger.critical(self.next_message())
        finally:
            listener.stop()
        self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
        self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
        self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
        handler.close()
    @unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
                         'logging.handlers.QueueListener required for this test')
    def test_queue_listener_with_StreamHandler(self):
        # Test that traceback only appends once (bpo-34334).
        listener = logging.handlers.QueueListener(self.queue, self.root_hdlr)
        listener.start()
        try:
            1 / 0
        except ZeroDivisionError as e:
            exc = e
            self.que_logger.exception(self.next_message(), exc_info=exc)
        listener.stop()
        self.assertEqual(self.stream.getvalue().strip().count('Traceback'), 1)
    @unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
                         'logging.handlers.QueueListener required for this test')
    def test_queue_listener_with_multiple_handlers(self):
        # Test that queue handler format doesn't affect other handler formats (bpo-35726).
        self.que_hdlr.setFormatter(self.root_formatter)
        self.que_logger.addHandler(self.root_hdlr)

        listener = logging.handlers.QueueListener(self.queue, self.que_hdlr)
        listener.start()
        self.que_logger.error("error")
        listener.stop()
        self.assertEqual(self.stream.getvalue().strip(), "que -> ERROR: error")
if hasattr(logging.handlers, 'QueueListener'):
    import multiprocessing
    from unittest.mock import patch

    class QueueListenerTest(BaseTest):
        """
        Tests based on patch submitted for issue #27930. Ensure that
        QueueListener handles all log messages.
        """

        # Each scenario is repeated this many times with a fresh queue.
        repeat = 20

        @staticmethod
        def setup_and_log(log_queue, ident):
            """
            Creates a logger with a QueueHandler that logs to a queue read by a
            QueueListener. Starts the listener, logs five messages, and stops
            the listener.
            """
            logger = logging.getLogger('test_logger_with_id_%s' % ident)
            logger.setLevel(logging.DEBUG)
            handler = logging.handlers.QueueHandler(log_queue)
            logger.addHandler(handler)
            listener = logging.handlers.QueueListener(log_queue)
            listener.start()

            logger.info('one')
            logger.info('two')
            logger.info('three')
            logger.info('four')
            logger.info('five')

            listener.stop()
            logger.removeHandler(handler)
            handler.close()

        @patch.object(logging.handlers.QueueListener, 'handle')
        def test_handle_called_with_queue_queue(self, mock_handle):
            for i in range(self.repeat):
                log_queue = queue.Queue()
                self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
            self.assertEqual(mock_handle.call_count, 5 * self.repeat,
                             'correct number of handled log messages')

        @patch.object(logging.handlers.QueueListener, 'handle')
        def test_handle_called_with_mp_queue(self, mock_handle):
            # bpo-28668: The multiprocessing (mp) module is not functional
            # when the mp.synchronize module cannot be imported.
            support.skip_if_broken_multiprocessing_synchronize()
            for i in range(self.repeat):
                log_queue = multiprocessing.Queue()
                self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
                log_queue.close()
                log_queue.join_thread()
            self.assertEqual(mock_handle.call_count, 5 * self.repeat,
                             'correct number of handled log messages')

        @staticmethod
        def get_all_from_queue(log_queue):
            # Generator draining log_queue until it is empty.
            # NOTE(review): the 'return []' value of a generator is discarded
            # by callers iterating it; exhaustion simply stops iteration.
            try:
                while True:
                    yield log_queue.get_nowait()
            except queue.Empty:
                return []

        def test_no_messages_in_queue_after_stop(self):
            """
            Five messages are logged then the QueueListener is stopped. This
            test then gets everything off the queue. Failure of this test
            indicates that messages were not registered on the queue until
            _after_ the QueueListener stopped.
            """
            # bpo-28668: The multiprocessing (mp) module is not functional
            # when the mp.synchronize module cannot be imported.
            support.skip_if_broken_multiprocessing_synchronize()
            for i in range(self.repeat):
                # NOTE: local name 'queue' shadows the queue module inside
                # this loop; nothing below needs the module, so it's benign.
                queue = multiprocessing.Queue()
                self.setup_and_log(queue, '%s_%s' %(self.id(), i))
                # time.sleep(1)
                items = list(self.get_all_from_queue(queue))
                queue.close()
                queue.join_thread()

                # Only the sentinel (or nothing at all) may remain enqueued.
                expected = [[], [logging.handlers.QueueListener._sentinel]]
                self.assertIn(items, expected,
                              'Found unexpected messages in queue: %s' % (
                                    [m.msg if isinstance(m, logging.LogRecord)
                                     else m for m in items]))

        def test_calls_task_done_after_stop(self):
            # Issue 36813: Make sure queue.join does not deadlock.
            log_queue = queue.Queue()
            listener = logging.handlers.QueueListener(log_queue)
            listener.start()
            listener.stop()
            with self.assertRaises(ValueError):
                # Make sure all tasks are done and .join won't block.
                log_queue.task_done()
# A zero offset shared by the UTC tzinfo below.
ZERO = datetime.timedelta(0)

class UTC(datetime.tzinfo):
    """Minimal fixed-offset tzinfo representing UTC."""

    def utcoffset(self, dt):
        return ZERO

    def dst(self, dt):
        # UTC observes no daylight saving time.
        return ZERO

    def tzname(self, dt):
        return 'UTC'

# Module-level singleton used by time-related tests.
utc = UTC()
class AssertErrorMessage:
    """Mix-in for TestCase subclasses: assert an exception's message text.

    assert_error_message(exception, message, callable, *args, **kwargs)
    invokes the callable through assertRaises with an EMPTY tuple of
    expected exceptions, so any exception the callable raises propagates
    out of assertRaises; it is then caught below and its str() compared
    against *message*.  NOTE(review): if no callable is supplied, the
    assertRaises(()) call just returns an unused context manager and the
    method silently asserts nothing — callers must pass the callable.
    """
    def assert_error_message(self, exception, message, *args, **kwargs):
        try:
            self.assertRaises((), *args, **kwargs)
        except exception as e:
            self.assertEqual(message, str(e))
class FormatterTest(unittest.TestCase, AssertErrorMessage):
    """Tests for logging.Formatter across the %, {, and $ format styles."""

    def setUp(self):
        # Baseline attributes for records produced by get_record().
        self.common = {
            'name': 'formatter.test',
            'level': logging.DEBUG,
            'pathname': os.path.join('path', 'to', 'dummy.ext'),
            'lineno': 42,
            'exc_info': None,
            'func': None,
            'msg': 'Message with %d %s',
            'args': (2, 'placeholders'),
        }
        self.variants = {
            'custom': {
                'custom': 1234
            }
        }

    def get_record(self, name=None):
        # Build a LogRecord from the common attrs, optionally overlaid with
        # one of the named variants (e.g. 'custom' adds a custom attribute).
        result = dict(self.common)
        if name is not None:
            result.update(self.variants[name])
        return logging.makeLogRecord(result)

    def test_percent(self):
        # Test %-formatting
        r = self.get_record()
        f = logging.Formatter('${%(message)s}')
        self.assertEqual(f.format(r), '${Message with 2 placeholders}')
        f = logging.Formatter('%(random)s')
        self.assertRaises(ValueError, f.format, r)
        self.assertFalse(f.usesTime())
        f = logging.Formatter('%(asctime)s')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('%(asctime)-15s')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('%(asctime)#15s')
        self.assertTrue(f.usesTime())

    def test_braces(self):
        # Test {}-formatting
        r = self.get_record()
        f = logging.Formatter('$%{message}%$', style='{')
        self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
        f = logging.Formatter('{random}', style='{')
        self.assertRaises(ValueError, f.format, r)
        f = logging.Formatter("{message}", style='{')
        self.assertFalse(f.usesTime())
        f = logging.Formatter('{asctime}', style='{')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('{asctime!s:15}', style='{')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('{asctime:15}', style='{')
        self.assertTrue(f.usesTime())

    def test_dollars(self):
        # Test $-formatting
        r = self.get_record()
        f = logging.Formatter('${message}', style='$')
        self.assertEqual(f.format(r), 'Message with 2 placeholders')
        f = logging.Formatter('$message', style='$')
        self.assertEqual(f.format(r), 'Message with 2 placeholders')
        f = logging.Formatter('$$%${message}%$$', style='$')
        self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
        f = logging.Formatter('${random}', style='$')
        self.assertRaises(ValueError, f.format, r)
        self.assertFalse(f.usesTime())
        f = logging.Formatter('${asctime}', style='$')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('$asctime', style='$')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('${message}', style='$')
        self.assertFalse(f.usesTime())
        f = logging.Formatter('${asctime}--', style='$')
        self.assertTrue(f.usesTime())

    def test_format_validate(self):
        # Formats below are accepted by Formatter's style validation;
        # the constructor stores them unchanged in _fmt.
        # Check correct formatting
        # Percentage style
        f = logging.Formatter("%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
        self.assertEqual(f._fmt, "%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
        f = logging.Formatter("%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
        self.assertEqual(f._fmt, "%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
        f = logging.Formatter("%(process)#+027.23X")
        self.assertEqual(f._fmt, "%(process)#+027.23X")
        f = logging.Formatter("%(foo)#.*g")
        self.assertEqual(f._fmt, "%(foo)#.*g")

        # StrFormat Style
        f = logging.Formatter("$%{message}%$ - {asctime!a:15} - {customfield['key']}", style="{")
        self.assertEqual(f._fmt, "$%{message}%$ - {asctime!a:15} - {customfield['key']}")
        f = logging.Formatter("{process:.2f} - {custom.f:.4f}", style="{")
        self.assertEqual(f._fmt, "{process:.2f} - {custom.f:.4f}")
        f = logging.Formatter("{customfield!s:#<30}", style="{")
        self.assertEqual(f._fmt, "{customfield!s:#<30}")
        f = logging.Formatter("{message!r}", style="{")
        self.assertEqual(f._fmt, "{message!r}")
        f = logging.Formatter("{message!s}", style="{")
        self.assertEqual(f._fmt, "{message!s}")
        f = logging.Formatter("{message!a}", style="{")
        self.assertEqual(f._fmt, "{message!a}")
        f = logging.Formatter("{process!r:4.2}", style="{")
        self.assertEqual(f._fmt, "{process!r:4.2}")
        f = logging.Formatter("{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}", style="{")
        self.assertEqual(f._fmt, "{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}")
        f = logging.Formatter("{process!s:{w},.{p}}", style="{")
        self.assertEqual(f._fmt, "{process!s:{w},.{p}}")
        f = logging.Formatter("{foo:12.{p}}", style="{")
        self.assertEqual(f._fmt, "{foo:12.{p}}")
        f = logging.Formatter("{foo:{w}.6}", style="{")
        self.assertEqual(f._fmt, "{foo:{w}.6}")
        f = logging.Formatter("{foo[0].bar[1].baz}", style="{")
        self.assertEqual(f._fmt, "{foo[0].bar[1].baz}")
        f = logging.Formatter("{foo[k1].bar[k2].baz}", style="{")
        self.assertEqual(f._fmt, "{foo[k1].bar[k2].baz}")
        f = logging.Formatter("{12[k1].bar[k2].baz}", style="{")
        self.assertEqual(f._fmt, "{12[k1].bar[k2].baz}")

        # Dollar style
        f = logging.Formatter("${asctime} - $message", style="$")
        self.assertEqual(f._fmt, "${asctime} - $message")
        f = logging.Formatter("$bar $$", style="$")
        self.assertEqual(f._fmt, "$bar $$")
        f = logging.Formatter("$bar $$$$", style="$")
        self.assertEqual(f._fmt, "$bar $$$$")  # this would print two $($$)

        # Testing when ValueError being raised from incorrect format
        # Percentage Style
        self.assertRaises(ValueError, logging.Formatter, "%(asctime)Z")
        self.assertRaises(ValueError, logging.Formatter, "%(asctime)b")
        self.assertRaises(ValueError, logging.Formatter, "%(asctime)*")
        self.assertRaises(ValueError, logging.Formatter, "%(asctime)*3s")
        self.assertRaises(ValueError, logging.Formatter, "%(asctime)_")
        self.assertRaises(ValueError, logging.Formatter, '{asctime}')
        self.assertRaises(ValueError, logging.Formatter, '${message}')
        self.assertRaises(ValueError, logging.Formatter, '%(foo)#12.3*f')  # with both * and decimal number as precision
        self.assertRaises(ValueError, logging.Formatter, '%(foo)0*.8*f')

        # StrFormat Style
        # Testing failure for '-' in field name
        self.assert_error_message(
            ValueError,
            "invalid format: invalid field name/expression: 'name-thing'",
            logging.Formatter, "{name-thing}", style="{"
        )
        # Testing failure for style mismatch
        self.assert_error_message(
            ValueError,
            "invalid format: no fields",
            logging.Formatter, '%(asctime)s', style='{'
        )
        # Testing failure for invalid conversion
        # NOTE(review): no callable/args are passed below, so
        # assert_error_message's assertRaises(()) is a no-op and this case
        # asserts nothing — presumably a logging.Formatter call with an
        # invalid '!Z' conversion was intended; confirm and complete it.
        self.assert_error_message(
            ValueError,
            "invalid conversion: 'Z'"
        )
        self.assertRaises(ValueError, logging.Formatter, '{asctime!s:#30,15f}', style='{')
        self.assert_error_message(
            ValueError,
            "invalid format: expected ':' after conversion specifier",
            logging.Formatter, '{asctime!aa:15}', style='{'
        )
        # Testing failure for invalid spec
        self.assert_error_message(
            ValueError,
            "invalid format: bad specifier: '.2ff'",
            logging.Formatter, '{process:.2ff}', style='{'
        )
        self.assertRaises(ValueError, logging.Formatter, '{process:.2Z}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{process!s:<##30,12g}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{process!s:<#30#,12g}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{process!s:{{w}},{{p}}}', style='{')
        # Testing failure for mismatch braces
        self.assert_error_message(
            ValueError,
            "invalid format: expected '}' before end of string",
            logging.Formatter, '{process', style='{'
        )
        self.assert_error_message(
            ValueError,
            "invalid format: Single '}' encountered in format string",
            logging.Formatter, 'process}', style='{'
        )
        self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{foo/bar}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{foo:{{w}}.{{p}}}}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{foo!X:{{w}}.{{p}}}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{foo!a:random}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{dom}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{d}om}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{foo.!a:d}', style='{')

        # Dollar style
        # Testing failure for mismatch bare $
        self.assert_error_message(
            ValueError,
            "invalid format: bare \'$\' not allowed",
            logging.Formatter, '$bar $$$', style='$'
        )
        self.assert_error_message(
            ValueError,
            "invalid format: bare \'$\' not allowed",
            logging.Formatter, 'bar $', style='$'
        )
        self.assert_error_message(
            ValueError,
            "invalid format: bare \'$\' not allowed",
            logging.Formatter, 'foo $.', style='$'
        )
        # Testing failure for mismatch style
        self.assert_error_message(
            ValueError,
            "invalid format: no fields",
            logging.Formatter, '{asctime}', style='$'
        )
        self.assertRaises(ValueError, logging.Formatter, '%(asctime)s', style='$')

        # Testing failure for incorrect fields
        self.assert_error_message(
            ValueError,
            "invalid format: no fields",
            logging.Formatter, 'foo', style='$'
        )
        self.assertRaises(ValueError, logging.Formatter, '${asctime', style='$')

    def test_defaults_parameter(self):
        fmts = ['%(custom)s %(message)s', '{custom} {message}', '$custom $message']
        styles = ['%', '{', '$']
        for fmt, style in zip(fmts, styles):
            # A 'defaults' mapping supplies values for fields missing from
            # the record, in every style.
            f = logging.Formatter(fmt, style=style, defaults={'custom': 'Default'})
            r = self.get_record()
            self.assertEqual(f.format(r), 'Default Message with 2 placeholders')
            r = self.get_record("custom")
            self.assertEqual(f.format(r), '1234 Message with 2 placeholders')

            # Without default
            f = logging.Formatter(fmt, style=style)
            r = self.get_record()
            self.assertRaises(ValueError, f.format, r)

            # Non-existing default is ignored
            f = logging.Formatter(fmt, style=style, defaults={'Non-existing': 'Default'})
            r = self.get_record("custom")
            self.assertEqual(f.format(r), '1234 Message with 2 placeholders')

    def test_invalid_style(self):
        self.assertRaises(ValueError, logging.Formatter, None, None, 'x')

    def test_time(self):
        r = self.get_record()
        dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
        # We use None to indicate we want the local timezone
        # We're essentially converting a UTC time to local time
        r.created = time.mktime(dt.astimezone(None).timetuple())
        r.msecs = 123
        f = logging.Formatter('%(asctime)s %(message)s')
        # Force gmtime so the assertion is timezone-independent.
        f.converter = time.gmtime
        self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
        self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
        f.format(r)
        self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')

    def test_default_msec_format_none(self):
        class NoMsecFormatter(logging.Formatter):
            # default_msec_format = None suppresses the ',123' suffix.
            default_msec_format = None
            default_time_format = '%d/%m/%Y %H:%M:%S'

        r = self.get_record()
        dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 123, utc)
        r.created = time.mktime(dt.astimezone(None).timetuple())
        f = NoMsecFormatter()
        f.converter = time.gmtime
        self.assertEqual(f.formatTime(r), '21/04/1993 08:03:00')
class TestBufferingFormatter(logging.BufferingFormatter):
    """BufferingFormatter that brackets output with the record count."""

    def formatHeader(self, records):
        count = len(records)
        return '[(%d)' % count

    def formatFooter(self, records):
        count = len(records)
        return '(%d)]' % count
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
    """Check that exc_text and stack_info are captured on log records."""

    def test_formatting(self):
        r = self.root_logger
        h = RecordingHandler()
        r.addHandler(h)
        try:
            raise RuntimeError('deliberate mistake')
        except:
            logging.exception('failed', stack_info=True)
        r.removeHandler(h)
        h.close()
        r = h.records[0]
        self.assertTrue(r.exc_text.startswith('Traceback (most recent '
                                              'call last):\n'))
        self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
                                            'deliberate mistake'))
        self.assertTrue(r.stack_info.startswith('Stack (most recent '
                                                'call last):\n'))
        # NOTE: this assertion depends on the literal source text of the
        # logging.exception(...) call above appearing as the last stack line,
        # so that call must not be reformatted.
        self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
                                              'stack_info=True)'))
class LastResortTest(BaseTest):
    """Tests for logging.lastResort, the handler of last resort."""

    def test_last_resort(self):
        # Test the last resort handler
        root = self.root_logger
        # Detach the test handler so the root logger has no handlers and
        # the last-resort path is exercised.
        root.removeHandler(self.root_hdlr)
        old_lastresort = logging.lastResort
        old_raise_exceptions = logging.raiseExceptions

        try:
            with support.captured_stderr() as stderr:
                root.debug('This should not appear')
                self.assertEqual(stderr.getvalue(), '')
                # lastResort defaults to WARNING, so this one is printed.
                root.warning('Final chance!')
                self.assertEqual(stderr.getvalue(), 'Final chance!\n')

            # No handlers and no last resort, so 'No handlers' message
            logging.lastResort = None
            with support.captured_stderr() as stderr:
                root.warning('Final chance!')
                msg = 'No handlers could be found for logger "root"\n'
                self.assertEqual(stderr.getvalue(), msg)

            # 'No handlers' message only printed once
            with support.captured_stderr() as stderr:
                root.warning('Final chance!')
                self.assertEqual(stderr.getvalue(), '')

            # If raiseExceptions is False, no message is printed
            root.manager.emittedNoHandlerWarning = False
            logging.raiseExceptions = False
            with support.captured_stderr() as stderr:
                root.warning('Final chance!')
                self.assertEqual(stderr.getvalue(), '')
        finally:
            # Restore global logging state regardless of assertion outcome.
            root.addHandler(self.root_hdlr)
            logging.lastResort = old_lastresort
            logging.raiseExceptions = old_raise_exceptions
class FakeHandler:
    """Stand-in handler recording which lifecycle methods were invoked.

    Each of acquire/flush/close/release appends "<identifier> - <method>"
    to the shared *called* list when invoked.
    """

    def __init__(self, identifier, called):
        for name in ('acquire', 'flush', 'close', 'release'):
            setattr(self, name, self.record_call(identifier, name, called))

    def record_call(self, identifier, method_name, called):
        # Return a zero-argument closure that logs the call.
        def inner():
            called.append('{} - {}'.format(identifier, method_name))
        return inner
class RecordingHandler(logging.NullHandler):
    """NullHandler that remembers every record passed to handle()."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.records = []

    def handle(self, record):
        """Keep track of all the emitted records."""
        self.records.append(record)
class ShutdownTest(BaseTest):

    """Test suite for the shutdown method."""

    def setUp(self):
        super(ShutdownTest, self).setUp()
        self.called = []

        raise_exceptions = logging.raiseExceptions
        self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)

    def raise_error(self, error):
        # Return a zero-argument callable raising *error* — used to replace
        # one of a FakeHandler's lifecycle methods.
        def inner():
            raise error()
        return inner

    def test_no_failure(self):
        # create some fake handlers
        handler0 = FakeHandler(0, self.called)
        handler1 = FakeHandler(1, self.called)
        handler2 = FakeHandler(2, self.called)

        # create live weakref to those handlers
        handlers = map(logging.weakref.ref, [handler0, handler1, handler2])

        logging.shutdown(handlerList=list(handlers))

        # Shutdown processes handlers in reverse order of registration.
        expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
                    '1 - acquire', '1 - flush', '1 - close', '1 - release',
                    '0 - acquire', '0 - flush', '0 - close', '0 - release']
        self.assertEqual(expected, self.called)

    def _test_with_failure_in_method(self, method, error):
        handler = FakeHandler(0, self.called)
        setattr(handler, method, self.raise_error(error))
        handlers = [logging.weakref.ref(handler)]

        logging.shutdown(handlerList=list(handlers))

        # release() must run even when an earlier lifecycle call raised.
        self.assertEqual('0 - release', self.called[-1])

    def test_with_ioerror_in_acquire(self):
        self._test_with_failure_in_method('acquire', OSError)

    def test_with_ioerror_in_flush(self):
        self._test_with_failure_in_method('flush', OSError)

    def test_with_ioerror_in_close(self):
        self._test_with_failure_in_method('close', OSError)

    def test_with_valueerror_in_acquire(self):
        self._test_with_failure_in_method('acquire', ValueError)

    def test_with_valueerror_in_flush(self):
        self._test_with_failure_in_method('flush', ValueError)

    def test_with_valueerror_in_close(self):
        self._test_with_failure_in_method('close', ValueError)

    def test_with_other_error_in_acquire_without_raise(self):
        # With raiseExceptions off, unexpected errors are swallowed too.
        logging.raiseExceptions = False
        self._test_with_failure_in_method('acquire', IndexError)

    def test_with_other_error_in_flush_without_raise(self):
        logging.raiseExceptions = False
        self._test_with_failure_in_method('flush', IndexError)

    def test_with_other_error_in_close_without_raise(self):
        logging.raiseExceptions = False
        self._test_with_failure_in_method('close', IndexError)

    def test_with_other_error_in_acquire_with_raise(self):
        # With raiseExceptions on, only OSError/ValueError are swallowed.
        logging.raiseExceptions = True
        self.assertRaises(IndexError, self._test_with_failure_in_method,
                          'acquire', IndexError)

    def test_with_other_error_in_flush_with_raise(self):
        logging.raiseExceptions = True
        self.assertRaises(IndexError, self._test_with_failure_in_method,
                          'flush', IndexError)

    def test_with_other_error_in_close_with_raise(self):
        logging.raiseExceptions = True
        self.assertRaises(IndexError, self._test_with_failure_in_method,
                          'close', IndexError)
class ModuleLevelMiscTest(BaseTest):

    """Test suite for some module level methods."""

    def test_disable(self):
        old_disable = logging.root.manager.disable
        # confirm our assumptions are correct
        self.assertEqual(old_disable, 0)
        self.addCleanup(logging.disable, old_disable)

        logging.disable(83)
        self.assertEqual(logging.root.manager.disable, 83)

        self.assertRaises(ValueError, logging.disable, "doesnotexists")

        class _NotAnIntOrString:
            pass

        self.assertRaises(TypeError, logging.disable, _NotAnIntOrString())

        # Level names are accepted as well as numeric levels.
        logging.disable("WARN")

        # test the default value introduced in 3.7
        # (Issue #28524)
        logging.disable()
        self.assertEqual(logging.root.manager.disable, logging.CRITICAL)

    def _test_log(self, method, level=None):
        # Shared driver: call logging.<method> and check a record is made
        # at the expected level, without triggering basicConfig.
        called = []
        support.patch(self, logging, 'basicConfig',
                      lambda *a, **kw: called.append((a, kw)))

        recording = RecordingHandler()
        logging.root.addHandler(recording)

        log_method = getattr(logging, method)
        if level is not None:
            log_method(level, "test me: %r", recording)
        else:
            log_method("test me: %r", recording)

        self.assertEqual(len(recording.records), 1)
        record = recording.records[0]
        self.assertEqual(record.getMessage(), "test me: %r" % recording)

        expected_level = level if level is not None else getattr(logging, method.upper())
        self.assertEqual(record.levelno, expected_level)

        # basicConfig was not called!
        self.assertEqual(called, [])

    def test_log(self):
        self._test_log('log', logging.ERROR)

    def test_debug(self):
        self._test_log('debug')

    def test_info(self):
        self._test_log('info')

    def test_warning(self):
        self._test_log('warning')

    def test_error(self):
        self._test_log('error')

    def test_critical(self):
        self._test_log('critical')

    def test_set_logger_class(self):
        # Only Logger subclasses may be installed.
        self.assertRaises(TypeError, logging.setLoggerClass, object)

        class MyLogger(logging.Logger):
            pass

        logging.setLoggerClass(MyLogger)
        self.assertEqual(logging.getLoggerClass(), MyLogger)

        logging.setLoggerClass(logging.Logger)
        self.assertEqual(logging.getLoggerClass(), logging.Logger)

    def test_subclass_logger_cache(self):
        # bpo-37258
        # A freshly created logger of a custom class must behave normally
        # (level handling, handler dispatch) despite the manager's cache.
        message = []

        class MyLogger(logging.getLoggerClass()):
            def __init__(self, name='MyLogger', level=logging.NOTSET):
                super().__init__(name, level)
                message.append('initialized')

        logging.setLoggerClass(MyLogger)
        logger = logging.getLogger('just_some_logger')
        self.assertEqual(message, ['initialized'])
        stream = io.StringIO()
        h = logging.StreamHandler(stream)
        logger.addHandler(h)
        try:
            logger.setLevel(logging.DEBUG)
            logger.debug("hello")
            self.assertEqual(stream.getvalue().strip(), "hello")

            stream.truncate(0)
            stream.seek(0)

            logger.setLevel(logging.INFO)
            logger.debug("hello")
            self.assertEqual(stream.getvalue(), "")
        finally:
            logger.removeHandler(h)
            h.close()
            logging.setLoggerClass(logging.Logger)

    def test_logging_at_shutdown(self):
        # bpo-20037: Doing text I/O late at interpreter shutdown must not crash
        code = textwrap.dedent("""
            import logging

            class A:
                def __del__(self):
                    try:
                        raise ValueError("some error")
                    except Exception:
                        logging.exception("exception in __del__")

            a = A()
            """)
        rc, out, err = assert_python_ok("-c", code)
        err = err.decode()
        self.assertIn("exception in __del__", err)
        self.assertIn("ValueError: some error", err)

    def test_logging_at_shutdown_open(self):
        # bpo-26789: FileHandler keeps a reference to the builtin open()
        # function to be able to open or reopen the file during Python
        # finalization.
        filename = os_helper.TESTFN
        self.addCleanup(os_helper.unlink, filename)

        code = textwrap.dedent(f"""
            import builtins
            import logging

            class A:
                def __del__(self):
                    logging.error("log in __del__")

            # basicConfig() opens the file, but logging.shutdown() closes
            # it at Python exit. When A.__del__() is called,
            # FileHandler._open() must be called again to re-open the file.
            logging.basicConfig(filename={filename!r}, encoding="utf-8")

            a = A()

            # Simulate the Python finalization which removes the builtin
            # open() function.
            del builtins.open
            """)
        assert_python_ok("-c", code)

        with open(filename, encoding="utf-8") as fp:
            self.assertEqual(fp.read().rstrip(), "ERROR:root:log in __del__")

    def test_recursion_error(self):
        # Issue 36272
        # A stack overflow inside logging must surface as a normal error,
        # not the fatal "Cannot recover from stack overflow." abort.
        code = textwrap.dedent("""
            import logging

            def rec():
                logging.error("foo")
                rec()

            rec()
            """)
        rc, out, err = assert_python_failure("-c", code)
        err = err.decode()
        self.assertNotIn("Cannot recover from stack overflow.", err)
        self.assertEqual(rc, 1)

    def test_get_level_names_mapping(self):
        mapping = logging.getLevelNamesMapping()
        self.assertEqual(logging._nameToLevel, mapping)  # value is equivalent
        self.assertIsNot(logging._nameToLevel, mapping)  # but not the internal data
        new_mapping = logging.getLevelNamesMapping()     # another call -> another copy
        self.assertIsNot(mapping, new_mapping)           # verify not the same object as before
        self.assertEqual(mapping, new_mapping)           # but equivalent in value
class LogRecordTest(BaseTest):
    """Tests for LogRecord construction and its optional attributes."""

    def test_str_rep(self):
        r = logging.makeLogRecord({})
        s = str(r)
        self.assertTrue(s.startswith('<LogRecord: '))
        self.assertTrue(s.endswith('>'))

    def test_dict_arg(self):
        # A single mapping argument is kept as-is in record.args so
        # %(key)s-style message formatting works.
        h = RecordingHandler()
        r = logging.getLogger()
        r.addHandler(h)
        d = {'less' : 'more' }
        logging.warning('less is %(less)s', d)
        self.assertIs(h.records[0].args, d)
        self.assertEqual(h.records[0].message, 'less is more')
        r.removeHandler(h)
        h.close()

    @staticmethod # pickled as target of child process in the following test
    def _extract_logrecord_process_name(key, logMultiprocessing, conn=None):
        # Create one record with multiprocessing importable (r1) and one
        # after removing it from sys.modules (r2), to compare processName.
        prev_logMultiprocessing = logging.logMultiprocessing
        logging.logMultiprocessing = logMultiprocessing
        try:
            import multiprocessing as mp
            name = mp.current_process().name

            r1 = logging.makeLogRecord({'msg': f'msg1_{key}'})

            # https://bugs.python.org/issue45128
            del sys.modules['multiprocessing']

            r2 = logging.makeLogRecord({'msg': f'msg2_{key}'})

            results = {'processName'  : name,
                       'r1.processName': r1.processName,
                       'r2.processName': r2.processName,
                      }
        finally:
            logging.logMultiprocessing = prev_logMultiprocessing
        if conn:
            conn.send(results)
        else:
            return results

    def test_multiprocessing(self):
        multiprocessing_imported = 'multiprocessing' in sys.modules
        try:
            # logMultiprocessing is True by default
            self.assertEqual(logging.logMultiprocessing, True)

            LOG_MULTI_PROCESSING = True
            # When logMultiprocessing == True:
            # In the main process processName = 'MainProcess'
            r = logging.makeLogRecord({})
            self.assertEqual(r.processName, 'MainProcess')

            results = self._extract_logrecord_process_name(1, LOG_MULTI_PROCESSING)
            self.assertEqual('MainProcess', results['processName'])
            self.assertEqual('MainProcess', results['r1.processName'])
            self.assertEqual('MainProcess', results['r2.processName'])

            # In other processes, processName is correct when multiprocessing in imported,
            # but it is (incorrectly) defaulted to 'MainProcess' otherwise (bpo-38762).
            import multiprocessing
            parent_conn, child_conn = multiprocessing.Pipe()
            p = multiprocessing.Process(
                target=self._extract_logrecord_process_name,
                args=(2, LOG_MULTI_PROCESSING, child_conn,)
            )
            p.start()
            results = parent_conn.recv()
            self.assertNotEqual('MainProcess', results['processName'])
            self.assertEqual(results['processName'], results['r1.processName'])
            self.assertEqual('MainProcess', results['r2.processName'])
            p.join()

        finally:
            if multiprocessing_imported:
                # Re-import so later tests see the module as before.
                import multiprocessing

    def test_optional(self):
        r = logging.makeLogRecord({})
        NOT_NONE = self.assertIsNotNone
        NOT_NONE(r.thread)
        NOT_NONE(r.threadName)
        NOT_NONE(r.process)
        NOT_NONE(r.processName)
        log_threads = logging.logThreads
        log_processes = logging.logProcesses
        log_multiprocessing = logging.logMultiprocessing
        try:
            # Disabling the flags suppresses the corresponding attributes.
            logging.logThreads = False
            logging.logProcesses = False
            logging.logMultiprocessing = False
            r = logging.makeLogRecord({})
            NONE = self.assertIsNone
            NONE(r.thread)
            NONE(r.threadName)
            NONE(r.process)
            NONE(r.processName)
        finally:
            logging.logThreads = log_threads
            logging.logProcesses = log_processes
            logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
    """Test suite for logging.basicConfig.

    basicConfig mutates module-global state (root handlers, level, the
    handler registry), so setUp snapshots that state and cleanup()
    restores it after every test.
    """
    def setUp(self):
        super(BasicConfigTest, self).setUp()
        # Snapshot global logging state for cleanup() to restore.
        self.handlers = logging.root.handlers
        self.saved_handlers = logging._handlers.copy()
        self.saved_handler_list = logging._handlerList[:]
        self.original_logging_level = logging.root.level
        self.addCleanup(self.cleanup)
        logging.root.handlers = []
    def tearDown(self):
        # Close any handlers a test left on the root logger.
        for h in logging.root.handlers[:]:
            logging.root.removeHandler(h)
            h.close()
        super(BasicConfigTest, self).tearDown()
    def cleanup(self):
        # Restore the state captured in setUp().
        setattr(logging.root, 'handlers', self.handlers)
        logging._handlers.clear()
        logging._handlers.update(self.saved_handlers)
        logging._handlerList[:] = self.saved_handler_list
        logging.root.setLevel(self.original_logging_level)
    def test_no_kwargs(self):
        logging.basicConfig()
        # handler defaults to a StreamHandler to sys.stderr
        self.assertEqual(len(logging.root.handlers), 1)
        handler = logging.root.handlers[0]
        self.assertIsInstance(handler, logging.StreamHandler)
        self.assertEqual(handler.stream, sys.stderr)
        formatter = handler.formatter
        # format defaults to logging.BASIC_FORMAT
        self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
        # datefmt defaults to None
        self.assertIsNone(formatter.datefmt)
        # style defaults to %
        self.assertIsInstance(formatter._style, logging.PercentStyle)
        # level is not explicitly set
        self.assertEqual(logging.root.level, self.original_logging_level)
    def test_strformatstyle(self):
        with support.captured_stdout() as output:
            logging.basicConfig(stream=sys.stdout, style="{")
            logging.error("Log an error")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue().strip(),
                             "ERROR:root:Log an error")
    def test_stringtemplatestyle(self):
        with support.captured_stdout() as output:
            logging.basicConfig(stream=sys.stdout, style="$")
            logging.error("Log an error")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue().strip(),
                             "ERROR:root:Log an error")
    def test_filename(self):
        def cleanup(h1, h2, fn):
            # Close both handlers before removing their shared file.
            h1.close()
            h2.close()
            os.remove(fn)
        logging.basicConfig(filename='test.log', encoding='utf-8')
        self.assertEqual(len(logging.root.handlers), 1)
        handler = logging.root.handlers[0]
        self.assertIsInstance(handler, logging.FileHandler)
        expected = logging.FileHandler('test.log', 'a', encoding='utf-8')
        self.assertEqual(handler.stream.mode, expected.stream.mode)
        self.assertEqual(handler.stream.name, expected.stream.name)
        self.addCleanup(cleanup, handler, expected, 'test.log')
    def test_filemode(self):
        def cleanup(h1, h2, fn):
            h1.close()
            h2.close()
            os.remove(fn)
        logging.basicConfig(filename='test.log', filemode='wb')
        handler = logging.root.handlers[0]
        expected = logging.FileHandler('test.log', 'wb')
        self.assertEqual(handler.stream.mode, expected.stream.mode)
        self.addCleanup(cleanup, handler, expected, 'test.log')
    def test_stream(self):
        stream = io.StringIO()
        self.addCleanup(stream.close)
        logging.basicConfig(stream=stream)
        self.assertEqual(len(logging.root.handlers), 1)
        handler = logging.root.handlers[0]
        self.assertIsInstance(handler, logging.StreamHandler)
        self.assertEqual(handler.stream, stream)
    def test_format(self):
        logging.basicConfig(format='%(asctime)s - %(message)s')
        formatter = logging.root.handlers[0].formatter
        self.assertEqual(formatter._style._fmt, '%(asctime)s - %(message)s')
    def test_datefmt(self):
        logging.basicConfig(datefmt='bar')
        formatter = logging.root.handlers[0].formatter
        self.assertEqual(formatter.datefmt, 'bar')
    def test_style(self):
        logging.basicConfig(style='$')
        formatter = logging.root.handlers[0].formatter
        self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
    def test_level(self):
        old_level = logging.root.level
        self.addCleanup(logging.root.setLevel, old_level)
        logging.basicConfig(level=57)
        self.assertEqual(logging.root.level, 57)
        # Test that second call has no effect
        logging.basicConfig(level=58)
        self.assertEqual(logging.root.level, 57)
    def test_incompatible(self):
        # filename/stream/handlers are mutually exclusive.
        assertRaises = self.assertRaises
        handlers = [logging.StreamHandler()]
        stream = sys.stderr
        assertRaises(ValueError, logging.basicConfig, filename='test.log',
                     stream=stream)
        assertRaises(ValueError, logging.basicConfig, filename='test.log',
                     handlers=handlers)
        assertRaises(ValueError, logging.basicConfig, stream=stream,
                     handlers=handlers)
        # Issue 23207: test for invalid kwargs
        assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
        # Should pop both filename and filemode even if filename is None
        logging.basicConfig(filename=None, filemode='a')
    def test_handlers(self):
        handlers = [
            logging.StreamHandler(),
            logging.StreamHandler(sys.stdout),
            logging.StreamHandler(),
        ]
        f = logging.Formatter()
        handlers[2].setFormatter(f)
        logging.basicConfig(handlers=handlers)
        self.assertIs(handlers[0], logging.root.handlers[0])
        self.assertIs(handlers[1], logging.root.handlers[1])
        self.assertIs(handlers[2], logging.root.handlers[2])
        # Handlers without a formatter get the default one; an existing
        # formatter is left alone.
        self.assertIsNotNone(handlers[0].formatter)
        self.assertIsNotNone(handlers[1].formatter)
        self.assertIs(handlers[2].formatter, f)
        self.assertIs(handlers[0].formatter, handlers[1].formatter)
    def test_force(self):
        # force=True replaces existing handlers and re-applies the level.
        old_string_io = io.StringIO()
        new_string_io = io.StringIO()
        old_handlers = [logging.StreamHandler(old_string_io)]
        new_handlers = [logging.StreamHandler(new_string_io)]
        logging.basicConfig(level=logging.WARNING, handlers=old_handlers)
        logging.warning('warn')
        logging.info('info')
        logging.debug('debug')
        self.assertEqual(len(logging.root.handlers), 1)
        logging.basicConfig(level=logging.INFO, handlers=new_handlers,
                            force=True)
        logging.warning('warn')
        logging.info('info')
        logging.debug('debug')
        self.assertEqual(len(logging.root.handlers), 1)
        self.assertEqual(old_string_io.getvalue().strip(),
                         'WARNING:root:warn')
        self.assertEqual(new_string_io.getvalue().strip(),
                         'WARNING:root:warn\nINFO:root:info')
    def test_encoding(self):
        try:
            encoding = 'utf-8'
            logging.basicConfig(filename='test.log', encoding=encoding,
                                errors='strict',
                                format='%(message)s', level=logging.DEBUG)
            self.assertEqual(len(logging.root.handlers), 1)
            handler = logging.root.handlers[0]
            self.assertIsInstance(handler, logging.FileHandler)
            self.assertEqual(handler.encoding, encoding)
            logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
        finally:
            handler.close()
            with open('test.log', encoding='utf-8') as f:
                data = f.read().strip()
            os.remove('test.log')
        self.assertEqual(data,
                         'The Øresund Bridge joins Copenhagen to Malmö')
    def test_encoding_errors(self):
        try:
            encoding = 'ascii'
            logging.basicConfig(filename='test.log', encoding=encoding,
                                errors='ignore',
                                format='%(message)s', level=logging.DEBUG)
            self.assertEqual(len(logging.root.handlers), 1)
            handler = logging.root.handlers[0]
            self.assertIsInstance(handler, logging.FileHandler)
            self.assertEqual(handler.encoding, encoding)
            logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
        finally:
            handler.close()
            with open('test.log', encoding='utf-8') as f:
                data = f.read().strip()
            os.remove('test.log')
        # errors='ignore' silently drops the non-ASCII characters.
        self.assertEqual(data, 'The resund Bridge joins Copenhagen to Malm')
    def test_encoding_errors_default(self):
        try:
            encoding = 'ascii'
            logging.basicConfig(filename='test.log', encoding=encoding,
                                format='%(message)s', level=logging.DEBUG)
            self.assertEqual(len(logging.root.handlers), 1)
            handler = logging.root.handlers[0]
            self.assertIsInstance(handler, logging.FileHandler)
            self.assertEqual(handler.encoding, encoding)
            # errors defaults to 'backslashreplace' when unspecified.
            self.assertEqual(handler.errors, 'backslashreplace')
            logging.debug('😂: ☃️: The Øresund Bridge joins Copenhagen to Malmö')
        finally:
            handler.close()
            with open('test.log', encoding='utf-8') as f:
                data = f.read().strip()
            os.remove('test.log')
        self.assertEqual(data, r'\U0001f602: \u2603\ufe0f: The \xd8resund '
                               r'Bridge joins Copenhagen to Malm\xf6')
    def test_encoding_errors_none(self):
        # Specifying None should behave as 'strict'
        try:
            encoding = 'ascii'
            logging.basicConfig(filename='test.log', encoding=encoding,
                                errors=None,
                                format='%(message)s', level=logging.DEBUG)
            self.assertEqual(len(logging.root.handlers), 1)
            handler = logging.root.handlers[0]
            self.assertIsInstance(handler, logging.FileHandler)
            self.assertEqual(handler.encoding, encoding)
            self.assertIsNone(handler.errors)
            message = []
            def dummy_handle_error(record):
                # Capture the encoding error instead of printing it.
                _, v, _ = sys.exc_info()
                message.append(str(v))
            handler.handleError = dummy_handle_error
            logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
            self.assertTrue(message)
            self.assertIn("'ascii' codec can't encode "
                          "character '\\xd8' in position 4:", message[0])
        finally:
            handler.close()
            with open('test.log', encoding='utf-8') as f:
                data = f.read().strip()
            os.remove('test.log')
        # didn't write anything due to the encoding error
        self.assertEqual(data, r'')
    def _test_log(self, method, level=None):
        # logging.root has no handlers so basicConfig should be called
        called = []
        old_basic_config = logging.basicConfig
        def my_basic_config(*a, **kw):
            old_basic_config()
            old_level = logging.root.level
            logging.root.setLevel(100) # avoid having messages in stderr
            self.addCleanup(logging.root.setLevel, old_level)
            called.append((a, kw))
        support.patch(self, logging, 'basicConfig', my_basic_config)
        log_method = getattr(logging, method)
        if level is not None:
            log_method(level, "test me")
        else:
            log_method("test me")
        # basicConfig was called with no arguments
        self.assertEqual(called, [((), {})])
    def test_log(self):
        self._test_log('log', logging.WARNING)
    def test_debug(self):
        self._test_log('debug')
    def test_info(self):
        self._test_log('info')
    def test_warning(self):
        self._test_log('warning')
    def test_error(self):
        self._test_log('error')
    def test_critical(self):
        self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
    """Tests for logging.LoggerAdapter, including nested adapters and
    manager delegation through the adapter chain."""
    def setUp(self):
        super(LoggerAdapterTest, self).setUp()
        # Record the global handler list so cleanup() can restore it.
        old_handler_list = logging._handlerList[:]
        self.recording = RecordingHandler()
        self.logger = logging.root
        self.logger.addHandler(self.recording)
        self.addCleanup(self.logger.removeHandler, self.recording)
        self.addCleanup(self.recording.close)
        def cleanup():
            logging._handlerList[:] = old_handler_list
        self.addCleanup(cleanup)
        self.addCleanup(logging.shutdown)
        self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
    def test_exception(self):
        msg = 'testing exception: %r'
        exc = None
        try:
            1 / 0
        except ZeroDivisionError as e:
            exc = e
            self.adapter.exception(msg, self.recording)
        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        self.assertEqual(record.levelno, logging.ERROR)
        self.assertEqual(record.msg, msg)
        self.assertEqual(record.args, (self.recording,))
        # exception() picks up the active exception automatically.
        self.assertEqual(record.exc_info,
                         (exc.__class__, exc, exc.__traceback__))
    def test_exception_excinfo(self):
        try:
            1 / 0
        except ZeroDivisionError as e:
            exc = e
        # An exception instance may also be passed explicitly.
        self.adapter.exception('exc_info test', exc_info=exc)
        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        self.assertEqual(record.exc_info,
                         (exc.__class__, exc, exc.__traceback__))
    def test_critical(self):
        msg = 'critical test! %r'
        self.adapter.critical(msg, self.recording)
        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        self.assertEqual(record.levelno, logging.CRITICAL)
        self.assertEqual(record.msg, msg)
        self.assertEqual(record.args, (self.recording,))
    def test_is_enabled_for(self):
        old_disable = self.adapter.logger.manager.disable
        self.adapter.logger.manager.disable = 33
        self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
                        old_disable)
        self.assertFalse(self.adapter.isEnabledFor(32))
    def test_has_handlers(self):
        self.assertTrue(self.adapter.hasHandlers())
        for handler in self.logger.handlers:
            self.logger.removeHandler(handler)
        self.assertFalse(self.logger.hasHandlers())
        self.assertFalse(self.adapter.hasHandlers())
    def test_nested(self):
        class Adapter(logging.LoggerAdapter):
            prefix = 'Adapter'
            def process(self, msg, kwargs):
                return f"{self.prefix} {msg}", kwargs
        msg = 'Adapters can be nested, yo.'
        adapter = Adapter(logger=self.logger, extra=None)
        adapter_adapter = Adapter(logger=adapter, extra=None)
        adapter_adapter.prefix = 'AdapterAdapter'
        self.assertEqual(repr(adapter), repr(adapter_adapter))
        adapter_adapter.log(logging.CRITICAL, msg, self.recording)
        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        self.assertEqual(record.levelno, logging.CRITICAL)
        # Both adapters' process() hooks are applied, outermost last.
        self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
        self.assertEqual(record.args, (self.recording,))
        orig_manager = adapter_adapter.manager
        self.assertIs(adapter.manager, orig_manager)
        self.assertIs(self.logger.manager, orig_manager)
        temp_manager = object()
        try:
            # Setting manager on the outer adapter propagates all the way
            # down to the underlying logger.
            adapter_adapter.manager = temp_manager
            self.assertIs(adapter_adapter.manager, temp_manager)
            self.assertIs(adapter.manager, temp_manager)
            self.assertIs(self.logger.manager, temp_manager)
        finally:
            adapter_adapter.manager = orig_manager
        self.assertIs(adapter_adapter.manager, orig_manager)
        self.assertIs(adapter.manager, orig_manager)
        self.assertIs(self.logger.manager, orig_manager)
class LoggerTest(BaseTest, AssertErrorMessage):
    """Tests for the Logger class itself: levels, caller discovery,
    record creation, pickling, and the isEnabledFor cache."""
    def setUp(self):
        super(LoggerTest, self).setUp()
        self.recording = RecordingHandler()
        self.logger = logging.Logger(name='blah')
        self.logger.addHandler(self.recording)
        self.addCleanup(self.logger.removeHandler, self.recording)
        self.addCleanup(self.recording.close)
        self.addCleanup(logging.shutdown)
    def test_set_invalid_level(self):
        self.assert_error_message(
            TypeError, 'Level not an integer or a valid string: None',
            self.logger.setLevel, None)
        self.assert_error_message(
            TypeError, 'Level not an integer or a valid string: (0, 0)',
            self.logger.setLevel, (0, 0))
    def test_exception(self):
        msg = 'testing exception: %r'
        exc = None
        try:
            1 / 0
        except ZeroDivisionError as e:
            exc = e
            self.logger.exception(msg, self.recording)
        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        self.assertEqual(record.levelno, logging.ERROR)
        self.assertEqual(record.msg, msg)
        self.assertEqual(record.args, (self.recording,))
        self.assertEqual(record.exc_info,
                         (exc.__class__, exc, exc.__traceback__))
    def test_log_invalid_level_with_raise(self):
        with support.swap_attr(logging, 'raiseExceptions', True):
            self.assertRaises(TypeError, self.logger.log, '10', 'test message')
    def test_log_invalid_level_no_raise(self):
        with support.swap_attr(logging, 'raiseExceptions', False):
            self.logger.log('10', 'test message') # no exception happens
    def test_find_caller_with_stack_info(self):
        called = []
        support.patch(self, logging.traceback, 'print_stack',
                      lambda f, file: called.append(file.getvalue()))
        self.logger.findCaller(stack_info=True)
        self.assertEqual(len(called), 1)
        self.assertEqual('Stack (most recent call last):\n', called[0])
    def test_find_caller_with_stacklevel(self):
        # Each increment of stacklevel attributes the record to one more
        # frame up the call chain: innermost -> inner -> outer -> test.
        the_level = 1
        def innermost():
            self.logger.warning('test', stacklevel=the_level)
        def inner():
            innermost()
        def outer():
            inner()
        records = self.recording.records
        outer()
        self.assertEqual(records[-1].funcName, 'innermost')
        lineno = records[-1].lineno
        the_level += 1
        outer()
        self.assertEqual(records[-1].funcName, 'inner')
        self.assertGreater(records[-1].lineno, lineno)
        lineno = records[-1].lineno
        the_level += 1
        outer()
        self.assertEqual(records[-1].funcName, 'outer')
        self.assertGreater(records[-1].lineno, lineno)
        lineno = records[-1].lineno
        the_level += 1
        outer()
        self.assertEqual(records[-1].funcName, 'test_find_caller_with_stacklevel')
        self.assertGreater(records[-1].lineno, lineno)
    def test_make_record_with_extra_overwrite(self):
        # extra keys may not shadow attributes the record already has.
        name = 'my record'
        level = 13
        fn = lno = msg = args = exc_info = func = sinfo = None
        rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
                                       exc_info, func, sinfo)
        for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
            extra = {key: 'some value'}
            self.assertRaises(KeyError, self.logger.makeRecord, name, level,
                              fn, lno, msg, args, exc_info,
                              extra=extra, sinfo=sinfo)
    def test_make_record_with_extra_no_overwrite(self):
        name = 'my record'
        level = 13
        fn = lno = msg = args = exc_info = func = sinfo = None
        extra = {'valid_key': 'some value'}
        result = self.logger.makeRecord(name, level, fn, lno, msg, args,
                                        exc_info, extra=extra, sinfo=sinfo)
        self.assertIn('valid_key', result.__dict__)
    def test_has_handlers(self):
        self.assertTrue(self.logger.hasHandlers())
        for handler in self.logger.handlers:
            self.logger.removeHandler(handler)
        self.assertFalse(self.logger.hasHandlers())
    def test_has_handlers_no_propagate(self):
        # With propagate off, ancestors' handlers don't count.
        child_logger = logging.getLogger('blah.child')
        child_logger.propagate = False
        self.assertFalse(child_logger.hasHandlers())
    def test_is_enabled_for(self):
        old_disable = self.logger.manager.disable
        self.logger.manager.disable = 23
        self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
        self.assertFalse(self.logger.isEnabledFor(22))
    def test_is_enabled_for_disabled_logger(self):
        old_disabled = self.logger.disabled
        old_disable = self.logger.manager.disable
        self.logger.disabled = True
        self.logger.manager.disable = 21
        self.addCleanup(setattr, self.logger, 'disabled', old_disabled)
        self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
        self.assertFalse(self.logger.isEnabledFor(22))
    def test_root_logger_aliases(self):
        root = logging.getLogger()
        self.assertIs(root, logging.root)
        self.assertIs(root, logging.getLogger(None))
        self.assertIs(root, logging.getLogger(''))
        self.assertIs(root, logging.getLogger('root'))
        self.assertIs(root, logging.getLogger('foo').root)
        self.assertIs(root, logging.getLogger('foo.bar').root)
        self.assertIs(root, logging.getLogger('foo').parent)
        self.assertIsNot(root, logging.getLogger('\0'))
        self.assertIsNot(root, logging.getLogger('foo.bar').parent)
    def test_invalid_names(self):
        self.assertRaises(TypeError, logging.getLogger, any)
        self.assertRaises(TypeError, logging.getLogger, b'foo')
    def test_pickling(self):
        # Unpickling a Logger must yield the registered singleton.
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
                logger = logging.getLogger(name)
                s = pickle.dumps(logger, proto)
                unpickled = pickle.loads(s)
                self.assertIs(unpickled, logger)
    def test_caching(self):
        root = self.root_logger
        logger1 = logging.getLogger("abc")
        logger2 = logging.getLogger("abc.def")
        # Set root logger level and ensure cache is empty
        root.setLevel(logging.ERROR)
        self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR)
        self.assertEqual(logger2._cache, {})
        # Ensure cache is populated and calls are consistent
        self.assertTrue(logger2.isEnabledFor(logging.ERROR))
        self.assertFalse(logger2.isEnabledFor(logging.DEBUG))
        self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False})
        self.assertEqual(root._cache, {})
        self.assertTrue(logger2.isEnabledFor(logging.ERROR))
        # Ensure root cache gets populated
        self.assertEqual(root._cache, {})
        self.assertTrue(root.isEnabledFor(logging.ERROR))
        self.assertEqual(root._cache, {logging.ERROR: True})
        # Set parent logger level and ensure caches are emptied
        logger1.setLevel(logging.CRITICAL)
        self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
        self.assertEqual(logger2._cache, {})
        # Ensure logger2 uses parent logger's effective level
        self.assertFalse(logger2.isEnabledFor(logging.ERROR))
        # Set level to NOTSET and ensure caches are empty
        logger2.setLevel(logging.NOTSET)
        self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
        self.assertEqual(logger2._cache, {})
        self.assertEqual(logger1._cache, {})
        self.assertEqual(root._cache, {})
        # Verify logger2 follows parent and not root
        self.assertFalse(logger2.isEnabledFor(logging.ERROR))
        self.assertTrue(logger2.isEnabledFor(logging.CRITICAL))
        self.assertFalse(logger1.isEnabledFor(logging.ERROR))
        self.assertTrue(logger1.isEnabledFor(logging.CRITICAL))
        self.assertTrue(root.isEnabledFor(logging.ERROR))
        # Disable logging in manager and ensure caches are clear
        logging.disable()
        self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
        self.assertEqual(logger2._cache, {})
        self.assertEqual(logger1._cache, {})
        self.assertEqual(root._cache, {})
        # Ensure no loggers are enabled
        self.assertFalse(logger1.isEnabledFor(logging.CRITICAL))
        self.assertFalse(logger2.isEnabledFor(logging.CRITICAL))
        self.assertFalse(root.isEnabledFor(logging.CRITICAL))
class BaseFileTest(BaseTest):
    "Base class for handler tests that write log files"
    def setUp(self):
        BaseTest.setUp(self)
        # Create (and immediately close) a unique temp file; handlers under
        # test reopen it by name via self.fn.
        fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
        os.close(fd)
        self.rmfiles = []  # files registered for deletion in tearDown
    def tearDown(self):
        for fn in self.rmfiles:
            os.unlink(fn)
        if os.path.exists(self.fn):
            os.unlink(self.fn)
        BaseTest.tearDown(self)
    def assertLogFile(self, filename):
        "Assert a log file is there and register it for deletion"
        self.assertTrue(os.path.exists(filename),
                        msg="Log file %r does not exist" % filename)
        self.rmfiles.append(filename)
    def next_rec(self):
        # Build a minimal DEBUG record carrying the next sequential message.
        return logging.LogRecord('n', logging.DEBUG, 'p', 1,
                                 self.next_message(), None, None, None)
class FileHandlerTest(BaseFileTest):
    """Tests for logging.FileHandler: delayed open and emit-after-close."""
    def test_delay(self):
        os.unlink(self.fn)
        fh = logging.FileHandler(self.fn, encoding='utf-8', delay=True)
        # With delay=True the file must not be opened until the first emit.
        self.assertIsNone(fh.stream)
        self.assertFalse(os.path.exists(self.fn))
        fh.handle(logging.makeLogRecord({}))
        self.assertIsNotNone(fh.stream)
        self.assertTrue(os.path.exists(self.fn))
        fh.close()
    def test_emit_after_closing_in_write_mode(self):
        # Issue #42378: emitting on a closed 'w'-mode handler must not
        # reopen the file in write mode and truncate earlier output.
        os.unlink(self.fn)
        fh = logging.FileHandler(self.fn, encoding='utf-8', mode='w')
        fh.setFormatter(logging.Formatter('%(message)s'))
        fh.emit(self.next_rec())    # '1'
        fh.close()
        fh.emit(self.next_rec())    # '2'
        with open(self.fn) as fp:
            self.assertEqual(fp.read().strip(), '1')
class RotatingFileHandlerTest(BaseFileTest):
    """Tests for RotatingFileHandler: rollover decisions, backup naming,
    and the namer/rotator customization hooks."""
    def test_should_not_rollover(self):
        # If maxbytes is zero rollover never occurs
        rh = logging.handlers.RotatingFileHandler(
            self.fn, encoding="utf-8", maxBytes=0)
        self.assertFalse(rh.shouldRollover(None))
        rh.close()
    def test_should_rollover(self):
        # maxBytes=1 means any record triggers a rollover.
        rh = logging.handlers.RotatingFileHandler(self.fn, encoding="utf-8", maxBytes=1)
        self.assertTrue(rh.shouldRollover(self.next_rec()))
        rh.close()
    def test_file_created(self):
        # checks that the file is created and assumes it was created
        # by us
        rh = logging.handlers.RotatingFileHandler(self.fn, encoding="utf-8")
        rh.emit(self.next_rec())
        self.assertLogFile(self.fn)
        rh.close()
    def test_rollover_filenames(self):
        # A namer assigned on the instance renames the backup files.
        def namer(name):
            return name + ".test"
        rh = logging.handlers.RotatingFileHandler(
            self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
        rh.namer = namer
        rh.emit(self.next_rec())
        self.assertLogFile(self.fn)
        rh.emit(self.next_rec())
        self.assertLogFile(namer(self.fn + ".1"))
        rh.emit(self.next_rec())
        self.assertLogFile(namer(self.fn + ".2"))
        # backupCount=2, so a third backup must never appear.
        self.assertFalse(os.path.exists(namer(self.fn + ".3")))
        rh.close()
    def test_namer_rotator_inheritance(self):
        # namer/rotator may also be provided by overriding in a subclass.
        class HandlerWithNamerAndRotator(logging.handlers.RotatingFileHandler):
            def namer(self, name):
                return name + ".test"
            def rotator(self, source, dest):
                if os.path.exists(source):
                    os.replace(source, dest + ".rotated")
        rh = HandlerWithNamerAndRotator(
            self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
        self.assertEqual(rh.namer(self.fn), self.fn + ".test")
        rh.emit(self.next_rec())
        self.assertLogFile(self.fn)
        rh.emit(self.next_rec())
        self.assertLogFile(rh.namer(self.fn + ".1") + ".rotated")
        self.assertFalse(os.path.exists(rh.namer(self.fn + ".1")))
        rh.close()
    @support.requires_zlib()
    def test_rotator(self):
        def namer(name):
            return name + ".gz"
        def rotator(source, dest):
            # Compress the rolled-over file and remove the original.
            with open(source, "rb") as sf:
                data = sf.read()
                compressed = zlib.compress(data, 9)
                with open(dest, "wb") as df:
                    df.write(compressed)
            os.remove(source)
        rh = logging.handlers.RotatingFileHandler(
            self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
        rh.rotator = rotator
        rh.namer = namer
        m1 = self.next_rec()
        rh.emit(m1)
        self.assertLogFile(self.fn)
        m2 = self.next_rec()
        rh.emit(m2)
        fn = namer(self.fn + ".1")
        self.assertLogFile(fn)
        newline = os.linesep
        with open(fn, "rb") as f:
            compressed = f.read()
            data = zlib.decompress(compressed)
            self.assertEqual(data.decode("ascii"), m1.msg + newline)
        rh.emit(self.next_rec())
        fn = namer(self.fn + ".2")
        self.assertLogFile(fn)
        with open(fn, "rb") as f:
            compressed = f.read()
            data = zlib.decompress(compressed)
            self.assertEqual(data.decode("ascii"), m1.msg + newline)
        rh.emit(self.next_rec())
        # After another rollover, .2 now holds the second message.
        fn = namer(self.fn + ".2")
        with open(fn, "rb") as f:
            compressed = f.read()
            data = zlib.decompress(compressed)
            self.assertEqual(data.decode("ascii"), m2.msg + newline)
        self.assertFalse(os.path.exists(namer(self.fn + ".3")))
        rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
    # other test methods added below (see the setattr loop that follows
    # this class, which generates test_compute_rollover_<when> methods)
    def test_rollover(self):
        # 'S' = rotate every second, so a 1.1s sleep forces a rollover.
        fh = logging.handlers.TimedRotatingFileHandler(
            self.fn, 'S', encoding="utf-8", backupCount=1)
        fmt = logging.Formatter('%(asctime)s %(message)s')
        fh.setFormatter(fmt)
        r1 = logging.makeLogRecord({'msg': 'testing - initial'})
        fh.emit(r1)
        self.assertLogFile(self.fn)
        time.sleep(1.1)    # a little over a second ...
        r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
        fh.emit(r2)
        fh.close()
        # At this point, we should have a recent rotated file which we
        # can test for the existence of. However, in practice, on some
        # machines which run really slowly, we don't know how far back
        # in time to go to look for the log file. So, we go back a fair
        # bit, and stop as soon as we see a rotated file. In theory this
        # could of course still fail, but the chances are lower.
        found = False
        now = datetime.datetime.now()
        GO_BACK = 5 * 60 # seconds
        for secs in range(GO_BACK):
            prev = now - datetime.timedelta(seconds=secs)
            fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
            found = os.path.exists(fn)
            if found:
                self.rmfiles.append(fn)
                break
        msg = 'No rotated files found, went back %d seconds' % GO_BACK
        if not found:
            # print additional diagnostics
            dn, fn = os.path.split(self.fn)
            files = [f for f in os.listdir(dn) if f.startswith(fn)]
            print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
            print('The only matching files are: %s' % files, file=sys.stderr)
            for f in files:
                print('Contents of %s:' % f)
                path = os.path.join(dn, f)
                with open(path, 'r') as tf:
                    print(tf.read())
        self.assertTrue(found, msg=msg)
    def test_invalid(self):
        # Unknown interval codes must be rejected at construction time:
        # 'X' is not a code; 'W' needs a weekday digit; 'W7' is out of range.
        assertRaises = self.assertRaises
        assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
                     self.fn, 'X', encoding="utf-8", delay=True)
        assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
                     self.fn, 'W', encoding="utf-8", delay=True)
        assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
                     self.fn, 'W7', encoding="utf-8", delay=True)
    def test_compute_rollover_daily_attime(self):
        currentTime = 0
        atTime = datetime.time(12, 0, 0)
        rh = logging.handlers.TimedRotatingFileHandler(
            self.fn, encoding="utf-8", when='MIDNIGHT', interval=1, backupCount=0,
            utc=True, atTime=atTime)
        try:
            actual = rh.computeRollover(currentTime)
            # First rollover: noon of day 0 (epoch + 12h).
            self.assertEqual(actual, currentTime + 12 * 60 * 60)
            actual = rh.computeRollover(currentTime + 13 * 60 * 60)
            # Past today's noon, so the next rollover is tomorrow's noon.
            self.assertEqual(actual, currentTime + 36 * 60 * 60)
        finally:
            rh.close()
    #@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
    def test_compute_rollover_weekly_attime(self):
        currentTime = int(time.time())
        today = currentTime - currentTime % 86400
        atTime = datetime.time(12, 0, 0)
        wday = time.gmtime(today).tm_wday
        for day in range(7):
            rh = logging.handlers.TimedRotatingFileHandler(
                self.fn, encoding="utf-8", when='W%d' % day, interval=1, backupCount=0,
                utc=True, atTime=atTime)
            try:
                if wday > day:
                    # The rollover day has already passed this week, so we
                    # go over into next week
                    expected = (7 - wday + day)
                else:
                    expected = (day - wday)
                # At this point expected is in days from now, convert to seconds
                expected *= 24 * 60 * 60
                # Add in the rollover time
                expected += 12 * 60 * 60
                # Add in adjustment for today
                expected += today
                actual = rh.computeRollover(today)
                if actual != expected:
                    print('failed in timezone: %d' % time.timezone)
                    print('local vars: %s' % locals())
                self.assertEqual(actual, expected)
                if day == wday:
                    # goes into following week
                    expected += 7 * 24 * 60 * 60
                actual = rh.computeRollover(today + 13 * 60 * 60)
                if actual != expected:
                    print('failed in timezone: %d' % time.timezone)
                    print('local vars: %s' % locals())
                self.assertEqual(actual, expected)
            finally:
                rh.close()
def secs(**kw):
    # Length of the given timedelta in whole seconds.
    return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
# Generate one test_compute_rollover_<when> method per interval code and
# attach it to TimedRotatingFileHandlerTest. `when`/`exp` are bound as
# default arguments to avoid the late-binding-closure pitfall.
for when, exp in (('S', 1),
                  ('M', 60),
                  ('H', 60 * 60),
                  ('D', 60 * 60 * 24),
                  ('MIDNIGHT', 60 * 60 * 24),
                  # current time (epoch start) is a Thursday, W0 means Monday
                  ('W0', secs(days=4, hours=24)),
                 ):
    def test_compute_rollover(self, when=when, exp=exp):
        rh = logging.handlers.TimedRotatingFileHandler(
            self.fn, encoding="utf-8", when=when, interval=1, backupCount=0, utc=True)
        currentTime = 0.0
        actual = rh.computeRollover(currentTime)
        if exp != actual:
            # Failures occur on some systems for MIDNIGHT and W0.
            # Print detailed calculation for MIDNIGHT so we can try to see
            # what's going on
            if when == 'MIDNIGHT':
                try:
                    if rh.utc:
                        t = time.gmtime(currentTime)
                    else:
                        t = time.localtime(currentTime)
                    currentHour = t[3]
                    currentMinute = t[4]
                    currentSecond = t[5]
                    # r is the number of seconds left between now and midnight
                    r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
                                                       currentMinute) * 60 +
                                                      currentSecond)
                    result = currentTime + r
                    print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
                    print('currentHour: %s' % currentHour, file=sys.stderr)
                    print('currentMinute: %s' % currentMinute, file=sys.stderr)
                    print('currentSecond: %s' % currentSecond, file=sys.stderr)
                    print('r: %s' % r, file=sys.stderr)
                    print('result: %s' % result, file=sys.stderr)
                except Exception:
                    print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
        self.assertEqual(exp, actual)
        rh.close()
    setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
    """Windows-only: verify NTEventLogHandler writes to the event log."""
    def test_basic(self):
        logtype = 'Application'
        elh = win32evtlog.OpenEventLog(None, logtype)
        num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
        try:
            h = logging.handlers.NTEventLogHandler('test_logging')
        except pywintypes.error as e:
            if e.winerror == 5:  # access denied
                raise unittest.SkipTest('Insufficient privileges to run test')
            raise
        r = logging.makeLogRecord({'msg': 'Test Log Message'})
        h.handle(r)
        h.close()
        # Now see if the event is recorded
        self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
        flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
                win32evtlog.EVENTLOG_SEQUENTIAL_READ
        found = False
        GO_BACK = 100
        events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
        for e in events:
            if e.SourceName != 'test_logging':
                continue
            msg = win32evtlogutil.SafeFormatMessage(e, logtype)
            if msg != 'Test Log Message\r\n':
                continue
            found = True
            break
        msg = 'Record not found in event log, went back %d records' % GO_BACK
        self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
    def test__all__(self):
        """Check ``logging.__all__`` covers every public name except a known
        set of internal helpers."""
        internal_names = {
            'logThreads', 'logMultiprocessing', 'logProcesses',
            'currentframe', 'PercentStyle', 'StrFormatStyle',
            'StringTemplateStyle', 'Filterer', 'PlaceHolder', 'Manager',
            'RootLogger', 'root', 'threading',
        }
        support.check__all__(self, logging, not_exported=internal_names)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
    """Run the complete logging test suite under the default locale."""
    test_cases = [
        BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
        HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
        DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
        ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
        StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
        QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
        LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
        RotatingFileHandlerTest, LastResortTest, LogRecordTest,
        ExceptionTest, SysLogHandlerTest, IPv6SysLogHandlerTest, HTTPHandlerTest,
        NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
        UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
        MiscTestCase,
    ]
    # QueueListener is absent on some builds; include its tests only when
    # present.
    if hasattr(logging.handlers, 'QueueListener'):
        test_cases.append(QueueListenerTest)
    support.run_unittest(*test_cases)
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    test_main()
|
audiowrite.py | import io
from distutils.command.config import config
import numpy as np
import threading
from pathlib import Path
import contextlib
import soundfile
from scipy.io.wavfile import write as wav_write
from pb_chime5.mapping import Dispatcher
from pb_chime5.io.audioread import normalize_path
# Bounds of the signed 16-bit integer range (PCM sample limits).
int16_min = np.iinfo(np.int16).min
int16_max = np.iinfo(np.int16).max
def dump_audio(
        obj,
        path,
        *,
        sample_rate=16000,
        dtype=np.int16,
        start=None,
        normalize=True,
        format=None,
        mkdir=False,
):
    """
    Write an audio signal to ``path``, optionally normalizing it first.

    If normalize is False and the dtype is float, the values of obj should be in
    the range [-1, 1).

    Params:
        obj: Shape (channels, samples) or (samples,)
        path: Target file (str, Path or file-like accepted by SoundFile).
        sample_rate: Sample rate in Hz written to the file header.
        dtype: On-disk sample encoding; ``None`` keeps ``obj``'s own dtype.
        start: Optional sample offset to write into an existing file.
        normalize: Scale the signal to full scale (int16 output only).
        format: Explicit container format, see soundfile.available_formats().
        mkdir: Create missing parent directories on demand.

    >>> from pb_chime5.utils.process_caller import run_process
    >>> from pb_chime5.io import load_audio
    >>> a = np.array([1, 2, -4, 4], dtype=np.int16)
    >>> import io, os
    >>> # file = io.BytesIO()
    >>> file = Path('tmp_audio_data.wav')
    >>> dump_audio(a, file, normalize=False)
    >>> load_audio(file) * 2**15
    array([ 1., 2., -4., 4.])
    >>> print(run_process(f'file {file}').stdout)
    tmp_audio_data.wav: RIFF (little-endian) data, WAVE audio, Microsoft PCM, 16 bit, mono 16000 Hz
    <BLANKLINE>
    >>> dump_audio(a, file, normalize=True)
    >>> load_audio(file)
    array([ 0.24996948, 0.49996948, -0.99996948, 0.99996948])
    >>> print(run_process(f'file {file}').stdout)
    tmp_audio_data.wav: RIFF (little-endian) data, WAVE audio, Microsoft PCM, 16 bit, mono 16000 Hz
    <BLANKLINE>
    >>> data = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) / 32
    >>> data
    array([0. , 0.03125, 0.0625 , 0.09375, 0.125 , 0.15625, 0.1875 ,
           0.21875, 0.25 , 0.28125])
    >>> dump_audio(data, file, normalize=False)
    >>> load_audio(file)
    array([0. , 0.03125, 0.0625 , 0.09375, 0.125 , 0.15625, 0.1875 ,
           0.21875, 0.25 , 0.28125])
    >>> print(run_process(f'file {file}').stdout)
    tmp_audio_data.wav: RIFF (little-endian) data, WAVE audio, Microsoft PCM, 16 bit, mono 16000 Hz
    <BLANKLINE>
    >>> dump_audio(np.array([16, 24]) / 32, file, normalize=False, start=1)
    >>> load_audio(file)
    array([0. , 0.5 , 0.75 , 0.09375, 0.125 , 0.15625, 0.1875 ,
           0.21875, 0.25 , 0.28125])
    >>> print(run_process(f'file {file}').stdout)
    tmp_audio_data.wav: RIFF (little-endian) data, WAVE audio, Microsoft PCM, 16 bit, mono 16000 Hz
    <BLANKLINE>
    >>> dump_audio(np.array([16, 24, 24, 24]) / 32, file, normalize=False, start=9)
    >>> load_audio(file)
    array([0. , 0.5 , 0.75 , 0.09375, 0.125 , 0.15625, 0.1875 ,
           0.21875, 0.25 , 0.5 , 0.75 , 0.75 , 0.75 ])
    >>> load_audio(file).shape
    (13,)
    >>> dump_audio(np.array([16, 24, 24, 24]) / 32, file, normalize=False, start=20)
    >>> load_audio(file)
    array([0. , 0.5 , 0.75 , 0.09375, 0.125 , 0.15625, 0.1875 ,
           0.21875, 0.25 , 0.5 , 0.75 , 0.75 , 0.75 , 0. ,
           0. , 0. , 0. , 0. , 0. , 0. , 0.5 ,
           0.75 , 0.75 , 0.75 ])
    >>> load_audio(file).shape
    (24,)
    >>> print(run_process(f'file {file}').stdout)
    tmp_audio_data.wav: RIFF (little-endian) data, WAVE audio, Microsoft PCM, 16 bit, mono 16000 Hz
    <BLANKLINE>
    >>> os.remove('tmp_audio_data.wav')
    >>> dump_audio(np.array([16, 24, 24, 24]) / 32, file, normalize=False, start=20)
    >>> load_audio(file)
    array([0.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.  ,
           0.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.5 , 0.75,
           0.75, 0.75])
    >>> load_audio(file).shape
    (24,)
    >>> print(run_process(f'file {file}').stdout)
    tmp_audio_data.wav: RIFF (little-endian) data, WAVE audio, Microsoft PCM, 16 bit, mono 16000 Hz
    <BLANKLINE>
    >>> data = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) / 32
    >>> data
    array([0. , 0.03125, 0.0625 , 0.09375, 0.125 , 0.15625, 0.1875 ,
           0.21875, 0.25 , 0.28125])
    >>> dump_audio(data, file, normalize=False, dtype=None)
    >>> load_audio(file)
    array([0. , 0.03125, 0.0625 , 0.09375, 0.125 , 0.15625, 0.1875 ,
           0.21875, 0.25 , 0.28125])
    >>> print(run_process(f'soxi {file}').stdout)
    <BLANKLINE>
    Input File : 'tmp_audio_data.wav'
    Channels : 1
    Sample Rate : 16000
    Precision : 53-bit
    Duration : 00:00:00.00 = 10 samples ~ 0.046875 CDDA sectors
    File Size : 160
    Bit Rate : 2.05M
    Sample Encoding: 64-bit Floating Point PCM
    <BLANKLINE>
    <BLANKLINE>
    >>> dump_audio(data.astype(np.float32), file, normalize=False, dtype=None)
    >>> load_audio(file, dtype=None)
    array([0. , 0.03125, 0.0625 , 0.09375, 0.125 , 0.15625, 0.1875 ,
           0.21875, 0.25 , 0.28125], dtype=float32)
    >>> print(run_process(f'soxi {file}').stdout)
    <BLANKLINE>
    Input File : 'tmp_audio_data.wav'
    Channels : 1
    Sample Rate : 16000
    Precision : 24-bit
    Duration : 00:00:00.00 = 10 samples ~ 0.046875 CDDA sectors
    File Size : 120
    Bit Rate : 1.54M
    Sample Encoding: 32-bit Floating Point PCM
    <BLANKLINE>
    <BLANKLINE>
    """
    path = normalize_path(path, as_str=True)
    obj = np.asarray(obj)
    if normalize:
        # Idiom fix: ``kind not in 'fi'`` instead of ``not kind in [...]``.
        if obj.dtype.kind not in 'fi':
            raise TypeError(
                'Only float and int is currently supported with normalize. '
                f'Got dtype {obj.dtype}'
            )
        # Normalization can change the type (e.g. int to float).
        # When saving as float, normalize is a bad idea.
        # The normalization is adjusted for int16
        assert dtype == np.int16, (
            'Currently is only normalize allowed for dtype == np.int16 '
            f'and not for dtype == {dtype}'
        )
        # Correction, because the allowed values are in the range [-1, 1).
        # => "1" is not a valid value
        correction = (2**15 - 1) / (2**15)
        # NOTE(review): an all-zero signal divides by zero here; presumably
        # callers never pass silence -- confirm before hardening.
        obj = obj * (correction / np.amax(np.abs(obj)))
    # ToDo: better exception when path is file descriptor
    if start is None or not Path(path).exists():
        # Fresh file (or no offset): open for writing and infer the channel
        # count from the array layout (first axis = channels when 2-D).
        if obj.ndim == 1:
            channels = 1
        else:
            channels = obj.shape[0]
        sf_args = dict(
            mode='w',
            channels=channels,
            samplerate=sample_rate,
        )
    else:
        # Existing file plus an offset: open read/write so the samples
        # before ``start`` survive.
        sf_args = dict(
            mode='r+'
        )
    sf_args['format'] = format
    # Map NumPy scalar types and dtype instances to SoundFile subtypes.
    dtype_map = Dispatcher({
        np.int16: 'PCM_16',
        np.dtype('int16'): 'PCM_16',
        np.int32: 'PCM_32',
        np.dtype('int32'): 'PCM_32',
        np.float32: 'FLOAT',
        np.dtype('float32'): 'FLOAT',
        np.float64: 'DOUBLE',
        np.dtype('float64'): 'DOUBLE',
    })
    if dtype in [np.int16]:
        # PCM_16 is SoundFile's default subtype for wav; nothing to set.
        pass
    elif dtype in [np.float32, np.float64, np.int32]:
        sf_args['subtype'] = dtype_map[dtype]
    elif dtype is None:
        # Keep the array's own dtype as the on-disk encoding.
        sf_args['subtype'] = dtype_map[obj.dtype]
    else:
        raise TypeError(dtype)
    with contextlib.ExitStack() as exit_stack:
        try:
            f = exit_stack.enter_context(soundfile.SoundFile(path, **sf_args))
        except RuntimeError:
            # Not sure, why this is a RuntimeError. Maybe a bug in SoundFile.
            if mkdir:
                Path(path).parent.mkdir(exist_ok=True, parents=True)
                f = exit_stack.enter_context(soundfile.SoundFile(path, **sf_args))
            else:
                raise
        if start is not None:
            f.seek(start)
        # SoundFile expects (samples, channels), hence the transpose.
        f.write(obj.T)
    return
def dumps_audio(
        obj,
        *,
        sample_rate=16000,
        dtype=np.int16,
        start=None,
        normalize=True,
        format='wav',  # see soundfile.available_formats()
):
    """
    Encode an audio signal in memory and return the raw file bytes.

    Same parameters as ``dump_audio``, but returns ``bytes`` instead of
    writing to disk.

    >>> dumps_audio([1, 2])
    b'RIFF(\\x00\\x00\\x00WAVEfmt \\x10\\x00\\x00\\x00\\x01\\x00\\x01\\x00\\x80>\\x00\\x00\\x00}\\x00\\x00\\x02\\x00\\x10\\x00data\\x04\\x00\\x00\\x00\\xff?\\xff\\x7f'
    """
    buffer = io.BytesIO()
    # Robustness fix: pass arguments explicitly instead of ``**locals()``,
    # which silently depended on no extra local existing before the call and
    # broke if any temporary variable was added above it.
    dump_audio(
        obj,
        buffer,
        sample_rate=sample_rate,
        dtype=dtype,
        start=start,
        normalize=normalize,
        format=format,
    )
    return buffer.getvalue()
def audiowrite(data, path, sample_rate=16000, normalize=False, threaded=True):
    """ Write the audio data ``data`` to the wav file ``path``

    The file can be written in a threaded mode. In this case, the writing
    process will be started at a separate thread. Consequently, the file will
    not be written when this function exits.

    :param data: A numpy array with the audio data
    :param path: The wav file the data should be written to
    :param sample_rate: Sample rate of the audio data
    :param normalize: Normalize the audio first so that the values are within
        the range of [INTMIN, INTMAX]. E.g. no clipping occurs
    :param threaded: If true, the write process will be started as a separate
        thread
    :return: The number of clipped samples
    """
    assert isinstance(path, (str, Path, io.BytesIO)), path
    assert data.dtype.kind in ['i', 'f'], (data.shape, data.dtype)
    if isinstance(path, Path):
        path = str(path)
    # Work on a copy so the caller's array is never mutated.
    data = data.copy()
    if normalize:
        if not data.dtype.kind == 'f':
            # Bug fix: ``np.float`` was removed from NumPy; float64 is the
            # equivalent dtype.
            data = data.astype(np.float64)
        # The 1e-6 floor avoids division by zero for silent signals.
        data /= np.maximum(np.amax(np.abs(data)), 1e-6)

    # Bug fix: initialize the counter so integer input (which skips the
    # scaling branch below) no longer hits a NameError at the final return.
    sample_to_clip = 0
    if data.dtype.kind == 'f':
        data *= int16_max
        # Bug fix: count samples clipped on either side of the int16 range,
        # not only the positive overflows.
        sample_to_clip = int(np.sum((data > int16_max) | (data < int16_min)))
        if sample_to_clip > 0:
            print('Warning, clipping {} sample{}.'.format(
                sample_to_clip, '' if sample_to_clip == 1 else 's'
            ))
        data = np.clip(data, int16_min, int16_max)
    data = data.astype(np.int16)

    if threaded:
        threading.Thread(target=wav_write, args=(path, sample_rate, data)
                         ).start()
    else:
        try:
            wav_write(path, sample_rate, data)
        except Exception:  # _struct.error
            if data.ndim == 2:
                # A huge channel count usually means the axes were swapped.
                assert data.shape[1] < 20, (
                    f"channels bigger than 20 looks wrong "
                    f"(shape: {data.shape}). "
                    f"Maybe you must call audiowrite(data.T, ...)"
                )
            raise
    return sample_to_clip
|
wrappers.py | # Copyright 2019 The PlaNet Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environment wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import datetime
import io
import os
import sys
import traceback
import uuid
import gym
import gym.spaces
import numpy as np
import skimage.transform
import tensorflow as tf
from planet.tools import nested
from planet.tools.preprocess import softmax
class ObservationDict(object):
  """Wrap a flat-observation env so observations come back as a dict."""

  def __init__(self, env, key='observ'):
    self._env = env
    self._key = key

  def __getattr__(self, name):
    return getattr(self._env, name)

  @property
  def observation_space(self):
    return gym.spaces.Dict({self._key: self._env.observation_space})

  @property
  def action_space(self):
    return self._env.action_space

  def step(self, action):
    observ, reward, done, info = self._env.step(action)
    return {self._key: np.array(observ)}, reward, done, info

  def reset(self):
    return {self._key: np.array(self._env.reset())}
class ConcatObservation(object):
  """Select observations from a dict space and concatenate them."""

  def __init__(self, env, keys):
    self._env = env
    self._keys = keys

  def __getattr__(self, name):
    return getattr(self._env, name)

  @property
  def observation_space(self):
    selected = [self._env.observation_space.spaces[key] for key in self._keys]
    low = np.concatenate([space.low for space in selected], 0)
    high = np.concatenate([space.high for space in selected], 0)
    dtypes = [space.dtype for space in selected]
    if any(dtype != dtypes[0] for dtype in dtypes):
      message = 'Spaces must have the same data type; are {}.'
      raise KeyError(message.format(', '.join(str(x) for x in dtypes)))
    return gym.spaces.Box(low, high, dtype=dtypes[0])

  def step(self, action):
    observ, reward, done, info = self._env.step(action)
    return self._select_keys(observ), reward, done, info

  def reset(self):
    return self._select_keys(self._env.reset())

  def _select_keys(self, observ):
    # Concatenate the selected entries along the first axis.
    return np.concatenate([observ[key] for key in self._keys], 0)
class SelectObservations(object):
  """Restrict a dict observation to a fixed subset of its keys."""

  def __init__(self, env, keys):
    self._env = env
    self._keys = keys

  def __getattr__(self, name):
    return getattr(self._env, name)

  @property
  def observation_space(self):
    all_spaces = self._env.observation_space.spaces
    return gym.spaces.Dict({key: all_spaces[key] for key in self._keys})

  @property
  def action_space(self):
    return self._env.action_space

  def step(self, action, *args, **kwargs):
    observ, reward, done, info = self._env.step(action, *args, **kwargs)
    return self._filter(observ), reward, done, info

  def reset(self, *args, **kwargs):
    return self._filter(self._env.reset(*args, **kwargs))

  def _filter(self, observ):
    # Keep only the configured keys.
    return {key: observ[key] for key in self._keys}
class PixelObservations(object):
  """Add a rendered image of the scene to the observation dict."""

  def __init__(self, env, size=(64, 64), dtype=np.uint8, key='image'):
    assert isinstance(env.observation_space, gym.spaces.Dict)
    self._env = env
    self._size = size
    self._dtype = dtype
    self._key = key

  def __getattr__(self, name):
    return getattr(self._env, name)

  @property
  def observation_space(self):
    # Bug fix: the ``np.float`` alias was removed from NumPy; accept the
    # builtin float as well as the concrete float dtypes.
    high = {np.uint8: 255, float: 1.0, np.float32: 1.0, np.float64: 1.0}[
        self._dtype]
    image = gym.spaces.Box(0, high, self._size + (3,), dtype=self._dtype)
    spaces = self._env.observation_space.spaces.copy()
    assert self._key not in spaces
    spaces[self._key] = image
    return gym.spaces.Dict(spaces)

  @property
  def action_space(self):
    return self._env.action_space

  def step(self, action):
    obs, reward, done, info = self._env.step(action)
    obs[self._key] = self._render_image()
    return obs, reward, done, info

  def reset(self):
    obs = self._env.reset()
    obs[self._key] = self._render_image()
    return obs

  def _render_image(self):
    """Render the scene, resize to ``self._size`` and convert the dtype."""
    image = self._env.render('rgb_array')
    if image.shape[:2] != self._size:
      kwargs = dict(
          output_shape=self._size, mode='edge', order=1, preserve_range=True)
      image = skimage.transform.resize(image, **kwargs).astype(image.dtype)
    if self._dtype and image.dtype != self._dtype:
      # Rescale between [0, 255] uint8 and [0, 1] float representations.
      if image.dtype in (np.float32, np.float64) and self._dtype == np.uint8:
        image = (image * 255).astype(self._dtype)
      elif image.dtype == np.uint8 and self._dtype in (np.float32, np.float64):
        image = image.astype(self._dtype) / 255
      else:
        message = 'Cannot convert observations from {} to {}.'
        raise NotImplementedError(message.format(image.dtype, self._dtype))
    return image
class ObservationToRender(object):
  """Pop an image out of the observation dict and serve it via render()."""

  def __init__(self, env, key='image'):
    self._env = env
    self._key = key
    # Most recent image; updated on every step() and reset().
    self._image = None

  def __getattr__(self, name):
    return getattr(self._env, name)

  @property
  def observation_space(self):
    # All observation entries are consumed internally; expose none.
    return gym.spaces.Dict({})

  def step(self, action):
    observ, reward, done, info = self._env.step(action)
    self._image = observ.pop(self._key)
    return observ, reward, done, info

  def reset(self):
    observ = self._env.reset()
    self._image = observ.pop(self._key)
    return observ

  def render(self, *args, **kwargs):
    return self._image
class OverwriteRender(object):
  """Replace the environment's render() with a user-supplied function."""

  def __init__(self, env, render_fn):
    self._env = env
    self._render_fn = render_fn
    # Trigger the underlying renderer once so its viewer is initialized.
    self._env.render('rgb_array')

  def __getattr__(self, name):
    return getattr(self._env, name)

  def render(self, *args, **kwargs):
    return self._render_fn(self._env, *args, **kwargs)
class ActionRepeat(object):
  """Repeat the agent action multiple steps."""

  def __init__(self, env, amount):
    self._env = env
    self._amount = amount

  def __getattr__(self, name):
    return getattr(self._env, name)

  def step(self, action):
    # Apply the same action up to `amount` times, stopping early when the
    # episode ends; rewards are summed over the repeated steps.
    total_reward = 0
    done = False
    for _ in range(self._amount):
      observ, reward, done, info = self._env.step(action)
      total_reward += reward
      if done:
        break
    return observ, total_reward, done, info
class NormalizeActions(object):
  """Rescale actions from [-1, 1] to the env's finite action bounds.

  Dimensions whose bounds are not finite are passed through unchanged.
  """

  def __init__(self, env):
    self._env = env
    low, high = env.action_space.low, env.action_space.high
    # Only rescale dimensions where both bounds are finite.
    self._enabled = np.logical_and(np.isfinite(low), np.isfinite(high))
    self._low = np.where(self._enabled, low, -np.ones_like(low))
    self._high = np.where(self._enabled, high, np.ones_like(low))

  def __getattr__(self, name):
    return getattr(self._env, name)

  @property
  def action_space(self):
    space = self._env.action_space
    low = np.where(self._enabled, -np.ones_like(space.low), space.low)
    high = np.where(self._enabled, np.ones_like(space.high), space.high)
    return gym.spaces.Box(low, high)

  def step(self, action):
    # Map [-1, 1] linearly onto [low, high].
    rescaled = (action + 1) / 2 * (self._high - self._low) + self._low
    return self._env.step(rescaled)
class DeepMindWrapper(object):
  """Wraps a DM Control environment into a Gym interface."""

  metadata = {'render.modes': ['rgb_array']}
  reward_range = (-np.inf, np.inf)

  def __init__(self, env, render_size=(64, 64), camera_id=0):
    self._env = env
    self._render_size = render_size
    self._camera_id = camera_id

  def __getattr__(self, name):
    return getattr(self._env, name)

  @property
  def observation_space(self):
    components = {
        key: gym.spaces.Box(-np.inf, np.inf, value.shape, dtype=np.float32)
        for key, value in self._env.observation_spec().items()}
    return gym.spaces.Dict(components)

  @property
  def action_space(self):
    spec = self._env.action_spec()
    return gym.spaces.Box(spec.minimum, spec.maximum, dtype=np.float32)

  def step(self, action):
    time_step = self._env.step(action)
    # A reward of None (first step of an episode) is reported as 0.
    reward = time_step.reward or 0
    info = {'discount': time_step.discount}
    return dict(time_step.observation), reward, time_step.last(), info

  def reset(self):
    return dict(self._env.reset().observation)

  def render(self, *args, **kwargs):
    if kwargs.get('mode', 'rgb_array') != 'rgb_array':
      raise ValueError("Only render mode 'rgb_array' is supported.")
    del args  # Unused
    del kwargs  # Unused
    return self._env.physics.render(
        *self._render_size, camera_id=self._camera_id)
class MaximumDuration(object):
  """Force episodes to end after at most `duration` steps."""

  def __init__(self, env, duration):
    self._env = env
    self._duration = duration
    # None means "needs reset"; an int is the current step count.
    self._step = None

  def __getattr__(self, name):
    return getattr(self._env, name)

  def step(self, action):
    if self._step is None:
      raise RuntimeError('Must reset environment.')
    observ, reward, done, info = self._env.step(action)
    self._step += 1
    reached_limit = self._step >= self._duration
    if reached_limit:
      # Terminate the episode and require a reset before further steps.
      done = True
      self._step = None
    return observ, reward, done, info

  def reset(self):
    self._step = 0
    return self._env.reset()
class MinimumDuration(object):
  """Extend episodes to at least `duration` steps by masking early `done`."""

  def __init__(self, env, duration):
    self._env = env
    self._duration = duration
    # None means "needs reset"; an int is the current step count.
    self._step = None

  def __getattr__(self, name):
    return getattr(self._env, name)

  def step(self, action):
    # Consistency/bug fix: fail with the same clear message as
    # MaximumDuration instead of an obscure TypeError on ``None + 1``.
    if self._step is None:
      raise RuntimeError('Must reset environment.')
    observ, reward, done, info = self._env.step(action)
    self._step += 1
    if self._step < self._duration:
      # Hide the termination signal until the minimum length is reached.
      done = False
    return observ, reward, done, info

  def reset(self):
    self._step = 0
    return self._env.reset()
class ProcessObservation(object):
  """Apply a user function to every observation (and the space bounds)."""

  def __init__(self, env, process_fn):
    self._env = env
    self._process_fn = process_fn

  def __getattr__(self, name):
    return getattr(self._env, name)

  @property
  def observation_space(self):
    def convert(box):
      # Transform the bounds the same way the observations are transformed.
      return gym.spaces.Box(
          self._process_fn(box.low),
          self._process_fn(box.high),
          dtype=self._process_fn(box.low).dtype)
    return nested.map(convert, self._env.observation_space)

  def step(self, action):
    observ, reward, done, info = self._env.step(action)
    return self._process_fn(observ), reward, done, info

  def reset(self):
    return self._process_fn(self._env.reset())
class PadActions(object):
  """Pad action space to the largest action space."""

  def __init__(self, env, spaces):
    self._env = env
    self._action_space = self._pad_box_space(spaces)

  @property
  def observation_space(self):
    return self._env.observation_space

  @property
  def action_space(self):
    return self._action_space

  def step(self, action, *args, **kwargs):
    # Drop the padding before forwarding to the wrapped environment.
    width = len(self._env.action_space.low)
    return self._env.step(action[:width], *args, **kwargs)

  def reset(self, *args, **kwargs):
    return self._env.reset(*args, **kwargs)

  def _pad_box_space(self, spaces):
    # Build the element-wise union of all 1-D box spaces, padded to the
    # widest one.
    assert all(len(space.low.shape) == 1 for space in spaces)
    length = max(len(space.low) for space in spaces)
    low = np.full(length, np.inf)
    high = np.full(length, -np.inf)
    for space in spaces:
      width = len(space.low)
      low[:width] = np.minimum(space.low, low[:width])
      high[:width] = np.maximum(space.high, high[:width])
    return gym.spaces.Box(low, high, dtype=np.float32)
class CollectGymDataset(object):
  """Collect transition tuples and store episodes as Numpy files."""

  def __init__(self, env, outdir):
    self._env = env
    # Falsy outdir disables writing; episodes are still exposed via info.
    self._outdir = outdir and os.path.expanduser(outdir)
    # Both are populated by reset(); _transition accumulates the entry that
    # is completed by the following step().
    self._episode = None
    self._transition = None

  def __getattr__(self, name):
    return getattr(self._env, name)

  def step(self, action, *args, **kwargs):
    """Step the env and record the transition; supports blocking=False."""
    if kwargs.get('blocking', True):
      transition = self._env.step(action, *args, **kwargs)
      return self._process_step(action, *transition)
    else:
      # Non-blocking: wrap the promise so recording happens on resolution.
      future = self._env.step(action, *args, **kwargs)
      return lambda: self._process_step(action, *future())

  def reset(self, *args, **kwargs):
    """Reset the env and start recording a new episode."""
    if kwargs.get('blocking', True):
      observ = self._env.reset(*args, **kwargs)
      return self._process_reset(observ)
    else:
      future = self._env.reset(*args, **kwargs)
      return lambda: self._process_reset(future())

  def _process_step(self, action, observ, reward, done, info):
    # Complete the pending transition with its action/reward and store it.
    self._transition.update({'action': action, 'reward': reward})
    self._transition.update(info)
    self._episode.append(self._transition)
    self._transition = {}
    if not done:
      # The new observation opens the next transition.
      self._transition.update(self._process_observ(observ))
    else:
      # Episode finished: expose it via info and optionally write to disk.
      episode = self._get_episode()
      info['episode'] = episode
      if self._outdir:
        filename = self._get_filename()
        self._write(episode, filename)
    return observ, reward, done, info

  def _process_reset(self, observ):
    self._episode = []
    self._transition = {}
    self._transition.update(self._process_observ(observ))
    return observ

  def _process_observ(self, observ):
    # Normalize non-dict observations into a single-key dict.
    if not isinstance(observ, dict):
      observ = {'observ': observ}
    return observ

  def _get_filename(self):
    # Timestamp plus random suffix keeps concurrent writers collision free.
    timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
    identifier = str(uuid.uuid4()).replace('-', '')
    filename = '{}-{}.npz'.format(timestamp, identifier)
    filename = os.path.join(self._outdir, filename)
    return filename

  def _get_episode(self):
    # Transpose the list of dicts into a dict of arrays, one per key.
    episode = {k: [t[k] for t in self._episode] for k in self._episode[0]}
    episode = {k: np.array(v) for k, v in episode.items()}
    for key, sequence in episode.items():
      if sequence.dtype == 'object':
        message = "Sequence '{}' is not numeric:\n{}"
        raise RuntimeError(message.format(key, sequence))
    return episode

  def _write(self, episode, filename):
    # Serialize in memory first and copy through tf.gfile, so destinations
    # reachable only via TensorFlow's filesystem layer are supported.
    if not tf.gfile.Exists(self._outdir):
      tf.gfile.MakeDirs(self._outdir)
    with io.BytesIO() as file_:
      np.savez_compressed(file_, **episode)
      file_.seek(0)
      with tf.gfile.Open(filename, 'w') as ff:
        ff.write(file_.read())
    name = os.path.splitext(os.path.basename(filename))[0]
    print('Recorded episode {}.'.format(name))
class ConvertTo32Bit(object):
  """Convert data types of an OpenAI Gym environment to 32 bit."""

  def __init__(self, env):
    self._env = env

  def __getattr__(self, name):
    return getattr(self._env, name)

  def step(self, action):
    observ, reward, done, info = self._env.step(action)
    observ = nested.map(self._convert_observ, observ)
    return observ, self._convert_reward(reward), done, info

  def reset(self):
    return nested.map(self._convert_observ, self._env.reset())

  def _convert_observ(self, observ):
    """Downcast 64-bit observations to 32 bit; reject non-finite values."""
    if not np.isfinite(observ).all():
      raise ValueError('Infinite observation encountered.')
    conversions = {
        np.dtype(np.float64): np.float32,
        np.dtype(np.int64): np.int32,
    }
    target = conversions.get(observ.dtype)
    return observ.astype(target) if target else observ

  def _convert_reward(self, reward):
    """Return the reward as a float32 array; reject non-finite values."""
    if not np.isfinite(reward).all():
      raise ValueError('Infinite reward encountered.')
    return np.array(reward, dtype=np.float32)
class Async(object):
  """Step environment in a separate process for lock free paralellism."""

  # Message types for communication via the pipe.
  _ACCESS = 1     # main -> worker: read an attribute of the environment.
  _CALL = 2       # main -> worker: invoke a method of the environment.
  _RESULT = 3     # worker -> main: successful result payload.
  _EXCEPTION = 4  # worker -> main: formatted stacktrace of a failure.
  _CLOSE = 5      # main -> worker: shut down the worker loop.

  def __init__(self, constructor, strategy='thread'):
    """Step environment in a separate process for lock free parallelism.

    The environment will be created in the external process by calling the
    specified callable. This can be an environment class, or a function
    creating the environment and potentially wrapping it. The returned
    environment should not access global variables.

    Args:
      constructor: Callable that creates and returns an OpenAI gym environment.
      strategy: 'thread' for a dummy (thread-backed) worker, 'process' for a
        real OS process.

    Attributes:
      observation_space: The cached observation space of the environment.
      action_space: The cached action space of the environment.
    """
    if strategy == 'thread':
      import multiprocessing.dummy as mp
    elif strategy == 'process':
      import multiprocessing as mp
    else:
      raise NotImplementedError(strategy)
    self._conn, conn = mp.Pipe()
    self._process = mp.Process(target=self._worker, args=(constructor, conn))
    # Ensure the worker is shut down even if the program exits abnormally.
    atexit.register(self.close)
    self._process.start()
    # Caches for the space properties, filled lazily on first access.
    self._observ_space = None
    self._action_space = None

  @property
  def observation_space(self):
    if not self._observ_space:
      self._observ_space = self.__getattr__('observation_space')
    return self._observ_space

  @property
  def action_space(self):
    if not self._action_space:
      self._action_space = self.__getattr__('action_space')
    return self._action_space

  def __getattr__(self, name):
    """Request an attribute from the environment.

    Note that this involves communication with the external process, so it can
    be slow.

    Args:
      name: Attribute to access.

    Returns:
      Value of the attribute.
    """
    self._conn.send((self._ACCESS, name))
    return self._receive()

  def call(self, name, *args, **kwargs):
    """Asynchronously call a method of the external environment.

    Args:
      name: Name of the method to call.
      *args: Positional arguments to forward to the method.
      **kwargs: Keyword arguments to forward to the method.

    Returns:
      Promise object that blocks and provides the return value when called.
    """
    payload = name, args, kwargs
    self._conn.send((self._CALL, payload))
    return self._receive

  def close(self):
    """Send a close message to the external process and join it."""
    try:
      self._conn.send((self._CLOSE, None))
      self._conn.close()
    except IOError:
      # The connection was already closed.
      pass
    self._process.join()

  def step(self, action, blocking=True):
    """Step the environment.

    Args:
      action: The action to apply to the environment.
      blocking: Whether to wait for the result.

    Returns:
      Transition tuple when blocking, otherwise callable that returns the
      transition tuple.
    """
    promise = self.call('step', action)
    if blocking:
      return promise()
    else:
      return promise

  def reset(self, blocking=True):
    """Reset the environment.

    Args:
      blocking: Whether to wait for the result.

    Returns:
      New observation when blocking, otherwise callable that returns the new
      observation.
    """
    promise = self.call('reset')
    if blocking:
      return promise()
    else:
      return promise

  def _receive(self):
    """Wait for a message from the worker process and return its payload.

    Raises:
      Exception: An exception was raised inside the worker process.
      KeyError: The received message is of an unknown type.

    Returns:
      Payload object of the message.
    """
    try:
      message, payload = self._conn.recv()
    except ConnectionResetError:
      raise RuntimeError('Environment worker crashed.')
    # Re-raise exceptions in the main process.
    if message == self._EXCEPTION:
      stacktrace = payload
      raise Exception(stacktrace)
    if message == self._RESULT:
      return payload
    raise KeyError('Received message of unexpected type {}'.format(message))

  def _worker(self, constructor, conn):
    """The process waits for actions and sends back environment results.

    Args:
      constructor: Constructor for the OpenAI Gym environment.
      conn: Connection for communication to the main process.

    Raises:
      KeyError: When receiving a message of unknown type.
    """
    try:
      env = constructor()
      while True:
        try:
          # Only block for short times to have keyboard exceptions be raised.
          if not conn.poll(0.1):
            continue
          message, payload = conn.recv()
        except (EOFError, KeyboardInterrupt):
          break
        if message == self._ACCESS:
          name = payload
          result = getattr(env, name)
          conn.send((self._RESULT, result))
          continue
        if message == self._CALL:
          name, args, kwargs = payload
          result = getattr(env, name)(*args, **kwargs)
          conn.send((self._RESULT, result))
          continue
        if message == self._CLOSE:
          assert payload is None
          break
        raise KeyError('Received message of unknown type {}'.format(message))
    except Exception:
      # Forward the formatted stacktrace so the main process can re-raise.
      stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))
      print('Error in environment process: {}'.format(stacktrace))
      conn.send((self._EXCEPTION, stacktrace))
    conn.close()
class DiscreteWrapper(object):
  """Wraps a discrete action-space environment into a continuous control task."""

  def __init__(self, env):
    self._env = env

  def __getattr__(self, name):
    return getattr(self._env, name)

  @property
  def action_space(self):
    num_actions = self._env.action_space.n
    return gym.spaces.Box(
        low=-1, high=1, shape=(num_actions,), dtype=np.float32)

  def step(self, action):
    # The continuous vector selects the discrete action with the largest
    # value.
    return self._env.step(np.argmax(action))
|
keylime_agent.py | #!/usr/bin/python3
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
'''
import asyncio
import http.server
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import threading
from urllib.parse import urlparse
import base64
import configparser
import uuid
import os
import sys
import time
import hashlib
import zipfile
import io
import importlib
import shutil
from keylime import common
from keylime import keylime_logging
from keylime import crypto
from keylime import openstack
from keylime import revocation_notifier
from keylime import registrar_client
from keylime import secure_mount
from keylime.tpm import tpm_obj
from keylime.tpm.tpm_abstract import TPM_Utilities
# Configure the module-level logger for the cloud agent.
logger = keylime_logging.init_logging('cloudagent')
try:
    import simplejson as json
except ImportError:
    # Bug fix: the original ``raise("...")`` raised a plain string, which
    # itself fails with "TypeError: exceptions must derive from
    # BaseException" and hides the intended message. Raise a real exception.
    raise ImportError("Simplejson is mandatory, please install")
# read the config file
config = common.get_config()
# get the tpm object (a hardware TPM is required, see need_hw_tpm=True)
tpm = tpm_obj.getTPM(need_hw_tpm=True)
tpm_version = tpm.get_tpm_version()
# lock required for multithreaded operation
# NOTE(review): the name suggests it guards the U/V key halves -- confirm.
uvLock = threading.Lock()
class Handler(BaseHTTPRequestHandler):
parsed_path = ''
    def do_HEAD(self):
        """HEAD is not supported; answer with HTTP 405 Method Not Allowed."""
        common.echo_json_response(self, 405, "HEAD not supported")
def do_GET(self):
"""This method services the GET request typically from either the Tenant or the Cloud Verifier.
Only tenant and cloudverifier uri's are supported. Both requests require a nonce parameter.
The Cloud verifier requires an additional mask paramter. If the uri or parameters are incorrect, a 400 response is returned.
"""
logger.info('GET invoked from ' +
str(self.client_address) + ' with uri:' + self.path)
rest_params = common.get_restful_params(self.path)
if rest_params is None:
common.echo_json_response(
self, 405, "Not Implemented: Use /keys/ or /quotes/ interfaces")
return
if "keys" in rest_params and rest_params['keys'] == 'verify':
if self.server.K is None:
logger.info(
'GET key challenge returning 400 response. bootstrap key not available')
common.echo_json_response(
self, 400, "Bootstrap key not yet available.")
return
challenge = rest_params['challenge']
response = {}
response['hmac'] = crypto.do_hmac(self.server.K, challenge)
common.echo_json_response(self, 200, "Success", response)
logger.info('GET key challenge returning 200 response.')
# If agent pubkey requested
elif "keys" in rest_params and rest_params["keys"] == "pubkey":
response = {}
response['pubkey'] = self.server.rsapublickey_exportable
common.echo_json_response(self, 200, "Success", response)
logger.info('GET pubkey returning 200 response.')
return
elif "quotes" in rest_params:
nonce = rest_params['nonce']
pcrmask = rest_params['mask'] if 'mask' in rest_params else None
vpcrmask = rest_params['vmask'] if 'vmask' in rest_params else None
# if the query is not messed up
if nonce is None:
logger.warning(
'GET quote returning 400 response. nonce not provided as an HTTP parameter in request')
common.echo_json_response(
self, 400, "nonce not provided as an HTTP parameter in request")
return
# Sanitization assurance (for tpm.run() tasks below)
if not (nonce.isalnum() and (pcrmask is None or pcrmask.isalnum()) and (vpcrmask is None or vpcrmask.isalnum())):
logger.warning(
'GET quote returning 400 response. parameters should be strictly alphanumeric')
common.echo_json_response(
self, 400, "parameters should be strictly alphanumeric")
return
# identity quotes are always shallow
hash_alg = tpm.defaults['hash']
if not tpm.is_vtpm() or rest_params["quotes"] == 'identity':
quote = tpm.create_quote(
nonce, self.server.rsapublickey_exportable, pcrmask, hash_alg)
imaMask = pcrmask
else:
quote = tpm.create_deep_quote(
nonce, self.server.rsapublickey_exportable, vpcrmask, pcrmask)
imaMask = vpcrmask
# Allow for a partial quote response (without pubkey)
enc_alg = tpm.defaults['encrypt']
sign_alg = tpm.defaults['sign']
if "partial" in rest_params and (rest_params["partial"] is None or int(rest_params["partial"], 0) == 1):
response = {
'quote': quote,
'tpm_version': tpm_version,
'hash_alg': hash_alg,
'enc_alg': enc_alg,
'sign_alg': sign_alg,
}
else:
response = {
'quote': quote,
'tpm_version': tpm_version,
'hash_alg': hash_alg,
'enc_alg': enc_alg,
'sign_alg': sign_alg,
'pubkey': self.server.rsapublickey_exportable,
}
# return a measurement list if available
if TPM_Utilities.check_mask(imaMask, common.IMA_PCR):
if not os.path.exists(common.IMA_ML):
logger.warn(
"IMA measurement list not available: %s" % (common.IMA_ML))
else:
with open(common.IMA_ML, 'r') as f:
ml = f.read()
response['ima_measurement_list'] = ml
common.echo_json_response(self, 200, "Success", response)
logger.info('GET %s quote returning 200 response.' %
(rest_params["quotes"]))
return
else:
logger.warning(
'GET returning 400 response. uri not supported: ' + self.path)
common.echo_json_response(self, 400, "uri not supported")
return
def do_POST(self):
"""This method services the POST request typically from either the Tenant or the Cloud Verifier.
Only tenant and cloudverifier uri's are supported. Both requests require a nonce parameter.
The Cloud verifier requires an additional mask parameter. If the uri or parameters are incorrect, a 400 response is returned.
"""
rest_params = common.get_restful_params(self.path)
if rest_params is None:
common.echo_json_response(
self, 405, "Not Implemented: Use /keys/ interface")
return
content_length = int(self.headers.get('Content-Length', 0))
if content_length <= 0:
logger.warning(
'POST returning 400 response, expected content in message. url: ' + self.path)
common.echo_json_response(self, 400, "expected content in message")
return
post_body = self.rfile.read(content_length)
json_body = json.loads(post_body)
b64_encrypted_key = json_body['encrypted_key']
decrypted_key = crypto.rsa_decrypt(
self.server.rsaprivatekey, base64.b64decode(b64_encrypted_key))
have_derived_key = False
if rest_params["keys"] == "ukey":
self.server.add_U(decrypted_key)
self.server.auth_tag = json_body['auth_tag']
self.server.payload = json_body.get('payload', None)
have_derived_key = self.server.attempt_decryption(self)
elif rest_params["keys"] == "vkey":
self.server.add_V(decrypted_key)
have_derived_key = self.server.attempt_decryption(self)
else:
logger.warning(
'POST returning response. uri not supported: ' + self.path)
common.echo_json_response(self, 400, "uri not supported")
return
logger.info('POST of %s key returning 200' %
(('V', 'U')[rest_params["keys"] == "ukey"]))
common.echo_json_response(self, 200, "Success")
# no key yet, then we're done
if not have_derived_key:
return
# woo hoo we have a key
# ok lets write out the key now
secdir = secure_mount.mount() # confirm that storage is still securely mounted
# clean out the secure dir of any previous info before we extract files
if os.path.isdir("%s/unzipped" % secdir):
shutil.rmtree("%s/unzipped" % secdir)
# write out key file
f = open(secdir+"/"+self.server.enc_keyname, 'w')
f.write(base64.b64encode(self.server.K).decode())
f.close()
# stow the U value for later
tpm.write_key_nvram(self.server.final_U)
# optionally extend a hash of they key and payload into specified PCR
tomeasure = self.server.K
# if we have a good key, now attempt to write out the encrypted payload
dec_path = "%s/%s" % (secdir,
config.get('cloud_agent', "dec_payload_file"))
enc_path = "%s/encrypted_payload" % common.WORK_DIR
dec_payload = None
enc_payload = None
if self.server.payload is not None:
dec_payload = crypto.decrypt(
self.server.payload, bytes(self.server.K))
enc_payload = self.server.payload
elif os.path.exists(enc_path):
# if no payload provided, try to decrypt one from a previous run stored in encrypted_payload
with open(enc_path, 'rb') as f:
enc_payload = f.read()
try:
dec_payload = crypto.decrypt(enc_payload, self.server.K)
logger.info("Decrypted previous payload in %s to %s" %
(enc_path, dec_path))
except Exception as e:
logger.warning(
"Unable to decrypt previous payload %s with derived key: %s" % (enc_path, e))
os.remove(enc_path)
enc_payload = None
# also write out encrypted payload to be decrytped next time
if enc_payload is not None:
with open(enc_path, 'wb') as f:
f.write(self.server.payload.encode('utf-8'))
# deal with payload
payload_thread = None
if dec_payload is not None:
tomeasure = tomeasure + dec_payload
# see if payload is a zip
zfio = io.BytesIO(dec_payload)
if config.getboolean('cloud_agent', 'extract_payload_zip') and zipfile.is_zipfile(zfio):
logger.info(
"Decrypting and unzipping payload to %s/unzipped" % secdir)
with zipfile.ZipFile(zfio, 'r')as f:
f.extractall('%s/unzipped' % secdir)
# run an included script if one has been provided
initscript = config.get('cloud_agent', 'payload_script')
if initscript != "":
def initthread():
import subprocess
env = os.environ.copy()
env['AGENT_UUID'] = self.server.agent_uuid
proc = subprocess.Popen(["/bin/bash", initscript], env=env, shell=False, cwd='%s/unzipped' % secdir,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
line = proc.stdout.readline()
if line == '' and proc.poll() is not None:
break
if line:
logger.debug("init-output: %s" % line.strip())
# should be a no-op as poll already told us it's done
proc.wait()
if not os.path.exists("%s/unzipped/%s" % (secdir, initscript)):
logger.info(
"No payload script %s found in %s/unzipped" % (initscript, secdir))
else:
logger.info(
"Executing payload script: %s/unzipped/%s" % (secdir, initscript))
payload_thread = threading.Thread(target=initthread)
else:
logger.info("Decrypting payload to %s" % dec_path)
with open(dec_path, 'wb') as f:
f.write(dec_payload)
zfio.close()
# now extend a measurement of the payload and key if there was one
pcr = config.getint('cloud_agent', 'measure_payload_pcr')
if pcr > 0 and pcr < 24:
logger.info("extending measurement of payload into PCR %s" % pcr)
measured = tpm.hashdigest(tomeasure)
tpm.extendPCR(pcr, measured)
if payload_thread is not None:
payload_thread.start()
return
def get_query_tag_value(self, path, query_tag):
"""This is a utility method to query for specific the http parameters in the uri.
Returns the value of the parameter, or None if not found."""
data = {}
parsed_path = urlparse(self.path)
query_tokens = parsed_path.query.split('&')
# find the 'ids' query, there can only be one
for tok in query_tokens:
query_tok = tok.split('=')
query_key = query_tok[0]
if query_key is not None and query_key == query_tag:
# ids tag contains a comma delimited list of ids
data[query_tag] = query_tok[1]
break
return data.get(query_tag, None)
def log_message(self, logformat, *args):
return
# consider using PooledProcessMixIn
# https://github.com/muayyad-alsadi/python-PooledProcessMixIn
class CloudAgentHTTPServer(ThreadingMixIn, HTTPServer):
    """Http Server which will handle each request in a separate thread."""

    # Do not modify directly unless you acquire uvLock.  Sets are chosen
    # for uniqueness of contained values.
    u_set = set([])
    v_set = set([])
    rsaprivatekey = None
    rsapublickey = None
    rsapublickey_exportable = None
    done = threading.Event()
    auth_tag = None
    payload = None
    enc_keyname = None
    K = None
    final_U = None
    agent_uuid = None

    def __init__(self, server_address, RequestHandlerClass, agent_uuid):
        """Constructor overridden to provide ability to pass configuration arguments to the server"""
        secdir = secure_mount.mount()
        keyname = "%s/%s" % (secdir, config.get('cloud_agent', 'rsa_keyname'))
        # read or generate the key depending on configuration
        if os.path.isfile(keyname):
            # Fixed: use a context manager so the file handle is closed
            # (the original opened the key file and never closed it).
            logger.debug("Using existing key in %s" % keyname)
            with open(keyname, "rb") as f:
                rsa_key = crypto.rsa_import_privkey(f.read())
        else:
            logger.debug("key not found, generating a new one")
            rsa_key = crypto.rsa_generate(2048)
            with open(keyname, "wb") as f:
                f.write(crypto.rsa_export_privkey(rsa_key))

        self.rsaprivatekey = rsa_key
        self.rsapublickey_exportable = crypto.rsa_export_pubkey(
            self.rsaprivatekey)

        # attempt to get a U value from the TPM NVRAM
        nvram_u = tpm.read_key_nvram()
        if nvram_u is not None:
            logger.info("Existing U loaded from TPM NVRAM")
            self.add_U(nvram_u)
        http.server.HTTPServer.__init__(
            self, server_address, RequestHandlerClass)
        self.enc_keyname = config.get('cloud_agent', 'enc_keyname')
        self.agent_uuid = agent_uuid

    def add_U(self, u):
        """Threadsafe method for adding a U value received from the Tenant.

        Do not modify u_set or v_set directly.
        """
        with uvLock:
            # be very careful printing K, U, or V as they leak in logs stored on unprotected disks
            if common.INSECURE_DEBUG:
                logger.debug("Adding U len %d data:%s" %
                             (len(u), base64.b64encode(u)))
            self.u_set.add(u)

    def add_V(self, v):
        """Threadsafe method for adding a V value received from the Cloud Verifier.

        Do not modify u_set or v_set directly.
        """
        with uvLock:
            # be very careful printing K, U, or V as they leak in logs stored on unprotected disks
            if common.INSECURE_DEBUG:
                logger.debug(F"Adding V: {base64.b64encode(v)}")
            self.v_set.add(v)

    def attempt_decryption(self, handler):
        """Try to derive K from every stored (U, V) pair.

        At least one U and one V value must have been received for a
        derivation attempt.  Multiple U and V values are stored to
        prevent an attacker from sending decoy U/V values to deny
        service.  Returns True when a pair successfully derives K,
        False otherwise.
        """
        with uvLock:
            both_u_and_v_present = False
            return_value = False
            for u in self.u_set:
                for v in self.v_set:
                    both_u_and_v_present = True
                    return_value = self.decrypt_check(u, v)
                    if return_value:
                        # reset u and v sets
                        self.u_set = set([])
                        self.v_set = set([])
                        return return_value
            # TODO check on whether this happens or not. NVRAM causes trouble
            if both_u_and_v_present:
                pass
                #logger.critical("Possible attack from: " + str(handler.client_address) + ". Both U (potentially stale from TPM NVRAM) and V present but unsuccessful in attempt to decrypt check value.")
            return return_value

    def decrypt_check(self, decrypted_U, decrypted_V):
        """Check whether XOR of the passed U and V values derives K.

        This method will access the received auth tag, and may fail if
        decoy U and V values were received.  Do not call directly unless
        you acquire uvLock.  Returns None when no auth tag is available
        yet or the U/V lengths mismatch; True when K was successfully
        derived (and stored on self); False otherwise.  (The original
        docstring wrongly claimed it returned the decrypted agent UUID.)
        """
        if self.auth_tag is None:
            return None
        if len(decrypted_U) != len(decrypted_V):
            logger.warning("Invalid U len %d or V len %d. skipping..." %
                           (len(decrypted_U), len(decrypted_V)))
            return None
        candidate_key = crypto.strbitxor(decrypted_U, decrypted_V)
        # be very careful printing K, U, or V as they leak in logs stored on unprotected disks
        if common.INSECURE_DEBUG:
            logger.debug(F"U: {base64.b64encode(decrypted_U)}")
            logger.debug(F"V: {base64.b64encode(decrypted_V)}")
            logger.debug(F"K: {base64.b64encode(candidate_key)}")
        logger.debug("auth_tag: " + self.auth_tag)
        ex_mac = crypto.do_hmac(candidate_key, self.agent_uuid)
        if ex_mac == self.auth_tag:
            logger.info("Successfully derived K for UUID %s", self.agent_uuid)
            self.final_U = decrypted_U
            self.K = candidate_key
            return True
        else:
            logger.error("Failed to derive K for UUID %s", self.agent_uuid)
            return False
def main(argv=sys.argv):
    """Start the cloud agent.

    Initializes the TPM, determines the agent UUID, registers and
    activates with the registrar, then serves the key/quote REST
    interface and (optionally) listens for revocation notifications.
    """
    if os.getuid() != 0 and common.REQUIRE_ROOT:
        logger.critical("This process must be run as root.")
        return

    # get params for initialization
    registrar_ip = config.get('registrar', 'registrar_ip')
    registrar_port = config.get('registrar', 'registrar_port')

    # initialize the tmpfs partition to store keys if it isn't already available
    secdir = secure_mount.mount()

    # change dir to working dir
    common.ch_dir(common.WORK_DIR, logger)

    # initialize tpm
    (ek, ekcert, aik, ek_tpm, aik_name) = tpm.tpm_init(self_activate=False, config_pw=config.get(
        'cloud_agent', 'tpm_ownerpassword'))  # this tells initialize not to self activate the AIK
    virtual_agent = tpm.is_vtpm()

    # try to get some TPM randomness into the system entropy pool
    tpm.init_system_rand()

    if ekcert is None:
        if virtual_agent:
            ekcert = 'virtual'
        elif tpm.is_emulator():
            ekcert = 'emulator'

    # now we need the UUID
    try:
        agent_uuid = config.get('cloud_agent', 'agent_uuid')
    except configparser.NoOptionError:
        agent_uuid = None
    if agent_uuid == 'openstack':
        agent_uuid = openstack.get_openstack_uuid()
    elif agent_uuid == 'hash_ek':
        agent_uuid = hashlib.sha256(ek).hexdigest()
    elif agent_uuid == 'generate' or agent_uuid is None:
        agent_uuid = str(uuid.uuid4())
    if common.DEVELOP_IN_ECLIPSE:
        agent_uuid = "C432FBB3-D2F1-4A97-9EF7-75BD81C866E9"
    if common.STUB_VTPM and common.TPM_CANNED_VALUES is not None:
        # Use canned values for stubbing
        jsonIn = common.TPM_CANNED_VALUES
        if "add_vtpm_to_group" in jsonIn:
            # The value we're looking for has been canned!
            agent_uuid = jsonIn['add_vtpm_to_group']['retout']
        else:
            # Our command hasn't been canned!
            raise Exception("Command %s not found in canned json!" %
                            ("add_vtpm_to_group"))

    logger.info("Agent UUID: %s" % agent_uuid)

    # register it and get back a blob
    keyblob = registrar_client.doRegisterAgent(
        registrar_ip, registrar_port, agent_uuid, tpm_version, ek, ekcert, aik, ek_tpm, aik_name)
    if keyblob is None:
        raise Exception("Registration failed")

    # get the ephemeral registrar key
    key = tpm.activate_identity(keyblob)
    if key is None:
        raise Exception("Activation failed")

    # tell the registrar server we know the key
    retval = False
    if virtual_agent:
        deepquote = tpm.create_deep_quote(
            hashlib.sha1(key).hexdigest(), agent_uuid + aik + ek)
        retval = registrar_client.doActivateVirtualAgent(
            registrar_ip, registrar_port, agent_uuid, deepquote)
    else:
        retval = registrar_client.doActivateAgent(
            registrar_ip, registrar_port, agent_uuid, key)
    if not retval:
        raise Exception("Registration failed on activate")

    serveraddr = (config.get('cloud_agent', 'cloudagent_ip'),
                  config.getint('cloud_agent', 'cloudagent_port'))
    server = CloudAgentHTTPServer(serveraddr, Handler, agent_uuid)
    serverthread = threading.Thread(target=server.serve_forever)
    logger.info(
        f"Starting Cloud Agent on {serveraddr[0]}:{serveraddr[1]} use <Ctrl-C> to stop")
    serverthread.start()

    # want to listen for revocations?
    # NOTE: 'listen_notfications' (sic) is the option name actually used
    # in the shipped configuration; do not "fix" the spelling here alone.
    if config.getboolean('cloud_agent', 'listen_notfications'):
        cert_path = config.get('cloud_agent', 'revocation_cert')
        if cert_path == "default":
            cert_path = '%s/unzipped/RevocationNotifier-cert.crt' % (secdir)
        elif cert_path[0] != '/':
            # if it is a relative, convert to absolute in work_dir
            cert_path = os.path.abspath('%s/%s' % (common.WORK_DIR, cert_path))

        def perform_actions(revocation):
            """Run every configured revocation action for one notification."""
            actionlist = []

            # load the actions from inside the keylime module
            actionlisttxt = config.get('cloud_agent', 'revocation_actions')
            if actionlisttxt.strip() != "":
                actionlist = actionlisttxt.split(',')
                actionlist = ["revocation_actions.%s" % i for i in actionlist]

            # load actions from unzipped
            if os.path.exists("%s/unzipped/action_list" % secdir):
                with open("%s/unzipped/action_list" % secdir, 'r') as f:
                    actionlisttxt = f.read()
                if actionlisttxt.strip() != "":
                    localactions = actionlisttxt.strip().split(',')
                    for action in localactions:
                        if not action.startswith('local_action_'):
                            logger.warning(
                                "invalid local action: %s. must start with local_action_" % action)
                        else:
                            actionlist.append(action)

                    uzpath = "%s/unzipped" % secdir
                    if uzpath not in sys.path:
                        sys.path.append(uzpath)

            for action in actionlist:
                logger.info("executing revocation action %s" % action)
                try:
                    module = importlib.import_module(action)
                    execute = getattr(module, 'execute')
                    asyncio.get_event_loop().run_until_complete(execute(revocation))
                except Exception as e:
                    # Fixed: logger.warn is a deprecated alias of warning
                    logger.warning(
                        "Exception during execution of revocation action %s: %s" % (action, e))

        try:
            while True:
                try:
                    revocation_notifier.await_notifications(
                        perform_actions, revocation_cert_path=cert_path)
                except Exception as e:
                    logger.exception(e)
                    # Fixed: logger.warn is a deprecated alias of warning
                    logger.warning(
                        "No connection to revocation server, retrying in 10s...")
                    time.sleep(10)
        except KeyboardInterrupt:
            logger.info("TERM Signal received, shutting down...")
            tpm.flush_keys()
            server.shutdown()
    else:
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            logger.info("TERM Signal received, shutting down...")
            tpm.flush_keys()
            server.shutdown()
|
_env_runner.py | """Change based on yarr.runners._env_runner to choose between multiple GPUs for agent evaluation """
import copy
from copy import deepcopy
import logging
import os
import time
import multiprocessing
from multiprocessing import Process, Manager
from typing import Any, List
import numpy as np
from yarr.agents.agent import Agent
from yarr.envs.env import Env
# from yarr.utils.rollout_generator import RolloutGenerator
from extar.utils.rollouts import RolloutGenerator
import torch
class _EnvRunner(object):
    """Spawns and supervises environment rollout worker processes,
    optionally spreading evaluation agents across multiple GPUs.

    Based on yarr.runners._env_runner; each worker picks its device via
    ``proc_idx % n_device``.
    """

    def __init__(self,
                 train_env: Env,
                 eval_env: Env,
                 agent: Agent,
                 timesteps: int,
                 episodes: int,
                 episode_length: int,
                 kill_signal: Any,
                 step_signal: Any,
                 rollout_generator: RolloutGenerator,
                 save_load_lock,
                 current_replay_ratio,
                 target_replay_ratio,
                 weightsdir: str = None,
                 device_list: List[int] = None,
                 receive=False,
                 incoming=None,
                 train_step=None,
                 ):
        self._train_env = train_env
        self._eval_env = eval_env
        self._agent = agent
        self._agent_step = 0
        self.receive = receive
        self.waiting = incoming
        self.curr_train_step = train_step
        self._episodes = episodes
        self._episode_length = episode_length
        self._rollout_generator = rollout_generator
        self._weightsdir = weightsdir
        self._previous_loaded_weight_folder = ''
        self._timesteps = timesteps
        self._p_args = {}
        self.p_failures = {}
        manager = Manager()
        self.write_lock = manager.Lock()
        self.stored_transitions = manager.list()
        self.agent_summaries = manager.list()
        self._kill_signal = kill_signal
        self._step_signal = step_signal
        self._save_load_lock = save_load_lock
        self._current_replay_ratio = current_replay_ratio
        self._target_replay_ratio = target_replay_ratio
        # NOTE(review): when device_list is None this holds a single
        # torch.device rather than a list; it is never indexed in that
        # case (guarded by _n_device is None), but the asymmetry is fragile.
        self._device_list = torch.device("cpu") if device_list is None else [
            torch.device("cuda:%d" % idx) for idx in device_list]
        self._n_device = None if device_list is None else len(device_list)
        if self._n_device is not None:
            logging.info(f'_EnvRunner is splitting {self._n_device} devices with index {device_list}')
        else:
            logging.info('Warning! Agent NOT using GPU in internal _EnvRunner')

    def restart(self, name: str):
        """Restart a previously registered worker process by name."""
        p = Process(target=self._run_env, args=self._p_args[name], name=name)
        p.start()
        return p

    def spin_up_envs(self, name: str, num_envs: int, eval: bool):
        """Start ``num_envs`` worker processes named ``name0..nameN-1``."""
        ps = []
        for i in range(num_envs):
            n = name + str(i)
            self._p_args[n] = (n, eval, i)
            self.p_failures[n] = 0
            p = Process(target=self._run_env, args=self._p_args[n], name=n)
            p.start()
            ps.append(p)
        return ps

    def spinup_train_and_eval(self, n_train: int, n_eval: int, name: str = '_env'):
        """Start ``n_train`` training and ``n_eval`` evaluation workers.

        The worker index is passed through to _run_env and determines
        device assignment (proc_idx % n_device).
        """
        ps = []
        for i in range(n_train):
            proc_name = 'train' + name + str(i)
            self._p_args[proc_name] = (proc_name, False, i)
            self.p_failures[proc_name] = 0
            p = Process(target=self._run_env, args=self._p_args[proc_name], name=proc_name)
            p.start()
            ps.append(p)
        # Fixed: the original reused the leftover loop variable `i`,
        # which raised NameError when n_train == 0.  For n_train > 0 the
        # indices produced here are identical to the original's.
        start = n_train - 1 if n_train > 0 else 0
        for j in range(start, start + n_eval):
            proc_name = 'eval' + name + str(j)
            self._p_args[proc_name] = (proc_name, True, j)
            self.p_failures[proc_name] = 0
            p = Process(target=self._run_env, args=self._p_args[proc_name], name=proc_name)
            p.start()
            ps.append(p)
        return ps

    def _load_save(self, device=None):
        """Wait until weights exist in weightsdir, then load the newest set."""
        if self._weightsdir is None:
            logging.info("'weightsdir' was None, so not loading weights.")
            return
        while True:
            weight_folders = []
            with self._save_load_lock:
                if os.path.exists(self._weightsdir):
                    weight_folders = os.listdir(self._weightsdir)
                if len(weight_folders) > 0:
                    weight_folders = sorted(map(int, weight_folders))
                    # Only load if there has been a new weight saving
                    if self._previous_loaded_weight_folder != weight_folders[-1]:
                        self._previous_loaded_weight_folder = weight_folders[-1]
                        d = os.path.join(self._weightsdir, str(weight_folders[-1]))
                        self.current_d = weight_folders[-1]
                        try:
                            self._agent.load_weights(d)
                        except FileNotFoundError:
                            # Rare case when agent hasn't finished writing.
                            time.sleep(1)
                            self._agent.load_weights(d)
                        logging.info('Agent %s: Loaded weights: %s' % (self._name, d))
                    break
            logging.info('Waiting for weights to become available.')
            time.sleep(1)

    def receive_online_agent(self, incoming, train_step, device=None):
        """Load agent state handed over in memory instead of from disk."""
        self._agent.load_agent(incoming)
        # Fixed: the original passed extra args to logging.info without a
        # %s placeholder, which the logging module reports as an error.
        logging.info(
            'Debugging: the device agent got loaded to v.s. the device this runner process Should use: %s',
            self._agent._device)
        self._agent_step = train_step
        logging.info('Debugging: agent step vs step signal inside envrunner: %s %s',
                     train_step, self._step_signal.value)

    def _get_type(self, x):
        # Down-convert float64 arrays to float32 for storage.
        if x.dtype == np.float64:
            return np.float32
        return x.dtype

    def _run_env(self, name: str, eval: bool, proc_idx: int):
        """Worker-process entry point: run episodes, store transitions."""
        self._name = name
        self._agent = copy.deepcopy(self._agent)
        eval_device = None if self._n_device is None else self._device_list[int(proc_idx % self._n_device)]
        self._agent.build(training=False, device=eval_device)
        if self.receive:
            self.receive_online_agent(self.waiting, self.curr_train_step)
        logging.info('%s: Launching env.' % name)
        np.random.seed()
        logging.info('Agent information:')
        logging.info(self._agent)
        env = self._eval_env if eval else self._train_env
        env.eval = eval
        env.launch()
        for ep in range(self._episodes):
            self._load_save()
            logging.debug('%s: Starting episode %d.' % (name, ep))
            episode_rollout = []
            generator = self._rollout_generator.generator(
                self._step_signal, env, self._agent,
                self._episode_length, self._timesteps, eval)
            try:
                for replay_transition in generator:
                    # Throttle rollouts until the replay ratio recovers
                    # (training workers only; eval is never throttled).
                    while True:
                        if self._kill_signal.value:
                            env.shutdown()
                            return
                        if (eval or self._target_replay_ratio is None or
                                self._step_signal.value <= 0 or (
                                    self._current_replay_ratio.value >
                                    self._target_replay_ratio)):
                            break
                        time.sleep(1)
                        logging.debug(
                            'Agent. Waiting for replay_ratio %f to be more than %f' %
                            (self._current_replay_ratio.value, self._target_replay_ratio))

                    with self.write_lock:
                        if len(self.agent_summaries) == 0:
                            # Only store new summaries if the previous ones
                            # have been popped by the main env runner.
                            for s in self._agent.act_summaries():
                                self.agent_summaries.append(s)
                    episode_rollout.append(replay_transition)
            except StopIteration:
                continue
            except Exception as e:
                env.shutdown()
                raise e

            with self.write_lock:
                for transition in episode_rollout:
                    self.stored_transitions.append((name, transition, eval))
        env.shutdown()

    def kill(self):
        """Signal all worker processes to terminate."""
        self._kill_signal.value = True

    def update_failures(self, p, max_fails):
        """Record a worker failure; restart it, or raise after max_fails."""
        assert p.name in self.p_failures, f'Process {p.name} not found in current runner'
        self.p_failures[p.name] += 1
        n_failures = self.p_failures[p.name]
        if n_failures > max_fails:
            logging.error('Process %s failed too many times (%d times > %d)' %
                          (p.name, n_failures, max_fails))
            raise RuntimeError('Too many process failures.')
        logging.warning('Env %s failed (%d times <= %d). restarting' %
                        (p.name, n_failures, max_fails))
        return self.restart(p.name)
|
ui_controller.py | import shelve
import re
import sys
import threading
import time
import socket_temperature_connect
import socket_oscilloscope_connect
# import usb_connect
import serial_connect
from main_window import Ui_MainWindow
from PyQt5.QtWidgets import QApplication, QMainWindow, QTableWidgetItem, QMessageBox
from PyQt5.QtCore import QTimer, QThread, pyqtSignal
class MainUI(QMainWindow, Ui_MainWindow):
    def __init__(self):
        """Build the main window, open the config stores, and restore settings."""
        super(MainUI, self).__init__()
        self.setupUi(self)
        # Test/debug state
        self.test_info = False
        self.test_text = ''
        # Log file name
        self.log_name = ''
        # Open the shelve stores holding the persisted configuration
        self.init_scope = shelve.open('init/init_scope')
        self.init_temp = shelve.open('init/init_temp')
        self.init_power = shelve.open('init/init_power')
        self.init_debug = shelve.open('init/init_debug')
        # Oscilloscope tab data
        self.scope_ip = ''
        self.scope_setup = ''
        # Temperature tab data
        self.temp_ip = ''
        self.temp_channel1_temp = ''
        self.temp_channel2_temp = ''
        self.temp_channel3_temp = ''
        self.temp_channel4_temp = ''
        self.temp_is_channel1_temp = False
        self.temp_is_channel2_temp = False
        self.temp_is_channel3_temp = False
        self.temp_is_channel4_temp = False
        # Power tab data
        self.power_high_voltage = ''
        self.power_mid_voltage = ''
        self.power_low_voltage = ''
        self.power_vid = ''
        self.power_pid = ''
        # Debug tab data
        self.debug_port = ''
        self.debug_mode = []
        # Load the persisted settings into the UI and wire up signals
        self.init_setting()
        self.pushbutton_signal_manage()
        self.lineedit_signal_manage()
def pushbutton_signal_manage(self):
self.pushButton_info_start.clicked.connect(
lambda: self.pushbutton_slot_manage(self.pushButton_info_start)
)
def pushbutton_slot_manage(self, button):
if button == self.pushButton_info_start:
self.start()
    def lineedit_signal_manage(self):
        """Connect line-edit signals; currently a no-op (wiring disabled).

        The commented-out code below shows the intended textChanged hookup.
        """
        pass
        # self.lineEdit_scope_ip.textChanged.connect(
        #     lambda: self.lineedit_slot_manage(self.lineEdit_scope_ip)
        # )
    def lineedit_slot_manage(self, lineedit):
        """Slot for line-edit changes; validation is currently disabled.

        The commented-out code below validated IPv4 input and showed an
        error dialog; it is kept for reference.
        """
        pass
        # regex_ip = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}$')
        # if lineedit == self.lineEdit_scope_ip:
        #     if not regex_ip.search(self.lineEdit_scope_ip.text()):
        #         QMessageBox.critical(self, 'Wrong', 'IP address format error')
        #
        # if lineedit == self.lineEdit_temp_ip:
        #     if not regex_ip.search(self.lineEdit_temp_ip.text()):
        #         QMessageBox.critical(self, 'Wrong', 'IP address format error')
    # Automatically persist settings when the application is closed.
    def closeEvent(self, QCloseEvent):
        """Qt close hook: save current UI values to the shelve stores."""
        self.data_save()
        print('save success!')
# 开启软件时,将上一次关闭时保存的配置配置到软件上
def init_setting(self):
# Oscilloscope数据显示
try:
self.scope_ip = self.init_scope['scope_ip']
self.lineEdit_scope_ip.setText(self.scope_ip)
except KeyError:
pass
try:
self.scope_setup = self.init_scope['scope_setup']
self.lineEdit_scope_setup.setText(self.scope_setup)
except KeyError:
pass
# Temperature数据显示
try:
self.temp_ip = self.init_temp['temp_ip']
self.lineEdit_temp_ip.setText(self.temp_ip)
except KeyError:
pass
try:
self.temp_channel1_temp = self.init_temp['temp_channel1_temp']
self.lineEdit_temp_channl1.setText(self.temp_channel1_temp)
except KeyError:
pass
try:
self.temp_channel2_temp = self.init_temp['temp_channel2_temp']
self.lineEdit_temp_channl2.setText(self.temp_channel2_temp)
except KeyError:
pass
try:
self.temp_channel3_temp = self.init_temp['temp_channel3_temp']
self.lineEdit_temp_channl3.setText(self.temp_channel3_temp)
except KeyError:
pass
try:
self.temp_channel4_temp = self.init_temp['temp_channel4_temp']
self.lineEdit_temp_channl4.setText(self.temp_channel4_temp)
except KeyError:
pass
try:
self.temp_is_channel1_temp = self.init_temp['temp_is_channel1_temp']
self.checkBox_temp_channel1.setCheckState(self.temp_is_channel1_temp)
except KeyError:
pass
try:
self.temp_is_channel2_temp = self.init_temp['temp_is_channel2_temp']
self.checkBox_temp_channel2.setCheckState(self.temp_is_channel2_temp)
except KeyError:
pass
try:
self.temp_is_channel3_temp = self.init_temp['temp_is_channel3_temp']
self.checkBox_temp_channel3.setCheckState(self.temp_is_channel3_temp)
except KeyError:
pass
try:
self.temp_is_channel4_temp = self.init_temp['temp_is_channel4_temp']
self.checkBox_temp_channel4.setCheckState(self.temp_is_channel4_temp)
except KeyError:
pass
# Power数据显示
try:
self.power_high_voltage = self.init_power['power_high_voltage']
self.lineEdit_power_high_voltage.setText(self.power_high_voltage)
except KeyError:
pass
try:
self.power_mid_voltage = self.init_power['power_mid_voltage']
self.lineEdit_power_mid_voltage.setText(self.power_mid_voltage)
except KeyError:
pass
try:
self.power_low_voltage = self.init_power['power_low_voltage']
self.lineEdit_power_low_voltage.setText(self.power_low_voltage)
except KeyError:
pass
try:
self.power_vid = self.init_power['power_vid']
self.lineEdit_power_vid.setText(self.power_vid)
except KeyError:
pass
try:
self.power_pid = self.init_power['power_pid']
self.lineEdit_power_pid.setText(self.power_pid)
except KeyError:
pass
# Debug数据显示
try:
self.debug_port = self.init_debug['debug_port']
self.lineEdit_debug_port.setText(self.debug_port)
except KeyError:
pass
try:
self.debug_mode = self.init_debug['debug_mode']
for i in range(len(self.debug_mode)):
for j in range(len(self.debug_mode[0])):
self.tableWidget_debug_mode.setItem(i, j, QTableWidgetItem(self.debug_mode[i][j]))
except KeyError:
pass
# 界面数据保存到变量中,再保存到配置文件中
def data_save(self):
    """Read every configuration widget and persist the values to disk.

    Opens the four shelve databases under ``init/``, copies the current
    widget state into them, and guarantees the stores are closed again
    even if reading a widget fails.
    """
    # Open the persistent configuration stores.
    self.init_scope = shelve.open('init/init_scope')
    self.init_temp = shelve.open('init/init_temp')
    self.init_power = shelve.open('init/init_power')
    self.init_debug = shelve.open('init/init_debug')
    try:
        # Oscilloscope tab.
        self.scope_ip = self.lineEdit_scope_ip.text()
        self.scope_setup = self.lineEdit_scope_setup.text()
        self.init_scope['scope_ip'] = self.scope_ip
        self.init_scope['scope_setup'] = self.scope_setup
        # Temperature tab.
        self.temp_ip = self.lineEdit_temp_ip.text()
        self.temp_channel1_temp = self.lineEdit_temp_channl1.text()
        self.temp_channel2_temp = self.lineEdit_temp_channl2.text()
        self.temp_channel3_temp = self.lineEdit_temp_channl3.text()
        self.temp_channel4_temp = self.lineEdit_temp_channl4.text()
        self.temp_is_channel1_temp = self.checkBox_temp_channel1.checkState()
        self.temp_is_channel2_temp = self.checkBox_temp_channel2.checkState()
        self.temp_is_channel3_temp = self.checkBox_temp_channel3.checkState()
        self.temp_is_channel4_temp = self.checkBox_temp_channel4.checkState()
        self.init_temp['temp_ip'] = self.temp_ip
        self.init_temp['temp_channel1_temp'] = self.temp_channel1_temp
        self.init_temp['temp_channel2_temp'] = self.temp_channel2_temp
        self.init_temp['temp_channel3_temp'] = self.temp_channel3_temp
        self.init_temp['temp_channel4_temp'] = self.temp_channel4_temp
        self.init_temp['temp_is_channel1_temp'] = self.temp_is_channel1_temp
        self.init_temp['temp_is_channel2_temp'] = self.temp_is_channel2_temp
        self.init_temp['temp_is_channel3_temp'] = self.temp_is_channel3_temp
        self.init_temp['temp_is_channel4_temp'] = self.temp_is_channel4_temp
        # Power tab.
        self.power_high_voltage = self.lineEdit_power_high_voltage.text()
        self.power_mid_voltage = self.lineEdit_power_mid_voltage.text()
        self.power_low_voltage = self.lineEdit_power_low_voltage.text()
        self.power_vid = self.lineEdit_power_vid.text()
        self.power_pid = self.lineEdit_power_pid.text()
        self.init_power['power_high_voltage'] = self.power_high_voltage
        self.init_power['power_mid_voltage'] = self.power_mid_voltage
        self.init_power['power_low_voltage'] = self.power_low_voltage
        self.init_power['power_vid'] = self.power_vid
        self.init_power['power_pid'] = self.power_pid
        # Debug tab: scan the mode table, stopping a row at the first
        # empty cell and stopping entirely at the first empty row.
        self.debug_port = self.lineEdit_debug_port.text()
        debug_mode = []
        for row_idx in range(self.tableWidget_debug_mode.rowCount()):
            row = []
            for col_idx in range(self.tableWidget_debug_mode.columnCount()):
                item = self.tableWidget_debug_mode.item(row_idx, col_idx)
                # item() returns None for never-edited cells; treat that the
                # same as an empty string (previously masked by a bare except).
                if item is None or item.text() == '':
                    break
                row.append(item.text())
            if not row:
                break
            debug_mode.append(row)
        self.init_debug['debug_port'] = self.debug_port
        self.debug_mode = debug_mode
        self.init_debug['debug_mode'] = debug_mode
    finally:
        # Always close the configuration stores so data is flushed.
        self.init_scope.close()
        self.init_temp.close()
        self.init_power.close()
        self.init_debug.close()
def start(self):
    """Persist the current UI settings and launch the run loop in the background."""
    # One timestamped log file per run.
    timestamp = time.strftime("%Y-%m-%d %H-%M-%S", time.localtime())
    self.log_name = 'log/' + timestamp + '_' + 'log.txt'
    self.data_save()
    worker = threading.Thread(target=self.run)
    worker.start()
def run(self):
# Worker entry point: connect to every instrument, spawn one
# log-forwarding thread per instrument, generate the task lists,
# then execute the temperature/power/debug task combinations and
# trigger an oscilloscope capture named after the active tasks.
# NOTE(review): source indentation was lost in this dump; `name`
# uses i, j and k together, which suggests the three `for` loops
# were originally nested — confirm against the original file.
self.temp = socket_temperature_connect.Temperature()
threading.Thread(target=self.temp_info).start()
self.scope = socket_oscilloscope_connect.Oscilloscope()
threading.Thread(target=self.scope_info).start()
self.power = usb_connect.Power()
threading.Thread(target=self.power_info).start()
self.debug = serial_connect.Debug()
threading.Thread(target=self.debug_info).start()
self.temp.task_generate()
self.power.task_generate()
self.debug.task_generate()
self.temp.start()
for i in self.temp.task:
self.temp.run(i)
for j in self.power.task:
self.power.run(j)
for k in self.debug.task:
self.debug.run(k)
# Capture name encodes the current temperature/power/debug task ids.
name = 'temp_'+str(i[0])+'-'+'power_'+str(j[1])+'-'+'debug_'+str(k[1])
self.scope.run(name)
self.temp.stop()
# Forward instrument status messages to the log file and the info window
def temp_info(self):
    """Forward temperature-controller messages to the log file and the UI.

    Busy-polls the instrument's ``is_info`` flag on a background thread
    for the lifetime of the program.
    """
    while True:
        if self.temp.is_info:
            text = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' -> ' + self.temp.info
            # `with` guarantees the log handle is closed even if write fails.
            with open(self.log_name, 'a') as f:
                f.write(text + '\n')
            self.textBrowser_info_text.append(text)
            self.textBrowser_info_text.moveCursor(self.textBrowser_info_text.textCursor().End)
            self.temp.is_info = False
def scope_info(self):
    """Forward oscilloscope messages to the log file and the UI.

    Busy-polls the instrument's ``is_info`` flag on a background thread
    for the lifetime of the program.
    """
    while True:
        if self.scope.is_info:
            text = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' -> ' + self.scope.info
            # `with` guarantees the log handle is closed even if write fails.
            with open(self.log_name, 'a') as f:
                f.write(text + '\n')
            self.textBrowser_info_text.append(text)
            self.textBrowser_info_text.moveCursor(self.textBrowser_info_text.textCursor().End)
            self.scope.is_info = False
def power_info(self):
    """Forward power-supply messages to the log file and the UI.

    Busy-polls the instrument's ``is_info`` flag on a background thread
    for the lifetime of the program.
    """
    while True:
        if self.power.is_info:
            text = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' -> ' + self.power.info
            # `with` guarantees the log handle is closed even if write fails.
            with open(self.log_name, 'a') as f:
                f.write(text + '\n')
            self.textBrowser_info_text.append(text)
            self.textBrowser_info_text.moveCursor(self.textBrowser_info_text.textCursor().End)
            self.power.is_info = False
def debug_info(self):
    """Forward serial-debug messages to the log file and the UI.

    Busy-polls the instrument's ``is_info`` flag on a background thread
    for the lifetime of the program.
    """
    while True:
        if self.debug.is_info:
            text = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' -> ' + self.debug.info
            # `with` guarantees the log handle is closed even if write fails.
            with open(self.log_name, 'a') as f:
                f.write(text + '\n')
            self.textBrowser_info_text.append(text)
            self.textBrowser_info_text.moveCursor(self.textBrowser_info_text.textCursor().End)
            self.debug.is_info = False
if __name__ == '__main__':
    app = QApplication(sys.argv)
    # Use a distinct instance name; `MainUI = MainUI()` shadowed the class.
    window = MainUI()
    window.show()
    sys.exit(app.exec_())
|
test_statsd.py | # -*- coding: utf-8 -*-
# pylint: disable=line-too-long,too-many-public-methods
# Unless explicitly stated otherwise all files in this repository are licensed under the BSD-3-Clause License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2015-Present Datadog, Inc
"""
Tests for dogstatsd.py
"""
# Standard libraries
from collections import deque
from threading import Thread
import os
import socket
import errno
import time
import unittest
# Third-party libraries
import mock
from mock import call, mock_open, patch
import pytest
# Datadog libraries
from datadog import initialize, statsd
from datadog import __version__ as version
from datadog.dogstatsd.base import DogStatsd, UDP_OPTIMAL_PAYLOAD_LENGTH
from datadog.dogstatsd.context import TimedContextManagerDecorator
from datadog.util.compat import is_higher_py35, is_p3k
from tests.util.contextmanagers import preserve_environment_variable, EnvVars
from tests.unit.dogstatsd.fixtures import load_fixtures
class FakeSocket(object):
    """In-memory stand-in for a UDP socket; records every payload sent."""

    def __init__(self):
        self.payloads = deque()

    def send(self, payload):
        # The client must hand us raw bytes on py3 (str on py2).
        expected_type = bytes if is_p3k() else str
        assert isinstance(payload, expected_type)
        self.payloads.append(payload)

    def recv(self, count=1):
        """Pop *count* payloads (FIFO) joined by newlines, or None if empty."""
        try:
            packets = [self.payloads.popleft().decode('utf-8') for _ in range(count)]
        except IndexError:
            return None
        return '\n'.join(packets)

    def close(self):
        pass

    def __repr__(self):
        return str(self.payloads)
class BrokenSocket(FakeSocket):
    """Socket double whose ``send`` always fails with a generic socket error."""

    def send(self, payload):
        # Simulate an unconditional transport failure.
        raise socket.error("Socket error")
class OverflownSocket(FakeSocket):
    """Socket double whose ``send`` raises EAGAIN, as when the kernel buffer is full."""

    def send(self, payload):
        err = socket.error("Socket error")
        err.errno = errno.EAGAIN
        raise err
def telemetry_metrics(metrics=1, events=0, service_checks=0, bytes_sent=0, bytes_dropped=0, packets_sent=1, packets_dropped=0, transport="udp", tags=""):
    """Build the expected client-telemetry payload for the given counter values."""
    tag_suffix = "," + tags if tags else ""
    # Each telemetry counter is rendered as one line of the payload.
    counters = [
        ("metrics", metrics),
        ("events", events),
        ("service_checks", service_checks),
        ("bytes_sent", bytes_sent),
        ("bytes_dropped", bytes_dropped),
        ("packets_sent", packets_sent),
        ("packets_dropped", packets_dropped),
    ]
    lines = [
        "datadog.dogstatsd.client.{}:{}|c|#client:py,client_version:{},client_transport:{}{}".format(
            name, value, version, transport, tag_suffix)
        for name, value in counters
    ]
    return "\n".join(lines)
class TestDogStatsd(unittest.TestCase):
def setUp(self):
    """Create a telemetry-enabled DogStatsd on a fake socket and mock /proc."""
    self.statsd = DogStatsd(telemetry_min_flush_interval=0)
    self.statsd.socket = FakeSocket()
    self.statsd._reset_telemetry()
    # Route default-route detection through fixture data instead of /proc.
    fixture = load_fixtures('route')
    self._procfs_mock = patch('datadog.util.compat.builtins.open', mock_open())
    mocked_open = self._procfs_mock.start()
    mocked_open.return_value.readlines.return_value = fixture.split("\n")
def tearDown(self):
    """Restore the real ``open`` that setUp patched for the proc filesystem."""
    self._procfs_mock.stop()
def assert_equal_telemetry(self, expected_payload, actual_payload, telemetry=None):
    """Assert *actual_payload* equals *expected_payload* plus the telemetry block."""
    if telemetry is None:
        # Default telemetry: one metric whose bytes_sent matches the payload.
        telemetry = telemetry_metrics(bytes_sent=len(expected_payload))
    full_expected = "\n".join([expected_payload, telemetry]) if expected_payload else telemetry
    return self.assertEqual(full_expected, actual_payload)
def assert_almost_equal(self, val1, val2, delta):
    """Assert the two values differ by at most *delta*."""
    diff = abs(val1 - val2)
    return self.assertTrue(0 <= diff <= delta, "%s - %s not within %s" % (val1, val2, delta))
def recv(self, count=1):
    """Pop *count* packets from the fake socket, joined with newlines."""
    return "\n".join(self.statsd.socket.recv() for _ in range(count))
def test_initialization(self):
"""
`initialize` overrides `statsd` default instance attributes.
"""
# NOTE(review): this mutates the module-global `statsd` client; the
# fixture in setUp only re-creates self.statsd, not the global.
options = {
'statsd_host': "myhost",
'statsd_port': 1234
}
# Default values
self.assertEqual(statsd.host, "localhost")
self.assertEqual(statsd.port, 8125)
# After initialization
initialize(**options)
self.assertEqual(statsd.host, "myhost")
self.assertEqual(statsd.port, 1234)
# Add namespace
options['statsd_namespace'] = "mynamespace"
initialize(**options)
self.assertEqual(statsd.host, "myhost")
self.assertEqual(statsd.port, 1234)
self.assertEqual(statsd.namespace, "mynamespace")
# Set `statsd` host to the system's default route
initialize(statsd_use_default_route=True, **options)
self.assertEqual(statsd.host, "172.17.0.1")
self.assertEqual(statsd.port, 1234)
# Add UNIX socket
options['statsd_socket_path'] = '/var/run/dogstatsd.sock'
initialize(**options)
self.assertEqual(statsd.socket_path, options['statsd_socket_path'])
self.assertIsNone(statsd.host)
self.assertIsNone(statsd.port)
def test_dogstatsd_initialization_with_env_vars(self):
"""
Dogstatsd can retrieve its config from env vars when
not provided in constructor.
"""
# Setup: env vars are restored after the `with` blocks exit.
with preserve_environment_variable('DD_AGENT_HOST'):
os.environ['DD_AGENT_HOST'] = 'myenvvarhost'
with preserve_environment_variable('DD_DOGSTATSD_PORT'):
os.environ['DD_DOGSTATSD_PORT'] = '4321'
dogstatsd = DogStatsd()
# Assert: the port string from the env is coerced to int.
self.assertEqual(dogstatsd.host, "myenvvarhost")
self.assertEqual(dogstatsd.port, 4321)
def test_default_route(self):
"""
Dogstatsd host can be dynamically set to the default route.
"""
# The expected address comes from the mocked /proc route fixture in setUp.
self.assertEqual(
DogStatsd(use_default_route=True).host,
"172.17.0.1"
)
def test_set(self):
# A `set` metric produces the `|s` payload plus one telemetry packet.
self.statsd.set('set', 123)
self.assert_equal_telemetry('set:123|s', self.recv(2))
def test_gauge(self):
# A gauge produces the `|g` payload plus one telemetry packet.
self.statsd.gauge('gauge', 123.4)
self.assert_equal_telemetry('gauge:123.4|g', self.recv(2))
def test_counter(self):
# increment/decrement map to positive/negative `|c` payloads; telemetry
# is reset between cases so each expected packet starts fresh.
self.statsd.increment('page.views')
self.assert_equal_telemetry('page.views:1|c', self.recv(2))
self.statsd._reset_telemetry()
self.statsd.increment('page.views', 11)
self.assert_equal_telemetry('page.views:11|c', self.recv(2))
self.statsd._reset_telemetry()
self.statsd.decrement('page.views')
self.assert_equal_telemetry('page.views:-1|c', self.recv(2))
self.statsd._reset_telemetry()
self.statsd.decrement('page.views', 12)
self.assert_equal_telemetry('page.views:-12|c', self.recv(2))
def test_histogram(self):
# A histogram produces the `|h` payload plus one telemetry packet.
self.statsd.histogram('histo', 123.4)
self.assert_equal_telemetry('histo:123.4|h', self.recv(2))
def test_pipe_in_tags(self):
# Pipe characters in tags are sanitized to underscores.
self.statsd.gauge('gt', 123.4, tags=['pipe|in:tag', 'red'])
self.assert_equal_telemetry('gt:123.4|g|#pipe_in:tag,red', self.recv(2))
def test_tagged_gauge(self):
# Tags are appended after `|#`, comma-separated, in the given order.
self.statsd.gauge('gt', 123.4, tags=['country:china', 'age:45', 'blue'])
self.assert_equal_telemetry('gt:123.4|g|#country:china,age:45,blue', self.recv(2))
def test_tagged_counter(self):
# Non-ASCII tag values pass through unchanged.
self.statsd.increment('ct', tags=[u'country:españa', 'red'])
self.assert_equal_telemetry(u'ct:1|c|#country:españa,red', self.recv(2))
def test_tagged_histogram(self):
# Histogram payload with a single value-less tag.
self.statsd.histogram('h', 1, tags=['red'])
self.assert_equal_telemetry('h:1|h|#red', self.recv(2))
def test_sample_rate(self):
self.statsd._telemetry = False # disabling telemetry since sample_rate imply randomness
# sample_rate=0 must drop everything.
self.statsd.increment('c', sample_rate=0)
self.assertFalse(self.statsd.socket.recv())
# Statistical check: ~30% of 10000 increments should be emitted.
for _ in range(10000):
self.statsd.increment('sampled_counter', sample_rate=0.3)
self.assert_almost_equal(3000, len(self.statsd.socket.payloads), 150)
self.assertEqual('sampled_counter:1|c|@0.3', self.recv())
def test_default_sample_rate(self):
self.statsd._telemetry = False # disabling telemetry since sample_rate imply randomness
# A client-level default sample rate applies when none is passed per call.
self.statsd.default_sample_rate = 0.3
for _ in range(10000):
self.statsd.increment('sampled_counter')
self.assert_almost_equal(3000, len(self.statsd.socket.payloads), 150)
self.assertEqual('sampled_counter:1|c|@0.3', self.recv())
def test_tags_and_samples(self):
self.statsd._telemetry = False # disabling telemetry since sample_rate imply randomness
# The sample-rate marker precedes the tag section in the payload.
for _ in range(100):
self.statsd.gauge('gst', 23, tags=["sampled"], sample_rate=0.9)
self.assertEqual('gst:23|g|@0.9|#sampled', self.recv())
def test_timing(self):
# A timing produces the `|ms` payload plus one telemetry packet.
self.statsd.timing('t', 123)
self.assert_equal_telemetry('t:123|ms', self.recv(2))
def test_event(self):
# Events encode title/text byte lengths in `_e{t,l}` and escape newlines.
self.statsd.event('Title', u'L1\nL2', priority='low', date_happened=1375296969)
event = u'_e{5,6}:Title|L1\\nL2|d:1375296969|p:low'
self.assert_equal_telemetry(event, self.recv(2), telemetry=telemetry_metrics(metrics=0, events=1, bytes_sent=len(event)))
self.statsd._reset_telemetry()
# Unicode text, aggregation key and tags are all supported.
self.statsd.event('Title', u'♬ †øU †øU ¥ºu T0µ ♪',
aggregation_key='key', tags=['t1', 't2:v2'])
event = u'_e{5,19}:Title|♬ †øU †øU ¥ºu T0µ ♪|k:key|#t1,t2:v2'
self.assert_equal_telemetry(event, self.recv(2), telemetry=telemetry_metrics(metrics=0, events=1, bytes_sent=len(event)))
def test_event_constant_tags(self):
# Client-level constant tags are appended to every event's tag list
# and also attached to the telemetry payload.
self.statsd.constant_tags = ['bar:baz', 'foo']
self.statsd.event('Title', u'L1\nL2', priority='low', date_happened=1375296969)
event = u'_e{5,6}:Title|L1\\nL2|d:1375296969|p:low|#bar:baz,foo'
self.assert_equal_telemetry(event, self.recv(2), telemetry=telemetry_metrics(metrics=0, events=1, tags="bar:baz,foo", bytes_sent=len(event)))
self.statsd._reset_telemetry()
self.statsd.event('Title', u'♬ †øU †øU ¥ºu T0µ ♪',
aggregation_key='key', tags=['t1', 't2:v2'])
event = u'_e{5,19}:Title|♬ †øU †øU ¥ºu T0µ ♪|k:key|#t1,t2:v2,bar:baz,foo'
self.assert_equal_telemetry(event, self.recv(2), telemetry=telemetry_metrics(metrics=0, events=1, tags="bar:baz,foo", bytes_sent=len(event)))
def test_service_check(self):
now = int(time.time())
# Service checks escape newlines and `m:` sequences in the message.
self.statsd.service_check(
'my_check.name', self.statsd.WARNING,
tags=['key1:val1', 'key2:val2'], timestamp=now,
hostname='i-abcd1234', message=u"♬ †øU \n†øU ¥ºu|m: T0µ ♪")
check = u'_sc|my_check.name|{0}|d:{1}|h:i-abcd1234|#key1:val1,key2:val2|m:{2}'.format(self.statsd.WARNING, now, u"♬ †øU \\n†øU ¥ºu|m\\: T0µ ♪")
self.assert_equal_telemetry(check, self.recv(2), telemetry=telemetry_metrics(metrics=0, service_checks=1, bytes_sent=len(check)))
def test_service_check_constant_tags(self):
# Constant tags are used alone when no per-check tags are given,
# and appended after per-check tags when both are present.
self.statsd.constant_tags = ['bar:baz', 'foo']
now = int(time.time())
self.statsd.service_check(
'my_check.name', self.statsd.WARNING,
timestamp=now,
hostname='i-abcd1234', message=u"♬ †øU \n†øU ¥ºu|m: T0µ ♪")
check = u'_sc|my_check.name|{0}|d:{1}|h:i-abcd1234|#bar:baz,foo|m:{2}'.format(self.statsd.WARNING, now, u"♬ †øU \\n†øU ¥ºu|m\\: T0µ ♪")
self.assert_equal_telemetry(
check,
self.recv(2),
telemetry=telemetry_metrics(metrics=0, service_checks=1, tags="bar:baz,foo", bytes_sent=len(check))
)
self.statsd._reset_telemetry()
self.statsd.service_check(
'my_check.name', self.statsd.WARNING,
tags=['key1:val1', 'key2:val2'], timestamp=now,
hostname='i-abcd1234', message=u"♬ †øU \n†øU ¥ºu|m: T0µ ♪")
check = u'_sc|my_check.name|{0}|d:{1}|h:i-abcd1234|#key1:val1,key2:val2,bar:baz,foo|m:{2}'.format(self.statsd.WARNING, now, u"♬ †øU \\n†øU ¥ºu|m\\: T0µ ♪")
self.assert_equal_telemetry(
check,
self.recv(2),
telemetry=telemetry_metrics(metrics=0, service_checks=1, tags="bar:baz,foo", bytes_sent=len(check))
)
def test_metric_namespace(self):
"""
Namespace prefixes all metric names.
"""
# The namespace is joined to the metric name with a dot.
self.statsd.namespace = "foo"
self.statsd.gauge('gauge', 123.4)
self.assert_equal_telemetry('foo.gauge:123.4|g', self.recv(2))
# Test client-level constant tags
def test_gauge_constant_tags(self):
# Constant tags apply to gauges and to the telemetry packet alike.
self.statsd.constant_tags = ['bar:baz', 'foo']
self.statsd.gauge('gauge', 123.4)
metric = 'gauge:123.4|g|#bar:baz,foo'
self.assert_equal_telemetry(metric, self.recv(2), telemetry=telemetry_metrics(tags="bar:baz,foo", bytes_sent=len(metric)))
def test_counter_constant_tag_with_metric_level_tags(self):
# Metric-level tags come first, then client constant tags.
self.statsd.constant_tags = ['bar:baz', 'foo']
self.statsd.increment('page.views', tags=['extra'])
metric = 'page.views:1|c|#extra,bar:baz,foo'
self.assert_equal_telemetry(metric, self.recv(2), telemetry=telemetry_metrics(tags="bar:baz,foo", bytes_sent=len(metric)))
def test_gauge_constant_tags_with_metric_level_tags_twice(self):
metric_level_tag = ['foo:bar']
self.statsd.constant_tags = ['bar:baz']
self.statsd.gauge('gauge', 123.4, tags=metric_level_tag)
metric = 'gauge:123.4|g|#foo:bar,bar:baz'
self.assert_equal_telemetry(metric, self.recv(2), telemetry=telemetry_metrics(tags="bar:baz", bytes_sent=len(metric)))
self.statsd._reset_telemetry()
# sending metrics multiple times with same metric-level tags
# should not duplicate the tags being sent
self.statsd.gauge('gauge', 123.4, tags=metric_level_tag)
metric = "gauge:123.4|g|#foo:bar,bar:baz"
self.assert_equal_telemetry(metric, self.recv(2), telemetry=telemetry_metrics(tags="bar:baz", bytes_sent=len(metric)))
def test_socket_error(self):
# A failing send is logged as a warning (packet dropped), not an error.
self.statsd.socket = BrokenSocket()
with mock.patch("datadog.dogstatsd.base.log") as mock_log:
self.statsd.gauge('no error', 1)
mock_log.error.assert_not_called()
mock_log.warning.assert_called_once_with(
"Error submitting packet: %s, dropping the packet and closing the socket",
mock.ANY,
)
def test_socket_overflown(self):
# EAGAIN (buffer full) is logged at debug level only.
self.statsd.socket = OverflownSocket()
with mock.patch("datadog.dogstatsd.base.log") as mock_log:
self.statsd.gauge('no error', 1)
mock_log.error.assert_not_called()
calls = [call("Socket send would block: %s, dropping the packet", mock.ANY)]
# Expected twice: once for the metric, once for the telemetry flush.
mock_log.debug.assert_has_calls(calls * 2)
def test_distributed(self):
"""
Measure the distribution of a function's run time using distribution custom metric.
"""
# In seconds
@self.statsd.distributed('distributed.test')
def func(arg1, arg2, kwarg1=1, kwarg2=1):
"""docstring"""
time.sleep(0.5)
return (arg1, arg2, kwarg1, kwarg2)
# The decorator must preserve the wrapped function's metadata.
self.assertEqual('func', func.__name__)
self.assertEqual('docstring', func.__doc__)
result = func(1, 2, kwarg2=3)
# Assert it handles args and kwargs correctly.
self.assertEqual(result, (1, 2, 1, 3))
packet = self.recv(2).split("\n")[0] # ignore telemetry packet
name_value, type_ = packet.split('|')
name, value = name_value.split(':')
self.assertEqual('d', type_)
self.assertEqual('distributed.test', name)
self.assert_almost_equal(0.5, float(value), 0.1)
# Repeat, force timer value in milliseconds
@self.statsd.distributed('distributed.test', use_ms=True)
def func(arg1, arg2, kwarg1=1, kwarg2=1):
"""docstring"""
time.sleep(0.5)
return (arg1, arg2, kwarg1, kwarg2)
func(1, 2, kwarg2=3)
packet = self.recv(2).split("\n")[0] # ignore telemetry packet
name_value, type_ = packet.split('|')
name, value = name_value.split(':')
self.assertEqual('d', type_)
self.assertEqual('distributed.test', name)
self.assert_almost_equal(500, float(value), 100)
def test_timed(self):
"""
Measure the distribution of a function's run time.
"""
# In seconds
@self.statsd.timed('timed.test')
def func(arg1, arg2, kwarg1=1, kwarg2=1):
"""docstring"""
time.sleep(0.5)
return (arg1, arg2, kwarg1, kwarg2)
# The decorator must preserve the wrapped function's metadata.
self.assertEqual('func', func.__name__)
self.assertEqual('docstring', func.__doc__)
result = func(1, 2, kwarg2=3)
# Assert it handles args and kwargs correctly.
self.assertEqual(result, (1, 2, 1, 3))
packet = self.recv(2).split("\n")[0] # ignore telemetry packet
name_value, type_ = packet.split('|')
name, value = name_value.split(':')
self.assertEqual('ms', type_)
self.assertEqual('timed.test', name)
self.assert_almost_equal(0.5, float(value), 0.1)
# Repeat, force timer value in milliseconds
@self.statsd.timed('timed.test', use_ms=True)
def func(arg1, arg2, kwarg1=1, kwarg2=1):
"""docstring"""
time.sleep(0.5)
return (arg1, arg2, kwarg1, kwarg2)
func(1, 2, kwarg2=3)
packet = self.recv(2).split("\n")[0] # ignore telemetry packet
name_value, type_ = packet.split('|')
name, value = name_value.split(':')
self.assertEqual('ms', type_)
self.assertEqual('timed.test', name)
self.assert_almost_equal(500, float(value), 100)
def test_timed_in_ms(self):
"""
Timed value is reported in ms when statsd.use_ms is True.
"""
# Arm statsd to use_ms
self.statsd.use_ms = True
# Sample a function run time
@self.statsd.timed('timed.test')
def func(arg1, arg2, kwarg1=1, kwarg2=1):
"""docstring"""
time.sleep(0.5)
return (arg1, arg2, kwarg1, kwarg2)
func(1, 2, kwarg2=3)
# Assess the packet
packet = self.recv(2).split("\n")[0] # ignore telemetry packet
name_value, type_ = packet.split('|')
name, value = name_value.split(':')
self.assertEqual('ms', type_)
self.assertEqual('timed.test', name)
self.assert_almost_equal(500, float(value), 100)
# Repeat, force timer value in seconds
# (per-call use_ms=False overrides the client-level setting)
@self.statsd.timed('timed.test', use_ms=False)
def func(arg1, arg2, kwarg1=1, kwarg2=1):
"""docstring"""
time.sleep(0.5)
return (arg1, arg2, kwarg1, kwarg2)
func(1, 2, kwarg2=3)
packet = self.recv()
name_value, type_ = packet.split('|')
name, value = name_value.split(':')
self.assertEqual('ms', type_)
self.assertEqual('timed.test', name)
self.assert_almost_equal(0.5, float(value), 0.1)
def test_timed_no_metric(self):
    """
    Test using a decorator without providing a metric.
    """
    # Without an explicit metric name, @timed falls back to the decorated
    # function's fully-qualified module path + name.
    @self.statsd.timed()
    def func(arg1, arg2, kwarg1=1, kwarg2=1):
        """docstring"""
        time.sleep(0.5)
        return (arg1, arg2, kwarg1, kwarg2)

    # The decorator must preserve the wrapped function's metadata.
    self.assertEqual('func', func.__name__)
    self.assertEqual('docstring', func.__doc__)

    result = func(1, 2, kwarg2=3)
    # Assert it handles args and kwargs correctly.
    self.assertEqual(result, (1, 2, 1, 3))
    packet = self.recv(2).split("\n")[0]  # ignore telemetry packet
    name_value, type_ = packet.split('|')
    name, value = name_value.split(':')
    self.assertEqual('ms', type_)
    self.assertEqual('tests.unit.dogstatsd.test_statsd.func', name)
    self.assert_almost_equal(0.5, float(value), 0.1)
@pytest.mark.skipif(not is_higher_py35(), reason="Coroutines are supported on Python 3.5 or higher.")
def test_timed_coroutine(self):
"""
Measure the distribution of a coroutine function's run time.
Warning: Python > 3.5 only.
"""
import asyncio
# The coroutine is defined via exec() so the file still parses on
# interpreters without `async def` syntax.
source = """
@self.statsd.timed('timed.test')
async def print_foo():
"docstring"
import time
time.sleep(0.5)
print("foo")
"""
exec(source, {}, locals())
loop = asyncio.get_event_loop()
loop.run_until_complete(locals()['print_foo']())
loop.close()
# Assert
packet = self.recv(2).split("\n")[0] # ignore telemetry packet
name_value, type_ = packet.split('|')
name, value = name_value.split(':')
self.assertEqual('ms', type_)
self.assertEqual('timed.test', name)
self.assert_almost_equal(0.5, float(value), 0.1)
def test_timed_context(self):
"""
Measure the distribution of a context's run time.
"""
# In seconds
with self.statsd.timed('timed_context.test') as timer:
self.assertTrue(isinstance(timer, TimedContextManagerDecorator))
time.sleep(0.5)
packet = self.recv(2).split("\n")[0] # ignore telemetry packet
name_value, type_ = packet.split('|')
name, value = name_value.split(':')
self.assertEqual('ms', type_)
self.assertEqual('timed_context.test', name)
self.assert_almost_equal(0.5, float(value), 0.1)
# The context manager also exposes the elapsed time directly.
self.assert_almost_equal(0.5, timer.elapsed, 0.1)
# In milliseconds
with self.statsd.timed('timed_context.test', use_ms=True) as timer:
time.sleep(0.5)
packet = self.recv(2).split("\n")[0] # ignore telemetry packet
name_value, type_ = packet.split('|')
name, value = name_value.split(':')
self.assertEqual('ms', type_)
self.assertEqual('timed_context.test', name)
self.assert_almost_equal(500, float(value), 100)
self.assert_almost_equal(500, timer.elapsed, 100)
def test_timed_context_exception(self):
"""
Exception bubbles out of the `timed` context manager.
"""
class ContextException(Exception):
pass
def func(self):
with self.statsd.timed('timed_context.test.exception'):
time.sleep(0.5)
raise ContextException()
# Ensure the exception was raised.
with pytest.raises(ContextException):
func(self)
# Ensure the timing was recorded (even though the body raised).
packet = self.recv(2).split("\n")[0] # ignore telemetry packet
name_value, type_ = packet.split('|')
name, value = name_value.split(':')
self.assertEqual('ms', type_)
self.assertEqual('timed_context.test.exception', name)
self.assert_almost_equal(0.5, float(value), 0.1)
def test_timed_context_no_metric_exception(self):
"""Test that an exception occurs if using a context manager without a metric."""
def func(self):
with self.statsd.timed():
time.sleep(0.5)
# Ensure the exception was raised.
with pytest.raises(TypeError):
func(self)
# Ensure no timing was recorded on failure.
packet = self.statsd.socket.recv()
self.assertIsNone(packet)
def test_timed_start_stop_calls(self):
# The timer object can be driven manually via start()/stop().
# In seconds
timer = self.statsd.timed('timed_context.test')
timer.start()
time.sleep(0.5)
timer.stop()
packet = self.recv(2).split("\n")[0] # ignore telemetry packet
name_value, type_ = packet.split('|')
name, value = name_value.split(':')
self.assertEqual('ms', type_)
self.assertEqual('timed_context.test', name)
self.assert_almost_equal(0.5, float(value), 0.1)
# In milliseconds
timer = self.statsd.timed('timed_context.test', use_ms=True)
timer.start()
time.sleep(0.5)
timer.stop()
packet = self.recv(2).split("\n")[0] # ignore telemetry packet
name_value, type_ = packet.split('|')
name, value = name_value.split(':')
self.assertEqual('ms', type_)
self.assertEqual('timed_context.test', name)
self.assert_almost_equal(500, float(value), 100)
def test_batching(self):
# Metrics recorded inside an open buffer are joined into one packet
# on close_buffer().
self.statsd.open_buffer()
self.statsd.gauge('page.views', 123)
self.statsd.timing('timer', 123)
self.statsd.close_buffer()
expected = "page.views:123|g\ntimer:123|ms"
self.assert_equal_telemetry(
expected,
self.recv(2),
telemetry=telemetry_metrics(metrics=2, bytes_sent=len(expected))
)
def test_batching_sequential(self):
# Two sequential buffered batches; the second batch's telemetry
# accounts for the first batch's payload + telemetry bytes.
self.statsd.open_buffer()
self.statsd.gauge('discarded.data', 123)
self.statsd.close_buffer()
self.statsd.open_buffer()
self.statsd.gauge('page.views', 123)
self.statsd.timing('timer', 123)
self.statsd.close_buffer()
expected1 = 'discarded.data:123|g'
expected_metrics1=telemetry_metrics(metrics=1, bytes_sent=len(expected1))
self.assert_equal_telemetry(
expected1,
self.recv(2),
telemetry=expected_metrics1)
expected2 = "page.views:123|g\ntimer:123|ms"
self.assert_equal_telemetry(
expected2,
self.recv(2),
telemetry=telemetry_metrics(
metrics=2,
packets_sent=2,
bytes_sent=len(expected2 + expected_metrics1)
)
)
def test_threaded_batching(self):
# Four threads batch concurrently with staggered sleeps so their
# buffers close in a deterministic order.
num_threads = 4
threads = []
def batch_metrics(index, dsd):
time.sleep(0.3 * index)
dsd.open_buffer()
time.sleep(0.1)
dsd.gauge('page.%d.views' % index, 123)
time.sleep(0.1)
dsd.timing('timer.%d' % index, 123)
time.sleep(0.5)
dsd.close_buffer()
for idx in range(num_threads):
threads.append(Thread(target=batch_metrics, args=(idx, self.statsd)))
for thread in threads:
thread.start()
for thread in threads:
if thread.is_alive():
thread.join()
# This is a bit of a tricky thing to test for - initially only our data packet is
# sent but then telemetry is flushed/reset and the subsequent metric xmit includes
# the telemetry data for the previous packet. The reason for 726 -> 727 increase is
# because packet #2 sends a three digit byte count ("726") that then increases the
# next metric size by 1 byte.
expected_xfer_metrics = [
(33, 1),
(726, 2),
(727, 2),
(727, 2),
]
for idx in range(num_threads):
expected_message = "page.%d.views:123|g\ntimer.%d:123|ms" % (idx, idx)
bytes_sent, packets_sent = expected_xfer_metrics[idx]
self.assert_equal_telemetry(
expected_message,
self.recv(2),
telemetry=telemetry_metrics(
metrics=2,
bytes_sent=bytes_sent,
packets_sent=packets_sent,
)
)
def test_close_buffer_without_open(self):
# Closing a buffer that was never opened must raise BufferError.
dogstatsd = DogStatsd()
with self.assertRaises(BufferError):
dogstatsd.close_buffer()
def test_threaded_close_buffer_without_open(self):
# A second close_buffer() from another thread (after the buffer is
# already closed) must raise, while the first thread's batch succeeds.
def batch_metrics(dsd):
time.sleep(0.3)
dsd.open_buffer()
dsd.gauge('page.views', 123)
dsd.timing('timer', 123)
time.sleep(0.5)
dsd.close_buffer()
def close_async_buffer(self, dsd):
# Ensures that buffer is defined
dsd.open_buffer()
dsd.close_buffer()
time.sleep(0.5)
with self.assertRaises(RuntimeError):
dsd.close_buffer()
thread1 = Thread(target=batch_metrics, args=(self.statsd,))
thread2 = Thread(target=close_async_buffer, args=(self, self.statsd,))
for thread in [thread1, thread2]:
thread.start()
for thread in [thread1, thread2]:
if thread.is_alive():
thread.join()
expected_message = "page.views:123|g\ntimer:123|ms"
self.assert_equal_telemetry(
expected_message,
self.recv(2),
telemetry=telemetry_metrics(
metrics=2,
bytes_sent=29,
packets_sent=1,
)
)
def test_telemetry(self):
# Pre-load every telemetry counter, flush once, and verify both the
# emitted payload and that all counters reset afterwards.
self.statsd.metrics_count = 1
self.statsd.events_count = 2
self.statsd.service_checks_count = 3
self.statsd.bytes_sent = 4
self.statsd.bytes_dropped = 5
self.statsd.packets_sent = 6
self.statsd.packets_dropped = 7
self.statsd.open_buffer()
self.statsd.gauge('page.views', 123)
self.statsd.close_buffer()
payload = "page.views:123|g"
telemetry = telemetry_metrics(metrics=2, events=2, service_checks=3, bytes_sent=4 + len(payload),
bytes_dropped=5, packets_sent=7, packets_dropped=7)
self.assert_equal_telemetry(payload, self.recv(2), telemetry=telemetry)
# After the flush, only the telemetry packet itself has been counted.
self.assertEqual(0, self.statsd.metrics_count)
self.assertEqual(0, self.statsd.events_count)
self.assertEqual(0, self.statsd.service_checks_count)
self.assertEqual(len(telemetry), self.statsd.bytes_sent)
self.assertEqual(0, self.statsd.bytes_dropped)
self.assertEqual(1, self.statsd.packets_sent)
self.assertEqual(0, self.statsd.packets_dropped)
def test_telemetry_flush_interval(self):
# Telemetry is only flushed once the flush interval has elapsed.
dogstatsd = DogStatsd()
fake_socket = FakeSocket()
dogstatsd.socket = fake_socket
# set the last flush time in the future to be sure we won't flush
dogstatsd._last_flush_time = time.time() + dogstatsd._telemetry_flush_interval
dogstatsd.gauge('gauge', 123.4)
metric = 'gauge:123.4|g'
self.assertEqual(metric, fake_socket.recv())
time1 = time.time()
# setting the last flush time in the past to trigger a telemetry flush
dogstatsd._last_flush_time = time1 - dogstatsd._telemetry_flush_interval -1
dogstatsd.gauge('gauge', 123.4)
self.assert_equal_telemetry(metric, fake_socket.recv(2), telemetry=telemetry_metrics(metrics=2, bytes_sent=2*len(metric), packets_sent=2))
# assert that _last_flush_time has been updated
self.assertTrue(time1 < dogstatsd._last_flush_time)
def test_telemetry_flush_interval_alternate_destination(self):
# With telemetry_host set, telemetry goes to a dedicated socket
# while metrics keep using the main socket.
dogstatsd = DogStatsd(telemetry_host='foo')
fake_socket = FakeSocket()
dogstatsd.socket = fake_socket
fake_telemetry_socket = FakeSocket()
dogstatsd.telemetry_socket = fake_telemetry_socket
self.assertIsNotNone(dogstatsd.telemetry_host)
self.assertIsNotNone(dogstatsd.telemetry_port)
self.assertTrue(dogstatsd._dedicated_telemetry_destination())
# set the last flush time in the future to be sure we won't flush
dogstatsd._last_flush_time = time.time() + dogstatsd._telemetry_flush_interval
dogstatsd.gauge('gauge', 123.4)
self.assertEqual('gauge:123.4|g', fake_socket.recv())
time1 = time.time()
# setting the last flush time in the past to trigger a telemetry flush
dogstatsd._last_flush_time = time1 - dogstatsd._telemetry_flush_interval - 1
dogstatsd.gauge('gauge', 123.4)
self.assertEqual('gauge:123.4|g', fake_socket.recv())
self.assert_equal_telemetry('', fake_telemetry_socket.recv(), telemetry=telemetry_metrics(metrics=2, bytes_sent=13*2, packets_sent=2))
# assert that _last_flush_time has been updated
self.assertTrue(time1 < dogstatsd._last_flush_time)
def test_telemetry_flush_interval_batch(self):
    """Closing a buffer after the telemetry flush interval has elapsed
    flushes both the batched metrics and the telemetry payload."""
    dogstatsd = DogStatsd()
    fake_socket = FakeSocket()
    dogstatsd.socket = fake_socket
    dogstatsd.open_buffer()
    dogstatsd.gauge('gauge1', 1)
    dogstatsd.gauge('gauge2', 2)
    time1 = time.time()
    # setting the last flush time in the past to trigger a telemetry flush
    # (fixed: this previously read the interval from the unrelated
    # module-level `statsd` instance instead of the local `dogstatsd`)
    dogstatsd._last_flush_time = time1 - dogstatsd._telemetry_flush_interval - 1
    dogstatsd.close_buffer()
    metric = 'gauge1:1|g\ngauge2:2|g'
    self.assert_equal_telemetry(metric, fake_socket.recv(2), telemetry=telemetry_metrics(metrics=2, bytes_sent=len(metric)))
    # assert that _last_flush_time has been updated
    self.assertTrue(time1 < dogstatsd._last_flush_time)
def test_context_manager(self):
    """Using DogStatsd as a context manager buffers metrics and flushes
    them (plus telemetry) when the context exits."""
    fake_socket = FakeSocket()
    with DogStatsd(telemetry_min_flush_interval=0) as dogstatsd:
        dogstatsd.socket = fake_socket
        dogstatsd.gauge('page.views', 123)
        dogstatsd.timing('timer', 123)
    # Both metrics arrive in a single newline-joined packet on exit.
    metric = "page.views:123|g\ntimer:123|ms"
    self.assertEqual(metric, fake_socket.recv())
    self.assertEqual(telemetry_metrics(metrics=2, bytes_sent=len(metric)), fake_socket.recv())
    # self.assert_equal_telemetry("page.views:123|g\ntimer:123|ms", fake_socket.recv(2), telemetry=telemetry_metrics(metrics=2))
def test_batched_buffer_autoflush(self):
    """A full batch buffer auto-flushes once the payload would exceed the
    maximum UDP payload size; the overflow metric flushes on exit."""
    fake_socket = FakeSocket()
    bytes_sent = 0
    with DogStatsd(telemetry_min_flush_interval=0) as dogstatsd:
        single_metric = 'mycounter:1|c'
        self.assertEqual(dogstatsd._max_payload_size, UDP_OPTIMAL_PAYLOAD_LENGTH)
        # +1 accounts for the newline separator between metrics.
        metrics_per_packet = dogstatsd._max_payload_size // (len(single_metric) + 1)
        dogstatsd.socket = fake_socket
        # One more increment than fits in a packet forces an auto-flush.
        for _ in range(metrics_per_packet + 1):
            dogstatsd.increment('mycounter')
        payload = '\n'.join([single_metric for _ in range(metrics_per_packet)])
        telemetry = telemetry_metrics(metrics=metrics_per_packet + 1, bytes_sent=len(payload))
        bytes_sent += len(payload) + len(telemetry)
        self.assertEqual(payload, fake_socket.recv())
        self.assertEqual(telemetry, fake_socket.recv())
    # The overflow metric is flushed when the context manager exits.
    self.assertEqual(single_metric, fake_socket.recv())
    telemetry = telemetry_metrics(metrics=0, packets_sent=2, bytes_sent=len(single_metric) + len(telemetry))
    self.assertEqual(telemetry, fake_socket.recv())
def test_module_level_instance(self):
    """The module-level `statsd` singleton is a DogStatsd instance."""
    # assertIsInstance gives a clearer failure message than
    # assertTrue(isinstance(...)).
    self.assertIsInstance(statsd, DogStatsd)
def test_instantiating_does_not_connect(self):
    # Constructing a client must be lazy: no socket until first use.
    dogpound = DogStatsd()
    self.assertIsNone(dogpound.socket)
def test_accessing_socket_opens_socket(self):
    # get_socket() lazily creates the connection on first access.
    dogpound = DogStatsd()
    try:
        self.assertIsNotNone(dogpound.get_socket())
    finally:
        # Always close the real socket we just opened.
        dogpound.socket.close()
def test_accessing_socket_multiple_times_returns_same_socket(self):
    # Repeated get_socket() calls must return the cached socket object.
    dogpound = DogStatsd()
    fresh_socket = FakeSocket()
    dogpound.socket = fresh_socket
    self.assertEqual(fresh_socket, dogpound.get_socket())
    self.assertNotEqual(FakeSocket(), dogpound.get_socket())
def test_tags_from_environment(self):
    """Tags in DATADOG_TAGS are picked up at construction time and applied
    to every metric."""
    with preserve_environment_variable('DATADOG_TAGS'):
        os.environ['DATADOG_TAGS'] = 'country:china,age:45,blue'
        # The env var is read during __init__, so construct inside the block.
        dogstatsd = DogStatsd(telemetry_min_flush_interval=0)
    dogstatsd.socket = FakeSocket()
    dogstatsd.gauge('gt', 123.4)
    metric = 'gt:123.4|g|#country:china,age:45,blue'
    self.assertEqual(metric, dogstatsd.socket.recv())
    self.assertEqual(telemetry_metrics(tags="country:china,age:45,blue", bytes_sent=len(metric)), dogstatsd.socket.recv())
def test_tags_from_environment_and_constant(self):
    """constant_tags passed to the constructor are combined with the tags
    from DATADOG_TAGS (constants first, env tags appended)."""
    with preserve_environment_variable('DATADOG_TAGS'):
        os.environ['DATADOG_TAGS'] = 'country:china,age:45,blue'
        dogstatsd = DogStatsd(constant_tags=['country:canada', 'red'], telemetry_min_flush_interval=0)
    dogstatsd.socket = FakeSocket()
    dogstatsd.gauge('gt', 123.4)
    # Note: duplicate 'country:' keys are kept, not deduplicated.
    tags = "country:canada,red,country:china,age:45,blue"
    metric = 'gt:123.4|g|#' + tags
    self.assertEqual(metric, dogstatsd.socket.recv())
    self.assertEqual(telemetry_metrics(tags=tags, bytes_sent=len(metric)), dogstatsd.socket.recv())
def test_entity_tag_from_environment(self):
    """DD_ENTITY_ID is turned into a dd.internal.entity_id tag."""
    with preserve_environment_variable('DD_ENTITY_ID'):
        os.environ['DD_ENTITY_ID'] = '04652bb7-19b7-11e9-9cc6-42010a9c016d'
        dogstatsd = DogStatsd(telemetry_min_flush_interval=0)
    dogstatsd.socket = FakeSocket()
    dogstatsd.gauge('gt', 123.4)
    metric = 'gt:123.4|g|#dd.internal.entity_id:04652bb7-19b7-11e9-9cc6-42010a9c016d'
    self.assertEqual(metric, dogstatsd.socket.recv())
    self.assertEqual(
        telemetry_metrics(tags="dd.internal.entity_id:04652bb7-19b7-11e9-9cc6-42010a9c016d", bytes_sent=len(metric)),
        dogstatsd.socket.recv())
def test_entity_tag_from_environment_and_constant(self):
    """The entity-id tag from DD_ENTITY_ID is appended after constant_tags."""
    with preserve_environment_variable('DD_ENTITY_ID'):
        os.environ['DD_ENTITY_ID'] = '04652bb7-19b7-11e9-9cc6-42010a9c016d'
        dogstatsd = DogStatsd(constant_tags=['country:canada', 'red'], telemetry_min_flush_interval=0)
    dogstatsd.socket = FakeSocket()
    dogstatsd.gauge('gt', 123.4)
    metric = 'gt:123.4|g|#country:canada,red,dd.internal.entity_id:04652bb7-19b7-11e9-9cc6-42010a9c016d'
    self.assertEqual(metric, dogstatsd.socket.recv())
    self.assertEqual(
        telemetry_metrics(tags="country:canada,red,dd.internal.entity_id:04652bb7-19b7-11e9-9cc6-42010a9c016d",
                          bytes_sent=len(metric)),
        dogstatsd.socket.recv()
    )
def test_entity_tag_and_tags_from_environment_and_constant(self):
    """Tag merge order: constant_tags, then DATADOG_TAGS, then the
    DD_ENTITY_ID entity tag last."""
    with preserve_environment_variable('DATADOG_TAGS'):
        os.environ['DATADOG_TAGS'] = 'country:china,age:45,blue'
        with preserve_environment_variable('DD_ENTITY_ID'):
            os.environ['DD_ENTITY_ID'] = '04652bb7-19b7-11e9-9cc6-42010a9c016d'
            dogstatsd = DogStatsd(constant_tags=['country:canada', 'red'], telemetry_min_flush_interval=0)
    dogstatsd.socket = FakeSocket()
    dogstatsd.gauge('gt', 123.4)
    tags = "country:canada,red,country:china,age:45,blue,dd.internal.entity_id:04652bb7-19b7-11e9-9cc6-42010a9c016d"
    metric = 'gt:123.4|g|#' + tags
    self.assertEqual(metric, dogstatsd.socket.recv())
    self.assertEqual(telemetry_metrics(tags=tags, bytes_sent=len(metric)), dogstatsd.socket.recv())
def test_dogstatsd_initialization_with_dd_env_service_version(self):
    """
    Dogstatsd should automatically use DD_ENV, DD_SERVICE, and DD_VERSION (if present)
    to set {env, service, version} as global tags for all metrics emitted.
    """
    cases = [
        # Test various permutations of setting DD_* env vars, as well as other global tag configuration.
        # An empty string signifies that the env var either isn't set or that it is explicitly set to empty string.
        # Tuple layout: (DD_ENV, DD_SERVICE, DD_VERSION, DATADOG_TAGS,
        #                constant_tags, expected sorted global tags)
        ('', '', '', '', [], []),
        ('prod', '', '', '', [], ['env:prod']),
        ('prod', 'dog', '', '', [], ['env:prod', 'service:dog']),
        ('prod', 'dog', 'abc123', '', [], ['env:prod', 'service:dog', 'version:abc123']),
        ('prod', 'dog', 'abc123', 'env:prod,type:app', [], ['env:prod', 'env:prod', 'service:dog', 'type:app', 'version:abc123']),
        ('prod', 'dog', 'abc123', 'env:prod2,type:app', [], ['env:prod', 'env:prod2', 'service:dog', 'type:app', 'version:abc123']),
        ('prod', 'dog', 'abc123', '', ['env:prod', 'type:app'], ['env:prod', 'env:prod', 'service:dog', 'type:app', 'version:abc123']),
        ('prod', 'dog', 'abc123', '', ['env:prod2', 'type:app'], ['env:prod', 'env:prod2', 'service:dog', 'type:app', 'version:abc123']),
        ('prod', 'dog', 'abc123', 'env:prod3,custom_tag:cat', ['env:prod2', 'type:app'], ['custom_tag:cat', 'env:prod', 'env:prod2', 'env:prod3', 'service:dog', 'type:app', 'version:abc123']),
    ]
    for case in cases:
        dd_env, dd_service, dd_version, datadog_tags, constant_tags, global_tags = case
        with EnvVars(
            env_vars={
                'DATADOG_TAGS': datadog_tags,
                'DD_ENV': dd_env,
                'DD_SERVICE': dd_service,
                'DD_VERSION': dd_version,
            }
        ):
            dogstatsd = DogStatsd(constant_tags=constant_tags, telemetry_min_flush_interval=0)
            dogstatsd.socket = FakeSocket()
            # Guarantee consistent ordering, regardless of insertion order.
            dogstatsd.constant_tags.sort()
            self.assertEqual(global_tags, dogstatsd.constant_tags)
            # Make call with no tags passed; only the globally configured tags will be used.
            global_tags_str = ','.join([t for t in global_tags])
            dogstatsd.gauge('gt', 123.4)
            # Protect against the no tags case.
            metric = 'gt:123.4|g|#{}'.format(global_tags_str) if global_tags_str else 'gt:123.4|g'
            self.assertEqual(metric, dogstatsd.socket.recv())
            self.assertEqual(telemetry_metrics(tags=global_tags_str, bytes_sent=len(metric)), dogstatsd.socket.recv())
            # Reset counters so the next telemetry payload starts fresh.
            dogstatsd._reset_telemetry()
            # Make another call with local tags passed.
            passed_tags = ['env:prod', 'version:def456', 'custom_tag:toad']
            all_tags_str = ','.join([t for t in passed_tags + global_tags])
            dogstatsd.gauge('gt', 123.4, tags=passed_tags)
            metric = 'gt:123.4|g|#{}'.format(all_tags_str)
            self.assertEqual(metric, dogstatsd.socket.recv())
            self.assertEqual(telemetry_metrics(tags=global_tags_str, bytes_sent=len(metric)), dogstatsd.socket.recv())
def test_gauge_does_not_send_none(self):
    # A None value must be dropped, not serialized onto the wire.
    self.statsd.gauge('metric', None)
    self.assertIsNone(self.statsd.socket.recv())
def test_increment_does_not_send_none(self):
    # A None value must be dropped, not serialized onto the wire.
    self.statsd.increment('metric', None)
    self.assertIsNone(self.statsd.socket.recv())
def test_decrement_does_not_send_none(self):
    # A None value must be dropped, not serialized onto the wire.
    self.statsd.decrement('metric', None)
    self.assertIsNone(self.statsd.socket.recv())
def test_timing_does_not_send_none(self):
    # A None value must be dropped, not serialized onto the wire.
    self.statsd.timing('metric', None)
    self.assertIsNone(self.statsd.socket.recv())
def test_histogram_does_not_send_none(self):
    # A None value must be dropped, not serialized onto the wire.
    self.statsd.histogram('metric', None)
    self.assertIsNone(self.statsd.socket.recv())
|
test_concurrent_futures.py | import test.support
# Skip tests if _multiprocessing wasn't built.
test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.skip_if_broken_multiprocessing_synchronize()
from test.support.script_helper import assert_python_ok
import contextlib
import itertools
import logging
from logging.handlers import QueueHandler
import os
import queue
import sys
import threading
import time
import unittest
import weakref
from pickle import PicklingError
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future,
BrokenExecutor)
from concurrent.futures.process import BrokenProcessPool
from multiprocessing import get_context
import multiprocessing.process
import multiprocessing.util
def create_future(state=PENDING, exception=None, result=None):
    """Build a Future whose internal state is preset to the given values."""
    future = Future()
    future._state = state
    future._exception = exception
    future._result = result
    return future
# Pre-built futures, one per lifecycle state, shared across the tests below.
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)

# Module-global sentinel mutated by worker initializers (see init()).
INITIALIZER_STATUS = 'uninitialized'
def mul(x, y):
    """Return the product of *x* and *y* (trivial worker payload)."""
    product = x * y
    return product
def capture(*args, **kwargs):
    """Echo back the positional and keyword arguments as a pair."""
    return (args, kwargs)
def sleep_and_raise(t):
    """Sleep *t* seconds, then raise a (deliberately generic) Exception."""
    time.sleep(t)
    raise Exception('this is an exception')
def sleep_and_print(t, msg):
    """Sleep *t* seconds, then print *msg* and flush stdout (so the parent
    process can capture the output reliably)."""
    time.sleep(t)
    print(msg)
    sys.stdout.flush()
def init(x):
    """Worker initializer: record *x* in the module-global status flag."""
    global INITIALIZER_STATUS
    INITIALIZER_STATUS = x
def get_init_status():
    """Return the worker's view of the initializer status flag."""
    return INITIALIZER_STATUS
def init_fail(log_queue=None):
    """Worker initializer that always fails.

    If *log_queue* is given (process pools), redirect the worker's
    'concurrent.futures' logging into it so the parent can inspect it.
    """
    if log_queue is not None:
        logger = logging.getLogger('concurrent.futures')
        logger.addHandler(QueueHandler(log_queue))
        logger.setLevel('CRITICAL')
        logger.propagate = False
    time.sleep(0.1)  # let some futures be scheduled
    raise ValueError('error in initializer')
class MyObject(object):
    """Minimal picklable object used for reference-leak tests."""
    def my_method(self):
        pass
class EventfulGCObj():
    """Object that signals a manager Event when it is garbage-collected."""
    def __init__(self, mgr):
        self.event = mgr.Event()

    def __del__(self):
        # Fires when the (possibly remote) copy is collected.
        self.event.set()
def make_dummy_object(_):
    """Map-compatible factory: ignore the argument, return a fresh MyObject."""
    return MyObject()
class BaseTestCase(unittest.TestCase):
    """Common base: track threads in setUp, reap children/threads in tearDown."""
    def setUp(self):
        self._thread_key = test.support.threading_setup()

    def tearDown(self):
        test.support.reap_children()
        test.support.threading_cleanup(*self._thread_key)
class ExecutorMixin:
    """Creates/destroys an executor around each test.

    Subclasses set ``executor_type`` and optionally ``ctx`` (multiprocessing
    start method); ``executor_kwargs`` is forwarded to the constructor.
    """
    worker_count = 5
    executor_kwargs = {}

    def setUp(self):
        super().setUp()
        self.t1 = time.monotonic()
        if hasattr(self, "ctx"):
            # Process pools: pass the requested multiprocessing context.
            self.executor = self.executor_type(
                max_workers=self.worker_count,
                mp_context=self.get_context(),
                **self.executor_kwargs)
        else:
            self.executor = self.executor_type(
                max_workers=self.worker_count,
                **self.executor_kwargs)
        self._prime_executor()

    def tearDown(self):
        self.executor.shutdown(wait=True)
        self.executor = None
        dt = time.monotonic() - self.t1
        if test.support.verbose:
            print("%.2fs" % dt, end=' ')
        self.assertLess(dt, 300, "synchronization issue: test lasted too long")
        super().tearDown()

    def get_context(self):
        return get_context(self.ctx)

    def _prime_executor(self):
        # Make sure that the executor is ready to do work before running the
        # tests. This should reduce the probability of timeouts in the tests.
        futures = [self.executor.submit(time.sleep, 0.1)
                   for _ in range(self.worker_count)]
        for f in futures:
            f.result()
class ThreadPoolMixin(ExecutorMixin):
    """Run the mixed-in tests against a ThreadPoolExecutor."""
    executor_type = futures.ThreadPoolExecutor
class ProcessPoolForkMixin(ExecutorMixin):
    """Run the mixed-in tests against a fork-based ProcessPoolExecutor."""
    executor_type = futures.ProcessPoolExecutor
    ctx = "fork"

    def get_context(self):
        # The fork start method does not exist on Windows.
        if sys.platform == "win32":
            self.skipTest("require unix system")
        return super().get_context()
class ProcessPoolSpawnMixin(ExecutorMixin):
    """Run the mixed-in tests against a spawn-based ProcessPoolExecutor."""
    executor_type = futures.ProcessPoolExecutor
    ctx = "spawn"
class ProcessPoolForkserverMixin(ExecutorMixin):
    """Run the mixed-in tests against a forkserver-based ProcessPoolExecutor."""
    executor_type = futures.ProcessPoolExecutor
    ctx = "forkserver"

    def get_context(self):
        # The forkserver start method does not exist on Windows.
        if sys.platform == "win32":
            self.skipTest("require unix system")
        return super().get_context()
def create_executor_tests(mixin, bases=(BaseTestCase,),
                          executor_mixins=(ThreadPoolMixin,
                                           ProcessPoolForkMixin,
                                           ProcessPoolForkserverMixin,
                                           ProcessPoolSpawnMixin)):
    """Generate one concrete TestCase per executor mixin and register each
    in the module globals under a name derived from both class names."""
    def strip_mixin(name):
        # Remove a trailing 'Mixin'/'Tests' (5 chars) or 'Test' (4 chars).
        for suffix in ('Mixin', 'Tests', 'Test'):
            if name.endswith(suffix):
                return name[:-len(suffix)]
        return name

    for exe in executor_mixins:
        test_name = "%s%sTest" % (strip_mixin(exe.__name__),
                                  strip_mixin(mixin.__name__))
        globals()[test_name] = type(test_name, (mixin, exe) + bases, {})
class InitializerMixin(ExecutorMixin):
    """Check that a successful initializer runs in every worker."""
    worker_count = 2

    def setUp(self):
        global INITIALIZER_STATUS
        INITIALIZER_STATUS = 'uninitialized'
        self.executor_kwargs = dict(initializer=init,
                                    initargs=('initialized',))
        super().setUp()

    def test_initializer(self):
        futures = [self.executor.submit(get_init_status)
                   for _ in range(self.worker_count)]
        for f in futures:
            self.assertEqual(f.result(), 'initialized')
class FailingInitializerMixin(ExecutorMixin):
    """Check that a failing initializer breaks the executor and that the
    initializer's error gets logged."""
    worker_count = 2

    def setUp(self):
        if hasattr(self, "ctx"):
            # Pass a queue to redirect the child's logging output
            self.mp_context = self.get_context()
            self.log_queue = self.mp_context.Queue()
            self.executor_kwargs = dict(initializer=init_fail,
                                        initargs=(self.log_queue,))
        else:
            # In a thread pool, the child shares our logging setup
            # (see _assert_logged())
            self.mp_context = None
            self.log_queue = None
            self.executor_kwargs = dict(initializer=init_fail)
        super().setUp()

    def test_initializer(self):
        with self._assert_logged('ValueError: error in initializer'):
            try:
                future = self.executor.submit(get_init_status)
            except BrokenExecutor:
                # Perhaps the executor is already broken
                pass
            else:
                with self.assertRaises(BrokenExecutor):
                    future.result()
            # At some point, the executor should break
            t1 = time.monotonic()
            while not self.executor._broken:
                if time.monotonic() - t1 > 5:
                    self.fail("executor not broken after 5 s.")
                time.sleep(0.01)
            # ... and from this point submit() is guaranteed to fail
            with self.assertRaises(BrokenExecutor):
                self.executor.submit(get_init_status)

    def _prime_executor(self):
        # Priming would itself hit the broken initializer; skip it.
        pass

    @contextlib.contextmanager
    def _assert_logged(self, msg):
        if self.log_queue is not None:
            # Process pool: drain the child's log records from the queue.
            yield
            output = []
            try:
                while True:
                    output.append(self.log_queue.get_nowait().getMessage())
            except queue.Empty:
                pass
        else:
            # Thread pool: the failing initializer logs into our process.
            with self.assertLogs('concurrent.futures', 'CRITICAL') as cm:
                yield
            output = cm.output
        self.assertTrue(any(msg in line for line in output),
                        output)
# Instantiate concrete test classes for every executor implementation.
create_executor_tests(InitializerMixin)
create_executor_tests(FailingInitializerMixin)
class ExecutorShutdownTest:
    """Shutdown semantics shared by thread and process executors."""

    def test_run_after_shutdown(self):
        # submit() after shutdown() must raise RuntimeError.
        self.executor.shutdown()
        self.assertRaises(RuntimeError,
                          self.executor.submit,
                          pow, 2, 5)

    def test_interpreter_shutdown(self):
        # Test the atexit hook for shutdown of worker threads and processes
        rc, out, err = assert_python_ok('-c', """if 1:
            from concurrent.futures import {executor_type}
            from time import sleep
            from test.test_concurrent_futures import sleep_and_print
            if __name__ == "__main__":
                context = '{context}'
                if context == "":
                    t = {executor_type}(5)
                else:
                    from multiprocessing import get_context
                    context = get_context(context)
                    t = {executor_type}(5, mp_context=context)
                t.submit(sleep_and_print, 1.0, "apple")
            """.format(executor_type=self.executor_type.__name__,
                       context=getattr(self, "ctx", "")))
        # Errors in atexit hooks don't change the process exit code, check
        # stderr manually.
        self.assertFalse(err)
        self.assertEqual(out.strip(), b"apple")

    def test_submit_after_interpreter_shutdown(self):
        # Test the atexit hook for shutdown of worker threads and processes
        rc, out, err = assert_python_ok('-c', """if 1:
            import atexit
            @atexit.register
            def run_last():
                try:
                    t.submit(id, None)
                except RuntimeError:
                    print("runtime-error")
                    raise
            from concurrent.futures import {executor_type}
            if __name__ == "__main__":
                context = '{context}'
                if not context:
                    t = {executor_type}(5)
                else:
                    from multiprocessing import get_context
                    context = get_context(context)
                    t = {executor_type}(5, mp_context=context)
                t.submit(id, 42).result()
            """.format(executor_type=self.executor_type.__name__,
                       context=getattr(self, "ctx", "")))
        # Errors in atexit hooks don't change the process exit code, check
        # stderr manually.
        self.assertIn("RuntimeError: cannot schedule new futures", err.decode())
        self.assertEqual(out.strip(), b"runtime-error")

    def test_hang_issue12364(self):
        # shutdown() must not deadlock while submitted futures still run.
        fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
        self.executor.shutdown()
        for f in fs:
            f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, BaseTestCase):
    """Shutdown behaviour specific to thread pools."""

    def _prime_executor(self):
        # Don't pre-spawn workers; these tests control thread lifetime.
        pass

    def test_threads_terminate(self):
        def acquire_lock(lock):
            lock.acquire()

        sem = threading.Semaphore(0)
        for i in range(3):
            self.executor.submit(acquire_lock, sem)
        self.assertEqual(len(self.executor._threads), 3)
        # Release the workers, then shut down and join every thread.
        for i in range(3):
            sem.release()
        self.executor.shutdown()
        for t in self.executor._threads:
            t.join()

    def test_context_manager_shutdown(self):
        with futures.ThreadPoolExecutor(max_workers=5) as e:
            executor = e
            self.assertEqual(list(e.map(abs, range(-5, 5))),
                             [5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
        # Exiting the context must have shut the pool down.
        for t in executor._threads:
            t.join()

    def test_del_shutdown(self):
        # Dropping the last reference must also shut the pool down.
        executor = futures.ThreadPoolExecutor(max_workers=5)
        executor.map(abs, range(-5, 5))
        threads = executor._threads
        del executor
        test.support.gc_collect()
        for t in threads:
            t.join()

    def test_thread_names_assigned(self):
        executor = futures.ThreadPoolExecutor(
            max_workers=5, thread_name_prefix='SpecialPool')
        executor.map(abs, range(-5, 5))
        threads = executor._threads
        del executor
        test.support.gc_collect()
        for t in threads:
            self.assertRegex(t.name, r'^SpecialPool_[0-4]$')
            t.join()

    def test_thread_names_default(self):
        executor = futures.ThreadPoolExecutor(max_workers=5)
        executor.map(abs, range(-5, 5))
        threads = executor._threads
        del executor
        test.support.gc_collect()
        for t in threads:
            # Ensure that our default name is reasonably sane and unique when
            # no thread_name_prefix was supplied.
            self.assertRegex(t.name, r'ThreadPoolExecutor-\d+_[0-4]$')
            t.join()
class ProcessPoolShutdownTest(ExecutorShutdownTest):
    """Shutdown behaviour specific to process pools."""

    def _prime_executor(self):
        # Don't pre-spawn workers; these tests control process lifetime.
        pass

    def test_processes_terminate(self):
        self.executor.submit(mul, 21, 2)
        self.executor.submit(mul, 6, 7)
        self.executor.submit(mul, 3, 14)
        self.assertEqual(len(self.executor._processes), 5)
        processes = self.executor._processes
        self.executor.shutdown()
        for p in processes.values():
            p.join()

    def test_context_manager_shutdown(self):
        with futures.ProcessPoolExecutor(max_workers=5) as e:
            processes = e._processes
            self.assertEqual(list(e.map(abs, range(-5, 5))),
                             [5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
        # Exiting the context must have shut the pool down.
        for p in processes.values():
            p.join()

    def test_del_shutdown(self):
        executor = futures.ProcessPoolExecutor(max_workers=5)
        list(executor.map(abs, range(-5, 5)))
        # Grab internals before dropping the last reference.  (A duplicated
        # second assignment of queue_management_thread was removed here.)
        queue_management_thread = executor._queue_management_thread
        processes = executor._processes
        call_queue = executor._call_queue
        del executor
        test.support.gc_collect()
        # Make sure that all the executor resources were properly cleaned by
        # the shutdown process
        queue_management_thread.join()
        for p in processes.values():
            p.join()
        call_queue.join_thread()
# Process-only shutdown tests: one concrete class per start method.
create_executor_tests(ProcessPoolShutdownTest,
                      executor_mixins=(ProcessPoolForkMixin,
                                       ProcessPoolForkserverMixin,
                                       ProcessPoolSpawnMixin))
class WaitTests:
    """Behaviour of futures.wait() under the various return_when modes."""

    def test_first_completed(self):
        future1 = self.executor.submit(mul, 21, 2)
        future2 = self.executor.submit(time.sleep, 1.5)

        done, not_done = futures.wait(
            [CANCELLED_FUTURE, future1, future2],
            return_when=futures.FIRST_COMPLETED)

        self.assertEqual(set([future1]), done)
        self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)

    def test_first_completed_some_already_completed(self):
        future1 = self.executor.submit(time.sleep, 1.5)

        # Pre-finished futures count as completed immediately.
        finished, pending = futures.wait(
            [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
            return_when=futures.FIRST_COMPLETED)

        self.assertEqual(
            set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
            finished)
        self.assertEqual(set([future1]), pending)

    def test_first_exception(self):
        future1 = self.executor.submit(mul, 2, 21)
        future2 = self.executor.submit(sleep_and_raise, 1.5)
        future3 = self.executor.submit(time.sleep, 3)

        finished, pending = futures.wait(
            [future1, future2, future3],
            return_when=futures.FIRST_EXCEPTION)

        self.assertEqual(set([future1, future2]), finished)
        self.assertEqual(set([future3]), pending)

    def test_first_exception_some_already_complete(self):
        future1 = self.executor.submit(divmod, 21, 0)
        future2 = self.executor.submit(time.sleep, 1.5)

        finished, pending = futures.wait(
            [SUCCESSFUL_FUTURE,
             CANCELLED_FUTURE,
             CANCELLED_AND_NOTIFIED_FUTURE,
             future1, future2],
            return_when=futures.FIRST_EXCEPTION)

        self.assertEqual(set([SUCCESSFUL_FUTURE,
                              CANCELLED_AND_NOTIFIED_FUTURE,
                              future1]), finished)
        self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)

    def test_first_exception_one_already_failed(self):
        future1 = self.executor.submit(time.sleep, 2)

        finished, pending = futures.wait(
            [EXCEPTION_FUTURE, future1],
            return_when=futures.FIRST_EXCEPTION)

        self.assertEqual(set([EXCEPTION_FUTURE]), finished)
        self.assertEqual(set([future1]), pending)

    def test_all_completed(self):
        future1 = self.executor.submit(divmod, 2, 0)
        future2 = self.executor.submit(mul, 2, 21)

        finished, pending = futures.wait(
            [SUCCESSFUL_FUTURE,
             CANCELLED_AND_NOTIFIED_FUTURE,
             EXCEPTION_FUTURE,
             future1,
             future2],
            return_when=futures.ALL_COMPLETED)

        self.assertEqual(set([SUCCESSFUL_FUTURE,
                              CANCELLED_AND_NOTIFIED_FUTURE,
                              EXCEPTION_FUTURE,
                              future1,
                              future2]), finished)
        self.assertEqual(set(), pending)

    def test_timeout(self):
        future1 = self.executor.submit(mul, 6, 7)
        future2 = self.executor.submit(time.sleep, 6)

        # The 6 s sleep outlives the 5 s timeout and stays pending.
        finished, pending = futures.wait(
            [CANCELLED_AND_NOTIFIED_FUTURE,
             EXCEPTION_FUTURE,
             SUCCESSFUL_FUTURE,
             future1, future2],
            timeout=5,
            return_when=futures.ALL_COMPLETED)

        self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
                              EXCEPTION_FUTURE,
                              SUCCESSFUL_FUTURE,
                              future1]), finished)
        self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, BaseTestCase):

    def test_pending_calls_race(self):
        # Issue #14406: multi-threaded race condition when waiting on all
        # futures.
        event = threading.Event()
        def future_func():
            event.wait()
        # Shrink the thread switch interval to maximise the chance of
        # hitting the race.  Fall back to the pre-3.2 check-interval API
        # when switchinterval is unavailable.
        newgil = hasattr(sys, 'getswitchinterval')
        if newgil:
            geti, seti = sys.getswitchinterval, sys.setswitchinterval
        else:
            geti, seti = sys.getcheckinterval, sys.setcheckinterval
        oldinterval = geti()
        seti(1e-6 if newgil else 1)
        try:
            fs = {self.executor.submit(future_func) for i in range(100)}
            event.set()
            futures.wait(fs, return_when=futures.ALL_COMPLETED)
        finally:
            # Always restore the original switch interval.
            seti(oldinterval)
# Process-pool variants of WaitTests (the thread variant is defined above).
create_executor_tests(WaitTests,
                      executor_mixins=(ProcessPoolForkMixin,
                                       ProcessPoolForkserverMixin,
                                       ProcessPoolSpawnMixin))
class AsCompletedTests:
    """Behaviour of futures.as_completed()."""
    # TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.

    def test_no_timeout(self):
        future1 = self.executor.submit(mul, 2, 21)
        future2 = self.executor.submit(mul, 7, 6)

        completed = set(futures.as_completed(
            [CANCELLED_AND_NOTIFIED_FUTURE,
             EXCEPTION_FUTURE,
             SUCCESSFUL_FUTURE,
             future1, future2]))
        self.assertEqual(set(
            [CANCELLED_AND_NOTIFIED_FUTURE,
             EXCEPTION_FUTURE,
             SUCCESSFUL_FUTURE,
             future1, future2]),
            completed)

    def test_zero_timeout(self):
        # With timeout=0 only the already-finished futures are yielded
        # before TimeoutError fires.
        future1 = self.executor.submit(time.sleep, 2)
        completed_futures = set()
        try:
            for future in futures.as_completed(
                    [CANCELLED_AND_NOTIFIED_FUTURE,
                     EXCEPTION_FUTURE,
                     SUCCESSFUL_FUTURE,
                     future1],
                    timeout=0):
                completed_futures.add(future)
        except futures.TimeoutError:
            pass

        self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
                              EXCEPTION_FUTURE,
                              SUCCESSFUL_FUTURE]),
                         completed_futures)

    def test_duplicate_futures(self):
        # Issue 20367. Duplicate futures should not raise exceptions or give
        # duplicate responses.
        # Issue #31641: accept arbitrary iterables.
        future1 = self.executor.submit(time.sleep, 2)
        completed = [
            f for f in futures.as_completed(itertools.repeat(future1, 3))
        ]
        self.assertEqual(len(completed), 1)

    def test_free_reference_yielded_future(self):
        # Issue #14406: Generator should not keep references
        # to finished futures.
        futures_list = [Future() for _ in range(8)]
        futures_list.append(create_future(state=CANCELLED_AND_NOTIFIED))
        futures_list.append(create_future(state=FINISHED, result=42))

        with self.assertRaises(futures.TimeoutError):
            for future in futures.as_completed(futures_list, timeout=0):
                futures_list.remove(future)
                wr = weakref.ref(future)
                del future
                test.support.gc_collect()
                self.assertIsNone(wr())

        futures_list[0].set_result("test")
        for future in futures.as_completed(futures_list):
            futures_list.remove(future)
            wr = weakref.ref(future)
            del future
            test.support.gc_collect()
            self.assertIsNone(wr())
            if futures_list:
                futures_list[0].set_result("test")

    def test_correct_timeout_exception_msg(self):
        futures_list = [CANCELLED_AND_NOTIFIED_FUTURE, PENDING_FUTURE,
                        RUNNING_FUTURE, SUCCESSFUL_FUTURE]

        with self.assertRaises(futures.TimeoutError) as cm:
            list(futures.as_completed(futures_list, timeout=0))

        self.assertEqual(str(cm.exception), '2 (of 4) futures unfinished')
create_executor_tests(AsCompletedTests)
class ExecutorTest:
    """Core submit()/map() behaviour shared by all executors."""
    # Executor.shutdown() and context manager usage is tested by
    # ExecutorShutdownTest.

    def test_submit(self):
        future = self.executor.submit(pow, 2, 8)
        self.assertEqual(256, future.result())

    def test_submit_keyword(self):
        future = self.executor.submit(mul, 2, y=8)
        self.assertEqual(16, future.result())
        # 'self' and 'fn' must be usable as keyword args of the callable.
        future = self.executor.submit(capture, 1, self=2, fn=3)
        self.assertEqual(future.result(), ((1,), {'self': 2, 'fn': 3}))
        # Passing the callable as the 'fn' keyword is deprecated.
        with self.assertWarns(DeprecationWarning):
            future = self.executor.submit(fn=capture, arg=1)
        self.assertEqual(future.result(), ((), {'arg': 1}))
        with self.assertRaises(TypeError):
            self.executor.submit(arg=1)

    def test_map(self):
        self.assertEqual(
            list(self.executor.map(pow, range(10), range(10))),
            list(map(pow, range(10), range(10))))

        self.assertEqual(
            list(self.executor.map(pow, range(10), range(10), chunksize=3)),
            list(map(pow, range(10), range(10))))

    def test_map_exception(self):
        # The exception surfaces when the failing item is iterated over.
        i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
        self.assertEqual(i.__next__(), (0, 1))
        self.assertEqual(i.__next__(), (0, 1))
        self.assertRaises(ZeroDivisionError, i.__next__)

    def test_map_timeout(self):
        results = []
        try:
            for i in self.executor.map(time.sleep,
                                       [0, 0, 6],
                                       timeout=5):
                results.append(i)
        except futures.TimeoutError:
            pass
        else:
            self.fail('expected TimeoutError')

        self.assertEqual([None, None], results)

    def test_shutdown_race_issue12456(self):
        # Issue #12456: race condition at shutdown where trying to post a
        # sentinel in the call queue blocks (the queue is full while processes
        # have exited).
        self.executor.map(str, [2] * (self.worker_count + 1))
        self.executor.shutdown()

    @test.support.cpython_only
    def test_no_stale_references(self):
        # Issue #16284: check that the executors don't unnecessarily hang onto
        # references.
        my_object = MyObject()
        my_object_collected = threading.Event()
        my_object_callback = weakref.ref(
            my_object, lambda obj: my_object_collected.set())
        # Deliberately discarding the future.
        self.executor.submit(my_object.my_method)
        del my_object

        collected = my_object_collected.wait(timeout=5.0)
        self.assertTrue(collected,
                        "Stale reference not collected within timeout.")

    def test_max_workers_negative(self):
        for number in (0, -1):
            with self.assertRaisesRegex(ValueError,
                                        "max_workers must be greater "
                                        "than 0"):
                self.executor_type(max_workers=number)

    def test_free_reference(self):
        # Issue #14406: Result iterator should not keep an internal
        # reference to result objects.
        for obj in self.executor.map(make_dummy_object, range(10)):
            wr = weakref.ref(obj)
            del obj
            test.support.gc_collect()
            self.assertIsNone(wr())
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, BaseTestCase):
    """Thread-pool-specific executor behaviour."""

    def test_map_submits_without_iteration(self):
        """Tests verifying issue 11777."""
        finished = []
        def record_finished(n):
            finished.append(n)

        # map() must submit all work even if its iterator is never consumed.
        self.executor.map(record_finished, range(10))
        self.executor.shutdown(wait=True)
        self.assertCountEqual(finished, range(10))

    def test_default_workers(self):
        executor = self.executor_type()
        # Default worker count formula since Python 3.8.
        expected = min(32, (os.cpu_count() or 1) + 4)
        self.assertEqual(executor._max_workers, expected)

    def test_saturation(self):
        # The pool must never grow beyond _max_workers, however many
        # tasks are queued.
        executor = self.executor_type(4)
        def acquire_lock(lock):
            lock.acquire()

        sem = threading.Semaphore(0)
        for i in range(15 * executor._max_workers):
            executor.submit(acquire_lock, sem)
        self.assertEqual(len(executor._threads), executor._max_workers)
        for i in range(15 * executor._max_workers):
            sem.release()
        executor.shutdown(wait=True)

    def test_idle_thread_reuse(self):
        # Sequential submissions should reuse the single idle worker
        # instead of spawning new threads.
        executor = self.executor_type()
        executor.submit(mul, 21, 2).result()
        executor.submit(mul, 6, 7).result()
        executor.submit(mul, 3, 14).result()
        self.assertEqual(len(executor._threads), 1)
        executor.shutdown(wait=True)
class ProcessPoolExecutorTest(ExecutorTest):
    """Process-pool-specific executor behaviour."""

    @unittest.skipUnless(sys.platform=='win32', 'Windows-only process limit')
    def test_max_workers_too_large(self):
        # Windows WaitForMultipleObjects caps the pool at 61 workers.
        with self.assertRaisesRegex(ValueError,
                                    "max_workers must be <= 61"):
            futures.ProcessPoolExecutor(max_workers=62)

    def test_killed_child(self):
        # When a child process is abruptly terminated, the whole pool gets
        # "broken".
        futures = [self.executor.submit(time.sleep, 3)]
        # Get one of the processes, and terminate (kill) it
        p = next(iter(self.executor._processes.values()))
        p.terminate()
        for fut in futures:
            self.assertRaises(BrokenProcessPool, fut.result)
        # Submitting other jobs fails as well.
        self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)

    def test_map_chunksize(self):
        def bad_map():
            list(self.executor.map(pow, range(40), range(40), chunksize=-1))

        ref = list(map(pow, range(40), range(40)))
        self.assertEqual(
            list(self.executor.map(pow, range(40), range(40), chunksize=6)),
            ref)
        self.assertEqual(
            list(self.executor.map(pow, range(40), range(40), chunksize=50)),
            ref)
        self.assertEqual(
            list(self.executor.map(pow, range(40), range(40), chunksize=40)),
            ref)
        self.assertRaises(ValueError, bad_map)

    @classmethod
    def _test_traceback(cls):
        raise RuntimeError(123) # some comment

    def test_traceback(self):
        # We want ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
        future = self.executor.submit(self._test_traceback)
        with self.assertRaises(Exception) as cm:
            future.result()

        exc = cm.exception
        self.assertIs(type(exc), RuntimeError)
        self.assertEqual(exc.args, (123,))
        cause = exc.__cause__
        self.assertIs(type(cause), futures.process._RemoteTraceback)
        self.assertIn('raise RuntimeError(123) # some comment', cause.tb)

        with test.support.captured_stderr() as f1:
            try:
                raise exc
            except RuntimeError:
                sys.excepthook(*sys.exc_info())
            self.assertIn('raise RuntimeError(123) # some comment',
                          f1.getvalue())

    def test_ressources_gced_in_workers(self):
        # Ensure that argument for a job are correctly gc-ed after the job
        # is finished
        mgr = get_context(self.ctx).Manager()
        obj = EventfulGCObj(mgr)
        future = self.executor.submit(id, obj)
        future.result()

        self.assertTrue(obj.event.wait(timeout=1))

        # explicitly destroy the object to ensure that EventfulGCObj.__del__()
        # is called while manager is still running.
        obj = None
        test.support.gc_collect()

        mgr.shutdown()
        mgr.join()
# Generate one concrete TestCase per multiprocessing start method by mixing
# ProcessPoolExecutorTest with each start-method mixin.
create_executor_tests(ProcessPoolExecutorTest,
                      executor_mixins=(ProcessPoolForkMixin,
                                       ProcessPoolForkserverMixin,
                                       ProcessPoolSpawnMixin))
def hide_process_stderr():
    """Silence this process by pointing sys.stderr at an in-memory buffer."""
    from io import StringIO

    sys.stderr = StringIO()
def _crash(delay=None):
    """Induces a segfault.

    If *delay* is truthy, sleep that many seconds first so callers can
    sequence the crash after other events (e.g. an executor shutdown).
    """
    import faulthandler

    if delay:
        time.sleep(delay)
    # Disable faulthandler so the deliberate SIGSEGV kills the process
    # without dumping a traceback.
    faulthandler.disable()
    faulthandler._sigsegv()
def _exit():
"""Induces a sys exit with exitcode 1."""
sys.exit(1)
def _raise_error(Err):
    """Function that raises an instance of exception class *Err* in-process."""
    # Mute stderr first so the expected traceback does not pollute test output.
    hide_process_stderr()
    raise Err()
def _return_instance(cls):
    """Function that returns an instance of *cls* (stderr muted first).

    Used to trigger result-pickling failures: the instance misbehaves when
    the worker pickles it back to the parent.
    """
    hide_process_stderr()
    return cls()
class CrashAtPickle(object):
    """Bad object that triggers a segfault at pickling time."""
    def __reduce__(self):
        # Segfaults in whichever process tries to pickle this object.
        _crash()
class CrashAtUnpickle(object):
    """Bad object that triggers a segfault at unpickling time."""
    def __reduce__(self):
        # Pickles fine; the receiving process calls _crash() on reconstruction.
        return _crash, ()
class ExitAtPickle(object):
    """Bad object that triggers a process exit at pickling time."""
    def __reduce__(self):
        # Exits whichever process tries to pickle this object.
        _exit()
class ExitAtUnpickle(object):
    """Bad object that triggers a process exit at unpickling time."""
    def __reduce__(self):
        # Pickles fine; the receiving process calls _exit() on reconstruction.
        return _exit, ()
class ErrorAtPickle(object):
    """Bad object that triggers an error at pickling time."""

    def __reduce__(self):
        # Raising from __reduce__ makes pickle.dumps fail on the sender side.
        import pickle

        raise pickle.PicklingError("Error in pickle")
class ErrorAtUnpickle(object):
    """Bad object that triggers an error at unpickling time."""
    def __reduce__(self):
        from pickle import UnpicklingError
        # Pickles fine; the receiver calls _raise_error(UnpicklingError)
        # while reconstructing.
        return _raise_error, (UnpicklingError, )
class ExecutorDeadlockTest:
    """Regression tests: worker crashes/exits must break the pool, not hang it.

    Mixed with the start-method mixins by create_executor_tests below.
    """

    TIMEOUT = 15  # seconds before a hung pool is declared deadlocked

    @classmethod
    def _sleep_id(cls, x, delay):
        time.sleep(delay)
        return x

    def _fail_on_deadlock(self, executor):
        # If we did not recover before TIMEOUT seconds, consider that the
        # executor is in a deadlock state and forcefully clean all its
        # components.
        import faulthandler
        from tempfile import TemporaryFile
        with TemporaryFile(mode="w+") as f:
            faulthandler.dump_traceback(file=f)
            f.seek(0)
            tb = f.read()
        for p in executor._processes.values():
            p.terminate()
        # This should be safe to call executor.shutdown here as all possible
        # deadlocks should have been broken.
        executor.shutdown(wait=True)
        print(f"\nTraceback:\n {tb}", file=sys.__stderr__)
        self.fail(f"Executor deadlock:\n\n{tb}")

    def test_crash(self):
        # extensive testing for deadlock caused by crashes in a pool.
        self.executor.shutdown(wait=True)
        crash_cases = [
            # Check problem occurring while pickling a task in
            # the task_handler thread
            (id, (ErrorAtPickle(),), PicklingError, "error at task pickle"),
            # Check problem occurring while unpickling a task on workers
            (id, (ExitAtUnpickle(),), BrokenProcessPool,
             "exit at task unpickle"),
            (id, (ErrorAtUnpickle(),), BrokenProcessPool,
             "error at task unpickle"),
            (id, (CrashAtUnpickle(),), BrokenProcessPool,
             "crash at task unpickle"),
            # Check problem occurring during func execution on workers
            (_crash, (), BrokenProcessPool,
             "crash during func execution on worker"),
            (_exit, (), SystemExit,
             "exit during func execution on worker"),
            (_raise_error, (RuntimeError, ), RuntimeError,
             "error during func execution on worker"),
            # Check problem occurring while pickling a task result
            # on workers
            (_return_instance, (CrashAtPickle,), BrokenProcessPool,
             "crash during result pickle on worker"),
            (_return_instance, (ExitAtPickle,), SystemExit,
             "exit during result pickle on worker"),
            (_return_instance, (ErrorAtPickle,), PicklingError,
             "error during result pickle on worker"),
            # Check problem occurring while unpickling a task in
            # the result_handler thread
            (_return_instance, (ErrorAtUnpickle,), BrokenProcessPool,
             "error during result unpickle in result_handler"),
            (_return_instance, (ExitAtUnpickle,), BrokenProcessPool,
             "exit during result unpickle in result_handler")
        ]
        for func, args, error, name in crash_cases:
            with self.subTest(name):
                # The captured_stderr reduces the noise in the test report
                with test.support.captured_stderr():
                    # A fresh 2-worker pool per case so one broken pool
                    # cannot poison the next case.
                    executor = self.executor_type(
                        max_workers=2, mp_context=get_context(self.ctx))
                    res = executor.submit(func, *args)
                    with self.assertRaises(error):
                        try:
                            res.result(timeout=self.TIMEOUT)
                        except futures.TimeoutError:
                            # If we did not recover before TIMEOUT seconds,
                            # consider that the executor is in a deadlock state
                            self._fail_on_deadlock(executor)
                    executor.shutdown(wait=True)

    def test_shutdown_deadlock(self):
        # Test that the pool calling shutdown does not cause deadlock
        # if a worker fails after the shutdown call.
        self.executor.shutdown(wait=True)
        with self.executor_type(max_workers=2,
                                mp_context=get_context(self.ctx)) as executor:
            self.executor = executor  # Allow clean up in fail_on_deadlock
            f = executor.submit(_crash, delay=.1)
            executor.shutdown(wait=True)
            with self.assertRaises(BrokenProcessPool):
                f.result()
# Generate one concrete TestCase per multiprocessing start method for the
# deadlock regression suite.
create_executor_tests(ExecutorDeadlockTest,
                      executor_mixins=(ProcessPoolForkMixin,
                                       ProcessPoolForkserverMixin,
                                       ProcessPoolSpawnMixin))
class FutureTests(BaseTestCase):
    """Unit tests for Future: done-callbacks, state transitions, repr, and
    result/exception retrieval (including timeouts and double-set errors)."""

    def test_done_callback_with_result(self):
        callback_result = None
        def fn(callback_future):
            nonlocal callback_result
            callback_result = callback_future.result()

        f = Future()
        f.add_done_callback(fn)
        f.set_result(5)
        self.assertEqual(5, callback_result)

    def test_done_callback_with_exception(self):
        callback_exception = None
        def fn(callback_future):
            nonlocal callback_exception
            callback_exception = callback_future.exception()

        f = Future()
        f.add_done_callback(fn)
        f.set_exception(Exception('test'))
        self.assertEqual(('test',), callback_exception.args)

    def test_done_callback_with_cancel(self):
        was_cancelled = None
        def fn(callback_future):
            nonlocal was_cancelled
            was_cancelled = callback_future.cancelled()

        f = Future()
        f.add_done_callback(fn)
        self.assertTrue(f.cancel())
        self.assertTrue(was_cancelled)

    def test_done_callback_raises(self):
        # A raising callback must be logged but must not prevent later
        # callbacks from running.
        with test.support.captured_stderr() as stderr:
            raising_was_called = False
            fn_was_called = False

            def raising_fn(callback_future):
                nonlocal raising_was_called
                raising_was_called = True
                raise Exception('doh!')

            def fn(callback_future):
                nonlocal fn_was_called
                fn_was_called = True

            f = Future()
            f.add_done_callback(raising_fn)
            f.add_done_callback(fn)
            f.set_result(5)
            self.assertTrue(raising_was_called)
            self.assertTrue(fn_was_called)
            self.assertIn('Exception: doh!', stderr.getvalue())

    def test_done_callback_already_successful(self):
        # Callbacks added after completion run immediately.
        callback_result = None
        def fn(callback_future):
            nonlocal callback_result
            callback_result = callback_future.result()

        f = Future()
        f.set_result(5)
        f.add_done_callback(fn)
        self.assertEqual(5, callback_result)

    def test_done_callback_already_failed(self):
        callback_exception = None
        def fn(callback_future):
            nonlocal callback_exception
            callback_exception = callback_future.exception()

        f = Future()
        f.set_exception(Exception('test'))
        f.add_done_callback(fn)
        self.assertEqual(('test',), callback_exception.args)

    def test_done_callback_already_cancelled(self):
        was_cancelled = None
        def fn(callback_future):
            nonlocal was_cancelled
            was_cancelled = callback_future.cancelled()

        f = Future()
        self.assertTrue(f.cancel())
        f.add_done_callback(fn)
        self.assertTrue(was_cancelled)

    def test_done_callback_raises_already_succeeded(self):
        with test.support.captured_stderr() as stderr:
            def raising_fn(callback_future):
                raise Exception('doh!')

            f = Future()

            # Set the result first to simulate a future that runs instantly,
            # effectively allowing the callback to be run immediately.
            f.set_result(5)
            f.add_done_callback(raising_fn)

            self.assertIn('exception calling callback for', stderr.getvalue())
            self.assertIn('doh!', stderr.getvalue())

    def test_repr(self):
        self.assertRegex(repr(PENDING_FUTURE),
                         '<Future at 0x[0-9a-f]+ state=pending>')
        self.assertRegex(repr(RUNNING_FUTURE),
                         '<Future at 0x[0-9a-f]+ state=running>')
        self.assertRegex(repr(CANCELLED_FUTURE),
                         '<Future at 0x[0-9a-f]+ state=cancelled>')
        self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
                         '<Future at 0x[0-9a-f]+ state=cancelled>')
        self.assertRegex(
            repr(EXCEPTION_FUTURE),
            '<Future at 0x[0-9a-f]+ state=finished raised OSError>')
        self.assertRegex(
            repr(SUCCESSFUL_FUTURE),
            '<Future at 0x[0-9a-f]+ state=finished returned int>')

    def test_cancel(self):
        # cancel() succeeds only for pending/already-cancelled futures.
        f1 = create_future(state=PENDING)
        f2 = create_future(state=RUNNING)
        f3 = create_future(state=CANCELLED)
        f4 = create_future(state=CANCELLED_AND_NOTIFIED)
        f5 = create_future(state=FINISHED, exception=OSError())
        f6 = create_future(state=FINISHED, result=5)

        self.assertTrue(f1.cancel())
        self.assertEqual(f1._state, CANCELLED)

        self.assertFalse(f2.cancel())
        self.assertEqual(f2._state, RUNNING)

        self.assertTrue(f3.cancel())
        self.assertEqual(f3._state, CANCELLED)

        self.assertTrue(f4.cancel())
        self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)

        self.assertFalse(f5.cancel())
        self.assertEqual(f5._state, FINISHED)

        self.assertFalse(f6.cancel())
        self.assertEqual(f6._state, FINISHED)

    def test_cancelled(self):
        self.assertFalse(PENDING_FUTURE.cancelled())
        self.assertFalse(RUNNING_FUTURE.cancelled())
        self.assertTrue(CANCELLED_FUTURE.cancelled())
        self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
        self.assertFalse(EXCEPTION_FUTURE.cancelled())
        self.assertFalse(SUCCESSFUL_FUTURE.cancelled())

    def test_done(self):
        self.assertFalse(PENDING_FUTURE.done())
        self.assertFalse(RUNNING_FUTURE.done())
        self.assertTrue(CANCELLED_FUTURE.done())
        self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
        self.assertTrue(EXCEPTION_FUTURE.done())
        self.assertTrue(SUCCESSFUL_FUTURE.done())

    def test_running(self):
        self.assertFalse(PENDING_FUTURE.running())
        self.assertTrue(RUNNING_FUTURE.running())
        self.assertFalse(CANCELLED_FUTURE.running())
        self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
        self.assertFalse(EXCEPTION_FUTURE.running())
        self.assertFalse(SUCCESSFUL_FUTURE.running())

    def test_result_with_timeout(self):
        self.assertRaises(futures.TimeoutError,
                          PENDING_FUTURE.result, timeout=0)
        self.assertRaises(futures.TimeoutError,
                          RUNNING_FUTURE.result, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_FUTURE.result, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
        self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
        self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)

    def test_result_with_success(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
        def notification():
            # Wait until the main thread is waiting for the result.
            time.sleep(1)
            f1.set_result(42)

        f1 = create_future(state=PENDING)
        t = threading.Thread(target=notification)
        t.start()

        self.assertEqual(f1.result(timeout=5), 42)
        t.join()

    def test_result_with_cancel(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
        def notification():
            # Wait until the main thread is waiting for the result.
            time.sleep(1)
            f1.cancel()

        f1 = create_future(state=PENDING)
        t = threading.Thread(target=notification)
        t.start()

        self.assertRaises(futures.CancelledError, f1.result, timeout=5)
        t.join()

    def test_exception_with_timeout(self):
        self.assertRaises(futures.TimeoutError,
                          PENDING_FUTURE.exception, timeout=0)
        self.assertRaises(futures.TimeoutError,
                          RUNNING_FUTURE.exception, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_FUTURE.exception, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
        self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
                                   OSError))
        self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)

    def test_exception_with_success(self):
        def notification():
            # Wait until the main thread is waiting for the exception.
            time.sleep(1)
            with f1._condition:
                f1._state = FINISHED
                f1._exception = OSError()
                f1._condition.notify_all()

        f1 = create_future(state=PENDING)
        t = threading.Thread(target=notification)
        t.start()

        self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
        t.join()

    def test_multiple_set_result(self):
        # Setting a result twice raises InvalidStateError; the first result
        # sticks.
        f = create_future(state=PENDING)
        f.set_result(1)

        with self.assertRaisesRegex(
                futures.InvalidStateError,
                'FINISHED: <Future at 0x[0-9a-f]+ '
                'state=finished returned int>'
        ):
            f.set_result(2)

        self.assertTrue(f.done())
        self.assertEqual(f.result(), 1)

    def test_multiple_set_exception(self):
        f = create_future(state=PENDING)
        e = ValueError()
        f.set_exception(e)

        with self.assertRaisesRegex(
                futures.InvalidStateError,
                'FINISHED: <Future at 0x[0-9a-f]+ '
                'state=finished raised ValueError>'
        ):
            f.set_exception(Exception())

        self.assertEqual(f.exception(), e)
# Snapshot of live threads, taken in setUpModule and checked in tearDownModule.
_threads_key = None
def setUpModule():
    # Record the set of running threads so tearDownModule can verify that the
    # tests did not leak any.
    global _threads_key
    _threads_key = test.support.threading_setup()
def tearDownModule():
    # Fail if the tests leaked threads, then reap multiprocessing leftovers.
    test.support.threading_cleanup(*_threads_key)
    multiprocessing.util._cleanup_tests()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
dataloader.py | # -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import collections
import gc
import math
import multiprocessing
import os
import platform
import queue
import random
import threading
import time
from typing import Callable, Union
import numpy as np
from ..device import _sh, get_default_device
from ..functional.tensor import copy
from ..logger import get_logger
from ..random.rng import _random_seed_generator
from ..tensor import Tensor
from .collator import Collator
from .dataset import Dataset, StreamDataset
from .sampler import MapSampler, Sampler, SequentialSampler, StreamSampler
from .transform import PseudoTransform, Transform
try:
import thread
except:
import _thread as thread
# Module-level logger for the dataloader machinery.
logger = get_logger(__name__)

# Seconds a worker blocks on an inter-process queue before re-checking the
# shutdown flag.
GLOBAL_TIMEOUT = 5
def raise_timeout_error():
    """Default ``timeout_event`` callback: abort the wait with a RuntimeError."""
    raise RuntimeError("dataloader timeout")
class DataLoader:
    r"""Provides a convenient way to iterate on a given dataset.

    DataLoader combines a dataset with
    :class:`~.Sampler`, :class:`~.Transform` and :class:`~.Collator`,
    make it flexible to get minibatch continually from a dataset.

    Args:
        dataset: dataset from which to load the minibatch.
        sampler: defines the strategy to sample data from the dataset.
        transform: defined the transforming strategy for a sampled batch.
            Default: None
        collator: defined the merging strategy for a transformed batch.
            Default: None
        num_workers: the number of sub-process to load, transform and collate
            the batch. ``0`` means using single-process. Default: 0
        timeout: if positive, means the timeout value(second) for collecting a
            batch from workers. Default: 0
        timeout_event: callback function triggered by timeout, default to raise
            runtime error.
        divide: define the paralleling strategy in multi-processing mode.
            ``True`` means one batch is divided into :attr:`num_workers` pieces, and
            the workers will process these pieces parallelly. ``False`` means
            different sub-process will process different batch. Default: False
        preload: whether to enable the preloading strategy of the dataloader. When enabling, the dataloader will preload one batch to the device memory to speed up the whole training process.
            All values in the map, list, and tuple will be converted to :class:`~.Tensor` by preloading, and you will get :class:`~.Tensor` instead of the original Numpy array or Python number.

    .. note::

        By enabling preload, tensors' host2device copy and device kernel execution will be overlapped, which will improve the training speed at the cost of higher device memory usage (due to one more batch data on device memory).
        This feature saves more time when your NN training time is short or your machine's host PCIe bandwidth for each device is low.
    """

    # Guards __setattr__-style misuse after construction (flipped at the end
    # of __init__).
    __initialized = False

    def __init__(
        self,
        dataset: Dataset,
        sampler: Sampler = None,
        transform: Transform = None,
        collator: Collator = None,
        num_workers: int = 0,
        timeout: int = 0,
        timeout_event: Callable = raise_timeout_error,
        divide: bool = False,
        preload: bool = False,
    ):
        # Validate the knobs that have hard constraints.
        if num_workers < 0:
            raise ValueError("num_workers should not be negative")

        if timeout < 0:
            raise ValueError("timeout should not be negative")

        if divide and num_workers <= 1:
            raise ValueError("divide should not be set to True when num_workers <= 1")

        self.dataset = dataset
        self.num_workers = num_workers
        self.timeout = timeout
        self.timeout_event = timeout_event
        self.divide = divide
        self.preload = preload

        # Pick (or validate) a sampler matching the dataset flavor: stream
        # datasets need a StreamSampler, map datasets a MapSampler.
        if isinstance(dataset, StreamDataset):
            self.sampler = sampler if sampler else StreamSampler(batch_size=1)
            assert isinstance(
                self.sampler, StreamSampler
            ), "types of dataset and sampler do not match"
        else:
            assert isinstance(
                dataset, Dataset
            ), "Can not recognize this kind of dataset: %s" % type(dataset)
            self.sampler = (
                sampler
                if sampler
                else SequentialSampler(dataset, batch_size=1, drop_last=False)
            )
            assert isinstance(
                self.sampler, MapSampler
            ), "types of dataset and sampler do not match"

        if divide:
            # In divide mode each worker gets batch_size/num_workers samples,
            # so the batch must be at least as large as the worker count.
            if self.sampler.batch_size <= self.num_workers:
                raise ValueError(
                    "batch size must not smaller than num_workers in divide mode."
                )
            elif self.sampler.batch_size % self.num_workers:
                logger.warning(
                    "batch size is not divisible by num_workers, may lose performance in divide mode."
                )

        if transform is None:
            self.transform = PseudoTransform()
        else:
            self.transform = transform

        if collator is None:
            self.collator = Collator()
        else:
            self.collator = collator

        self.__initialized = True

    def __iter__(self):
        # Multiprocess loading is backed by pyarrow.plasma, which is not
        # available on Windows or Termux; silently fall back to serial mode.
        if platform.system() == "Windows" and self.num_workers > 0:
            print(
                "pyarrow.plasma does not support ParallelDataLoader on windows, changing num_workers to be zero"
            )
            self.num_workers = 0
        if os.getenv("TERMUX_VERSION"):
            # FIXME: installing pyarrow on termux currently fails to build;
            # remove this fallback once pyarrow fixes the issue.
            print(
                "pyarrow do not support on termux env now, changing num_workers to be zero"
            )
            self.num_workers = 0
        # Dispatch to the iterator matching dataset flavor and worker count.
        if isinstance(self.dataset, StreamDataset):
            if not self.num_workers:
                return _SerialStreamDataLoaderIter(self, self.preload)
            else:
                return _ParallelStreamDataLoaderIter(self, self.preload)
        else:
            assert isinstance(
                self.dataset, Dataset
            ), "Can not recognize this kind of dataset: %s" % type(self.dataset)
            if not self.num_workers:
                return _SerialMapDataLoaderIter(self, self.preload)
            else:
                return _ParallelMapDataLoaderIter(self, self.preload)

    def __len__(self):
        # Number of batches per epoch, as defined by the sampler.
        return len(self.sampler)
class PreLoader:
    """Mixin holding the device-preload machinery shared by all iterator types.

    When ``preload`` is on, one batch is staged ahead of time on a secondary
    device slot so its host-to-device copy overlaps with compute.
    """

    def __init__(self, preload):
        if preload:
            self.default_device = get_default_device()
            # Secondary device slot (e.g. "gpu0:1") used for staging; the
            # index comes from the shared stream handle allocator.
            self.pre_load_device = self.default_device + ":" + str(_sh.get_next())
            # Holds the one staged batch, or None when empty.
            self.pre_load_device_cache = None
        self.preload = preload

    """
    strategy one: load from numpy data, and generate dtype tensor
    """

    def _load_tensor(self, batch, cached=True):
        # Recursively convert numpy arrays inside (possibly nested)
        # containers to Tensors. Mapping is tested before Sequence on
        # purpose, and namedtuples are rebuilt with their own type.
        # ``cached`` picks the staging device vs. the default device.
        if isinstance(batch, np.ndarray):
            device = self.pre_load_device if cached else self.default_device
            return Tensor(batch, device=device)
        elif isinstance(batch, collections.abc.Mapping):
            return {k: self._load_tensor(v, cached) for k, v in batch.items()}
        elif isinstance(batch, tuple) and hasattr(batch, "_fields"):  # namedtuple
            return type(batch)(*(self._load_tensor(value, cached) for value in batch))
        elif isinstance(batch, collections.abc.Sequence):
            return [self._load_tensor(value, cached) for value in batch]
        else:
            # Non-array leaves (ints, strings, ...) pass through untouched.
            return batch

    """
    strategy two: load from cache that is already tensor just do d2d copy
    """

    def _load_cache(self, data):
        # Recursively move staged Tensors to the default device (device-to-
        # device copy); containers mirror _load_tensor's traversal.
        if isinstance(data, Tensor):
            if data.device == self.default_device:
                return data
            return copy(data, device=self.default_device)
        elif isinstance(data, collections.abc.Mapping):
            return {k: self._load_cache(v) for k, v in data.items()}
        elif isinstance(data, tuple) and hasattr(data, "_fields"):  # namedtuple
            return type(data)(*(self._load_cache(value) for value in data))
        elif isinstance(data, collections.abc.Sequence):
            return [self._load_cache(value) for value in data]
        else:
            return data

    def _swap_out_cache(self):
        # Hand the staged batch to the caller (on the default device) and
        # mark the cache empty so the next batch can be staged.
        out = self._load_cache(self.pre_load_device_cache)
        self.pre_load_device_cache = None  # clean cache
        return out
class _BaseMapDataLoaderIter(PreLoader):
    """Base iterator over a map-style dataset; subclasses supply
    ``_get_next_batch`` (serial or multi-process)."""

    def __init__(self, loader, preload):
        super().__init__(preload)
        # Copy the loader's configuration so the iterator is self-contained.
        self.dataset = loader.dataset
        self.sampler = loader.sampler
        self.seed = _random_seed_generator().__next__()
        self.transform = loader.transform
        self.collator = loader.collator
        self.num_workers = loader.num_workers
        self.timeout = loader.timeout
        self.timeout_event = loader.timeout_event
        self.divide = loader.divide
        # Count of batches consumed so far (drives StopIteration).
        self.num_processed = 0

    def _get_next_batch(self):
        raise NotImplementedError

    def __len__(self):
        return len(self.sampler)

    def __iter__(self):
        return self

    def __next__(self):
        if self.preload:
            cached = self.pre_load_device_cache
            if cached is None:  # first and last
                if self.num_processed >= len(self):  # last
                    raise StopIteration
                elif self.num_processed == 0:  # first
                    self._try_load_tensor(cached=False)  # first do the h2d
            # Return the staged batch and immediately stage the next one so
            # the copy overlaps with the caller's compute.
            out = self._swap_out_cache()
            self._try_load_tensor()
            return out
        else:
            if self.num_processed >= len(self):
                raise StopIteration
            minibatch = self._get_next_batch()
            self.num_processed += 1
            return minibatch

    def _try_load_tensor(self, cached=True):
        # Stage the next batch onto the preload device, unless the epoch is
        # already exhausted (then the cache stays None and __next__ stops).
        if self.num_processed >= len(self):
            return
        else:
            self.num_processed += 1
            batch = self._get_next_batch()
            self.pre_load_device_cache = self._load_tensor(batch, cached)
class _SerialMapDataLoaderIter(_BaseMapDataLoaderIter):
    """Single-process iterator over a map-style dataset: fetch, transform,
    collate — all in the caller's process."""

    def __init__(self, loader, preload):
        super().__init__(loader, preload)
        self.indices_iter = iter(self.sampler)

    def _get_next_batch(self):
        # One sampler step yields the index list for a whole minibatch.
        batch_indices = next(self.indices_iter)
        samples = [self.dataset[index] for index in batch_indices]
        transformed = self.transform.apply_batch(samples)
        return self.collator.apply(transformed)
class _ParallelMapDataLoaderIter(_BaseMapDataLoaderIter):
    """Multi-process iterator over a map-style dataset.

    Pipeline: a feeding process pushes sampler indices into per-worker task
    queues; worker processes fetch+transform; a collecting process collates
    and pushes finished batches into a shared-memory batch queue read by
    ``_get_next_batch``.
    """

    # Set to True only once all child processes are up, so __del__ does not
    # try to shut down a half-built pipeline.
    __initialized = False

    def __init__(self, loader, preload):
        super(_ParallelMapDataLoaderIter, self).__init__(loader, preload)

        self.task_queues = [
            multiprocessing.Queue(maxsize=2) for _ in range(self.num_workers)
        ]

        # Shared counters/flags coordinating the child processes.
        self.feed_batch_idx = multiprocessing.Value("i", 0)
        self.target_batch_idx = multiprocessing.Value("i", 0)
        self.shutdown_flag = multiprocessing.Value("i", 0)

        self.trans_data_queues = [
            multiprocessing.Queue(maxsize=1) for _ in range(self.num_workers)
        ]

        # use shared-memory queue implemented by pyarrow plasma store.
        from .tools._queue import PlasmaShmQueue

        self.batch_queue = PlasmaShmQueue(maxsize=2)

        self.task_feeding_worker = multiprocessing.Process(
            target=_task_feeding_loop,
            args=(
                iter(self.sampler),
                self.task_queues,
                self.num_workers,
                self.divide,
                self.shutdown_flag,
                self.feed_batch_idx,
            ),
            daemon=True,
        )
        # gc before fork to avoid copying collectable garbage into the child.
        gc.collect()
        self.task_feeding_worker.start()

        self.workers = []
        for worker_id in range(self.num_workers):
            worker = multiprocessing.Process(
                target=_worker_loop,
                args=(
                    self.dataset,
                    self.task_queues[worker_id],
                    self.trans_data_queues[worker_id],
                    self.transform,
                    # Distinct per-worker seed so augmentations differ.
                    self.seed + worker_id + 1,
                    self.shutdown_flag,
                ),
                daemon=True,
            )
            gc.collect()
            worker.start()
            self.workers.append(worker)

        if self.divide:
            # divide mode: every worker holds a piece of the same batch, so
            # gather the pieces in order.
            self.data_collecting_worker = multiprocessing.Process(
                target=_data_gathering_loop,
                args=(
                    self.trans_data_queues,
                    self.batch_queue,
                    self.collator,
                    len(self),
                    self.num_workers,
                    self.shutdown_flag,
                    self.target_batch_idx,
                ),
                daemon=True,
            )
        else:
            # round-robin mode: each worker produced a full batch; select
            # them back in submission order.
            self.data_collecting_worker = multiprocessing.Process(
                target=_data_selecting_loop,
                args=(
                    self.trans_data_queues,
                    self.batch_queue,
                    self.collator,
                    len(self),
                    self.num_workers,
                    self.shutdown_flag,
                    self.target_batch_idx,
                ),
                daemon=True,
            )
        gc.collect()
        self.data_collecting_worker.start()

        self.__initialized = True

    def _check_workers(self):
        # Check the status of each worker.
        if not self.data_collecting_worker.is_alive():
            exitcode = self.data_collecting_worker.exitcode
            if exitcode != 0:
                raise RuntimeError("data collecting worker died. {}".format(exitcode))

        if not self.task_feeding_worker.is_alive():
            exitcode = self.task_feeding_worker.exitcode
            if exitcode != 0:
                raise RuntimeError("task feeding worker died. {}".format(exitcode))

        for worker_id, worker in enumerate(self.workers):
            if not worker.is_alive():
                exitcode = worker.exitcode
                if exitcode != 0:
                    raise RuntimeError("worker:{} died. {}".format(worker_id, exitcode))

        logger.debug("all workers are alive.")

    def _get_next_batch(self):
        # Poll the batch queue with a 1s tick so dead children are detected
        # promptly; an overall timeout (if configured) bounds the wait.
        start_time = time.time()
        while True:
            self._check_workers()
            try:
                return self.batch_queue.get(timeout=1)
            except queue.Empty:
                logger.debug("batch queue empty!")
            waited_time = time.time() - start_time
            if self.timeout > 0:
                if waited_time > self.timeout:
                    raise RuntimeError("get_next_batch timeout!")

    def _shutdown(self):
        # Signal children first, then terminate/join them, then tear down
        # the queues (cancel_join_thread avoids blocking on buffered data).
        with self.shutdown_flag.get_lock():
            self.shutdown_flag.value = 1

        if self.task_feeding_worker.is_alive():
            self.task_feeding_worker.terminate()
        self.task_feeding_worker.join()

        if self.data_collecting_worker.is_alive():
            self.data_collecting_worker.terminate()
        self.data_collecting_worker.join()

        for worker in self.workers:
            if worker.is_alive():
                worker.terminate()
            worker.join()

        for q in self.trans_data_queues:
            q.cancel_join_thread()
            q.close()

        for q in self.task_queues:
            q.cancel_join_thread()
            q.close()

        self.batch_queue.cancel_join_thread()
        self.batch_queue.close()

    def __del__(self):
        if self.__initialized:
            self._shutdown()
class _BaseStreamDataLoaderIter(PreLoader):
    """Base iterator over a stream-style dataset (no fixed length, no index
    sampling); subclasses supply ``_get_next_batch``."""

    def __init__(self, loader, preload):
        super().__init__(preload)
        self.dataset = loader.dataset
        self.sampler = loader.sampler
        self.transform = loader.transform
        self.collator = loader.collator
        self.num_workers = loader.num_workers
        self.timeout = loader.timeout
        self.timeout_event = loader.timeout_event

    def _get_next_batch(self):
        raise NotImplementedError

    def _process_raw_data(self, raw_data):
        # A stream dataset yields (batched, payload). When batched is False
        # the payload is a flat tuple of fields for ONE sample; when True it
        # is a tuple of per-field sequences. Either way, return a list of
        # per-sample field tuples.
        assert len(raw_data) == 2 and isinstance(
            raw_data[0], bool
        ), "StreamDataset should provide a binary tuple, the first item indicates whether the data was batched."
        if not raw_data[0]:
            data = list((x,) for x in raw_data[1])
        else:
            data = raw_data[1]
        ret = []
        for idx in range(len(data[0])):
            ret.append(tuple(e[idx] for e in data))
        return ret

    def __iter__(self):
        return self

    def __next__(self):
        if self.preload:
            # Streams have no known length, so unlike the map-style iterator
            # there is no StopIteration bookkeeping here.
            if self.pre_load_device_cache is None:
                self._try_load_tensor(cached=False)  # load in current
            out = self._swap_out_cache()
            self._try_load_tensor()  # load in cached
            return out
        else:
            return self._get_next_batch()

    def _try_load_tensor(self, cached=True):
        batch = self._get_next_batch()
        self.pre_load_device_cache = self._load_tensor(batch, cached)
class _SerialStreamDataLoaderIter(_BaseStreamDataLoaderIter):
    """Single-process iterator over a stream dataset; enforces the timeout by
    interrupting the blocked main thread."""

    def __init__(self, loader, preload):
        super().__init__(loader, preload)
        self.dataset_iter = iter(self.dataset)
        self.idx = 0
        # Samples left over from a previous raw chunk that did not fit into
        # the last batch.
        self.unused = []

    def _try_get_raw_data(self, start_time):
        raw_data = None
        while not raw_data:
            try:
                if self.timeout > 0:
                    # thread.interrupt_main raises KeyboardInterrupt in this
                    # (main) thread if next() blocks past the timeout.
                    timer = threading.Timer(self.timeout, thread.interrupt_main)
                    timer.start()
                raw_data = next(self.dataset_iter)
                if self.timeout > 0:
                    timer.cancel()
            except KeyboardInterrupt:
                # Timer fired: delegate to the user's timeout callback.
                raw_data = self.timeout_event()
            except:
                # Any other producer error: cancel the timer and fall back to
                # the timeout callback once the deadline has truly passed.
                if self.timeout > 0:
                    timer.cancel()
                    waited_time = time.time() - start_time
                    if waited_time > self.timeout:
                        raw_data = self.timeout_event()
        return raw_data

    def _get_next_batch(self):
        ret = []
        start_time = time.time()
        while len(ret) < self.sampler.batch_size:
            # Drain leftovers before pulling a fresh chunk from the stream.
            if len(self.unused) != 0:
                batch_data = self.unused
            else:
                raw_data = self._try_get_raw_data(start_time)
                batch_data = self._process_raw_data(raw_data)

            while len(batch_data) != 0 and len(ret) < self.sampler.batch_size:
                data = batch_data.pop()
                ret.append(self.transform.apply(data))
            # Whatever did not fit is kept for the next batch.
            self.unused = batch_data

        return self.collator.apply(ret)
class _ParallelStreamDataLoaderIter(_BaseStreamDataLoaderIter):
    """Multi-process iterator over a stream dataset.

    Pipeline: one receiving process pulls raw chunks from the dataset and
    round-robins individual samples onto per-worker raw queues; transform
    workers apply the transform; one collecting process re-interleaves the
    transformed samples, collates full batches and pushes them to a
    shared-memory batch queue read by ``_get_next_batch``.
    """

    # Set to True only once all child processes are up, so __del__ does not
    # try to shut down a half-built pipeline.
    __initialized = False

    def __init__(self, loader, preload):
        super().__init__(loader, preload)

        # Shared flag that tells every child process to exit.
        self.shutdown_flag = multiprocessing.Value("i", 0)

        self.raw_data_queues = [
            multiprocessing.Queue(maxsize=1) for _ in range(self.num_workers)
        ]

        self.trans_data_queues = [
            multiprocessing.Queue(maxsize=1) for _ in range(self.num_workers)
        ]

        # shared-memory queue implemented by pyarrow plasma store
        from .tools._queue import PlasmaShmQueue

        self.batch_queue = PlasmaShmQueue(maxsize=2)

        # NOTE(review): "recieve" is a long-standing typo kept for
        # compatibility with any external code touching this attribute.
        self.recieve_worker = multiprocessing.Process(
            target=self._worker_to_raw_data_queues, daemon=True
        )
        # gc before fork to avoid copying collectable garbage into the child.
        gc.collect()
        self.recieve_worker.start()

        self.transform_workers = []
        for worker_id in range(self.num_workers):
            worker = multiprocessing.Process(
                target=self._worker_to_trans_data_queues, args=(worker_id,), daemon=True
            )
            gc.collect()
            worker.start()
            self.transform_workers.append(worker)

        self.collect_worker = multiprocessing.Process(
            target=self._worker_to_batch_queue, daemon=True
        )
        gc.collect()
        self.collect_worker.start()

        self.__initialized = True

    def _put_raw_data_queues(self, raw_data, qidx):
        """Distribute the samples of one raw chunk round-robin over the raw
        queues, starting at *qidx*; returns the next queue index to use."""
        batch_data = self._process_raw_data(raw_data)
        for data in batch_data:
            while True:
                qidx = qidx % self.num_workers
                try:
                    self.raw_data_queues[qidx].put(data)
                    break
                except queue.Full:
                    if self.shutdown_flag.value == 1:
                        break
                    logger.debug("raw data queue %d is full" % qidx)
                finally:
                    # Advance even on Full so we probe the next queue.
                    qidx += 1
        return qidx

    def _worker_to_raw_data_queues(self):
        # Receiving process: pull chunks from the (infinite) stream and
        # scatter the samples to the workers until shutdown.
        dataset_iter = iter(self.dataset)
        qidx = 0
        while True:
            if self.shutdown_flag.value == 1:
                break
            raw_data = next(dataset_iter)
            qidx = self._put_raw_data_queues(raw_data, qidx)

    def _worker_to_trans_data_queues(self, worker_id):
        # Transform worker: raw queue -> transform -> transformed queue.
        while True:
            if self.shutdown_flag.value == 1:
                break
            try:
                data = self.raw_data_queues[worker_id].get(timeout=GLOBAL_TIMEOUT)
            except queue.Empty:
                continue
            trans_data = self.transform.apply(data)
            while True:
                try:
                    self.trans_data_queues[worker_id].put(trans_data)
                    break
                except queue.Full:
                    if self.shutdown_flag.value == 1:
                        break
                    # Bug fix: message read "batch queue if full".
                    logger.debug("batch queue is full")

    def _worker_to_batch_queue(self):
        # Collecting process: read the transformed queues in the same
        # round-robin order the receiver used, so sample order is preserved;
        # collate every batch_size samples into one batch.
        cnt = -1
        trans_items = []
        while True:
            if self.shutdown_flag.value == 1:
                break
            cnt += 1
            queue_id = cnt % self.num_workers
            try:
                trans_item = self.trans_data_queues[queue_id].get(
                    timeout=GLOBAL_TIMEOUT
                )
            except queue.Empty:
                continue
            trans_items.append(trans_item)
            if len(trans_items) == self.sampler.batch_size:
                batch_data = self.collator.apply(trans_items)
                while True:
                    try:
                        self.batch_queue.put(batch_data, timeout=1)
                        break
                    except queue.Full:
                        if self.shutdown_flag.value == 1:
                            break
                        logger.debug("batch queue is full")
                trans_items = []

    def _check_workers(self):
        # Raise if any child process died with a non-zero exit code.
        if not self.collect_worker.is_alive():
            exitcode = self.collect_worker.exitcode
            if exitcode != 0:
                raise RuntimeError("collator worker died. {}".format(exitcode))

        for worker_id, worker in enumerate(self.transform_workers):
            if not worker.is_alive():
                exitcode = worker.exitcode
                if exitcode != 0:
                    raise RuntimeError(
                        "worker: {} died. {}".format(worker_id, exitcode)
                    )

    def _get_next_batch(self):
        # Poll the batch queue with a 1s tick so dead children are detected
        # promptly; on overall timeout, inject the timeout_event() result
        # back into the pipeline as raw data.
        start_time = time.time()
        while True:
            self._check_workers()
            try:
                return self.batch_queue.get(timeout=1)
            except queue.Empty:
                logger.debug("batch queue empty!")
            waited_time = time.time() - start_time
            if self.timeout > 0 and waited_time > self.timeout:
                self._put_raw_data_queues(self.timeout_event(), 0)

    def _shutdown(self):
        # Signal children first, then terminate/join them, then tear down
        # the queues (cancel_join_thread avoids blocking on buffered data).
        with self.shutdown_flag.get_lock():
            self.shutdown_flag.value = 1

        if self.recieve_worker.is_alive():
            self.recieve_worker.terminate()
        self.recieve_worker.join()

        if self.collect_worker.is_alive():
            self.collect_worker.terminate()
        self.collect_worker.join()

        for worker in self.transform_workers:
            if worker.is_alive():
                worker.terminate()
            worker.join()

        for q in self.raw_data_queues:
            q.cancel_join_thread()
            q.close()

        for q in self.trans_data_queues:
            q.cancel_join_thread()
            q.close()

        self.batch_queue.cancel_join_thread()
        self.batch_queue.close()

    def __del__(self):
        if self.__initialized:
            self._shutdown()
def _task_feeding_loop(
indices_iter, task_queues, num_workers, divide, shutdown_flag, feed_batch_idx
):
# Feed the indices into the task queues
while True:
if shutdown_flag.value == 1:
break
batch_idx = feed_batch_idx.value
try:
indices = next(indices_iter)
except StopIteration:
break
if divide:
# make sure all task_queues is ready for put
while any([q.full() for q in task_queues]):
if shutdown_flag.value == 1:
return
# divide into small pieces, feed to different workers.
sub_num = math.ceil(len(indices) / num_workers)
for worker_id in range(num_workers):
sub_indices = indices[worker_id * sub_num : (worker_id + 1) * sub_num]
task_queues[worker_id].put((batch_idx, sub_indices))
else:
# distribute tasks to different workers uniformly.
target_id = batch_idx % num_workers
while task_queues[target_id].full():
if shutdown_flag.value == 1:
return
task_queues[target_id].put((batch_idx, indices))
with feed_batch_idx.get_lock():
feed_batch_idx.value += 1
def _worker_loop(dataset, task_queue, trans_data_queue, transform, seed, shutdown_flag):
    # Get dataset items and do the transform
    #
    # Runs in a worker process: pulls (batch_idx, indices) tasks, fetches the
    # dataset items, applies ``transform`` and forwards the result.  Seeds
    # both ``random`` and NumPy so augmentations differ per worker but are
    # reproducible for a given seed.
    random.seed(seed)
    np.random.seed(seed)
    while True:
        if shutdown_flag.value == 1:
            break
        try:
            task_queue.get(timeout=GLOBAL_TIMEOUT)
        except queue.Empty:
            continue
        if len(indices) > 0:
            items = [dataset[idx] for idx in indices]
            trans_items = transform.apply_batch(items)
        else:
            # in case of incomplete last batch
            trans_items = ()
        # Retry the put with a short timeout so shutdown is noticed even
        # when the downstream queue stays full.
        while True:
            try:
                trans_data_queue.put((batch_idx, trans_items), timeout=1)
                break
            except queue.Full:
                if shutdown_flag.value == 1:
                    break
                logger.debug("batch part queue is full!")
def _data_gathering_loop(
    trans_data_queues,
    batch_queue,
    collator,
    length,
    num_workers,
    shutdown_flag,
    target_idx,
):
    """Merge per-worker batch parts (divide mode) into full, ordered batches.

    The feeder split every batch across all ``num_workers`` transform
    queues; for each target batch index this loop collects one part from
    every worker queue (verifying it belongs to the expected batch),
    collates the merged items and pushes the result onto ``batch_queue``.
    Stops after ``length`` batches or when ``shutdown_flag`` is raised.
    """

    def _get_part(worker_id, wanted_idx):
        # Block until this worker yields its part; None signals shutdown.
        while True:
            try:
                return trans_data_queues[worker_id].get(timeout=GLOBAL_TIMEOUT)
            except queue.Empty:
                # BUGFIX: the old code merely broke the inner loop here,
                # then read the still-unbound ``batch_idx`` (NameError) or
                # collated a stale/incomplete batch.  Abort cleanly instead.
                if shutdown_flag.value == 1:
                    return None
                logger.debug(
                    "worker:{} data queue get timeout! target batch idx:{}".format(
                        worker_id, wanted_idx
                    )
                )

    while True:
        if shutdown_flag.value == 1:
            break
        target_batch_idx = target_idx.value
        if target_batch_idx >= length:
            break
        full_trans_items = []
        aborted = False
        for worker_id in range(num_workers):
            part = _get_part(worker_id, target_batch_idx)
            if part is None:
                aborted = True
                break
            batch_idx, trans_items = part
            if batch_idx != target_batch_idx:
                raise RuntimeError(
                    "Unexpected batch_idx in data gathering loop. worker_id:{}.".format(
                        worker_id
                    )
                )
            full_trans_items.extend(trans_items)
        if aborted:
            break
        # Merge different parts into a batch.
        full_batch = collator.apply(full_trans_items)
        while True:
            try:
                batch_queue.put(full_batch, timeout=1)
                break
            except queue.Full:
                if shutdown_flag.value == 1:
                    break
                logger.debug("batch queue is full!")
        with target_idx.get_lock():
            target_idx.value += 1
    batch_queue.disconnect_client()
def _data_selecting_loop(
    trans_data_queues,
    batch_queue,
    collator,
    length,
    num_workers,
    shutdown_flag,
    target_idx,
):
    """Forward whole batches to ``batch_queue`` in generation order.

    In non-divide mode batch ``i`` lives entirely on worker ``i %
    num_workers``; pulling from exactly that worker's queue guarantees the
    output order matches the order the indices were generated in.  Stops
    after ``length`` batches or when ``shutdown_flag`` is raised.
    """
    while True:
        if shutdown_flag.value == 1:
            break
        target_batch_idx = target_idx.value
        if target_batch_idx >= length:
            break
        target_worker_id = target_batch_idx % num_workers
        fetched = None
        while True:
            try:
                fetched = trans_data_queues[target_worker_id].get(
                    timeout=GLOBAL_TIMEOUT
                )
                break
            except queue.Empty:
                # BUGFIX: previously a shutdown here only broke the inner
                # loop and fell through to use the unbound ``batch_idx`` /
                # ``batch_data`` (NameError).  Abort the whole loop instead.
                if shutdown_flag.value == 1:
                    break
                logger.debug(
                    "worker:{} data queue get timeout! target batch idx:{}".format(
                        target_worker_id, target_batch_idx
                    )
                )
        if fetched is None:
            break
        batch_idx, trans_items = fetched
        if batch_idx != target_batch_idx:
            raise RuntimeError(
                "batch_idx {} mismatch the target_batch_idx {}".format(
                    batch_idx, target_batch_idx
                )
            )
        batch_data = collator.apply(trans_items)
        while True:
            try:
                batch_queue.put(batch_data, timeout=1)
                break
            except queue.Full:
                if shutdown_flag.value == 1:
                    break
                logger.debug("batch queue is full!")
        with target_idx.get_lock():
            target_idx.value += 1
    batch_queue.disconnect_client()
|
client_inside_server_test.py | import time
from multiprocessing.context import Process
import pytest
from zero import AsyncZeroClient, ZeroClient
from server1 import run as run1
from server2 import run as run2
@pytest.mark.asyncio
async def test_client_inside_server():
    """Exercise sync and async clients against server1 (which calls server2).

    Spawns both servers as separate processes and ALWAYS terminates them —
    the original leaked both processes whenever an assertion failed.
    """
    try:
        from pytest_cov.embed import cleanup_on_sigterm
    except ImportError:
        pass
    else:
        # Let coverage flush its data when the servers receive SIGTERM.
        cleanup_on_sigterm()

    p = Process(target=run1)
    p.start()
    p2 = Process(target=run2)
    p2.start()
    try:
        time.sleep(2)  # give both servers time to bind their ports

        client = ZeroClient("localhost", 7778)
        assert client.call("echo", "Hello") == "Server1: Hello"
        assert client.call("hello", None) == "Hello from server1"
        assert client.call("async_echo", "Hello") == "Server1: Hello"
        assert client.call("async_hello", None) == "Hello from server1"

        async_client = AsyncZeroClient("localhost", 7778)
        assert await async_client.call("echo", "Hello") == "Server1: Hello"
        assert await async_client.call("hello", None) == "Hello from server1"
        assert await async_client.call("async_echo", "Hello") == "Server1: Hello"
        assert await async_client.call("async_hello", None) == "Hello from server1"
    finally:
        # Terminate and reap the server processes regardless of test outcome.
        p.terminate()
        p2.terminate()
        p.join()
        p2.join()
|
samsungws.py | """
SamsungTVWS - Samsung Smart TV WS API wrapper
Copyright (C) 2019 Xchwarze
Copyright (C) 2020 Ollo69
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
import base64
import json
import logging
import re
import requests
import ssl
import subprocess
import sys
import time
import uuid
import websocket
from datetime import datetime
from enum import Enum
from threading import Thread, Lock
from yarl import URL
from . import exceptions
from . import shortcuts
# Regexes extracting min/avg/max[/mdev] RTT values from `ping` summary output
# (GNU iputils, busybox, and Windows formats respectively).
PING_MATCHER = re.compile(
    r"(?P<min>\d+.\d+)\/(?P<avg>\d+.\d+)\/(?P<max>\d+.\d+)\/(?P<mdev>\d+.\d+)"
)
PING_MATCHER_BUSYBOX = re.compile(
    r"(?P<min>\d+.\d+)\/(?P<avg>\d+.\d+)\/(?P<max>\d+.\d+)"
)
WIN32_PING_MATCHER = re.compile(r"(?P<min>\d+)ms.+(?P<max>\d+)ms.+(?P<avg>\d+)ms")
# Seconds a pending power-on request stays valid before being dropped.
DEFAULT_POWER_ON_DELAY = 120
# Minimum seconds between app-status scans (see get_running_app).
MIN_APP_SCAN_INTERVAL = 10
# If no ws ping/message arrived within this many seconds, treat the session as dead.
MAX_WS_PING_INTERVAL = 10
# App launch modes understood by the TV (see run_app).
TYPE_DEEP_LINK = "DEEP_LINK"
TYPE_NATIVE_LAUNCH = "NATIVE_LAUNCH"
_LOGGING = logging.getLogger(__name__)
def gen_uuid():
    """Generate a fresh random UUID (version 4) rendered as a string."""
    return "{}".format(uuid.uuid4())
class App:
    """Plain record describing one TV application (id, display name, type)."""

    def __init__(self, app_id, app_name, app_type):
        # Public attributes; callers read them directly.
        self.app_id, self.app_name, self.app_type = app_id, app_name, app_type
class ArtModeStatus(Enum):
    """Art-mode capability/state of the TV (The Frame models)."""
    Unsupported = 0  # TV has no art channel
    Unavailable = 1  # supported but TV unreachable / in standby
    Off = 2
    On = 3
class Ping:
    """The Class for handling the data retrieval."""

    def __init__(self, host, count):
        """Initialize the data object."""
        self._ip_address = host
        self._count = count
        self.available = False  # NOTE(review): set here but never updated by ping()
        if sys.platform == "win32":
            # -n count, -w per-reply timeout (ms)
            self._ping_cmd = [
                "ping",
                "-n",
                str(self._count),
                "-w",
                "2000",
                self._ip_address,
            ]
        else:
            # -n numeric, -q quiet summary, -c count, -W 2s reply timeout
            self._ping_cmd = [
                "ping",
                "-n",
                "-q",
                "-c",
                str(self._count),
                "-W2",
                self._ip_address,
            ]

    def ping(self):
        """Send ICMP echo request and return details if success."""
        pinger = subprocess.Popen(
            self._ping_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        try:
            out = pinger.communicate()
            _LOGGING.debug("Output is %s", str(out))
            # NOTE(review): ``out`` is a (stdout, stderr) bytes tuple; str(out)
            # keeps "\n" as literal escapes, so split("\n") typically yields one
            # element and the regex runs over the whole repr. Works, but fragile.
            if sys.platform == "win32":
                match = WIN32_PING_MATCHER.search(str(out).split("\n")[-1])
                rtt_min, rtt_avg, rtt_max = match.groups()
            elif "max/" not in str(out):
                match = PING_MATCHER_BUSYBOX.search(str(out).split("\n")[-1])
                rtt_min, rtt_avg, rtt_max = match.groups()
            else:
                match = PING_MATCHER.search(str(out).split("\n")[-1])
                rtt_min, rtt_avg, rtt_max, rtt_mdev = match.groups()
            return True
        except (subprocess.CalledProcessError, AttributeError):
            # AttributeError: no RTT summary matched (host down) -> match is None.
            return False
class SamsungTVWS:
    """Websocket client for Samsung Smart TVs: remote control, app control
    and art-mode channels, plus the port-8001 REST API."""

    # Websocket channel endpoints and the (plain HTTP, port 8001) REST template.
    _WS_ENDPOINT_REMOTE_CONTROL = "/api/v2/channels/samsung.remote.control"
    _WS_ENDPOINT_APP_CONTROL = "/api/v2"
    _WS_ENDPOINT_ART = "/api/v2/channels/com.samsung.art-app"
    _REST_URL_FORMAT = "http://{host}:8001/api/v2/{append}"
    def __init__(
        self,
        host,
        token=None,
        token_file=None,
        port=8001,
        timeout=None,
        key_press_delay=1.0,
        name="SamsungTvRemote",
        app_list=None,
    ):
        """Create a client for the TV at *host*.

        token/token_file: auth token used on wss (port 8002); the file,
        when given, takes precedence and is kept up to date.
        timeout: socket timeout in seconds (0 is treated as no timeout).
        key_press_delay: default pause after each sent key.
        app_list: optional {app_name: app_id} subset to track instead of
        the full installed-app list.
        """
        self.host = host
        self.token = token
        self.token_file = token_file
        self.port = port or 8001
        self.timeout = None if timeout == 0 else timeout
        self.key_press_delay = key_press_delay
        self.name = name or "SamsungTvRemote"
        self.connection = None  # ad-hoc remote socket created by open()
        self._app_list = app_list
        # Power / art-mode request state machine.
        self._artmode_status = ArtModeStatus.Unsupported
        self._power_on_requested = False
        self._power_on_requested_time = datetime.min
        self._power_on_delay = DEFAULT_POWER_ON_DELAY
        self._power_on_artmode = False
        # App bookkeeping.
        self._installed_app = {}
        self._running_app = None
        self._app_type = {}
        self._sync_lock = Lock()
        self._last_app_scan = datetime.min
        self._last_ping = datetime.min
        # Threaded websocket clients: remote, app-control and art channels.
        self._is_connected = False
        self._ws_remote = None
        self._client_remote = None
        self._ws_control = None
        self._client_control = None
        self._ws_art = None
        self._client_art = None
        # 2 = not yet probed (see start_client / _on_message_art), 1 = yes, 0 = no.
        self._client_art_supported = 2
        self._ping = Ping(self.host, 1)
    def __enter__(self):
        """Context-manager entry; no eager connection is made."""
        return self

    def __exit__(self, type, value, traceback):
        """Context-manager exit: close the ad-hoc remote connection."""
        self.close()
def _serialize_string(self, string):
if isinstance(string, str):
string = str.encode(string)
return base64.b64encode(string).decode("utf-8")
    def _is_ssl_connection(self):
        """Port 8002 is the TLS (wss) variant of the TV API."""
        return self.port == 8002
def _format_websocket_url(self, path, is_ssl=False, use_token=True):
scheme = "wss" if is_ssl else "ws"
if is_ssl and use_token:
token = self._get_token()
else:
token = ""
new_uri = URL.build(
scheme=scheme,
host=self.host,
port=self.port,
path=path,
query={"name": self._serialize_string(self.name)}
)
if token:
return str(new_uri.update_query({"token": token}))
return str(new_uri)
def _format_rest_url(self, append=""):
params = {
"host": self.host,
"append": append,
}
return self._REST_URL_FORMAT.format(**params)
def _get_token(self):
if self.token_file is not None:
try:
with open(self.token_file, "r") as token_file:
return token_file.readline()
except:
return ""
else:
return self.token
    def _set_token(self, token):
        """Persist a token received from the TV (to file if configured, else memory)."""
        _LOGGING.debug("New token %s", token)
        if self.token_file is not None:
            _LOGGING.debug("Save new token to file %s", self.token_file)
            with open(self.token_file, "w") as token_file:
                token_file.write(token)
        else:
            self.token = token
    def _ws_send(self, command, key_press_delay=None, *, use_control=False, ws_socket=None):
        """Serialize *command* to JSON and send it on the right websocket.

        Returns True on success, False otherwise.  On a closed connection
        the client threads are (re)started so a later call may succeed.
        key_press_delay: explicit pause after sending; None uses the
        instance default, 0 disables the pause.
        """
        using_remote = False
        if not use_control:
            # Remote-control channel: prefer the threaded socket, else open
            # an ad-hoc direct connection.
            if self._ws_remote:
                connection = self._ws_remote
                using_remote = True
            else:
                connection = self.open()
        elif ws_socket:
            connection = ws_socket
        else:
            # Control/art socket requested but not running: start the client
            # threads and report failure for this call.
            self.start_client(start_all=True)
            return False
        payload = json.dumps(command)
        try:
            connection.send(payload)
        except websocket.WebSocketConnectionClosedException:
            _LOGGING.warning("_ws_send: connection is closed, send command failed")
            if using_remote or use_control:
                _LOGGING.info("_ws_send: try to restart communication threads")
                self.start_client(start_all=use_control)
            return False
        except websocket.WebSocketTimeoutException:
            _LOGGING.warning("_ws_send: timeout error sending command %s", payload)
            return False
        if using_remote:
            # we consider a message sent valid as a ping
            self._last_ping = datetime.now()
        if key_press_delay is None:
            if self.key_press_delay > 0:
                time.sleep(self.key_press_delay)
        elif key_press_delay > 0:
            time.sleep(key_press_delay)
        return True
def _rest_request(self, target, method="GET"):
url = self._format_rest_url(target)
try:
if method == "POST":
return requests.post(url, timeout=self.timeout)
elif method == "PUT":
return requests.put(url, timeout=self.timeout)
elif method == "DELETE":
return requests.delete(url, timeout=self.timeout)
else:
return requests.get(url, timeout=self.timeout)
except requests.ConnectionError:
raise exceptions.HttpApiError(
"TV unreachable or feature not supported on this model."
)
def _process_api_response(self, response):
try:
return json.loads(response)
except json.JSONDecodeError:
_LOGGING.debug(
"Failed to parse response from TV. response text: %s", response
)
raise exceptions.ResponseError(
"Failed to parse response from TV. Maybe feature not supported on this model"
)
def _check_conn_id(self, resp_data):
if not resp_data:
return False
msg_id = resp_data.get("id")
if not msg_id:
return False
clients_info = resp_data.get("clients")
for client in clients_info:
device_name = client.get("deviceName")
if device_name:
if device_name == self._serialize_string(self.name):
conn_id = client.get("id", "")
if conn_id == msg_id:
return True
return False
    def _client_remote_thread(self):
        """Thread body: run the remote-control websocket until it drops.

        This is the primary channel: when it dies, the art and control
        sockets are closed too so their threads terminate as well.
        """
        if self._ws_remote:
            return
        is_ssl = self._is_ssl_connection()
        url = self._format_websocket_url(
            self._WS_ENDPOINT_REMOTE_CONTROL,
            is_ssl=is_ssl
        )
        # TVs use a self-signed certificate, so skip verification on wss.
        sslopt = {"cert_reqs": ssl.CERT_NONE} if is_ssl else {}
        websocket.setdefaulttimeout(self.timeout)
        self._ws_remote = websocket.WebSocketApp(
            url,
            on_message=self._on_message_remote,
            on_ping=self._on_ping_remote,
        )
        _LOGGING.debug("Thread SamsungRemote started")
        # we set ping interval (1 hour) only to enable multi-threading mode
        # on socket. TV do not answer to ping but send ping to client
        self._ws_remote.run_forever(
            sslopt=sslopt, ping_interval=3600
        )
        self._is_connected = False
        if self._ws_art:
            self._ws_art.close()
        if self._ws_control:
            self._ws_control.close()
        self._ws_remote.close()
        self._ws_remote = None
        _LOGGING.debug("Thread SamsungRemote terminated")
    def _on_ping_remote(self, _, payload):
        """Reply to TV-initiated websocket pings and record session liveness."""
        _LOGGING.debug("Received ping %s, sending pong", payload)
        self._last_ping = datetime.now()
        if self._ws_remote.sock:
            try:
                self._ws_remote.sock.pong(payload)
            except Exception as ex:
                # Best-effort: a failed pong only delays liveness detection.
                _LOGGING.warning("send_pong failed: {}".format(ex))
    def _on_message_remote(self, _, message):
        """Dispatch events arriving on the remote-control channel."""
        response = self._process_api_response(message)
        _LOGGING.debug(response)
        event = response.get("event")
        if not event:
            return
        # we consider a message valid as a ping
        self._last_ping = datetime.now()
        if event == "ms.channel.connect":
            conn_data = response.get("data")
            if not self._check_conn_id(conn_data):
                return
            _LOGGING.debug("Message remote: received connect")
            token = conn_data.get("token")
            if token:
                self._set_token(token)
            self._is_connected = True
            self._request_apps_list()
            # Primary channel is up: bring up the control/art sockets too.
            self.start_client(start_all=True)
        elif event == "ed.installedApp.get":
            _LOGGING.debug("Message remote: received installedApp")
            self._handle_installed_app(response)
        elif event == "ed.edenTV.update":
            # TV UI state changed: refresh which app is running.
            _LOGGING.debug("Message remote: received edenTV")
            self.get_running_app(force_scan=True)
    def _request_apps_list(self):
        """Ask the TV (on the remote channel) for its installed applications."""
        _LOGGING.debug("Request app list")
        self._ws_send(
            {
                "method": "ms.channel.emit",
                "params": {"event": "ed.installedApp.get", "to": "host"},
            },
            key_press_delay=0,
        )
def _handle_installed_app(self, response):
list_app = response.get("data", {}).get("data")
installed_app = {}
for app_info in list_app:
app_id = app_info["appId"]
_LOGGING.debug("Found app: %s", app_id)
app = App(app_id, app_info["name"], app_info["app_type"])
installed_app[app_id] = app
self._installed_app = installed_app
    def _client_control_thread(self):
        """Thread body: run the app-control websocket until it drops.

        Uses no token (use_token=False): the app-control endpoint does not
        require one in this codebase.
        """
        if self._ws_control:
            return
        is_ssl = self._is_ssl_connection()
        url = self._format_websocket_url(
            self._WS_ENDPOINT_APP_CONTROL,
            is_ssl=is_ssl,
            use_token=False
        )
        sslopt = {"cert_reqs": ssl.CERT_NONE} if is_ssl else {}
        websocket.setdefaulttimeout(self.timeout)
        self._ws_control = websocket.WebSocketApp(
            url,
            on_message=self._on_message_control,
        )
        _LOGGING.debug("Thread SamsungControl started")
        # we set ping interval (1 hour) only to enable multi-threading mode
        # on socket. TV do not answer to ping but send ping to client
        self._ws_control.run_forever(
            sslopt=sslopt, ping_interval=3600
        )
        self._ws_control.close()
        self._ws_control = None
        _LOGGING.debug("Thread SamsungControl terminated")
    def _on_message_control(self, _, message):
        """Dispatch results/errors/events arriving on the app-control channel."""
        response = self._process_api_response(message)
        _LOGGING.debug(response)
        result = response.get("result")
        if result:
            # Reply to an app-status query (see _get_app_status).
            self._set_running_app(response)
            return
        error = response.get("error")
        if error:
            self._manage_control_err(response)
            return
        event = response.get("event")
        if not event:
            return
        if event == "ms.channel.connect":
            conn_data = response.get("data")
            if not self._check_conn_id(conn_data):
                return
            _LOGGING.debug("Message control: received connect")
            self.get_running_app()
        elif event == "ed.installedApp.get":
            _LOGGING.debug("Message control: received installedApp")
            self._handle_installed_app(response)
    def _set_running_app(self, response):
        """Track the currently visible app from an app-status reply.

        ``result`` is either a plain bool ("is it running?") or a dict with
        a "visible" flag; both forms update self._running_app.
        """
        app_id = response.get("id")
        if not app_id:
            return
        result = response.get("result")
        if result is None:
            return
        elif isinstance(result, bool):
            is_running = result
        else:
            is_running = result.get("visible")
            if is_running is None:
                return
        if self._running_app:
            if is_running and app_id != self._running_app:
                _LOGGING.debug("app running: %s", app_id)
                self._running_app = app_id
            elif not is_running and app_id == self._running_app:
                _LOGGING.debug("app stopped: %s", app_id)
                self._running_app = None
        elif is_running:
            _LOGGING.debug("app running: %s", app_id)
            self._running_app = app_id
    def _manage_control_err(self, response):
        """Handle an error reply from the app-control channel.

        Currently only 404 (app not found) is inspected; it is logged only
        when we have an installed-app list that should have contained it.
        """
        app_id = response.get("id")
        if not app_id:
            return
        error_code = response.get("error", {}).get("code", 0)
        if error_code == 404:  # Not found error
            if self._installed_app:
                if app_id not in self._installed_app:
                    _LOGGING.error("App ID %s not found", app_id)
                return
            # Previous fallback (disabled): re-tag DEEP_LINK apps as NATIVE_LAUNCH.
            # app_type = self._app_type.get(app_id)
            # if app_type is None:
            #     _LOGGING.info(
            #         "App ID %s with type DEEP_LINK not found, set as NATIVE_LAUNCH",
            #         app_id,
            #     )
            #     self._app_type[app_id] = 4
    def _get_app_status(self, app_id, app_type):
        """Query the control socket for whether *app_id* is running/visible."""
        _LOGGING.debug("Get app status: AppID: %s, AppType: %s", app_id, app_type)
        # if app_type == 4:
        #     method = "ms.webapplication.get"
        # else:
        #     method = "ms.application.get"
        if app_type == 4:  # app type 4 always return not found error
            return
        method = "ms.application.get"
        self._ws_send(
            {
                "id": app_id,
                "method": method,
                "params": {"id": app_id},
            },
            key_press_delay=0,
            use_control=True,
            ws_socket=self._ws_control,
        )
    def _client_art_thread(self):
        """Thread body: run the art-mode websocket (Frame TVs) until it drops."""
        if self._ws_art:
            return
        is_ssl = self._is_ssl_connection()
        url = self._format_websocket_url(
            self._WS_ENDPOINT_ART,
            is_ssl=is_ssl,
            use_token=False
        )
        sslopt = {"cert_reqs": ssl.CERT_NONE} if is_ssl else {}
        websocket.setdefaulttimeout(self.timeout)
        self._ws_art = websocket.WebSocketApp(
            url,
            on_message=self._on_message_art,
        )
        _LOGGING.debug("Thread SamsungArt started")
        # we set ping interval (1 hour) only to enable multi-threading mode
        # on socket. TV do not answer to ping but send ping to client
        self._ws_art.run_forever(
            sslopt=sslopt, ping_interval=3600
        )
        self._ws_art.close()
        self._ws_art = None
        _LOGGING.debug("Thread SamsungArt terminated")
    def _on_message_art(self, _, message):
        """Dispatch events arriving on the art-mode channel."""
        response = self._process_api_response(message)
        _LOGGING.debug(response)
        event = response.get("event")
        if not event:
            return
        if event == "ms.channel.connect":
            conn_data = response.get("data")
            if not self._check_conn_id(conn_data):
                return
            _LOGGING.debug("Message art: received connect")
            # Art channel answered: mark it as supported.
            self._client_art_supported = 1
        elif event == "ms.channel.ready":
            _LOGGING.debug("Message art: channel ready")
            self._get_artmode_status()
        elif event == "d2d_service_message":
            _LOGGING.debug("Message art: d2d message")
            self._handle_artmode_status(response)
    def _get_artmode_status(self):
        """Request the current art-mode status on the art channel.

        The reply arrives asynchronously as a d2d_service_message handled
        by _handle_artmode_status.
        """
        _LOGGING.debug("Sending get_art_status")
        msg_data = {
            "request": "get_artmode_status",
            "id": gen_uuid(),
        }
        self._ws_send(
            {
                "method": "ms.channel.emit",
                "params": {
                    "data": json.dumps(msg_data),
                    "to": "host",
                    "event": "art_app_request",
                },
            },
            key_press_delay=0,
            use_control=True,
            ws_socket=self._ws_art,
        )
    def _handle_artmode_status(self, response):
        """Update art-mode state from a d2d message; satisfy a pending power-on.

        A queued power-on request is completed by sending KEY_POWER when the
        reported art-mode state differs from the requested target state.
        """
        data_str = response.get("data")
        if not data_str:
            return
        # The payload is JSON-within-JSON.
        data = self._process_api_response(data_str)
        event = data.get("event", "")
        if event == "art_mode_changed":
            status = data.get("status", "")
            if status == "on":
                artmode_status = ArtModeStatus.On
            else:
                artmode_status = ArtModeStatus.Off
        elif event == "artmode_status":
            value = data.get("value", "")
            if value == "on":
                artmode_status = ArtModeStatus.On
            else:
                artmode_status = ArtModeStatus.Off
        elif event == "go_to_standby":
            artmode_status = ArtModeStatus.Unavailable
        elif event == "wakeup":
            # TV woke up: re-query, state will arrive in a later message.
            self._get_artmode_status()
            return
        else:
            # Unknown message
            return
        if self._power_on_requested and artmode_status != ArtModeStatus.Unavailable:
            # KEY_POWER toggles between art mode and full-on.
            if artmode_status == ArtModeStatus.On and not self._power_on_artmode:
                self.send_key("KEY_POWER", key_press_delay=0)
            elif artmode_status == ArtModeStatus.Off and self._power_on_artmode:
                self.send_key("KEY_POWER", key_press_delay=0)
            self._power_on_requested = False
        self._artmode_status = artmode_status
    @property
    def is_connected(self):
        """True while the remote-control websocket session is established."""
        return self._is_connected

    @property
    def artmode_status(self):
        """Last known ArtModeStatus reported by the art channel."""
        return self._artmode_status

    @property
    def installed_app(self):
        """Mapping of app_id -> App discovered from the TV."""
        return self._installed_app

    @property
    def running_app(self):
        """app_id of the currently visible app, or None."""
        return self._running_app
    def ping_device(self):
        """Return True when the TV is reachable AND the ws session looks alive.

        An ICMP ping alone is not enough: if the remote websocket is running
        but has seen no traffic within MAX_WS_PING_INTERVAL seconds the TV is
        reported unavailable.  Also expires a pending power-on request after
        self._power_on_delay seconds.
        """
        result = self._ping.ping()
        # check ws ping/pong
        call_time = datetime.now()
        if result and self._ws_remote:
            difference = (call_time - self._last_ping).total_seconds()
            result = difference < MAX_WS_PING_INTERVAL
        if not result:
            self.stop_client()
            if self._artmode_status != ArtModeStatus.Unsupported:
                self._artmode_status = ArtModeStatus.Unavailable
        if self._power_on_requested:
            difference = (call_time - self._power_on_requested_time).total_seconds()
            if difference > self._power_on_delay:
                self._power_on_requested = False
        return result
def set_power_on_request(self, set_art_mode=False, power_on_delay=0):
self._power_on_requested = True
self._power_on_requested_time = datetime.now()
self._power_on_artmode = set_art_mode
self._power_on_delay = max(power_on_delay, 0) or DEFAULT_POWER_ON_DELAY
    def set_power_off_request(self):
        """Cancel any pending power-on request."""
        self._power_on_requested = False
    def get_running_app(self, *, force_scan=False):
        """Poll app status (on the control socket) for the tracked app set.

        Rate limited: at most one scan per MIN_APP_SCAN_INTERVAL seconds
        (with a hard 1-second floor even when force_scan).  Scans
        self._app_list when configured, otherwise every installed app.
        Results arrive asynchronously via _set_running_app.
        """
        if not self._ws_control:
            return
        with self._sync_lock:
            call_time = datetime.now()
            difference = (call_time - self._last_app_scan).total_seconds()
            if (difference < MIN_APP_SCAN_INTERVAL and not force_scan) or difference < 1:
                return
            self._last_app_scan = call_time
            if self._app_list is not None:
                app_to_check = {}
                for app_name, app_id in self._app_list.items():
                    app = None
                    if self._installed_app:
                        app = self._installed_app.get(app_id)
                    else:
                        # No installed list yet: fall back to the cached type
                        # (default 2 = DEEP_LINK capable).
                        app_type = self._app_type.get(app_id, 2)
                        if app_type <= 4:
                            app = App(app_id, app_name, app_type)
                    if app:
                        app_to_check[app_id] = app
            else:
                app_to_check = self._installed_app
            for app in app_to_check.values():
                self._get_app_status(app.app_id, app.app_type)
def start_client(self, *, start_all=False):
"""Start all thread that connect to the TV websocket"""
if self._client_remote is None or not self._client_remote.is_alive():
self._client_remote = Thread(target=self._client_remote_thread)
self._client_remote.name = "SamsungRemote"
self._client_remote.setDaemon(True)
self._client_remote.start()
return
if start_all:
if self._client_control is None or not self._client_control.is_alive():
self._client_control = Thread(target=self._client_control_thread)
self._client_control.name = "SamsungControl"
self._client_control.setDaemon(True)
self._client_control.start()
if (
self._client_art_supported > 0 and
(self._client_art is None or not self._client_art.is_alive())
):
if self._client_art_supported > 1:
self._client_art_supported = 0
self._client_art = Thread(target=self._client_art_thread)
self._client_art.name = "SamsungArt"
self._client_art.setDaemon(True)
self._client_art.start()
    def stop_client(self):
        """Stop all client threads by closing the primary remote socket.

        The remote thread's teardown closes the control/art sockets too
        (see _client_remote_thread).
        """
        if self._ws_remote:
            self._ws_remote.close()
    def open(self):
        """Open (or reuse) a direct remote-control websocket connection.

        Performs the ms.channel.connect handshake, storing any token the TV
        hands back.  Raises ConnectionFailure when the handshake does not
        complete within a few messages.
        """
        if self.connection is not None:
            return self.connection
        is_ssl = self._is_ssl_connection()
        url = self._format_websocket_url(
            self._WS_ENDPOINT_REMOTE_CONTROL,
            is_ssl=is_ssl
        )
        sslopt = {"cert_reqs": ssl.CERT_NONE} if is_ssl else {}
        _LOGGING.debug("WS url %s", url)
        connection = websocket.create_connection(url, self.timeout, sslopt=sslopt)
        completed = False
        response = ""
        # Give the handshake a few messages to land; bail on any other event.
        for iteration in range(3):
            response = self._process_api_response(connection.recv())
            _LOGGING.debug(response)
            event = response.get("event", "-")
            if event != "ms.channel.connect":
                break
            conn_data = response.get("data")
            if self._check_conn_id(conn_data):
                completed = True
                token = conn_data.get("token")
                if token:
                    self._set_token(token)
                break
        if not completed:
            self.close()
            raise exceptions.ConnectionFailure(response)
        self.connection = connection
        return connection
def close(self):
if self.connection:
self.connection.close()
_LOGGING.debug("Connection closed.")
self.connection = None
    def send_key(self, key, key_press_delay=None, cmd="Click"):
        """Send a remote-control key (cmd: "Click", "Press" or "Release").

        Returns the result of _ws_send (True on success).
        """
        _LOGGING.debug("Sending key %s", key)
        return self._ws_send(
            {
                "method": "ms.remote.control",
                "params": {
                    "Cmd": cmd,
                    "DataOfCmd": key,
                    "Option": "false",
                    "TypeOfRemote": "SendRemoteKey",
                },
            },
            key_press_delay,
        )
    def hold_key(self, key, seconds):
        """Press *key*, hold it for *seconds*, then release it.

        Returns False when the initial Press could not be sent.
        """
        if self.send_key(key, key_press_delay=0, cmd="Press"):
            time.sleep(seconds)
            return self.send_key(key, key_press_delay=0, cmd="Release")
        return False
def send_text(self, text, send_delay=None):
if not text:
return False
base64_text = self._serialize_string(text)
if self._ws_send(
{
"method": "ms.remote.control",
"params": {
"Cmd": f"{base64_text}",
"DataOfCmd": "base64",
"TypeOfRemote": "SendInputString",
},
},
key_press_delay=send_delay,
):
self._ws_send(
{
"method": "ms.remote.control",
"params": {
"TypeOfRemote": "SendInputEnd",
},
},
key_press_delay=0,
)
return True
return False
    def move_cursor(self, x, y, duration=0):
        """Send a mouse Move command for (x, y) over *duration*.

        NOTE(review): whether (x, y) is absolute or relative is determined
        by the TV protocol, not visible here — confirm before relying on it.
        """
        self._ws_send(
            {
                "method": "ms.remote.control",
                "params": {
                    "Cmd": "Move",
                    "Position": {"x": x, "y": y, "Time": str(duration)},
                    "TypeOfRemote": "ProcessMouseDevice",
                },
            },
            key_press_delay=0,
        )
    def run_app(self, app_id, action_type="", meta_tag="", *, use_remote=False):
        """Launch an app, preferring the app-control socket for DEEP_LINK apps.

        action_type defaults from the app's known type (2 -> DEEP_LINK,
        else NATIVE_LAUNCH); any explicit value other than NATIVE_LAUNCH is
        coerced to DEEP_LINK.  use_remote forces the remote-channel launch.
        Returns the result of _ws_send.
        """
        if not action_type:
            app = self._installed_app.get(app_id)
            if app:
                app_type = app.app_type
            else:
                app_type = self._app_type.get(app_id, 2)
            action_type = TYPE_DEEP_LINK if app_type == 2 else TYPE_NATIVE_LAUNCH
        elif action_type != TYPE_NATIVE_LAUNCH:
            action_type = TYPE_DEEP_LINK
        _LOGGING.debug(
            "Sending run app app_id: %s app_type: %s meta_tag: %s",
            app_id,
            action_type,
            meta_tag,
        )
        if self._ws_control and action_type == TYPE_DEEP_LINK and not use_remote:
            return self._ws_send(
                {
                    "id": app_id,
                    "method": "ms.application.start",
                    "params": {"id": app_id},
                },
                key_press_delay=0,
                use_control=True,
                ws_socket=self._ws_control,
            )
        return self._ws_send(
            {
                "method": "ms.channel.emit",
                "params": {
                    "event": "ed.apps.launch",
                    "to": "host",
                    "data": {
                        # action_type: NATIVE_LAUNCH / DEEP_LINK
                        # app_type == 2 ? 'DEEP_LINK' : 'NATIVE_LAUNCH',
                        "action_type": action_type,
                        "appId": app_id,
                        "metaTag": meta_tag,
                    },
                },
            },
            key_press_delay=0,
        )
    def open_browser(self, url):
        """Open *url* in the TV's Tizen browser (NATIVE_LAUNCH with url as meta tag)."""
        _LOGGING.debug("Opening url in browser %s", url)
        return self.run_app("org.tizen.browser", TYPE_NATIVE_LAUNCH, url)
    def rest_device_info(self):
        """GET /api/v2/ — parsed device information."""
        _LOGGING.debug("Get device info via rest api")
        response = self._rest_request("")
        return self._process_api_response(response.text)

    def rest_app_status(self, app_id):
        """GET /api/v2/applications/<app_id> — parsed app status."""
        _LOGGING.debug("Get app %s status via rest api", app_id)
        response = self._rest_request("applications/" + app_id)
        return self._process_api_response(response.text)

    def rest_app_run(self, app_id):
        """POST /api/v2/applications/<app_id> — launch the app."""
        _LOGGING.debug("Run app %s via rest api", app_id)
        response = self._rest_request("applications/" + app_id, "POST")
        return self._process_api_response(response.text)

    def rest_app_close(self, app_id):
        """DELETE /api/v2/applications/<app_id> — close the app."""
        _LOGGING.debug("Close app %s via rest api", app_id)
        response = self._rest_request("applications/" + app_id, "DELETE")
        return self._process_api_response(response.text)

    def rest_app_install(self, app_id):
        """PUT /api/v2/applications/<app_id> — install the app."""
        _LOGGING.debug("Install app %s via rest api", app_id)
        response = self._rest_request("applications/" + app_id, "PUT")
        return self._process_api_response(response.text)
    def shortcuts(self):
        """Return a helper exposing named key shortcuts bound to this TV."""
        return shortcuts.SamsungTVShortcuts(self)
|
tamilgun_uniqueip.py | import sys
import threading
import time
import socket
import os
try: from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
fo = open("thread.txt", "r+")
fo.write("30");
# Close opend file
fo.close()
glob=1
def task(page,srcip):
global glob
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((srcip,0))
s.connect(('94.156.175.155', 80))
#s.settimeout(1)
s.sendall(
'GET /livesearch.php?q='+str(glob)+'&search=Search HTTP/1.1\r\n' +
'Host: www.tamildbox.com \r\n'+
'User-Agent: Mozilla/5.0 (X11; Ubuntu; linux x86_64; rv:53.0) Gecko/20100101 Firefox/53.0 \r\n'+
'Accept:text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8 \r\n'+
'Accept-Language: en-US,en;q=0.5 \r\n'+
'Connection: close \r\n'+
'\r\n'
)
chunk = s.recv(65536)
print srcip,'\t','page=',glob,'\t',chunk[0:15]
glob=glob+1
iplist=[]
try:
ip_bits=sys.argv[1].split('.');
host_bit=int(ip_bits[3]);
except:
print "Invalid ip address"
sys.exit(0)
try:
no_of_ip=int(sys.argv[2])
except:
print "Invalid 2nd Argv"
sys.exit(0)
for i in range(0,no_of_ip):
iplist.append(ip_bits[0]+'.'+ip_bits[1]+'.'+ip_bits[2]+'.'+str(host_bit+i))
print i
g=0
while True:
try:
file=open("thread.txt","r+")
strno=file.read(10)
intno=int(strno)
file.close()
if intno!=g:
print "mismatch";
for ip in iplist:
for i in range(0,intno):
t2=threading.Thread(target=task,args=(i,ip))
t2.daemon=True
t2.start()
g=intno
else:
print "match";
time.sleep(10)
sys.exit()
sys.exit(0)
quit()
os._exit(0)
raise
except:
print "I/O error";
raise
|
runtime_manager_dialog.py | #!/usr/bin/env python
"""
Copyright (c) 2015, Nagoya University
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Autoware nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import wx
import wx.lib.buttons
import wx.lib.agw.customtreectrl as CT
import gettext
import os
import re
import sys
import fcntl
import threading
import Queue
import time
import socket
import struct
import shlex
import signal
import subprocess
import psutil
import pty
import yaml
import datetime
import syslog
import rtmgr
import rospy
import std_msgs.msg
from std_msgs.msg import Bool
from decimal import Decimal
from runtime_manager.msg import ConfigRcnn
from runtime_manager.msg import ConfigSsd
from runtime_manager.msg import ConfigCarDpm
from runtime_manager.msg import ConfigPedestrianDpm
from runtime_manager.msg import ConfigNdt
from runtime_manager.msg import ConfigNdtMapping
from runtime_manager.msg import ConfigNdtMappingOutput
from runtime_manager.msg import ConfigICP
from runtime_manager.msg import ConfigVoxelGridFilter
from runtime_manager.msg import ConfigRingFilter
from runtime_manager.msg import ConfigDistanceFilter
from runtime_manager.msg import ConfigRandomFilter
from runtime_manager.msg import ConfigWaypointFollower
from runtime_manager.msg import ConfigTwistFilter
from runtime_manager.msg import ConfigVelocitySet
from runtime_manager.msg import ConfigLatticeVelocitySet
from runtime_manager.msg import ConfigCarKf
from runtime_manager.msg import ConfigPedestrianKf
from runtime_manager.msg import ConfigLaneRule
from runtime_manager.msg import ConfigLaneSelect
from runtime_manager.msg import ConfigLaneStop
from runtime_manager.msg import ConfigCarFusion
from runtime_manager.msg import ConfigPedestrianFusion
from tablet_socket_msgs.msg import mode_cmd
from tablet_socket_msgs.msg import gear_cmd
from tablet_socket_msgs.msg import Waypoint
from tablet_socket_msgs.msg import route_cmd
from ndt_localizer.msg import ndt_stat
from geometry_msgs.msg import TwistStamped
from geometry_msgs.msg import Vector3
from runtime_manager.msg import accel_cmd
from runtime_manager.msg import steer_cmd
from runtime_manager.msg import brake_cmd
from runtime_manager.msg import indicator_cmd
from runtime_manager.msg import lamp_cmd
from runtime_manager.msg import traffic_light
from runtime_manager.msg import adjust_xy
from types import MethodType
# Linux scheduler policy ids (as used by sched_setscheduler).
SCHED_OTHER = 0
SCHED_FIFO = 1
SCHED_RR = 2
# Unix socket used to talk to the Autoware process manager daemon.
PROC_MANAGER_SOCK="/tmp/autoware_proc_manager"
class MyFrame(rtmgr.MyFrame):
	def __init__(self, *args, **kwds):
		"""Build the Runtime Manager main frame.

		Loads the per-tab YAML definitions (qs / setup / map / sensing /
		computing / interface / data / simulation / status / topics), creates
		and wires the widgets of every tab, registers the ROS publishers and
		subscribers, and starts the background threads (top command, log
		output, topic echo).
		"""
		rtmgr.MyFrame.__init__(self, *args, **kwds)
		self.all_procs = []  # Popen objects of every launched command
		self.all_cmd_dics = []  # per-tab { widget : (cmd, proc) } dicts
		self.load_dic = self.load_yaml('param.yaml', def_ret={})  # saved parameter state
		self.config_dic = {}  # cfg_obj -> { name, obj, pdic, gdic, param, ... }
		self.Bind(wx.EVT_CLOSE, self.OnClose)
		self.params = []
		self.all_tabs = []
		self.all_th_infs = []  # thread infos, ended via th_end() on close
		self.log_que = Queue.Queue()
		self.log_que_stdout = Queue.Queue()
		self.log_que_stderr = Queue.Queue()
		self.log_que_show = Queue.Queue()
		#
		# ros
		#
		# NOTE(review): 'runime_manager' looks like a typo of 'runtime_manager',
		# but renaming the node could break external references — left as-is.
		rospy.init_node('runime_manager', anonymous=True)
		rospy.Subscriber('to_rtmgr', std_msgs.msg.String, self.RosCb)
		self.pub = rospy.Publisher('from_rtmgr', std_msgs.msg.String, queue_size=10)
		#
		# for Quick Start tab
		#
		tab = self.tab_qs
		self.all_tabs.append(tab)
		self.qs_cmd = {}
		self.all_cmd_dics.append(self.qs_cmd)
		self.qs_dic = self.load_yaml('qs.yaml')
		self.add_params(self.qs_dic.get('params', []))
		self.setup_buttons(self.qs_dic.get('buttons', {}), self.qs_cmd)
		# Subscribe to each stage's execution-time topics.  Key format is
		# "<topic>.<msg_class>.<attr>"; msg/attr default to Float32 / 'data'.
		for nm in [ 'map', 'sensing', 'localization', 'detection', 'mission_planning', 'motion_planning' ]:
			for key in self.qs_dic.get('exec_time', {}).get(nm, {}).keys():
				(topic, msg, attr) = ( key.split('.') + [ None, None, None ] )[:3]
				msg = globals().get(msg)
				msg = msg if msg else std_msgs.msg.Float32
				attr = attr if attr else 'data'
				rospy.Subscriber(topic, msg, self.exec_time_callback, callback_args=(key, attr))
		#
		# for Setup tab
		#
		tab = self.tab_setup
		self.all_tabs.append(tab)
		setup_cmd = {}
		self.all_cmd_dics.append(setup_cmd)
		dic = self.load_yaml('setup.yaml')
		self.add_params(dic.get('params', []))
		self.setup_buttons(dic.get('buttons', {}), setup_cmd)
		#
		# for Map tab
		#
		tab = self.tab_map
		self.all_tabs.append(tab)
		self.map_cmd = {}
		self.all_cmd_dics.append(self.map_cmd)
		self.map_dic = self.load_yaml('map.yaml')
		self.add_params(self.map_dic.get('params', []))
		self.setup_buttons(self.map_dic.get('buttons', {}), self.map_cmd)
		self.tc_point_cloud = self.obj_to_varpanel_tc(self.button_point_cloud, 'path_pcd')
		self.tc_area_list = self.obj_to_varpanel_tc(self.button_area_lists, 'path_area_list')
		# Replace the generated static label with a progress-bar label that is
		# animated while point-cloud files are loading.
		self.label_point_cloud_bar.Destroy()
		self.label_point_cloud_bar = BarLabel(tab, ' Loading... ')
		self.label_point_cloud_bar.Enable(False)
		def hook1G(args):
			# Warn when any selected PCD file exceeds 1 GB.
			for f in args.get('func')().split(','):
				sz = os.path.getsize(f)
				if sz > 1024*1024*1024:
					wx.MessageBox("Over 1GB\n\n{}\n({:,})".format(f, sz), caption='Warning')
		args = { 'func':self.tc_point_cloud.GetValue }
		hook_var = { 'hook':hook1G, 'args':args, 'flags':['every_time'] }
		obj = self.button_point_cloud
		gdic_v = self.obj_to_gdic(obj, {}).get('path_pcd', {})
		gdic_v['hook_var'] = hook_var
		#
		# for Sensing tab
		#
		tab = self.tab_sensing
		self.all_tabs.append(tab)
		self.drv_probe_cmd = {}
		self.sensing_cmd = {}
		self.all_cmd_dics.append(self.sensing_cmd)
		dic = self.load_yaml('sensing.yaml')
		self.add_params(dic.get('params', []))
		self.create_checkboxes(dic, self.panel_sensing, None, self.drv_probe_cmd, self.sensing_cmd, self.OnSensingDriver)
		self.setup_buttons(dic.get('buttons', {}), self.sensing_cmd)
		#self.timer = wx.Timer(self)
		#self.Bind(wx.EVT_TIMER, self.OnProbe, self.timer)
		#self.probe_interval = 10*1000
		#if self.checkbox_auto_probe.GetValue():
		#	self.OnProbe(None)
		#	self.timer.Start(self.probe_interval)
		self.dlg_rosbag_record = MyDialogRosbagRecord(self, cmd_dic=self.sensing_cmd)
		buttons_color_hdr_setup(self.dlg_rosbag_record)
		sense_cmds_dic = dic.get('cmds', {})  # used below to build the Sensing tree
		#
		# for Computing tab
		#
		tab = self.tab_computing
		self.all_tabs.append(tab)
		parent = self.tree_ctrl_0.GetParent()
		# The two placeholder trees from the generated layout are destroyed and
		# rebuilt from computing.yaml.
		for i in range(2):
			self.obj_get('tree_ctrl_' + str(i)).Destroy()
		items = self.load_yaml('computing.yaml')
		self.add_params(items.get('params', []))
		self.sys_gdic = items.get('sys_gui')
		self.sys_gdic['update_func'] = self.update_func
		self.computing_cmd = {}
		self.all_cmd_dics.append(self.computing_cmd)
		for i in range(2):
			tree_ctrl = self.create_tree(parent, items['subs'][i], None, None, self.computing_cmd)
			tree_ctrl.ExpandAll()
			tree_ctrl.SetBackgroundColour(wx.NullColour)
			setattr(self, 'tree_ctrl_' + str(i), tree_ctrl)
		self.Bind(CT.EVT_TREE_ITEM_CHECKED, self.OnTreeChecked)
		self.setup_buttons(items.get('buttons', {}), self.computing_cmd)
		#
		# for Sensing tab (cmds)
		#
		parent = self.tree_ctrl_sense.GetParent()
		self.tree_ctrl_sense.Destroy()
		tree_ctrl = self.create_tree(parent, sense_cmds_dic, None, None, self.sensing_cmd)
		tree_ctrl.ExpandAll()
		tree_ctrl.SetBackgroundColour(wx.NullColour)
		self.tree_ctrl_sense = tree_ctrl
		#
		# for Interface tab
		#
		tab = self.tab_interface
		self.all_tabs.append(tab)
		self.interface_cmd = {}
		self.all_cmd_dics.append(self.interface_cmd)
		self.interface_dic = self.load_yaml('interface.yaml')
		self.add_params(self.interface_dic.get('params', []))
		self.setup_buttons(self.interface_dic.get('buttons', {}), self.interface_cmd)
		self.setup_buttons(self.interface_dic.get('checkboxs', {}), self.interface_cmd)
		# Build the 'control_check' parameter panels, stacked vertically.
		szr = wx.BoxSizer(wx.VERTICAL)
		for cc in self.interface_dic.get('control_check', []):
			pdic = {}
			prm = self.get_param(cc.get('param'))
			for var in prm['vars']:
				pdic[ var['name'] ] = var['v']
			gdic = self.gdic_get_1st(cc)
			panel = ParamPanel(self.panel_interface_cc, frame=self, pdic=pdic, gdic=gdic, prm=prm)
			szr.Add(panel, 0, wx.EXPAND)
		self.panel_interface_cc.SetSizer(szr)
		#
		# for Database tab
		#
		tab = self.tab_database
		self.all_tabs.append(tab)
		self.data_cmd = {}
		self.all_cmd_dics.append(self.data_cmd)
		dic = self.load_yaml('data.yaml')
		self.add_params(dic.get('params', []))
		parent = self.tree_ctrl_data.GetParent()
		self.tree_ctrl_data.Destroy()
		tree_ctrl = self.create_tree(parent, dic, None, None, self.data_cmd)
		tree_ctrl.ExpandAll()
		tree_ctrl.SetBackgroundColour(wx.NullColour)
		self.tree_ctrl_data = tree_ctrl
		#self.setup_config_param_pdic()
		if 'buttons' in dic:
			self.setup_buttons(dic['buttons'], self.data_cmd)
		#
		# for Simulation Tab
		#
		tab = self.tab_simulation
		self.all_tabs.append(tab)
		self.simulation_cmd = {}
		self.all_cmd_dics.append(self.simulation_cmd)
		dic = self.load_yaml('simulation.yaml')
		self.add_params(dic.get('params', []))
		self.setup_buttons(dic.get('buttons'), self.simulation_cmd)
		btn = self.button_play_rosbag_play
		# setup for rosbag info: refresh the info display whenever the rosbag
		# file path changes (and once now for the saved value).
		gdic = self.obj_to_gdic(btn, {})
		gdic_v = dic_getset(gdic, 'file', {})
		gdic_v['update_hook'] = self.rosbag_info_hook
		tc = self.obj_to_varpanel_tc(btn, 'file')
		if tc:
			self.rosbag_info_hook( tc.GetValue() )
		#vp = self.obj_to_varpanel(btn, 'sim_time')
		#self.checkbox_sim_time = vp.obj
		#try:
		#	cmd = ['rosparam', 'get', '/use_sim_time']
		#	if subprocess.check_output(cmd, stderr=open(os.devnull, 'wb')).strip() == 'true':
		#		self.checkbox_sim_time.SetValue(True)
		#except subprocess.CalledProcessError:
		#	pass
		self.label_rosbag_play_bar.Destroy()
		self.label_rosbag_play_bar = BarLabel(tab, ' Playing... ')
		self.label_rosbag_play_bar.Enable(False)
		#
		# for Status tab
		#
		tab = self.tab_status
		self.all_tabs.append(tab)
		self.status_cmd = {}
		self.all_cmd_dics.append(self.status_cmd)
		self.status_dic = self.load_yaml('status.yaml')
		self.add_params(self.status_dic.get('params', []))
		self.setup_buttons(self.status_dic.get('buttons', {}), self.status_cmd)
		font = wx.Font(10, wx.FONTFAMILY_MODERN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
		self.label_top_cmd.SetFont(font)  # fixed-width font for top(1) output
		#
		# for Topics tab
		#
		tab = self.tab_topics
		self.all_tabs.append(tab)
		#
		# for All
		#
		self.bitmap_logo.Destroy()
		# bm = scaled_bitmap(wx.Bitmap(rtmgr_src_dir() + 'autoware_logo_1.png'), 0.2)
		bm = scaled_bitmap(wx.Bitmap(rtmgr_src_dir() + 'nogoya_university.png'), 0.35)
		self.bitmap_logo = wx.StaticBitmap(self, wx.ID_ANY, bm)
		rtmgr.MyFrame.__do_layout(self)  # parent layout; our own __do_layout is a no-op
		cond = lambda s : s.startswith('tab_')
		self.tab_names = [ self.name_get_cond(tab, cond=cond, def_ret='').replace('tab_', '', 1) for tab in self.all_tabs ]
		# Buttons duplicated on several tabs (e.g. button_rviz_<tab>) are
		# grouped so toggling one keeps its aliases in sync.
		new_btn_grps = ( lambda btn_names, tab_names=self.tab_names :
			[ [ self.obj_get('button_{}_{}'.format(bn, tn)) for tn in tab_names ] for bn in btn_names ] )
		self.alias_grps = new_btn_grps( ('rosbag', 'rviz', 'rqt') )
		self.alias_grps += new_btn_grps( ('android_tablet', 'oculus_rift', 'vehicle_gateway', 'auto_pilot'),
						 ('qs', 'interface') )
		for grp in self.alias_grps:
			wx.CallAfter(self.alias_sync, get_top(grp))
			s = get_tooltip_obj(grp[0])
			if s:
				for obj in grp[1:]:
					set_tooltip_str(obj, s)
		# Topics tab (need, after layout for sizer)
		self.topics_dic = self.load_yaml('topics.yaml')
		self.topics_list = []
		self.topics_echo_curr_topic = None
		self.topics_echo_proc = None
		self.topics_echo_thinf = None
		self.topics_echo_que = Queue.Queue()
		self.topics_echo_sum = 0
		thinf = th_start(self.topics_echo_show_th)
		self.all_th_infs.append(thinf)
		self.refresh_topics_list()
		# waypoint
		self.route_cmd_waypoint = [ Waypoint(0,0), Waypoint(0,0) ]
		rospy.Subscriber('route_cmd', route_cmd, self.route_cmd_callback)
		# topic /xxx_stat : gnss/pmap/vmap/lf status flags shown in the GUI
		self.stat_dic = {}
		for k in [ 'gnss', 'pmap', 'vmap', 'lf' ]:
			self.stat_dic[k] = False
			name = k + '_stat'
			rospy.Subscriber(name, std_msgs.msg.Bool, self.stat_callback, callback_args=k)
		# top command thread setup: per-CPU and memory info bars on the Status tab
		toprc = os.path.expanduser('~/.toprc')
		backup = os.path.expanduser('~/.toprc-autoware-backup')
		self.toprc_setup(toprc, backup)
		cpu_ibls = [ InfoBarLabel(self, 'CPU'+str(i)) for i in range(get_cpu_count())]
		sz = sizer_wrap(cpu_ibls, wx.HORIZONTAL, 1, wx.EXPAND, 0)
		self.sizer_cpuinfo.Add(sz, 8, wx.ALL | wx.EXPAND, 4)
		self.lb_top5 = []
		for i in range(5):
			lb = wx.StaticText(self, wx.ID_ANY, '')
			change_font_point_by_rate(lb, 0.75)
			self.lb_top5.append(lb)
		line = wx.StaticLine(self, wx.ID_ANY)
		ibl = InfoBarLabel(self, 'Memory', bar_orient=wx.HORIZONTAL)
		szr = sizer_wrap(self.lb_top5 + [ line, ibl ], flag=wx.EXPAND | wx.FIXED_MINSIZE)
		self.sizer_cpuinfo.Add(szr, 2, wx.ALL | wx.EXPAND, 4)
		th_arg = { 'setting':self.status_dic.get('top_cmd_setting', {}),
			   'cpu_ibls':cpu_ibls, 'mem_ibl':ibl,
			   'toprc':toprc, 'backup':backup }
		thinf = th_start(self.top_cmd_th, th_arg)
		self.all_th_infs.append(thinf)
		# ps command thread
		#thinf = th_start(self.ps_cmd_th, { 'interval':5 })
		#self.all_th_infs.append(thinf)
		# logout thread: drain the stdout/stderr/general log queues into the GUI
		interval = self.status_dic.get('gui_update_interval_ms', 100) * 0.001
		tc = self.text_ctrl_stdout
		thinf = th_start(self.logout_th, { 'que':self.log_que_stdout, 'interval':interval, 'tc':tc } )
		self.all_th_infs.append(thinf)
		thinf = th_start(self.logout_th, { 'que':self.log_que_stderr, 'interval':interval, 'tc':tc } )
		self.all_th_infs.append(thinf)
		thinf = th_start(self.logout_th, { 'que':self.log_que, 'interval':interval, 'tc':tc } )
		self.all_th_infs.append(thinf)
		if interval > 0:
			thinf = th_start(self.logshow_th, { 'que':self.log_que_show , 'interval':interval , 'tc':tc })
			self.all_th_infs.append(thinf)
		else:
			self.checkbox_stdout.Enable(False)
			tc.Enable(False)
		# mkdir: make sure the default data directories exist
		paths = [ os.environ['HOME'] + '/.autoware/data/tf',
			  os.environ['HOME'] + '/.autoware/data/map/pointcloud_map',
			  os.environ['HOME'] + '/.autoware/data/map/vector_map' ]
		for path in paths:
			if not os.path.exists(path):
				subprocess.call([ 'mkdir', '-p', path ])
		# icon
		bm = scaled_bitmap(wx.Bitmap(rtmgr_src_dir() + 'PolyU_Logo.png'), 0.5)
		icon = wx.EmptyIcon()
		icon.CopyFromBitmap(bm)
		self.SetIcon(icon)
		wx.CallAfter( self.boot_booted_cmds )
	def __do_layout(self):
		# Intentionally a no-op: __init__ calls rtmgr.MyFrame.__do_layout()
		# itself, after the generated widgets have been replaced.
		pass
	def boot_booted_cmds(self):
		"""Offer to relaunch the commands that were running at last shutdown.

		Reads the 'booted_cmds' section of param.yaml (written by
		save_param_yaml); when the feature is enabled, shows a multi-choice
		dialog of the previously running commands and toggles on the ones the
		user keeps selected.
		"""
		if not self.load_dic.get('booted_cmds', {}).get('enable', False):
			return
		names = self.load_dic.get('booted_cmds', {}).get('names', [])
		lst = [ ( name, self.cfg_dic( { 'name': name } ).get('obj') ) for name in names ]
		lst = [ (name, obj) for (name, obj) in lst if obj ]  # drop names with no widget
		if not lst:
			return
		choices = [ obj.GetLabel() if hasattr(obj, 'GetLabel') else name for (name, obj) in lst ]
		dlg = wx.MultiChoiceDialog(self, 'boot command ?', '', choices)
		dlg.SetSelections( range( len(names) ) )  # pre-select everything
		if dlg.ShowModal() != wx.ID_OK:
			return
		for i in dlg.GetSelections():
			(_, obj) = lst[i]
			post_evt_toggle_obj(self, obj, True)
	def OnClose(self, event):
		"""Close handler: confirm via quit_select(), then tear everything down."""
		if self.quit_select() != 'quit':
			return
		# kill_all: terminate every launched command before destroying the GUI
		for proc in self.all_procs[:]: # copy; launch_kill mutates all_procs
			(_, obj) = self.proc_to_cmd_dic_obj(proc)
			self.launch_kill(False, 'dmy', proc, obj=obj)
		shutdown_proc_manager()
		# optional site-specific shutdown script
		shutdown_sh = self.get_autoware_dir() + '/ros/shutdown'
		if os.path.exists(shutdown_sh):
			os.system(shutdown_sh)
		for thinf in self.all_th_infs:
			th_end(thinf)
		self.Destroy()
	def quit_select(self):
		"""Decide whether to quit; returns 'quit' or 'not quit'.

		First close event: arm a 2-second watchdog timer and ignore the event.
		If the timer expires it posts another close event, which re-enters here
		with quit_timer == 'timeout' and auto-saves and quits.  A second close
		before the timeout cancels the watchdog and shows the command menu
		(save / quit / reload / toggle booted-cmds).
		"""
		def timer_func():
			if self.quit_timer:
				self.quit_timer = 'timeout'
				evt = wx.PyCommandEvent( wx.EVT_CLOSE.typeId, self.GetId() )
				wx.PostEvent(self, evt)  # re-enter OnClose from the GUI thread
		if not hasattr(self, 'quit_timer') or not self.quit_timer:
			self.quit_timer = threading.Timer(2.0, timer_func)
			self.quit_timer.start()
			return 'not quit'
		if self.quit_timer == 'timeout':
			self.save_param_yaml()
			return 'quit'
		self.quit_timer.cancel()
		self.quit_timer = None
		lst = [
			( 'Save and Quit', [ 'save', 'quit' ] ),
			( 'Save to param.yaml', [ 'save' ] ),
			( 'Quit without saving', [ 'quit' ] ),
			( 'Reload computing.yaml', [ 'reload' ] ),
			( self.get_booted_cmds_enable_msg()[1], [ 'toggle_booted_cmds' ] ),
		]
		choices = [ s for (s, _) in lst ]
		dlg = wx.SingleChoiceDialog(self, 'select command', '', choices)
		if dlg.ShowModal() != wx.ID_OK:
			return 'not quit'
		i = dlg.GetSelection() # index of choices
		(_, f) = lst[i]
		if 'save' in f:
			self.save_param_yaml()
		if 'reload' in f:
			self.reload_computing_yaml()
		if 'toggle_booted_cmds' in f:
			self.toggle_booted_cmds()
		return 'quit' if 'quit' in f else 'not quit'
def save_param_yaml(self):
save_dic = {}
for (name, pdic) in self.load_dic.items():
if pdic and pdic != {}:
prm = self.cfg_dic( {'name':name, 'pdic':pdic} ).get('param', {})
no_saves = prm.get('no_save_vars', [])
pdic = pdic.copy()
for k in pdic.keys():
if k in no_saves:
del pdic[k]
save_dic[name] = pdic
names = []
for proc in self.all_procs:
(_, obj) = self.proc_to_cmd_dic_obj(proc)
name = self.cfg_dic( { 'obj': obj } ).get('name')
names.append(name)
if 'booted_cmds' not in save_dic:
save_dic['booted_cmds'] = {}
save_dic.get('booted_cmds')['names'] = names
if save_dic != {}:
dir = rtmgr_src_dir()
print('saving param.yaml')
f = open(dir + 'param.yaml', 'w')
s = yaml.dump(save_dic, default_flow_style=False)
#print 'save\n', s # for debug
f.write(s)
f.close()
	def reload_computing_yaml(self):
		"""Rebuild the Computing tab from computing.yaml without restarting.

		Remembers running processes by config name, removes the old tree
		controls / params / config_dic entries, rebuilds both trees from the
		freshly loaded YAML, then re-attaches the remembered processes to the
		new widgets so their checkboxes still show as running.
		"""
		parent = self.tree_ctrl_0.GetParent()
		sizer = self.tree_ctrl_0.GetContainingSizer()
		items = self.load_yaml('computing.yaml')
		# backup cmd_dic proc : (config name, Popen) of every running command
		cmd_dic = self.computing_cmd
		to_name = lambda obj: next( ( d.get('name') for d in self.config_dic.values() if d.get('obj') == obj ), None )
		procs = [ ( to_name(obj), proc ) for (obj, (cmd, proc)) in cmd_dic.items() if proc ]
		# remove old tree ctrl
		for i in range(2):
			self.obj_get('tree_ctrl_' + str(i)).Destroy()
		# remove old params that will be re-added from the new YAML
		names = [ prm.get('name') for prm in items.get('params', []) ]
		for prm in self.params[:]: # copy
			if prm.get('name') in names:
				self.params.remove(prm)
		self.add_params(items.get('params', []))
		# overwrite sys_gdic, re-pointing every config entry that shared the old one
		old = self.sys_gdic
		self.sys_gdic = items.get('sys_gui')
		self.sys_gdic['update_func'] = self.update_func
		for d in self.config_dic.values():
			if d.get('gdic') == old:
				d['gdic'] = self.sys_gdic
		# listing update names : every leaf name in 'subs' plus all button keys
		def subs_names(subs):
			f2 = lambda s: subs_names( s.get('subs') ) if 'subs' in s else [ s.get('name') ]
			f = lambda lst, s: lst + f2(s)
			return reduce(f, subs, [])
		names = subs_names( items.get('subs') )
		names += items.get('buttons', {}).keys()
		# remove old data of name in config_dic
		for (k, v) in self.config_dic.items():
			if v.get('name') in names:
				self.config_dic.pop(k, None)
		# rebuild tree ctrl
		cmd_dic.clear()
		for i in range(2):
			tree_ctrl = self.create_tree(parent, items['subs'][i], None, None, self.computing_cmd)
			tree_ctrl.ExpandAll()
			tree_ctrl.SetBackgroundColour(wx.NullColour)
			setattr(self, 'tree_ctrl_' + str(i), tree_ctrl)
			sizer.Add(tree_ctrl, 1, wx.EXPAND, 0)
		self.setup_buttons(items.get('buttons', {}), self.computing_cmd)
		# restore cmd_dic proc : re-attach remembered processes to the new widgets
		to_obj = lambda name: next( ( d.get('obj') for d in self.config_dic.values() if d.get('name') == name ), None )
		for (name, proc) in procs:
			obj = to_obj(name)
			if obj and obj in cmd_dic:
				cmd_dic[ obj ] = ( cmd_dic.get(obj)[0], proc )
				set_val(obj, True)
		parent.Layout()
def toggle_booted_cmds(self):
(enable, msg) = self.get_booted_cmds_enable_msg()
style = wx.OK | wx.CANCEL | wx.ICON_QUESTION
dlg = wx.MessageDialog(self, msg, '', style)
if dlg.ShowModal() != wx.ID_OK:
return
if 'booted_cmds' not in self.load_dic:
self.load_dic['booted_cmds'] = {}
self.load_dic.get('booted_cmds')['enable'] = not enable
def get_booted_cmds_enable_msg(self):
enable = self.load_dic.get('booted_cmds', {}).get('enable', False)
s = 'Enable' if not enable else 'Disable'
msg = '{} booted commands menu ?'.format(s)
return (enable, msg)
def RosCb(self, data):
print('recv topic msg : ' + data.data)
r = rospy.Rate(10)
rospy.is_shutdown()
r.sleep()
self.pub.publish(data.data)
r.sleep()
	def setup_buttons(self, d, run_dic):
		"""Wire YAML button/checkbox definitions to their widgets.

		d maps a widget suffix (e.g. 'rviz') to its definition dict; run_dic
		receives {widget: (command, None)} for entries carrying 'run'.
		Suffixes with no real widget in the generated layout get a StrValObj
		stand-in so the rest of the code can treat them uniformly.  Entries
		with 'param' have their pdic filled with declared defaults, their
		'restore' hooks evaluated, and their config info / parameter panels
		registered.
		"""
		for (k,d2) in d.items():
			pfs = [ 'button_', 'checkbox_' ]
			obj = next( (self.obj_get(pf+k) for pf in pfs if self.obj_get(pf+k)), None)
			if not obj:
				# no real widget: install a named value-holder stand-in
				s = 'button_' + k
				obj = StrValObj(s, False)
				setattr(self, s, obj)
			if not d2 or type(d2) is not dict:
				continue
			if 'run' in d2:
				run_dic[obj] = (d2['run'], None)
			set_tooltip(obj, d2)
			gdic = self.gdic_get_1st(d2)
			if 'param' in d2:
				pdic = self.load_dic_pdic_setup(k, d2)
				prm = self.get_param(d2.get('param'))
				for var in prm.get('vars'):
					name = var.get('name')
					if name not in pdic and 'v' in var:
						pdic[name] = var.get('v')  # fill missing values with declared defaults
				for (name, v) in pdic.items():
					# 'restore' is lambda source text from the YAML, eval'd here
					restore = eval( gdic.get(name, {}).get('restore', 'lambda a : None') )
					restore(v)
				self.add_cfg_info(obj, obj, k, pdic, gdic, False, prm)
				pnls = [ gdic.get(var.get('name'), {}).get('panel') for var in prm.get('vars') ]
				for pnl in [ gdic.get('panel') ] + pnls:
					if pnl:
						self.set_param_panel(obj, eval_if_str(self, pnl))
			else:
				self.add_cfg_info(obj, obj, k, None, gdic, False, None)
def OnGear(self, event):
grp = { self.button_statchk_d : 1,
self.button_statchk_r : 2,
self.button_statchk_b : 3,
self.button_statchk_n : 4 }
self.radio_action(event, grp.keys())
v = grp.get(event.GetEventObject())
if v is not None:
pub = rospy.Publisher('gear_cmd', gear_cmd, queue_size=10)
pub.publish(gear_cmd(gear=v))
def OnLamp(self, event):
pub = rospy.Publisher('lamp_cmd', lamp_cmd, queue_size=10)
msg = lamp_cmd()
msg.l = self.button_statchk_lamp_l.GetValue()
msg.r = self.button_statchk_lamp_r.GetValue()
pub.publish(msg)
def OnIndi(self, event):
pub = rospy.Publisher('indicator_cmd', indicator_cmd, queue_size=10)
msg = indicator_cmd()
msg.l = self.button_statchk_indi_l.GetValue()
msg.r = self.button_statchk_indi_r.GetValue()
pub.publish(msg)
def OnAutoPilot(self, event):
obj = event.GetEventObject()
self.alias_sync(obj)
v = obj.GetValue()
pub = rospy.Publisher('mode_cmd', mode_cmd, queue_size=10)
pub.publish(mode_cmd(mode=v))
def radio_action(self, event, grp):
push = event.GetEventObject()
for b in grp:
v = b.GetValue()
act = None
act = True if b is push and not v else act
act = False if b is not push and v else act
if act is not None:
set_val(b, act)
	def stat_label_off(self, obj):
		"""Clear the status labels associated with a stopped command.

		For each topic key in the widget's 'stat_topic' list, either resets
		its exec_time entry (keys known to qs.yaml's exec_time table) or fakes
		a False stat_callback.  If obj is one of the Quick Start stage
		buttons, that stage's whole exec_time table is cleared too.
		"""
		qs_nms = [ 'map', 'sensing', 'localization', 'detection', 'mission_planning', 'motion_planning' ]
		exec_time = self.qs_dic.get('exec_time', {})
		gdic = self.obj_to_gdic(obj, {})
		msg = std_msgs.msg.Bool(False)
		for k in gdic.get('stat_topic', []):
			# exec_time off
			if next( (dic for dic in exec_time.values() if k in dic), None):
				self.exec_time_callback(std_msgs.msg.Float32(0), (k, 'data'))
			else:
				self.stat_callback(msg, k)
		# Quick Start tab, exec_time off
		obj_nm = self.name_get(obj)
		nm = next( (nm for nm in qs_nms if 'button_' + nm + '_qs' == obj_nm), None)
		for key in exec_time.get(nm, {}):
			self.exec_time_callback(std_msgs.msg.Float32(0), (key, 'data'))
	def route_cmd_callback(self, data):
		# Cache the latest route waypoints received on 'route_cmd'.
		self.route_cmd_waypoint = data.point
def stat_callback(self, msg, k):
self.stat_dic[k] = msg.data
if k == 'pmap':
v = self.stat_dic.get(k)
wx.CallAfter(self.label_point_cloud.SetLabel, 'OK' if v else '')
if k in [ 'pmap', 'vmap' ]:
v = self.stat_dic.get('pmap') and self.stat_dic.get('vmap')
wx.CallAfter(self.label_map_qs.SetLabel, 'OK' if v else '')
def exec_time_callback(self, msg, (key, attr)):
msec = int(getattr(msg, attr, 0))
exec_time = self.qs_dic.get('exec_time', {})
(nm, dic) = next( ( (nm, dic) for (nm, dic) in exec_time.items() if key in dic), None)
dic[ key ] = msec
lb = self.obj_get('label_' + nm + '_qs')
if lb:
sum = reduce( lambda a,b:a+(b if b else 0), dic.values(), 0 )
wx.CallAfter(lb.SetLabel, str(sum)+' ms' if sum > 0 else '')
# update Status tab
lb = ''
for nm in [ 'map', 'sensing', 'localization', 'detection', 'mission_planning', 'motion_planning' ]:
dic = exec_time.get(nm, {})
sum = reduce( lambda a,b:a+(b if b else 0), dic.values(), 0 )
if sum > 0:
s = nm + ' : ' + str(sum) + ' ms'
lb += s + '\n'
wx.CallAfter(self.label_node_time.SetLabel, lb)
wx.CallAfter(self.label_node_time.GetParent().FitInside)
#
# Setup tab
#
def OnSetupLocalizer(self, event):
obj = self.button_setup_tf
(pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
self.update_func(pdic, gdic, prm)
#
# Computing Tab
#
	def OnTreeMotion(self, event):
		"""Mouse-move over a computing tree: arm a delayed tooltip.

		Shows the item's stored tooltip text (its item data) only while the
		cursor is over the label area itself, excluding the item's embedded
		window (e.g. the [config] link).
		"""
		tree = event.GetEventObject()
		pt = event.GetPosition()
		event.Skip()
		(item, flags) = tree.HitTest(pt)
		if flags & CT.TREE_HITTEST_ONITEMLABEL == 0:
			return
		text = item.GetData()
		if not text:
			return
		x = item.GetX()
		y = item.GetY()
		w = item.GetWidth()
		h = item.GetHeight()
		(x, y) = tree.CalcScrolledPosition(x, y)
		iw = tree.GetItemWindow(item)
		w -= iw.GetSize()[0] if iw else 0  # exclude the embedded widget's width
		if not wx.Rect(x, y, w, h).Contains(pt):
			return
		(x, y) = tree.ClientToScreen((x, y))
		self.tip_info = (tree, text, wx.Rect(x, y, w, h))
		if getattr(self, 'tip_timer', None) is None:
			self.tip_timer = wx.Timer(self)
			self.Bind(wx.EVT_TIMER, self.OnTipTimer, self.tip_timer)
		self.tip_timer.Start(200, oneShot=True)  # show tooltip after 200 ms hover
def OnTipTimer(self, event):
if getattr(self, 'tip_info', None):
(tree, text, rect) = self.tip_info
(w, h) = self.GetSize()
wx.TipWindow(tree, text, maxLength=w, rectBound=rect)
	def OnTreeChecked(self, event):
		# Computing/Sensing tree item checked: launch or kill its command.
		self.OnChecked_obj(event.GetItem())
	def OnChecked_obj(self, obj):
		# Common entry point for any check-style widget toggle.
		self.OnLaunchKill_obj(obj)
	def OnHyperlinked(self, event):
		# A [config] hyperlink was clicked.
		self.OnHyperlinked_obj(event.GetEventObject())
def OnHyperlinked_obj(self, obj):
(pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
if pdic is None or prm is None:
return
dic_list_push(gdic, 'dialog_type', 'config')
klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
show_modal(dlg)
dic_list_pop(gdic, 'dialog_type')
	def obj_to_add_args(self, obj, msg_box=True):
		"""Build the command-line argument list for a launch widget.

		Walks the widget's param vars that carry 'cmd_param' and assembles
		'<dash><name><delim><value>' tokens.  May pop a camera-selection or
		file-open dialog first (both suppressed when msg_box is False).

		Returns a list of argument strings, None when there is nothing to
		add, or False when the user cancelled / a required value is missing.
		"""
		(pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
		if pdic is None or prm is None:
			return None
		if 'need_camera_info' in gdic.get('flags', []) and msg_box:
			ids = self.camera_ids()
			if ids:
				# let the user pick a camera id via a modal dialog
				var = self.get_var(prm, 'camera_id', {})
				var['choices'] = ids
				dic_list_push(gdic, 'dialog_type', 'sel_cam')
				klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
				dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
				dlg_ret = show_modal(dlg)
				dic_list_pop(gdic, 'dialog_type')
				if dlg_ret != 0:
					return False
			else:
				pdic['camera_id'] = ''
		if 'open_dialog' in gdic.get('flags', []) and msg_box:
			dic_list_push(gdic, 'dialog_type', 'open')
			klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
			dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
			dlg_ret = show_modal(dlg)
			dic_list_pop(gdic, 'dialog_type')
			if dlg_ret != 0:
				return False
		self.update_func(pdic, gdic, prm)
		s = ''
		# collect vars with a cmd_param, move 'tail' vars to the end, then
		# drop hidden/disabled ones
		vars = []
		for var in prm.get('vars'):
			cmd_param = var.get('cmd_param')
			if cmd_param:
				vars.append(var)
		for var in vars[:]: # copy
			cmd_param = var.get('cmd_param')
			if cmd_param.get('tail'):
				vars.remove(var)
				vars.append(var)
		for var in vars[:]: # copy
			name = var.get('name')
			flags = gdic.get(name, {}).get('flags', [])
			if 'hide' in flags or 'disable' in flags:
				vars.remove(var)
		for var in vars:
			cmd_param = var.get('cmd_param')
			name = var.get('name')
			v = pdic.get(name)
			if (v is None or v == '') and 'default' in cmd_param:
				v = cmd_param.get('default')
			if dic_eval_if_str(self, cmd_param, 'must') and (v is None or v == ''):
				print 'cmd_param', name, 'is required'
				if msg_box:
					wx.MessageBox('cmd_param ' + name + ' is required')
				return False
			if dic_eval_if_str(self, cmd_param, 'only_enable') and not v:
				continue
			if dic_eval_if_str(self, cmd_param, 'only_disable') and v:
				continue
			name = cmd_param.get('var_name', name)
			unpack = cmd_param.get('unpack')
			if unpack is not None:
				v = ' '.join( v.split(unpack) )
			add = ''
			dash = cmd_param.get('dash')
			if dash is not None:
				add += dash + name
			delim = cmd_param.get('delim')
			if delim is not None:
				str_v = str(v)
				if var.get('kind') is None:
					str_v = adjust_num_str(str_v)
				if var.get('kind') == 'path':
					# expand ~, $VARS and optionally make the path relative
					str_v = path_expand_cmd(str_v)
					str_v = os.path.expandvars(os.path.expanduser(str_v))
					relpath_from = var.get('relpath_from')
					if relpath_from:
						relpath_from = path_expand_cmd(relpath_from)
						relpath_from = os.path.expandvars(os.path.expanduser(relpath_from))
						str_v = os.path.relpath(str_v, relpath_from)
				add += delim + str_v
			if add != '':
				s += add + ' '
		return s.strip(' ').split(' ') if s != '' else None
def obj_to_pdic_gdic_prm(self, obj, sys=False):
info = self.config_dic.get(obj)
if info is None:
sys_prm = self.get_param('sys')
prm_chk = lambda prm : prm is sys_prm if sys else prm is not sys_prm
info = next( ( v for v in self.config_dic.values() if v.get('obj') is obj and prm_chk(v.get('param')) ), None)
if info is None:
return (None, None, None)
pdic = info.get('pdic')
prm = info.get('param')
gdic = info.get('gdic')
return (pdic, gdic, prm)
def obj_to_gdic(self, obj, def_ret=None):
(_, gdic, _) = self.obj_to_pdic_gdic_prm(obj) if obj else (None, None, None)
return gdic if gdic else def_ret
def cfg_obj_dic(self, arg_dic, sys=False, def_ret=(None,{})):
sys_prm = self.get_param('sys')
prm_chk = {
True : (lambda prm : prm is sys_prm),
False : (lambda prm : prm is not sys_prm),
None : (lambda prm : True) }.get(sys)
arg_dic_chk = lambda dic: all( [ dic.get(k) == v for (k,v) in arg_dic.items() ] )
return next( ( (cfg_obj, dic) for (cfg_obj, dic) in self.config_dic.items() \
if arg_dic_chk(dic) and prm_chk(dic.get('param')) ), def_ret)
def cfg_dic(self, arg_dic, sys=False, def_ret={}):
(_, dic) = self.cfg_obj_dic(arg_dic, sys=sys, def_ret=(None, def_ret))
return dic
	def cfg_prm_to_obj(self, arg_dic, sys=False):
		# Widget ('obj') of the config entry matching arg_dic, or None.
		return self.cfg_dic(arg_dic, sys=sys).get('obj')
def name_to_pdic_gdic_prm(self, name, sys=False):
d = self.cfg_dic( {'name':name}, sys=sys )
return ( d.get('pdic'), d.get('gdic'), d.get('param') )
	def update_func(self, pdic, gdic, prm):
		"""Recompute pdic values and push them out.

		For each var: when a 'func' is declared (string source is eval'd,
		otherwise it is called), its result overwrites pdic; an empty pdic is
		additionally filled with declared defaults.  Runs 'update_hook' /
		'hook_var' hooks, publishes the param topic (if any), sets rosparams,
		refreshes depend-enables, and re-applies the CPU/scheduling settings
		of the owning process.
		"""
		pdic_empty = (pdic == {})
		for var in prm.get('vars', []):
			name = var.get('name')
			gdic_v = gdic.get(name, {})
			func = gdic_v.get('func')
			if func is None and not pdic_empty:
				continue
			v = var.get('v')
			if func is not None:
				# 'func' may be python source text from the YAML, eval'd here
				v = eval(func) if type(func) is str else func()
			pdic[ name ] = v
			hook = gdic_v.get('update_hook')
			if hook:
				hook(v)
			hook_var = gdic_v.get('hook_var', {})
			every_time = 'every_time' in hook_var.get('flags', [])
			if var == gdic.get('update_func_arg_var') or every_time:
				hook = hook_var.get('hook')
				if hook:
					hook(hook_var.get('args', {}))
		if 'pub' in prm:
			self.publish_param_topic(pdic, prm)
		self.rosparam_set(pdic, prm)
		self.update_depend_enable(pdic, gdic, prm)
		# re-apply nice / affinity / scheduling of the owning process
		d = self.cfg_dic( {'pdic':pdic, 'gdic':gdic, 'param':prm}, sys=True )
		self.update_proc_cpu(d.get('obj'), d.get('pdic'), d.get('param'))
	def update_proc_cpu(self, obj, pdic=None, prm=None):
		"""Apply nice / CPU affinity / scheduling settings to a running command.

		Reads 'cpu_chks', 'nice', 'real_time', 'policy' and 'prio' from the
		widget's system params and applies them to the launched process and
		all of its children.  Does nothing if the widget is off or has no
		process.
		"""
		if obj is None or not obj.GetValue():
			return
		(_, _, proc) = self.obj_to_cmd_dic_cmd_proc(obj)
		if proc is None:
			return
		if pdic is None or prm is None:
			(pdic, _, prm) = self.obj_to_pdic_gdic_prm(obj, sys=True)
		cpu_chks = self.param_value_get(pdic, prm, 'cpu_chks')
		cpu_chks = cpu_chks if cpu_chks else [ True for i in range(get_cpu_count()) ]  # default: all CPUs
		cpus = [ i for i in range(get_cpu_count()) if cpu_chks[i] ]
		nice = self.param_value_get(pdic, prm, 'nice', 0)
		d = { 'OTHER':SCHED_OTHER, 'FIFO':SCHED_FIFO, 'RR':SCHED_RR }
		policy = SCHED_OTHER
		priority = 0
		if self.param_value_get(pdic, prm, 'real_time', False):
			policy = d.get(self.param_value_get(pdic, prm, 'policy', 'FIFO'), SCHED_FIFO)
			priority = self.param_value_get(pdic, prm, 'prio', 0)
		procs = [ proc ] + get_proc_children(proc, r=True)  # include all descendants
		for proc in procs:
			print 'pid={}'.format(proc.pid)
			if get_proc_nice(proc) != nice:
				print 'nice {} -> {}'.format(get_proc_nice(proc), nice)
				if set_process_nice(proc, nice) is False:
					print 'Err set_process_nice()'
			if get_proc_cpu_affinity(proc) != cpus:
				print 'cpus {} -> {}'.format(get_proc_cpu_affinity(proc), cpus)
				if set_process_cpu_affinity(proc, cpus) is False:
					print 'Err set_process_cpu_affinity()'
			policy_str = next( (k for (k,v) in d.items() if v == policy), '?')
			print 'sched policy={} prio={}'.format(policy_str, priority)
			if set_scheduling_policy(proc, policy, priority) is False:
				print 'Err scheduling_policy()'
def param_value_get(self, pdic, prm, name, def_ret=None):
def_ret = self.param_default_value_get(prm, name, def_ret)
return pdic.get(name, def_ret) if pdic else def_ret
def param_default_value_get(self, prm, name, def_ret=None):
return next( (var.get('v') for var in prm.get('vars') if var.get('name') == name ), def_ret) \
if prm else def_ret
	def update_depend_enable(self, pdic, gdic, prm):
		"""Enable/disable var panels based on the value of their 'depend' var.

		A gdic entry may name another var in 'depend'; its panel is enabled
		when 'depend_bool' (lambda source text, default: truthiness) applied
		to that var's value says so.
		"""
		for var in prm.get('vars', []):
			name = var.get('name')
			gdic_v = gdic.get(name, {})
			depend = gdic_v.get('depend')
			if depend is None:
				continue
			vp = gdic_v.get('var')
			if vp is None:
				continue
			v = pdic.get(depend)
			if v is None:
				continue
			# 'depend_bool' is lambda source text from the YAML, eval'd here
			depend_bool = eval( gdic_v.get('depend_bool', 'lambda v : bool(v)') )
			v = depend_bool(v)
			enables_set(vp, 'depend', v)
	def publish_param_topic(self, pdic, prm):
		"""Publish pdic as the ROS message type declared in prm ('msg' / 'pub').

		pdic keys are dotted message paths (e.g. 'twist.angular.z'); each
		value is converted to the target slot's ROS type.  angular.z on
		/twist_cmd is negated (consumer's sign convention).  A 'stamp' flag
		fills header.stamp with the current ROS time.
		"""
		pub = prm['pub']
		klass_msg = globals()[ prm['msg'] ]
		msg = klass_msg()
		for (name, v) in pdic.items():
			if prm.get('topic') == '/twist_cmd' and name == 'twist.angular.z':
				v = -v  # sign convention differs on /twist_cmd
			(obj, attr) = msg_path_to_obj_attr(msg, name)
			if obj and attr in obj.__slots__:
				type_str = obj._slot_types[ obj.__slots__.index(attr) ]
				setattr(obj, attr, str_to_rosval(v, type_str, v))
		if 'stamp' in prm.get('flags', []):
			(obj, attr) = msg_path_to_obj_attr(msg, 'header.stamp')
			setattr(obj, attr, rospy.get_rostime())
		pub.publish(msg)
	def rosparam_set(self, pdic, prm):
		"""Push vars that declare a 'rosparam' name to the ROS parameter server.

		The server's parameter list is fetched lazily, once.  A parameter is
		only set when its value differs from the server's current value; an
		empty value deletes the parameter instead of setting it.
		"""
		rosparams = None
		for var in prm.get('vars', []):
			name = var['name']
			if 'rosparam' not in var or name not in pdic:
				continue
			rosparam = var['rosparam']
			v = pdic.get(name)
			v = str(v)
			cvdic = { 'True':'true', 'False':'false' }  # python bool repr -> YAML bool
			if v in cvdic:
				v = cvdic.get(v)
			if rosparams is None:
				cmd = [ 'rosparam', 'list' ]
				rosparams = subprocess.check_output(cmd).strip().split('\n')
			nm = rosparam
			nm = ('/' if len(nm) > 0 and nm[0] != '/' else '') + nm  # normalize to absolute name
			exist = nm in rosparams
			if exist:
				cmd = [ 'rosparam', 'get', rosparam ]
				ov = subprocess.check_output(cmd).strip()
				if ov == v:
					continue  # unchanged; skip the set
			elif v == '':
				continue  # parameter absent and value empty: nothing to delete
			cmd = [ 'rosparam', 'set', rosparam, v ] if v != '' else [ 'rosparam', 'delete', rosparam ]
			print(cmd)
			subprocess.call(cmd)
#
# Sensing Tab
#
	def OnSensingDriver(self, event):
		# Sensing-tab driver checkbox toggled: launch or kill its command.
		self.OnChecked_obj(event.GetEventObject())
def OnRosbagRecord(self, event):
self.dlg_rosbag_record.Show()
obj = event.GetEventObject()
set_val(obj, False)
	def create_checkboxes(self, dic, panel, sizer, probe_dic, run_dic, bind_handler):
		"""Recursively build the Sensing tab's checkbox hierarchy from YAML.

		Group nodes ('subs') become static-box or plain box sizers; leaf nodes
		become checkboxes bound to bind_handler.  'probe'/'run' commands are
		registered in probe_dic/run_dic as {obj: (cmd, None)}, and leaves with
		'param' get a [config] link attached.  The top-level call
		(sizer=None) installs the result as the panel's sizer.
		"""
		if 'name' not in dic:
			return
		obj = None
		bdr_flg = wx.ALL
		if 'subs' in dic:
			# group node: build children first, then wrap them in a sizer
			lst = []
			for d in dic['subs']:
				self.create_checkboxes(d, panel, lst, probe_dic, run_dic, bind_handler)
			if dic['name']:
				obj = static_box_sizer(panel, dic.get('name'))
				set_tooltip(obj.GetStaticBox(), dic)
			else:
				obj = wx.BoxSizer(wx.VERTICAL)
			for (o, flg) in lst:
				obj.Add(o, 0, wx.EXPAND | flg, 4)
		else:
			# leaf node: a checkbox bound to the handler
			obj = wx.CheckBox(panel, wx.ID_ANY, dic['name'])
			set_tooltip(obj, dic)
			self.Bind(wx.EVT_CHECKBOX, bind_handler, obj)
			bdr_flg = wx.LEFT | wx.RIGHT
			if 'probe' in dic:
				probe_dic[obj] = (dic['probe'], None)
			if 'run' in dic:
				run_dic[obj] = (dic['run'], None)
			if 'param' in dic:
				obj = self.add_config_link(dic, panel, obj)
			else:
				gdic = self.gdic_get_1st(dic)
				self.add_cfg_info(obj, obj, dic.get('name'), None, gdic, False, None)
		if sizer is not None:
			sizer.append((obj, bdr_flg))
		else:
			panel.SetSizer(obj)
def add_config_link(self, dic, panel, obj):
    """Place a '[config]' hyperlink next to obj and register its param bookkeeping.

    Returns the horizontal sizer wrapping (obj, spacer, link).
    """
    cfg_obj = wx.HyperlinkCtrl(panel, wx.ID_ANY, '[config]', '')
    fix_link_color(cfg_obj)
    self.Bind(wx.EVT_HYPERLINK, self.OnConfig, cfg_obj)
    add_objs = (obj, wx.StaticText(panel, wx.ID_ANY, ' '), cfg_obj)
    hszr = sizer_wrap(add_objs, wx.HORIZONTAL)
    name = dic['name']
    pdic = self.load_dic_pdic_setup(name, dic)
    gdic = self.gdic_get_1st(dic)
    prm = self.get_param(dic.get('param'))
    # run_disable=True: the link is frozen while the command is running
    self.add_cfg_info(cfg_obj, obj, name, pdic, gdic, True, prm)
    return hszr
def camera_ids(self):
    """Return camera namespace ids derived from '*/image_raw' topics.

    Returns [] when the synchronization button is active. The shell pipeline
    strips the '/image_raw' suffix; an empty remainder becomes '/'.
    """
    if self.button_synchronization.GetValue():
        return []
    cmd = "rostopic list | sed -n 's|/image_raw||p' | sed 's/^$/\//'"
    return subprocess.check_output(cmd, shell=True).strip().split()
def cam_id_to_obj(self, cam_id, v):
    """Fetch (or lazily create) the pseudo-widget tracking cam_id's state, set to v."""
    found = self.cfg_prm_to_obj( {'name':cam_id} )
    if found is None:
        found = StrValObj(cam_id, v)
    found.SetValue(v)
    return found
def camera_id_hook(self, args):
    """Dialog hook: when the camera_id choice changes to a known id, detach the
    current param panel and close the dialog with return code 100 + index.

    The >= 100 EndModal code is decoded by the caller to switch cameras.
    """
    ids = args.get('ids', [])
    chosen = args.get('pdic', {}).get('camera_id', '')
    if chosen not in ids:
        return
    panel = args.get('param_panel')
    if panel:
        panel.detach_func()
    dlg = args.get('dlg')
    if dlg:
        dlg.EndModal(100 + ids.index(chosen))
def OnCalibrationPublisher(self, event):
    """Launch/kill calibration_publisher once per detected camera id.

    Shows the parameter dialog per camera; EndModal codes >= 100 (set by
    camera_id_hook) switch the dialog to another camera id, 0 means OK,
    anything else cancels and rolls back the edited parameters.
    """
    obj = event.GetEventObject()
    (_, gdic_org, prm) = self.obj_to_pdic_gdic_prm(obj)
    if obj.GetValue():
        gdic_org['ids'] = self.camera_ids()
    ids = gdic_org.get('ids', [])
    if ids == []:
        # no cameras found: fall back to the plain launch/kill behavior
        self.OnLaunchKill(event)
        return
    #
    # setup
    #
    (cmd_dic, cmd, _) = self.obj_to_cmd_dic_cmd_proc(obj)
    flags = gdic_org.get('flags', [])[:] # copy
    if 'open_dialog' in flags:
        flags.remove('open_dialog')
    pdic_baks = {}
    for cam_id in ids:
        (pdic_a, gdic_a, _) = self.name_to_pdic_gdic_prm(cam_id)
        pdic = pdic_a if pdic_a else self.load_dic_pdic_setup(cam_id, {})
        pdic_baks[cam_id] = pdic.copy()  # snapshot for restore on Cancel
        gdic = gdic_a if gdic_a else gdic_org.copy()
        gdic['flags'] = flags
        cam_id_obj = self.cam_id_to_obj(cam_id, obj.GetValue())
        if not hasattr(cam_id_obj, 'enables_proxy'):
            cam_id_obj.enables_proxy = (obj, cam_id_obj.s)
        if not pdic_a or not gdic_a:
            self.add_cfg_info(cam_id_obj, cam_id_obj, cam_id, pdic, gdic, False, prm)
        if not cam_id_obj in cmd_dic:
            cmd_dic[ cam_id_obj ] = (cmd, None)
    var = self.get_var(prm, 'camera_id', {})
    var['choices'] = ids
    #
    # Dialog
    #
    cam_id = ids[0]
    while obj.GetValue():
        (pdic, gdic, _) = self.name_to_pdic_gdic_prm(cam_id)
        pdic['camera_id'] = cam_id
        dic_list_push(gdic, 'dialog_type', 'open2')
        klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
        dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
        gdic_v = dic_getset(gdic, 'camera_id', {})
        args = { 'pdic':pdic, 'ids':ids, 'param_panel':gdic.get('param_panel'), 'dlg':dlg }
        gdic_v['hook_var'] = { 'hook':self.camera_id_hook, 'args':args }
        dlg_ret = show_modal(dlg)
        dic_list_pop(gdic, 'dialog_type')
        pdic['camera_id'] = cam_id # restore
        if dlg_ret == 0: # OK
            break
        idx = dlg_ret - 100
        if idx < 0 or len(ids) <= idx: # Cancel
            # roll back every camera's params and untoggle the button
            for cam_id in ids:
                (pdic, _, _) = self.name_to_pdic_gdic_prm(cam_id)
                pdic.update(pdic_baks.get(cam_id))
            set_val(obj, False)
            return
        # Menu changed
        cam_id = ids[idx]
    #
    # Launch / Kill
    #
    for cam_id in ids:
        cam_id_obj = self.cfg_prm_to_obj( {'name':cam_id} )
        (pdic, _, _) = self.obj_to_pdic_gdic_prm(cam_id_obj)
        pdic['solo_camera'] = False
        #print '@', cam_id, cam_id_obj.GetValue()
        self.OnLaunchKill_obj(cam_id_obj)
#
# Simulation Tab
#
def rosbag_info_hook(self, v):
    """Kick off a background 'rosbag info' query for bag path v (no-op when empty)."""
    if v:
        th_start(self.rosbag_info_hook_th, {'v': v})
def rosbag_info_hook_th(self, ev, v): # thread
    """Worker thread: run 'rosbag info' on bag v and show its output in the GUI."""
    err = subprocess.STDOUT
    s = subprocess.check_output([ 'rosbag', 'info', v ], stderr=err).strip()
    # GUI updates must happen on the wx main thread
    wx.CallAfter(self.label_rosbag_info.SetLabel, s)
    wx.CallAfter(self.label_rosbag_info.GetParent().FitInside)
#
# Data Tab
#
#
# Status tab
#
def info_col(self, v, v_yellow, v_red, col_normal, col_red):
    """Map value v to a colour: col_normal below v_yellow, col_red at/above
    v_red, and a channel-wise average of the two in the warning band."""
    if v < v_yellow:
        return col_normal
    if v >= v_red:
        return col_red
    blended = tuple( (a + b) / 2 for (a, b) in zip(col_normal, col_red) )
    return blended
def mem_kb_info(self):
    """Return (total, used) memory in KB, parsed from the third line of 'free'
    (columns 3-4 of the '-/+ buffers/cache' style row)."""
    fields = subprocess.check_output(['free']).strip().split('\n')[2].split()[2:4]
    used = int(fields[0])
    free_kb = int(fields[1])
    return (used + free_kb, used)
def toprc_create(self):
    """Generate a toprc by scripting an interactive 'top' session in a pty.

    Keystrokes: '1' per-cpu rows, 'c' full command line, 'W' write config
    file, 'q' quit.
    """
    (child_pid, fd) = pty.fork()
    if child_pid == 0: # child
        os.execvp('top', ['top'])
    else: #parent
        sec = 0.2
        # small delay before each keystroke so top has processed the previous one
        for s in ['1', 'c', 'W', 'q']:
            time.sleep(sec)
            os.write(fd, s)
def toprc_setup(self, toprc, backup):
    """Move any existing toprc aside to `backup`, then generate our own config."""
    had_original = os.path.exists(toprc)
    if had_original:
        os.rename(toprc, backup)
    self.toprc_create()
def toprc_restore(self, toprc, backup):
    """Remove the generated toprc and restore the user's original from `backup`.

    Fix: the original called os.remove(toprc) unconditionally, which raises
    OSError when the generated file was never created (e.g. toprc_setup or
    toprc_create failed part-way); guard the removal like the rename.
    """
    if os.path.exists(toprc):
        os.remove(toprc)
    if os.path.exists(backup):
        os.rename(backup, toprc)
# top command thread
def top_cmd_th(self, ev, setting, cpu_ibls, mem_ibl, toprc, backup):
    """Status-tab polling thread: run 'top'/'free' every `interval` seconds,
    update per-CPU and memory bars, and flash an alert when thresholds are
    exceeded. Restores the user's toprc on exit.
    """
    interval = setting.get('interval', 3)
    alert_level = setting.get('alert_level', {})
    rate_per_cpu = alert_level.get('rate_per_cpu', 80)
    rate_per_cpu_yellow = alert_level.get('rate_per_cpu_yellow', 80)
    rate_cpu = alert_level.get('rate_cpu', 80)
    rate_mem = alert_level.get('rate_mem', 80)
    rate_mem_yellow = alert_level.get('rate_mem_yellow', 80)
    for ibl in cpu_ibls:
        ibl.lmt_bar_prg = rate_per_cpu
    mem_ibl.lmt_bar_prg = rate_mem
    alerted = False
    cpu_n = get_cpu_count()
    while not ev.wait(interval):
        # take the second sample of 'top -n 2' (the first is since-boot averages)
        s = subprocess.check_output(['sh', '-c', 'env COLUMNS=512 top -b -n 2 -d 0.1']).strip()
        i = s.rfind('\ntop -') + 1
        s = s[i:]
        wx.CallAfter(self.label_top_cmd.SetLabel, s)
        wx.CallAfter(self.label_top_cmd.GetParent().FitInside)
        # per-cpu usage lines start with '%Cpu'
        k = '%Cpu'
        fv_sum = 0
        i = 0
        for t in s.split('\n'):
            if t[:len(k)] != k:
                continue
            lst = t[1:].split()
            # tolerate both '%Cpu0 : 12.3' and '%Cpu0: 12.3' layouts
            v = lst[1] if lst[1] != ':' else lst[2]
            if v[0] == ':':
                v = v[1:]
            fv = str_to_float(v)
            col = self.info_col(fv, rate_per_cpu_yellow, rate_per_cpu, (64,64,64), (200,0,0))
            if i < cpu_n:
                ibl = cpu_ibls[i]
                wx.CallAfter(ibl.lb_set, v+'%', col)
                wx.CallAfter(ibl.bar_set, int(fv))
                fv_sum += fv
            i += 1
        # memory usage, scaled to a readable unit
        k = 'KiB Mem:'
        (total, used) = self.mem_kb_info()
        rate = 100 * used / total
        for u in [ 'KB', 'MB', 'GB', 'TB' ]:
            if total <= 10 * 1024 or used <= 10:
                break
            total /= 1024
            used /= 1024
        col = self.info_col(rate, rate_mem_yellow, rate_mem, (64,64,64), (200,0,0))
        tx = str(used) + u + '/' + str(total) + u + '(' + str(rate) + '%)'
        wx.CallAfter(mem_ibl.lb_set, tx, col)
        wx.CallAfter(mem_ibl.bar_set, rate)
        is_alert = (fv_sum >= rate_cpu * cpu_n) or rate >= rate_mem
        # --> for test
        if os.path.exists('/tmp/alert_test_on'):
            is_alert = True
        if os.path.exists('/tmp/alert_test_off'):
            is_alert = False
        # <-- for test
        # start/stop the flashing alert thread on state transitions only
        if is_alert and not alerted:
            thinf = th_start(self.alert_th, {'bgcol':(200,50,50)})
            alerted = True
        if not is_alert and alerted:
            th_end(thinf)
            alerted = False
        # top5 process list: slice COMMAND and %CPU columns by header offsets
        i = s.find('\n\n') + 2
        lst = s[i:].split('\n')
        hd = lst[0]
        top5 = lst[1:1+5]
        i = hd.rfind('COMMAND')
        cmds = [ line[i:].split(' ')[0] for line in top5 ]
        i = hd.find('%CPU')
        loads = [ line[i-1:].strip().split(' ')[0] for line in top5 ]
        for (lb, cmd, load) in zip(self.lb_top5, cmds, loads):
            col = self.info_col(str_to_float(load), rate_per_cpu_yellow, rate_per_cpu, (64,64,64), (200,0,0))
            wx.CallAfter(lb.SetForegroundColour, col)
            wx.CallAfter(lb.SetLabel, cmd + ' (' + load + ' %CPU)')
    self.toprc_restore(toprc, backup)
def alert_th(self, bgcol, ev): # thread
    """Flash all tab backgrounds between bgcol and the default until ev is set."""
    wx.CallAfter(self.RequestUserAttention)
    c = bgcol
    o = wx.NullColour
    while not ev.wait(0.5):
        for col in [ c, o, c, o, c, o ]:
            wx.CallAfter(self.set_bg_all_tabs, col)
            time.sleep(0.05)
def log_th(self, file, que, ev):
    """Thread body: forward lines from `file` into queue `que` until EOF or ev is set."""
    while not ev.wait(0):
        line = file.readline()
        if not line:
            break  # EOF: the producing process closed its pipe
        que.put(line)
def logout_th(self, que, interval, tc, ev): # thread
    """Drain a log queue until ev is set.

    For the stdout/stderr queues: forward each line into the merged log queue
    and, when the matching checkbox is on, mirror it to the GUI log view.
    For the merged queue: emit each line to stdout plus syslog or a log file,
    depending on status_dic['log_path'].
    """
    if que == self.log_que_stdout or que == self.log_que_stderr:
        while not ev.wait(0):
            try:
                s = que.get(timeout=1)
            except Queue.Empty:
                continue
            self.log_que.put(s)  # forward into the merged queue
            if interval <= 0:
                continue
            # mirror to the GUI log view when the matching checkbox is on
            ckbox = self.checkbox_stdout if que == self.log_que_stdout else self.checkbox_stderr
            if ckbox.GetValue():
                self.log_que_show.put( cut_esc(s) )
    else: # == self.log_que
        f = None
        path = self.status_dic.get('log_path')
        is_syslog = (path == 'syslog')
        if is_syslog:
            ident = sys.argv[0].split('/')[-1]
            syslog.openlog(ident, syslog.LOG_PID | syslog.LOG_CONS)
        elif path:
            path = os.path.expandvars(os.path.expanduser(path))
            f = open(path, 'a') if path else None
        while not ev.wait(0):
            try:
                s = que.get(timeout=1)
            except Queue.Empty:
                continue
            print s.strip()
            sys.stdout.flush()
            s = cut_esc(s)  # strip ANSI escapes before persisting
            if is_syslog:
                syslog.syslog(s)
            elif f:
                f.write(s)
                f.flush()
        if is_syslog:
            syslog.closelog()
        if f:
            f.close()
def logshow_th(self, que, interval, tc, ev): # thread
    """Move queued log lines into text control tc every `interval` seconds."""
    while not ev.wait(interval):
        try:
            s = que.get(timeout=1)
        except Queue.Empty:
            continue
        wx.CallAfter(append_tc_limit, tc, s)
        # que clear: with both mirrors off, drop the backlog and clear the view
        if self.checkbox_stdout.GetValue() is False and \
           self.checkbox_stderr.GetValue() is False and \
           que.qsize() > 0:
            que_clear(que)
            wx.CallAfter(tc.Clear)
#
# for Topics tab
#
def OnRefreshTopics(self, event):
    """Refresh-button handler on the Topics tab: rebuild the topic list."""
    self.refresh_topics_list()
def refresh_topics_list(self):
    """Rebuild the topic hyperlink list from 'rostopic list' and reset the
    info label and echo view."""
    lst = subprocess.check_output([ 'rostopic', 'list' ]).strip().split('\n')
    panel = self.panel_topics_list
    szr = self.sizer_topics_list
    # destroy the previous set of hyperlinks
    for obj in self.topics_list:
        szr.Remove(obj)
        obj.Destroy()
    self.topics_list = []
    for topic in lst:
        obj = wx.HyperlinkCtrl(panel, wx.ID_ANY, topic, '')
        self.Bind(wx.EVT_HYPERLINK, self.OnTopicLink, obj)
        szr.Add(obj, 0, wx.LEFT, 4)
        fix_link_color(obj)
        self.topics_list.append(obj)
    szr.Layout()
    panel.SetVirtualSize(szr.GetMinSize())
    # info clear
    lb = self.label_topics_info
    lb.SetLabel('')
    # echo clear
    self.topics_proc_th_end()
    # wait que clear
    while self.topics_echo_que.qsize() > 0:
        time.sleep(0.1)
    tc = self.text_ctrl_topics_echo
    tc.Enable(False)
    wx.CallAfter(tc.Clear)
    wx.CallAfter(tc.Enable, True)
    self.topics_echo_sum = 0
    self.topic_echo_curr_topic = None
def OnEcho(self, event):
    """Start or stop 'rostopic echo' according to the echo checkbox state."""
    want_echo = self.checkbox_topics_echo.GetValue()
    if want_echo and self.topic_echo_curr_topic:
        self.topics_proc_th_start(self.topic_echo_curr_topic)
    else:
        self.topics_proc_th_end()
def OnTopicLink(self, event):
    """Topic hyperlink handler: show 'rostopic info' output and restart echo
    if the echo checkbox is enabled."""
    obj = event.GetEventObject()
    topic = obj.GetLabel()
    self.topic_echo_curr_topic = topic
    # info
    info = subprocess.check_output([ 'rostopic', 'info', topic ]).strip()
    lb = self.label_topics_info
    lb.SetLabel(info)
    lb.GetParent().FitInside()
    # echo
    self.topics_proc_th_end()
    if self.checkbox_topics_echo.GetValue():
        self.topics_proc_th_start(topic)
def topics_proc_th_start(self, topic):
    """Spawn 'rostopic echo <topic>' and the reader thread that drains its output."""
    out = subprocess.PIPE
    err = subprocess.STDOUT
    self.topics_echo_proc = psutil.Popen([ 'rostopic', 'echo', topic ], stdout=out, stderr=err)
    self.topics_echo_thinf = th_start(self.topics_echo_th)
def topics_proc_th_end(self):
    """Stop the echo reader thread and terminate the 'rostopic echo' process tree."""
    thinf = self.topics_echo_thinf
    if thinf:
        th_end(thinf)
        self.topics_echo_thinf = None
    proc = self.topics_echo_proc
    if proc:
        terminate_children(proc)
        terminate(proc)
        #proc.wait()
        self.topics_echo_proc = None
def topics_echo_th(self, ev): # thread
    """Read echo output byte-by-byte (non-blocking) into topics_echo_que until
    EOF or ev is set; drains the queue on exit."""
    if not self.topics_echo_proc:
        return
    file = self.topics_echo_proc.stdout
    # switch the pipe to non-blocking reads so ev can interrupt promptly
    fl = fcntl.fcntl(file.fileno(), fcntl.F_GETFL)
    fcntl.fcntl(file.fileno(), fcntl.F_SETFL, fl | os.O_NONBLOCK)
    while not ev.wait(0):
        try:
            s = file.read(1)
        except:
            continue  # no data available yet on the non-blocking pipe
        if not s:
            break
        if self.checkbox_topics_echo.GetValue():
            self.topics_echo_que.put(s)
    que_clear(self.topics_echo_que)
def topics_echo_show_th(self, ev): # thread
    """Periodically flush topics_echo_que into the echo text control, enforcing
    a rolling character limit so the widget does not grow without bound."""
    que = self.topics_echo_que
    interval = self.topics_dic.get('gui_update_interval_ms', 100) * 0.001
    chars_limit = self.topics_dic.get('gui_chars_limit', 10000)
    tc = self.text_ctrl_topics_echo
    while not ev.wait(interval):
        qsz = que.qsize()
        if qsz <= 0:
            continue
        if qsz > chars_limit:
            # drop the oldest overflow before displaying
            over = qsz - chars_limit
            for i in range(over):
                try:
                    que.get(timeout=1)
                except Queue.Empty:
                    break
            qsz = chars_limit
        arr = []
        for i in range(qsz):
            try:
                s = que.get(timeout=1)
            except Queue.Empty:
                s = ''
            arr.append(s)
        s = ''.join(arr)
        self.topics_echo_sum += len(s)
        rm_chars = 0
        if self.topics_echo_sum > chars_limit:
            # ask the appender to trim this many chars from the widget's head
            rm_chars = self.topics_echo_sum - chars_limit
            self.topics_echo_sum = chars_limit
        if self.checkbox_topics_echo.GetValue():
            wx.CallAfter(append_tc_limit, tc, s, rm_chars)
#
# Common Utils
#
def set_param_panel(self, obj, parent):
    """Create a ParamPanel for obj inside parent and register it for enable-toggling."""
    (pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
    panel = ParamPanel(parent, frame=self, pdic=pdic, gdic=gdic, prm=prm)
    sizer_wrap((panel,), wx.VERTICAL, 0, wx.EXPAND, 0, parent)
    # the panel follows obj's enabled state via 'ext_toggle_enables'
    k = 'ext_toggle_enables'
    gdic[ k ] = gdic.get(k, []) + [ panel ]
def obj_to_varpanel(self, obj, var_name):
    """Return the VarPanel registered for var_name on obj, or None."""
    return self.obj_to_gdic(obj, {}).get(var_name, {}).get('var')
def obj_to_varpanel_tc(self, obj, var_name):
    """Return the text control of obj's VarPanel for var_name (None if absent)."""
    panel = self.obj_to_varpanel(obj, var_name)
    if panel and panel.tc:
        return panel.tc
    return None
def OnConfig(self, event):
    """'[config]' hyperlink handler: open the parameter dialog for the link."""
    self.OnHyperlinked_obj(event.GetEventObject())
def add_params(self, params):
    """Register param definitions; create a latched ROS publisher for any param
    that declares both a 'topic' and a 'msg' type (resolved from globals)."""
    for prm in params:
        if 'topic' in prm and 'msg' in prm:
            klass_msg = globals()[ prm['msg'] ]
            prm['pub'] = rospy.Publisher(prm['topic'], klass_msg, latch=True, queue_size=10)
    self.params += params
def gdic_get_1st(self, dic):
    """Return the 'gui' sub-dict of dic (a fresh {} when absent) with our
    update_func wired in."""
    gui = dic.get('gui', {})
    gui['update_func'] = self.update_func
    return gui
def add_cfg_info(self, cfg_obj, obj, name, pdic, gdic, run_disable, prm):
    """Record the config-dialog bookkeeping entry for cfg_obj in config_dic."""
    entry = { 'obj': obj, 'name': name, 'pdic': pdic, 'gdic': gdic,
              'run_disable': run_disable, 'param': prm }
    self.config_dic[ cfg_obj ] = entry
def get_param(self, prm_name):
    """Return the registered param dict whose 'name' equals prm_name, or None."""
    for prm in self.params:
        if prm['name'] == prm_name:
            return prm
    return None
def get_var(self, prm, var_name, def_ret=None):
    """Return the entry of prm['vars'] whose 'name' is var_name, else def_ret."""
    for entry in prm.get('vars'):
        if entry.get('name') == var_name:
            return entry
    return def_ret
def obj_to_cmd_dic(self, obj):
    """Return the command dict (from all_cmd_dics) containing obj, or None."""
    for cmd_dic in self.all_cmd_dics:
        if obj in cmd_dic:
            return cmd_dic
    return None
def obj_to_cmd_dic_cmd_proc(self, obj):
    """Look up obj's (cmd_dic, cmd, proc) triple; all None when obj is unknown."""
    cmd_dic = self.obj_to_cmd_dic(obj)
    if cmd_dic is not None:
        (cmd, proc) = cmd_dic.get(obj, (None, None))
        return (cmd_dic, cmd, proc)
    return (None, None, None)
def OnLaunchKill(self, event):
    """Launch/kill toggle handler: delegate to OnLaunchKill_obj."""
    self.OnLaunchKill_obj(event.GetEventObject())
def OnLaunchKill_obj(self, obj):
    """Launch or kill the command bound to obj according to its toggle state."""
    self.alias_sync(obj)
    obj = self.alias_grp_top_obj(obj)
    v = obj.GetValue()
    add_args = self.obj_to_add_args(obj, msg_box=v) # no open dialog at kill
    if add_args is False:
        # dialog cancelled: revert the toggle
        set_val(obj, not v)
        return
    (cmd_dic, _, proc_bak) = self.obj_to_cmd_dic_cmd_proc(obj)
    self.launch_kill_proc(obj, cmd_dic, add_args=add_args)
    (_, _, proc) = self.obj_to_cmd_dic_cmd_proc(obj)
    if proc != proc_bak:
        # process state changed: flip the enabled state of related widgets
        self.toggle_enable_obj(obj)
    if proc:
        self.update_proc_cpu(obj)
def OnRosbagPlay(self, event):
    """Play/Stop/Pause button handler for rosbag playback."""
    obj = event.GetEventObject()
    play = self.button_play_rosbag_play
    stop = self.button_stop_rosbag_play
    pause = self.button_pause_rosbag_play
    (_, _, prm) = self.obj_to_pdic_gdic_prm(play)
    var = self.get_var(prm, 'sim_time', {})
    if obj == play:
        var['v'] = True  # playback drives simulated time
        self.OnLaunchKill_obj(play)
        button_color_change(play)
        set_val(stop, False)
        set_val(pause, False)
    elif obj == stop:
        set_val(stop, True)
        set_val(play, False)
        set_val(pause, False)
        var['v'] = False
        self.OnLaunchKill_obj(play)
        button_color_change(stop)
    elif obj == pause:
        (_, _, proc) = self.obj_to_cmd_dic_cmd_proc(play)
        if proc:
            proc.stdin.write(' ')  # 'rosbag play' toggles pause on <space>
def OnFtrace(self, event):
    """Toggle the ftrace helper process from its checkbox state."""
    obj = event.GetEventObject()
    cmd = 'rosrun runtime_manager ftrace.py'
    v = obj.GetValue()
    # pass the existing proc only when killing; None means launch a new one
    self.ftrace_proc_ = self.launch_kill(v, cmd,
        None if v else self.ftrace_proc_, obj=obj)
def stdout_file_search(self, file, k):
    """Read `file` char-by-char until a line containing k appears; return the
    text after k on that line, or None on EOF before a match.

    Reading one character at a time keeps the scan responsive on a live pipe.
    """
    line = ''
    while True:
        ch = file.read(1)
        if not ch:
            return None
        if ch not in ('\r', '\n'):
            line += ch
            continue
        line = line.strip()
        if k in line:
            return line[line.find(k) + len(k):]
        line = ''
# thread
def point_cloud_progress_bar(self, file, ev): # thread
    """Track PCD loading progress by scanning stdout for 'load ' lines;
    n is the number of PCD paths configured (comma-separated)."""
    obj = self.button_point_cloud
    (pdic, _, _) = self.obj_to_pdic_gdic_prm(obj)
    n = len(pdic.get('path_pcd', '').split(','))
    if n == 0:
        return
    i = 0
    while not ev.wait(0):
        s = self.stdout_file_search(file, 'load ')
        if not s:
            break
        err_key = 'failed '
        if s[:len(err_key)] != err_key:
            i += 1
        else:
            i -= 1  # a failed load cancels out one success
        print s
        wx.CallAfter(self.label_point_cloud_bar.set, 100 * i / n)
    wx.CallAfter(self.label_point_cloud_bar.clear)
# thread
def rosbag_play_progress_bar(self, file, ev): # thread
    """Parse 'rosbag play' stdout 'Duration:' lines ('pos / total') into the
    progress bar and position/total labels; clears them on exit."""
    while not ev.wait(0):
        s = self.stdout_file_search(file, 'Duration:')
        if not s:
            break
        lst = s.split()
        pos = str_to_float(lst[0])
        # lst[1] is '/'
        total = str_to_float(lst[2])
        if total == 0:
            continue
        prg = int(100 * pos / total + 0.5)
        pos = str(int(pos))
        total = str(int(total))
        wx.CallAfter(self.label_rosbag_play_bar.set, prg)
        wx.CallAfter(self.label_rosbag_play_pos.SetLabel, pos)
        wx.CallAfter(self.label_rosbag_play_total.SetLabel, total)
    wx.CallAfter(self.label_rosbag_play_bar.clear)
    wx.CallAfter(self.label_rosbag_play_pos.SetLabel, '')
    wx.CallAfter(self.label_rosbag_play_total.SetLabel, '')
def alias_sync(self, obj, v=None):
    """Propagate obj's enabled state and value to every widget in its alias group."""
    en = None
    if getattr(obj, 'IsEnabled', None):
        (key, en) = enables_get_last(obj)
        if not key:
            en = obj.IsEnabled()
    grp = self.alias_grp_get(obj)
    if getattr(obj, 'GetValue', None):
        v = obj.GetValue()
    for o in grp:
        if o is obj:
            continue
        # sync enabled state (toggle buttons manage their own enabling)
        if en is not None and o.IsEnabled() != en and not self.is_toggle_button(o):
            if key:
                enable_set(o, key, en)
            else:
                o.Enable(en)
        # sync value for widgets that can accept one
        if v is not None and getattr(o, 'SetValue', None):
            set_val(o, v)
            if getattr(o, 'SetInsertionPointEnd', None):
                o.SetInsertionPointEnd()
def alias_grp_top_obj(self, obj):
    """Return the representative (top) widget of obj's alias group."""
    grp = self.alias_grp_get(obj)
    return get_top(grp, obj)
def alias_grp_get(self, obj):
    """Return the alias group (list) containing obj, or [] if none does."""
    for grp in self.alias_grps:
        if obj in grp:
            return grp
    return []
def create_tree(self, parent, items, tree, item, cmd_dic):
    """Recursively build the CustomTreeCtrl from a YAML-derived items dict.

    Leaf nodes with 'cmd' become checkbox items registered in cmd_dic and get
    an in-row panel with 'sys'/'app' config links. Returns the tree control.
    """
    name = items.get('name', '')
    if tree is None:
        # root call: create the tree control itself
        style = wx.TR_HAS_BUTTONS | wx.TR_NO_LINES | wx.TR_HIDE_ROOT | wx.TR_DEFAULT_STYLE | wx.SUNKEN_BORDER
        tree = CT.CustomTreeCtrl(parent, wx.ID_ANY, agwStyle=style)
        # for disable wrong scrolling at checked
        tree.AcceptsFocus = MethodType(lambda self: False, tree, CT.CustomTreeCtrl)
        item = tree.AddRoot(name, data=tree)
        tree.Bind(wx.EVT_MOTION, self.OnTreeMotion)
    else:
        ct_type = 1 if 'cmd' in items else 0 # 1:checkbox type
        item = tree.AppendItem(item, name, ct_type=ct_type)
        if 'desc' in items:
            item.SetData(items.get('desc'))
        if 'cmd' in items:
            cmd_dic[item] = (items['cmd'], None)
            pdic = self.load_dic_pdic_setup(name, items)
            pnl = wx.Panel(tree, wx.ID_ANY)
            add_objs = []
            self.new_link(item, name, pdic, self.sys_gdic, pnl, 'sys', 'sys', add_objs)
            gdic = self.gdic_get_1st(items)
            if 'param' in items:
                self.new_link(item, name, pdic, gdic, pnl, 'app', items.get('param'), add_objs)
            else:
                self.add_cfg_info(item, item, name, None, gdic, False, None)
            szr = sizer_wrap(add_objs, wx.HORIZONTAL, parent=pnl)
            szr.Fit(pnl)
            tree.SetItemWindow(item, pnl)
    for sub in items.get('subs', []):
        self.create_tree(parent, sub, tree, item, cmd_dic)
    return tree
def new_link(self, item, name, pdic, gdic, pnl, link_str, prm_name, add_objs):
    """Append a '[sys]'/'[app]'-style hyperlink for item (unless gdic flags
    'no_link') and register its config info."""
    lkc = None
    if 'no_link' not in gdic.get('flags', []):
        lkc = wx.HyperlinkCtrl(pnl, wx.ID_ANY, link_str, "")
        fix_link_color(lkc)
        self.Bind(wx.EVT_HYPERLINK, self.OnHyperlinked, lkc)
        if len(add_objs) > 0:
            add_objs += [ wx.StaticText(pnl, wx.ID_ANY, ' ') ]
        add_objs += [ wx.StaticText(pnl, wx.ID_ANY, '['), lkc, wx.StaticText(pnl, wx.ID_ANY, ']') ]
    prm = self.get_param(prm_name)
    # when no link widget exists, the tree item itself keys the config entry
    self.add_cfg_info(lkc if lkc else item, item, name, pdic, gdic, False, prm)
def load_dic_pdic_setup(self, name, dic):
    """Get (creating on first use) the persistent param dict for dic, keyed by
    'share_val' or 'name' (falling back to the passed-in name)."""
    key = dic.get('share_val', dic.get('name', name))
    if key not in self.load_dic:
        self.load_dic[key] = {}
    return self.load_dic[key]
def launch_kill_proc(self, obj, cmd_dic, add_args=None):
    """Launch or kill cmd_dic[obj] based on obj's toggle state; update bookkeeping."""
    if obj not in cmd_dic:
        set_val(obj, False)
        print('not implemented.')
        return
    v = obj.GetValue()
    (cmd, proc) = cmd_dic[obj]
    if not cmd:
        set_val(obj, False)
    proc = self.launch_kill(v, cmd, proc, add_args, obj=obj)
    (cfg_obj, dic) = self.cfg_obj_dic( {'obj':obj} )
    if cfg_obj and dic.get('run_disable'):
        cfg_obj.Enable(not v)  # freeze the config link while running
    cmd_dic[obj] = (cmd, proc)
    if not v:
        self.stat_label_off(obj)
def proc_to_cmd_dic_obj(self, proc):
    """Find which cmd_dic entry holds proc; return (cmd_dic, obj) or (None, None)."""
    for cmd_dic in self.all_cmd_dics:
        for (obj, v) in cmd_dic.items():
            if proc in v:
                return (cmd_dic, obj)
    return (None, None)
def launch_kill(self, v, cmd, proc, add_args=None, sigint=None, obj=None, kill_children=None):
    """Start (v True) or terminate (v False) cmd; return the new proc handle
    (None after a kill).

    On launch, stdout/stderr are wired to the gdic-configured handler thread
    (defaulting to log_th); on kill, the 'SIGTERM' / 'kill_children' gdic
    flags control signal choice and child termination.
    """
    msg = None
    msg = 'already launched.' if v and proc else msg
    msg = 'already terminated.' if not v and proc is None else msg
    msg = 'cmd not implemented.' if not cmd else msg
    if msg is not None:
        print(msg)
        return proc
    if v:
        args = shlex.split(cmd)
        if add_args:
            args += add_args
        print(args) # for debug
        f = self.obj_to_gdic(obj, {}).get('stdout_func')
        f = eval_if_str(self, f)
        f = f if f else self.log_th
        out = subprocess.PIPE if f else None
        err = subprocess.STDOUT if f else None
        if f == self.log_th:
            err = subprocess.PIPE  # keep stderr separate for its own queue
        # 'do_shell_exec' prefix requests shell execution of the joined command
        shell = ( len(args) > 0 and args[0] == 'do_shell_exec' )
        if shell:
            args = ' '.join( args[1:] )
        proc = psutil.Popen(args, stdin=subprocess.PIPE, stdout=out, stderr=err, shell=shell)
        self.all_procs.append(proc)
        if f == self.log_th:
            thinf = th_start(f, {'file':proc.stdout, 'que':self.log_que_stdout})
            self.all_th_infs.append(thinf)
            thinf = th_start(f, {'file':proc.stderr, 'que':self.log_que_stderr})
            self.all_th_infs.append(thinf)
        elif f:
            thinf = th_start(f, {'file':proc.stdout})
            self.all_th_infs.append(thinf)
    else:
        flags = self.obj_to_gdic(obj, {}).get('flags', [])
        if sigint is None:
            sigint = 'SIGTERM' not in flags
        if kill_children is None:
            kill_children = 'kill_children' in flags
        if kill_children:
            terminate_children(proc, sigint)
        terminate(proc, sigint)
        # keep widgets disabled until the process actually exits
        enables_set(obj, 'proc_wait', False)
        th_start( proc_wait_thread, {'proc': proc, 'obj': obj} )
        if proc in self.all_procs:
            self.all_procs.remove(proc)
        proc = None
    return proc
def roslaunch_to_nodes(self, cmd):
    """Run cmd and return its non-empty output split into lines; [] on error
    or empty output."""
    try:
        out = subprocess.check_output(cmd).strip()
    except subprocess.CalledProcessError:
        return []
    return out.split('\n') if out != '' else []
def set_bg_all_tabs(self, col=wx.NullColour):
    """Set the background colour of every tab plus the frame and tree panels.

    Default wx.NullColour restores the platform default.
    """
    add_pnls = [
        self,
        self.tree_ctrl_0,
        self.tree_ctrl_1,
        self.tree_ctrl_data ]
    for tab in self.all_tabs + add_pnls:
        tab.SetBackgroundColour(col)
def get_autoware_dir(self):
    """Return the absolute Autoware root directory (six levels above the
    runtime_manager source dir)."""
    rel = rtmgr_src_dir() + '../../../../../../'
    return os.path.abspath(rel)
def load_yaml(self, filename, def_ret=None):
    """Thin delegate to the module-level load_yaml helper."""
    result = load_yaml(filename, def_ret)
    return result
def toggle_enable_obj(self, obj):
    """Toggle the enabled state of widgets related to obj: the play/stop/pause/
    ref/text siblings sharing obj's key, plus gdic 'ext_toggle_enables'."""
    objs = []
    pfs = [ 'button_play_', 'button_stop_', 'button_pause_',
            'button_ref_', 'text_ctrl_' ]
    key = self.obj_key_get(obj, pfs)
    if key:
        objs += self.key_objs_get(pfs, key)
    gdic = self.obj_to_gdic(obj, {})
    # entries may be strings naming attributes; resolve them lazily
    objs += [ eval_if_str(self, e) for e in gdic.get('ext_toggle_enables', []) ]
    self.toggle_enables(objs)
def toggle_enables(self, objs):
    """Flip each widget's 'toggle' enable flag and resync its alias group."""
    for obj in objs:
        if getattr(obj, 'IsEnabled', None):
            en = enables_get(obj, 'toggle', obj.IsEnabled())
            enables_set(obj, 'toggle', not en)
            self.alias_sync(obj)
def is_toggle_button(self, obj):
    """Truthy when obj's attribute name starts with 'button' and it has GetValue."""
    prefix = self.name_get(obj).split('_')[0]
    return prefix == 'button' and getattr(obj, 'GetValue', None)
def obj_name_split(self, obj, pfs):
    """Split obj's attribute name into (prefix, rest) for the first matching
    prefix in pfs; (None, None) when obj is unnamed, None when no prefix matches."""
    name = self.name_get(obj)
    if name is None:
        return (None, None)
    for pf in pfs:
        if name.startswith(pf):
            return (pf, name[len(pf):])
    return None
def obj_key_get(self, obj, pfs):
    """Return obj's attribute name with the first matching prefix stripped,
    or None when obj is unnamed or no prefix matches."""
    name = self.name_get(obj)
    if name is None:
        return None
    for pf in pfs:
        if name.startswith(pf):
            return name[len(pf):]
    return None
def key_objs_get(self, pfs, key):
    """Return the existing (truthy) widgets named pf+key for each prefix in pfs.

    Fix: the original comprehension evaluated self.obj_get(pf + key) twice per
    prefix (once in the filter, once for the value); look each name up once.
    """
    objs = [ self.obj_get(pf + key) for pf in pfs ]
    return [ o for o in objs if o ]
def name_get(self, obj):
    """Return the name of the attribute of self bound to exactly obj (identity),
    or None when no attribute refers to it."""
    for nm in dir(self):
        if getattr(self, nm) is obj:
            return nm
    return None
def name_get_cond(self, obj, cond=(lambda s : True), def_ret=None):
    """Like name_get, but only consider attribute names for which cond(name)
    holds; return def_ret when nothing matches."""
    hits = ( nm for nm in dir(self) if cond(nm) and getattr(self, nm) is obj )
    return next(hits, def_ret)
def val_get(self, name):
    """Return GetValue() of the widget attribute `name`; None when the widget
    is missing or has no GetValue."""
    widget = self.obj_get(name)
    if widget is None or not getattr(widget, 'GetValue', None):
        return None
    return widget.GetValue()
def obj_get(self, name):
    """Return self.<name>, or None if no such attribute exists."""
    try:
        return getattr(self, name)
    except AttributeError:
        return None
def gdic_dialog_type_chk(gdic, name):
    """Decide whether variable `name` may be shown in a dialog of the current type.

    gdic keys of the form '<type>_dialog_only' restrict the listed names to
    that dialog type; '<type>_dialog_allow' whitelists names for that type.
    Returns True when neither mechanism excludes `name`.
    """
    dlg_type = dic_list_get(gdic, 'dialog_type', 'config')
    # '<type>_dialog_only': reject if name is reserved for a *different* type
    tail = '_dialog_only'
    lst = [ (k, k[:-len(tail)]) for k in gdic.keys() if k[-len(tail):] == tail ]
    only_chk = next( (False for (k,type) in lst if type != dlg_type and name in gdic.get(k, [])), True)
    # '<type>_dialog_allow': reject if the current type has a whitelist lacking name
    tail = '_dialog_allow'
    lst = [ (k, k[:-len(tail)]) for k in gdic.keys() if k[-len(tail):] == tail ]
    allow_chk = next( (False for (k,type) in lst if type == dlg_type and name not in gdic.get(k, [])), True)
    return only_chk and allow_chk
def gdic_dialog_name_get(gdic):
    """Return the dialog class name for the current dialog type, falling back to
    gdic['dialog'] and finally 'MyDialogParam'."""
    dlg_type = dic_list_get(gdic, 'dialog_type', 'config')
    fallback = gdic.get('dialog', 'MyDialogParam')
    return gdic.get(dlg_type + '_dialog', fallback)
class ParamPanel(wx.Panel):
    """Panel rendering a parameter set (prm) as a column of VarPanel widgets.

    Constructor kwds (popped before wx.Panel.__init__): frame, pdic (current
    values), gdic (per-var GUI metadata), prm (parameter definition).
    """
    def __init__(self, *args, **kwds):
        self.frame = kwds.pop('frame')
        self.pdic = kwds.pop('pdic')
        self.gdic = kwds.pop('gdic')
        self.prm = kwds.pop('prm')
        wx.Panel.__init__(self, *args, **kwds)
        self.gdic['param_panel'] = self
        obj = self.frame.cfg_prm_to_obj( {'pdic':self.pdic, 'gdic':self.gdic, 'param':self.prm} )
        (_, _, proc) = self.frame.obj_to_cmd_dic_cmd_proc(obj)
        hszr = None
        self.vps = []
        self.tmp_msg = None
        szr = wx.BoxSizer(wx.VERTICAL)
        topic_szrs = (None, None)
        vars = self.prm.get('vars')
        if self.gdic.get('show_order'):
            # reorder vars to the explicit 'show_order' list
            var_lst = lambda name, vars : [ var for var in vars if var.get('name') == name ]
            vars = reduce( lambda lst, name : lst + var_lst(name, vars), self.gdic.get('show_order'), [] )
        for var in vars:
            name = var.get('name')
            if not gdic_dialog_type_chk(self.gdic, name):
                continue
            gdic_v = self.get_gdic_v_and_chk_enable(name)
            if gdic_v is None:
                continue
            bak_stk_push(gdic_v, 'func')
            if gdic_v.get('func'):
                continue  # an active hook already manages this var
            v = self.pdic.get(name, var.get('v'))
            vp = VarPanel(self, var=var, v=v, update=self.update)
            vp.setup_tooltip()
            self.vps.append(vp)
            gdic_v['var'] = vp
            gdic_v['func'] = vp.get_v
            prop = gdic_v.get('prop', 0)
            border = gdic_v.get('border', 0)
            flag = wx_flag_get(gdic_v.get('flags', []))
            do_category = 'no_category' not in gdic_v.get('flags', [])
            if do_category and self.in_msg(var):
                # vars mapping into the ROS message go inside a 'topic : ...' box
                bak = (szr, hszr)
                (szr, hszr) = topic_szrs
                if szr is None:
                    szr = static_box_sizer(self, 'topic : ' + self.prm.get('topic'))
                    bak[0].Add(szr, 0, wx.EXPAND | wx.ALL, 4)
            targ_szr = szr
            if vp.is_nl():
                hszr = None if hszr else hszr
                flag |= wx.EXPAND
            else:
                if hszr is None:
                    hszr = wx.BoxSizer(wx.HORIZONTAL)
                    szr.Add(hszr, 0, wx.EXPAND)
                flag |= wx.ALIGN_CENTER_VERTICAL
                targ_szr = hszr
            if do_category and 'rosparam' in var:
                rp_szr = static_box_sizer(self, 'rosparam : ' + var.get('rosparam'))
                targ_szr.Add(rp_szr, 0, wx.EXPAND | wx.ALL, 4)
                targ_szr = rp_szr
            user_category = gdic_v.get('user_category')
            if user_category is not None and hszr:
                user_szr = static_box_sizer(self, user_category, orient=wx.HORIZONTAL)
                (flgs, bdr) = gdic_v.get('user_category_add', [ [], 0 ])
                targ_szr.Add(user_szr, 0, wx_flag_get(flgs), bdr)
                targ_szr = hszr = user_szr
            targ_szr.Add(vp, prop, flag, border)
            if 'nl' in gdic_v.get('flags', []):
                hszr = None  # force the next widget onto a new row
            if do_category and self.in_msg(var):
                # restore the outer sizer pair, remembering the topic box state
                topic_szrs = (szr, hszr)
                (szr, hszr) = bak
            if 'hline' in gdic_v.get('flags', []) and hszr is None:
                szr.Add(wx.StaticLine(self, wx.ID_ANY), 0, wx.EXPAND | wx.TOP | wx.BOTTOM, 4)
            if not self.in_msg(var) and var.get('rosparam'):
                # rosparam-only vars are frozen while the process runs
                k = 'ext_toggle_enables'
                self.gdic[ k ] = self.gdic.get(k, []) + [ vp ]
                enables_set(vp, 'toggle', proc is None)
            if 'disable' in gdic_v.get('flags', []):
                vp.Enable(False)
            if 'hide' in gdic_v.get('flags', []):
                vp.Hide()
        self.SetSizer(szr)
        if 'no_init_update' not in self.prm.get('flags', []):
            self.update()

    def get_gdic_v_and_chk_enable(self, var_name):
        """Return gdic[var_name] (created if needed), or None when the var is
        assigned to a different panel."""
        gdic_v = dic_getset(self.gdic, var_name, {})
        if 'panel' in gdic_v and dic_eval_if_str(self.frame, gdic_v, 'panel') != self.GetParent():
            return None
        return gdic_v

    def update(self, var=None):
        """Invoke the frame-provided update hook with the current pdic/gdic/prm."""
        update_func = self.gdic.get('update_func')
        if update_func:
            self.gdic['update_func_arg_var'] = var
            update_func(self.pdic, self.gdic, self.prm)

    def detach_func(self):
        """Undo the 'func'/'var' registrations made in __init__ (panel teardown)."""
        for var in self.prm.get('vars'):
            name = var.get('name')
            if not gdic_dialog_type_chk(self.gdic, name):
                continue
            gdic_v = self.get_gdic_v_and_chk_enable(name)
            if gdic_v is None:
                continue
            if 'func' in gdic_v:
                bak_stk_pop(gdic_v, 'func')
            vp = gdic_v.get('var')
            lst_remove_once(self.gdic.get('ext_toggle_enables', []), vp)

    def in_msg(self, var):
        """True when var maps to a slot of the param's ROS message type."""
        if 'topic' not in self.prm or 'msg' not in self.prm:
            return False
        if self.tmp_msg is None:
            klass_msg = globals().get( self.prm.get('msg') )
            if klass_msg is None:
                return False
            self.tmp_msg = klass_msg()
        (obj, attr) = msg_path_to_obj_attr(self.tmp_msg, var.get('name'))
        return obj and attr in obj.__slots__
class VarPanel(wx.Panel):
def __init__(self, *args, **kwds):
self.var = kwds.pop('var')
v = kwds.pop('v')
self.update = kwds.pop('update')
wx.Panel.__init__(self, *args, **kwds)
self.min = self.var.get('min')
self.max = self.var.get('max')
self.has_slider = self.min is not None and self.max is not None
self.lb = None
label = self.var.get('label', '')
self.kind = self.var.get('kind')
if self.kind == 'radio_box':
choices = self.var.get('choices', [])
style = wx.RA_SPECIFY_COLS if self.var.get('choices_style') == 'h' else wx.RA_SPECIFY_ROWS
self.obj = wx.RadioBox(self, wx.ID_ANY, label, choices=choices, majorDimension=0, style=style)
self.choices_sel_set(v)
self.Bind(wx.EVT_RADIOBOX, self.OnUpdate, self.obj)
return
if self.kind == 'menu':
choices = self.var.get('choices', [])
self.obj = wx.Choice(self, wx.ID_ANY, choices=choices)
self.choices_sel_set(v)
self.Bind(wx.EVT_CHOICE, self.OnUpdate, self.obj)
if label:
self.lb = wx.StaticText(self, wx.ID_ANY, label)
flag = wx.LEFT | wx.ALIGN_CENTER_VERTICAL
sizer_wrap((self.lb, self.obj), wx.HORIZONTAL, 0, flag, 4, self)
return
if self.kind == 'checkbox':
self.obj = wx.CheckBox(self, wx.ID_ANY, label)
self.obj.SetValue(v)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.obj)
return
if self.kind == 'checkboxes':
item_n = dic_eval_if_str(self, self.var, 'item_n', 1)
self.obj = Checkboxes(self, item_n, label)
self.obj.set(v)
for box in self.obj.boxes:
self.obj.Bind(wx.EVT_CHECKBOX, self.OnUpdate, box)
return
if self.kind == 'toggle_button':
self.obj = wx.ToggleButton(self, wx.ID_ANY, label)
set_val(self.obj, v)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnUpdate, self.obj)
button_color_hdr_setup(self.obj)
return
if self.kind == 'hide':
self.Hide()
return
szr = wx.BoxSizer(wx.HORIZONTAL)
self.lb = wx.StaticText(self, wx.ID_ANY, label)
flag = wx.LEFT | wx.ALIGN_CENTER_VERTICAL
szr.Add(self.lb, 0, flag, 4)
if self.kind == 'path':
v = str(v)
v = path_expand_cmd(v)
v = os.path.expandvars(os.path.expanduser(v))
style = wx.TE_PROCESS_ENTER + wx_flag_get( self.var.get('str_flags', []) )
self.tc = wx.TextCtrl(self, wx.ID_ANY, str(v), style=style)
self.Bind(wx.EVT_TEXT_ENTER, self.OnUpdate, self.tc)
if self.kind in ('num', None):
if self.has_slider:
self.w = self.max - self.min
vlst = [ v, self.min, self.max, self.var['v'] ]
self.is_float = len( [ v_ for v_ in vlst if type(v_) is not int ] ) > 0
self.int_max = 1000 if self.is_float else self.max
self.int_min = 0 if self.is_float else self.min
self.slider = wx.Slider(self, wx.ID_ANY, self.get_int_v(), self.int_min, self.int_max)
self.Bind(wx.EVT_COMMAND_SCROLL, self.OnScroll, self.slider)
self.slider.SetMinSize((82, 27))
szr.Add(self.slider, 1, wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, 4)
else:
self.is_float = type(self.var['v']) is not int
self.tc.SetMinSize((40,27))
flag = wx.ALIGN_CENTER_VERTICAL
prop = 1 if self.kind == 'path' or self.kind == 'str' else 0
szr.Add(self.tc, prop, flag, 4)
if self.kind == 'path':
self.ref = wx.Button(self, wx.ID_ANY, 'Ref')
self.Bind(wx.EVT_BUTTON, self.OnRef, self.ref)
button_color_hdr_setup(self.ref)
self.ref.SetMinSize((40,29))
szr.Add(self.ref, 0, flag, 4)
if self.has_slider or self.kind == 'num':
vszr = wx.BoxSizer(wx.VERTICAL)
vszr.Add( self.create_bmbtn("inc.png", self.OnIncBtn) )
vszr.Add( self.create_bmbtn("dec.png", self.OnDecBtn) )
szr.Add(vszr, 0, wx.ALIGN_CENTER_VERTICAL)
self.SetSizer(szr)
def setup_tooltip(self):
if get_tooltips(self.var):
set_tooltips(self.obj, self.var)
if get_tooltip(self.var):
obj = self.lb if self.lb else (self if self.kind == 'radio_box' else self.obj)
set_tooltip(obj, self.var)
def create_bmbtn(self, filename, hdr):
dir = rtmgr_src_dir()
bm = wx.Bitmap(dir + filename, wx.BITMAP_TYPE_ANY)
style = wx.BORDER_NONE | wx.BU_EXACTFIT
obj = wx.lib.buttons.GenBitmapButton(self, wx.ID_ANY, bm, style=style)
self.Bind(wx.EVT_BUTTON, hdr, obj)
return obj
def get_v(self):
if self.kind in [ 'radio_box', 'menu' ]:
return self.choices_sel_get()
if self.kind in [ 'checkbox', 'toggle_button' ]:
return self.obj.GetValue()
if self.kind == 'checkboxes':
return self.obj.get()
if self.kind == 'hide':
return self.var.get('v')
if self.kind in [ 'path', 'str' ]:
return str(self.tc.GetValue())
if not self.has_slider and self.tc.GetValue() == '':
return ''
return self.get_tc_v()
def get_tc_v(self):
s = self.tc.GetValue()
v = str_to_float(s) if self.is_float else int(s)
if self.has_slider:
v = self.min if v < self.min else v
v = self.max if v > self.max else v
self.tc.SetValue(adjust_num_str(str(v)))
return v
def get_int_v(self):
    # Value mapped onto the slider's integer range: float values are
    # scaled from [min, max] to [0, int_max] (self.w is the span max-min;
    # guard against a zero-width range).
    v = self.get_tc_v()
    if self.is_float:
        v = int( self.int_max * (v - self.min) / self.w if self.w != 0 else 0 )
    return v
def OnScroll(self, event):
    # Slider moved: convert the integer slider position back to the
    # value domain, quantize floats to the step size, mirror into the
    # text ctrl, and push the change out.
    iv = self.slider.GetValue()
    s = str(iv)
    if self.is_float:
        v = self.min + float(self.w) * iv / self.int_max
        s = str(Decimal(v).quantize(Decimal(str(self.get_step()))))
    self.tc.SetValue(s)
    self.update(self.var)
def OnIncBtn(self, event):
    # Up-arrow button: bump the value by one step.
    self.add_v(self.get_step())
def OnDecBtn(self, event):
    # Down-arrow button: bump the value down by one step.
    self.add_v(-self.get_step())
def get_step(self):
    # Increment step: an explicit (truthy) 'step' from the var definition
    # wins; otherwise 0.01 for float vars, 1 for int vars.
    configured = self.var.get('step')
    if configured:
        return configured
    return 0.01 if self.is_float else 1
def add_v(self, step):
    # Add step to the current value via the text ctrl; get_v() re-reads
    # it clamped, so only propagate when the value actually changed.
    ov = self.get_v()
    self.tc.SetValue(str(ov + step))
    v = self.get_v()
    if v != ov:
        if self.has_slider:
            self.slider.SetValue(self.get_int_v())
        self.update(self.var)
def OnUpdate(self, event):
    # Text ctrl edited: mirror the value onto the slider, then propagate.
    if self.has_slider:
        self.slider.SetValue(self.get_int_v())
    self.update(self.var)
def OnRef(self, event):
    # 'Ref' button: open a file/dir dialog for the path text ctrl and
    # propagate only when the user confirmed.
    if file_dialog(self, self.tc, self.var) == wx.ID_OK:
        self.update(self.var)
def choices_sel_get(self):
    # Current selection of the choices widget: the label string when
    # 'choices_type' is 'str', otherwise the integer index.
    if self.var.get('choices_type') == 'str':
        return self.obj.GetStringSelection()
    return self.obj.GetSelection()
def choices_sel_set(self, v):
    # Select an entry in the choices widget, by label or by index
    # depending on the var's 'choices_type'.
    by_string = self.var.get('choices_type') == 'str'
    setter = self.obj.SetStringSelection if by_string else self.obj.SetSelection
    setter(v)
def is_nl(self):
    # True when this item should occupy its own layout line
    # (slider-backed items and path items are wide).
    if self.has_slider:
        return True
    return self.kind in [ 'path' ]
class MyDialogParam(rtmgr.MyDialogParam):
    # Modal dialog hosting a ParamPanel; OK keeps the edits, Cancel/Close
    # restores the parameter dict from the snapshot taken at open time.
    def __init__(self, *args, **kwds):
        pdic = kwds.pop('pdic')
        self.pdic_bak = pdic.copy()  # snapshot for Cancel/Close restore
        gdic = kwds.pop('gdic')
        prm = kwds.pop('prm')
        rtmgr.MyDialogParam.__init__(self, *args, **kwds)
        set_size_gdic(self, gdic)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        # 'open'-type dialogs may relabel the OK button (e.g. "Open")
        ok_lb_key = 'open_dialog_ok_label'
        if dic_list_get(gdic, 'dialog_type', 'config') == 'open' and ok_lb_key in gdic:
            self.button_1.SetLabel( gdic.get(ok_lb_key) )
        parent = self.panel_v
        frame = self.GetParent()
        self.panel = ParamPanel(parent, frame=frame, pdic=pdic, gdic=gdic, prm=prm)
        szr = sizer_wrap((self.panel,), wx.VERTICAL, 1, wx.EXPAND, 0, parent)
        self.SetTitle(prm.get('name', ''))
        # widen the dialog if the panel's minimum width (plus margin) needs it
        (w,h) = self.GetSize()
        (w2,_) = szr.GetMinSize()
        w2 += 20
        if w2 > w:
            self.SetSize((w2,h))
    def OnOk(self, event):
        self.panel.update()
        self.panel.detach_func()
        self.EndModal(0)
    def OnCancel(self, event):
        self.panel.pdic.update(self.pdic_bak) # restore
        self.panel.detach_func()
        self.panel.update()
        self.EndModal(-1)
    def OnClose(self, event):
        # window close button behaves like Cancel
        self.OnCancel(event)
class MyDialogDpm(rtmgr.MyDialogDpm):
    # ParamPanel dialog for DPM detection with extra car/pedestrian
    # hyperlinks that forward to the corresponding buttons on the frame.
    def __init__(self, *args, **kwds):
        pdic = kwds.pop('pdic')
        self.pdic_bak = pdic.copy()  # snapshot for Cancel/Close restore
        gdic = kwds.pop('gdic')
        prm = kwds.pop('prm')
        rtmgr.MyDialogDpm.__init__(self, *args, **kwds)
        set_size_gdic(self, gdic)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        parent = self.panel_v
        frame = self.GetParent()
        self.frame = frame
        self.panel = ParamPanel(parent, frame=frame, pdic=pdic, gdic=gdic, prm=prm)
        szr = sizer_wrap((self.panel,), wx.VERTICAL, 1, wx.EXPAND, 0, parent)
        self.SetTitle(prm.get('name', ''))
        # widen the dialog if the panel's minimum width (plus margin) needs it
        (w,h) = self.GetSize()
        (w2,_) = szr.GetMinSize()
        w2 += 20
        if w2 > w:
            self.SetSize((w2,h))
        fix_link_color(self.hyperlink_car)
        fix_link_color(self.hyperlink_pedestrian)
    def OnOk(self, event):
        self.panel.update()
        self.panel.detach_func()
        self.EndModal(0)
    def OnLink(self, event):
        # forward hyperlink clicks to the matching frame button
        obj = event.GetEventObject()
        dic = { self.hyperlink_car : self.frame.button_car_dpm,
            self.hyperlink_pedestrian : self.frame.button_pedestrian_dpm }
        obj = dic.get(obj)
        if obj:
            self.frame.OnHyperlinked_obj(obj)
    def OnCancel(self, event):
        self.panel.pdic.update(self.pdic_bak) # restore
        self.panel.detach_func()
        self.panel.update()
        self.EndModal(-1)
    def OnClose(self, event):
        # window close button behaves like Cancel
        self.OnCancel(event)
class MyDialogCarPedestrian(rtmgr.MyDialogCarPedestrian):
    # Small chooser dialog: clicking the car/pedestrian link triggers the
    # frame button named in gdic['car_pedestrian_obj_key'], then closes.
    def __init__(self, *args, **kwds):
        pdic = kwds.pop('pdic')
        self.gdic = kwds.pop('gdic')
        prm = kwds.pop('prm')
        rtmgr.MyDialogCarPedestrian.__init__(self, *args, **kwds)
        set_size_gdic(self)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        frame = self.GetParent()
        self.frame = frame
        self.SetTitle(prm.get('name', ''))
        fix_link_color(self.hyperlink_car)
        fix_link_color(self.hyperlink_pedestrian)
    def OnLink(self, event):
        obj = event.GetEventObject()
        car_ped = { self.hyperlink_car : 'car', self.hyperlink_pedestrian : 'pedestrian' }.get(obj, 'car')
        obj_key = self.gdic.get('car_pedestrian_obj_key', {}).get(car_ped)
        obj = getattr(self.frame, 'button_' + obj_key, None) if obj_key else None
        if obj:
            self.frame.OnHyperlinked_obj(obj)
        self.EndModal(0)
    def OnClose(self, event):
        self.EndModal(-1)
class MyDialogLaneStop(rtmgr.MyDialogLaneStop):
    # Lane-stop control dialog: traffic light override buttons write into
    # pdic, and the 'lane_stop' checkbox publishes /config/lane_stop.
    def __init__(self, *args, **kwds):
        self.pdic = kwds.pop('pdic')
        self.gdic = kwds.pop('gdic')
        self.prm = kwds.pop('prm')
        rtmgr.MyDialogLaneStop.__init__(self, *args, **kwds)
        set_size_gdic(self)
        self.frame = self.GetParent()
        # initialize the checkbox from pdic, falling back to the var default
        name = 'lane_stop'
        var = next( ( var for var in self.prm.get('vars', []) if var.get('name') == name ), {} )
        v = self.pdic.get( name, var.get('v', False) )
        set_val(self.checkbox_lane_stop, v)
    def update(self):
        update_func = self.gdic.get('update_func')
        if update_func:
            update_func(self.pdic, self.gdic, self.prm)
    def OnTrafficRedLight(self, event):
        self.pdic['traffic_light'] = 0
        self.update()
    def OnTrafficGreenLight(self, event):
        self.pdic['traffic_light'] = 1
        self.update()
    def OnTrafficLightRecognition(self, event):
        # checkbox toggled: manual_detection is the inverse of 'lane_stop'
        pub = rospy.Publisher('/config/lane_stop', ConfigLaneStop, latch=True, queue_size=10)
        msg = ConfigLaneStop()
        v = event.GetEventObject().GetValue()
        self.pdic['lane_stop'] = v
        msg.manual_detection = not v
        pub.publish(msg)
    def OnOk(self, event):
        self.EndModal(0)
    def OnCancel(self, event):
        self.EndModal(-1)
class MyDialogNdtMapping(rtmgr.MyDialogNdtMapping):
    # NDT mapping dialog: hosts a ParamPanel and publishes PCD output
    # requests on /config/ndt_mapping_output.
    def __init__(self, *args, **kwds):
        self.pdic = kwds.pop('pdic')
        self.pdic_bak = self.pdic.copy()
        self.gdic = kwds.pop('gdic')
        self.prm = kwds.pop('prm')
        rtmgr.MyDialogNdtMapping.__init__(self, *args, **kwds)
        set_size_gdic(self)
        parent = self.panel_v
        frame = self.GetParent()
        self.panel = ParamPanel(parent, frame=frame, pdic=self.pdic, gdic=self.gdic, prm=self.prm)
        sizer_wrap((self.panel,), wx.VERTICAL, 1, wx.EXPAND, 0, parent)
        self.update_filename()
        self.klass_msg = ConfigNdtMappingOutput
        self.pub = rospy.Publisher('/config/ndt_mapping_output', self.klass_msg, queue_size=10)
    def update_filename(self):
        # default the output path to autoware-YYMMDD.pcd in the current dir
        tc = self.text_ctrl_path
        path = tc.GetValue()
        (dn, fn) = os.path.split(path)
        now = datetime.datetime.now()
        fn = 'autoware-%02d%02d%02d.pcd' % (
            now.year % 100, now.month, now.day)
        path = os.path.join(dn, fn)
        set_path(tc, path)
    def OnRef(self, event):
        tc = self.text_ctrl_path
        file_dialog(self, tc, { 'path_type' : 'save' } )
    def OnRadio(self, event):
        # filter-resolution text ctrl is only editable when its radio is on
        v = self.radio_btn_filter_resolution.GetValue()
        tc = self.text_ctrl_filter_resolution
        tc.Enable(v)
    def OnPcdOutput(self, event):
        tc = self.text_ctrl_filter_resolution
        v = tc.GetValue() if self.radio_btn_filter_resolution.GetValue() else '0.0'
        msg = self.klass_msg()
        msg.filename = self.text_ctrl_path.GetValue()
        msg.filter_res = str_to_float(v)
        self.pub.publish(msg)
    def OnOk(self, event):
        self.panel.detach_func()
        self.EndModal(0)
class InfoBarLabel(wx.BoxSizer):
    # A labeled progress bar (e.g. CPU/mem gauges): text label on top, a
    # BarLabel below (vertical) or beside (horizontal), turning red once
    # progress reaches lmt_bar_prg percent.
    def __init__(self, parent, btm_txt=None, lmt_bar_prg=90, bar_orient=wx.VERTICAL):
        wx.BoxSizer.__init__(self, orient=wx.VERTICAL)
        self.lb = wx.StaticText(parent, wx.ID_ANY, '')
        self.bar = BarLabel(parent, hv=bar_orient, show_lb=False)
        bt = wx.StaticText(parent, wx.ID_ANY, btm_txt) if btm_txt else None
        self.Add(self.lb, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
        if bar_orient == wx.VERTICAL:
            sz = self.bar.GetSize()
            sz.SetWidth(20)
            self.bar.SetMinSize(sz)
            self.Add(self.bar, 1, wx.ALIGN_CENTER_HORIZONTAL, 0)
            if bt:
                self.Add(bt, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
        else:
            # horizontal: optional bottom text sits left of the bar
            szr = wx.BoxSizer(wx.HORIZONTAL)
            if bt:
                szr.Add(bt, 0, 0, 0)
            szr.Add(self.bar, 1, 0, 0)
            self.Add(szr, 1, wx.EXPAND, 0)
        self.lmt_bar_prg = lmt_bar_prg
    def lb_set(self, txt, col):
        self.lb.SetForegroundColour(col)
        self.lb.SetLabel(txt);
        self.Layout()
    def bar_set(self, prg):
        # blue gradient normally, red at/above the configured limit
        (col1, col2) = (wx.Colour(0,0,250), wx.Colour(0,0,128))
        if prg >= self.lmt_bar_prg:
            (col1, col2) = (wx.Colour(250,0,0), wx.Colour(128,0,0))
        self.bar.set_col(col1, col2)
        self.bar.set(prg)
class Checkboxes(wx.Panel):
    # A panel of item_n checkboxes labeled lb0..lb<N-1>, laid out 8 per row.
    def __init__(self, parent, item_n, lb):
        wx.Panel.__init__(self, parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize)
        self.boxes = [ wx.CheckBox(self, wx.ID_ANY, lb + str(i)) for i in range(item_n) ]
        vsz = wx.BoxSizer(wx.VERTICAL)
        # // keeps the row count an int on Python 3 as well; identical to
        # the old / on Python 2 ints.
        for j in range((item_n + 7) // 8):
            hsz = wx.BoxSizer(wx.HORIZONTAL)
            for i in range(8):
                idx = j * 8 + i
                if idx < len(self.boxes):
                    hsz.Add(self.boxes[idx], 0, wx.LEFT, 8)
            vsz.Add(hsz)
        self.SetSizer(vsz)
        vsz.Fit(self)
    def set(self, vs):
        # Apply a list of bools (zipped positionally); a falsy vs checks
        # every box.
        vs = vs if vs else [ True for box in self.boxes ]
        for (box, v) in zip(self.boxes, vs):
            box.SetValue(v)
    def get(self):
        # Current bool state of every checkbox, in order.
        return [ box.GetValue() for box in self.boxes ]
class BarLabel(wx.Panel):
    # A gradient-filled progress bar drawn by hand, horizontal or vertical,
    # with an optional percentage label. prg is the percentage (-1 = unset).
    def __init__(self, parent, txt='', pos=wx.DefaultPosition, size=wx.DefaultSize, style=0, hv=wx.HORIZONTAL, show_lb=True):
        wx.Panel.__init__(self, parent, wx.ID_ANY, pos, size)
        self.lb = wx.StaticText(self, wx.ID_ANY, '', style=style)
        self.txt = txt
        self.hv = hv
        # gradient direction: downward for horizontal bars, rightward for vertical
        self.dir = wx.SOUTH if hv == wx.HORIZONTAL else wx.EAST
        self.show_lb = show_lb
        self.prg = -1
        self.dflt_col1 = wx.Colour(250,250,250)
        self.dflt_col2 = wx.Colour(128,128,128)
        self.col1 = self.dflt_col1
        self.col2 = self.dflt_col2
        self.Bind(wx.EVT_PAINT, self.OnPaint)
    def set(self, prg):
        self.prg = prg
        if self.show_lb:
            self.lb.SetLabel(self.txt + str(prg) + '%' if prg >= 0 else '')
        self.Refresh()
    def set_col(self, col1, col2):
        # wx.NullColour restores the default gradient colours
        self.col1 = col1 if col1 != wx.NullColour else self.dflt_col1
        self.col2 = col2 if col2 != wx.NullColour else self.dflt_col2
    def clear(self):
        self.set(-1)
    def OnPaint(self, event):
        dc = wx.PaintDC(self)
        (w,h) = self.GetSize()
        if self.IsEnabled():
            # filled portion (from left, or from bottom when vertical),
            # then the remainder in light grey
            p = (w if self.hv == wx.HORIZONTAL else h) * self.prg / 100
            rect = wx.Rect(0, 0, p, h) if self.hv == wx.HORIZONTAL else wx.Rect(0, h-p, w, p)
            dc.GradientFillLinear(rect, self.col1, self.col2, self.dir)
            rect = wx.Rect(p, 0, w-p, h) if self.hv == wx.HORIZONTAL else wx.Rect(0, 0, w, h-p)
            dc.GradientFillLinear(rect, wx.Colour(200,200,200), wx.Colour(220,220,220), self.dir)
        else:
            # disabled: flat near-white fill
            rect = wx.Rect(0, 0, w, h)
            dc.GradientFillLinear(rect, wx.Colour(250,250,250), wx.Colour(250,250,250), self.dir)
class ColorLabel(wx.Panel):
    # Draws a mixed list of drawing commands: a 2-tuple moves the pen to
    # (x, y), a 3-tuple sets the text colour (r, g, b), '\n' starts a new
    # line, and any other string is drawn at the current position.
    def __init__(self, parent, lst=[], pos=wx.DefaultPosition, size=wx.DefaultSize, style=0):
        wx.Panel.__init__(self, parent, wx.ID_ANY, pos, size)
        self.lst = lst
        self.Bind(wx.EVT_PAINT, self.OnPaint)
    def set(self, lst):
        self.lst = lst
        self.Refresh()
    def OnPaint(self, event):
        dc = wx.PaintDC(self)
        dc.Clear()
        #change_font_point_by_rate(dc, 0.75)
        (x,y) = (0,0)
        # line height taken from the font's extent for a space
        (_, h, _, _) = dc.GetFullTextExtent(' ')
        for v in self.lst:
            if type(v) is tuple and len(v) == 2:
                (x,y) = v
            elif type(v) is tuple and len(v) == 3:
                dc.SetTextForeground(v)
            elif v == '\n':
                (x,y) = (0,y+h)
            elif type(v) is str:
                dc.DrawText(v, x, y)
                # advance x by the rendered width
                (w, _, _, _) = dc.GetFullTextExtent(v)
                x += w
class StrValObj:
    # A fixed name string paired with a mutable value, exposing the
    # wx-style GetValue/SetValue accessor protocol so it can stand in
    # for a real control.
    def __init__(self, s, v):
        self.s = s
        self.v = v
    def GetValue(self):
        return self.v
    def SetValue(self, value):
        self.v = value
class MyApp(wx.App):
    def OnInit(self):
        # Create the main frame, wire up button colour handlers, show it.
        wx.InitAllImageHandlers()
        frame_1 = MyFrame(None, wx.ID_ANY, "")
        self.SetTopWindow(frame_1)
        buttons_color_hdr_setup(frame_1)
        frame_1.Show()
        return 1
class MyDialogRosbagRecord(rtmgr.MyDialogRosbagRecord):
    # Dialog to pick topics and start/stop a 'rosbag record' child process
    # via the parent frame's launch_kill().
    def __init__(self, *args, **kwds):
        self.cmd_dic = kwds.pop('cmd_dic')
        rtmgr.MyDialogRosbagRecord.__init__(self, *args, **kwds)
        self.cbs = []
        self.refresh()
        self.parent = self.GetParent()
        self.cmd_dic[ self.button_start ] = ('rosbag record', None)
        self.toggles = [ self.button_start, self.button_stop ]
    def OnRef(self, event):
        tc = self.text_ctrl
        file_dialog(self, tc, { 'path_type' : 'save' } )
    def OnStart(self, event):
        key_obj = self.button_start
        path = self.text_ctrl.GetValue()
        if path == '':
            print('path=""')
            return
        topic_opt = []
        if self.cbs[0].GetValue(): # 'All'
            topic_opt = [ '-a' ]
        else:
            for obj in self.cbs:
                if obj.GetValue():
                    topic_opt += [ obj.GetLabel() ]
        if topic_opt == []:
            print('topic=[]')
            return
        args = topic_opt + [ '-O', path ]
        split_arg = [ '--split' ] if self.checkbox_split.GetValue() else []
        size_arg = self.size_arg_get()
        if split_arg and not size_arg:
            wx.MessageBox('size is required, with split')
            return
        args += split_arg + size_arg
        (cmd, proc) = self.cmd_dic[ key_obj ]
        proc = self.parent.launch_kill(True, cmd, proc, add_args=args, obj=key_obj, kill_children=True)
        self.cmd_dic[ key_obj ] = (cmd, proc)
        self.parent.toggle_enables(self.toggles)
    def OnStop(self, event):
        key_obj = self.button_start
        (cmd, proc) = self.cmd_dic[ key_obj ]
        # SIGINT lets rosbag close the bag file cleanly
        proc = self.parent.launch_kill(False, cmd, proc, sigint=True, obj=key_obj, kill_children=True)
        self.cmd_dic[ key_obj ] = (cmd, proc)
        self.parent.toggle_enables(self.toggles)
        self.Hide()
    def OnRefresh(self, event):
        self.refresh()
    def refresh(self):
        # Rebuild the topic checkbox list from 'rostopic list'.
        # Fixed: the catch-all entry is labeled 'All' to match the border
        # check below and the "# 'All'" handling in OnStart (it was
        # lowercase 'all', so the 'All' comparison never matched and the
        # entry was indented like an ordinary topic).
        lst = [ 'All' ] + subprocess.check_output([ 'rostopic', 'list' ]).strip().split('\n')
        panel = self.panel_1
        szr = self.sizer_topic
        for obj in self.cbs:
            szr.Remove(obj)
            obj.Destroy()
        self.cbs = []
        for topic in lst:
            obj = wx.CheckBox(panel, wx.ID_ANY, topic)
            bdr = 4 if topic == 'All' else 4 * 4  # indent real topics under 'All'
            szr.Add(obj, 0, wx.LEFT, bdr)
            self.cbs.append(obj)
        szr.Layout()
        panel.SetVirtualSize(szr.GetMinSize())
        self.update_filename()
    def update_filename(self):
        # Default the bag name to autoware-YYYYMMDDhhmmss.rosbag.
        tc = self.text_ctrl
        path = tc.GetValue()
        (dn, fn) = os.path.split(path)
        now = datetime.datetime.now()
        fn = 'autoware-%04d%02d%02d%02d%02d%02d.rosbag' % (
            now.year, now.month, now.day, now.hour, now.minute, now.second)
        path = os.path.join(dn, fn)
        set_path(tc, path)
    def size_arg_get(self):
        # '--size=<bytes>' from the MB text ctrl; clears the ctrl and
        # returns [] on a non-positive or unparsable value.
        tc = self.text_ctrl_size
        s = tc.GetValue()
        mb = 0
        try:
            mb = str_to_float(s)
        except ValueError:
            mb = 0
        if mb <= 0:
            tc.SetValue('')
        return [ '--size=' + str(int(mb * 1024 * 1024)) ] if mb > 0 else []
def set_size_gdic(dlg, gdic=None):
    # Resize dlg to 'dialog_width'/'dialog_height' from gdic, falling back
    # to dlg.gdic and then to the current size. The default is None rather
    # than a shared mutable {} (the old default was never mutated, but the
    # pattern is a known pitfall); `if not gdic` treats None and {} alike,
    # so callers see identical behavior.
    (w, h) = dlg.GetSize()
    if not gdic:
        gdic = getattr(dlg, 'gdic', {})
    nw = gdic.get('dialog_width', w)
    nh = gdic.get('dialog_height', h)
    if (w, h) != (nw, nh):
        dlg.SetSize((nw, nh))
def file_dialog(parent, tc, path_inf_dic={}):
    # Open a file/dir chooser seeded from the text ctrl and write the
    # chosen path(s) back into it. path_inf_dic['path_type'] selects the
    # dialog: 'dir', 'save', 'multi', or default open. Returns the wx
    # modal result (wx.ID_OK on confirm).
    path = tc.GetValue()
    # the ctrl may hold a comma-separated list; seed from the first entry
    path = get_top(path.split(','), path)
    (dn, fn) = os.path.split(path)
    path_type = path_inf_dic.get('path_type')
    if path_type == 'dir':
        # 'filenames' may be a .yaml file listing names to append to the
        # chosen directory; it is loaded once and cached back into the dict
        fns = path_inf_dic.get('filenames')
        if type(fns) is str and fns[-5:] == '.yaml':
            fns = load_yaml(fns)
            if type(fns) is not list:
                fns = None
            path_inf_dic['filenames'] = fns
        dlg = wx.DirDialog(parent, defaultPath=path)
    else:
        st_dic = { 'save' : wx.FD_SAVE, 'multi' : wx.FD_MULTIPLE }
        dlg = wx.FileDialog(parent, defaultDir=dn, defaultFile=fn,
                style=st_dic.get(path_type, wx.FD_DEFAULT_STYLE))
    ret = show_modal(dlg)
    if ret == wx.ID_OK:
        path = ','.join(dlg.GetPaths()) if path_type == 'multi' else dlg.GetPath()
        if path_type == 'dir' and fns:
            path = ','.join([ path + '/' + fn for fn in fns ])
        set_path(tc, path)
    dlg.Destroy()
    return ret
def post_evt_toggle_obj(win, obj, v):
    # Programmatically set a toggle-like widget to v and post the matching
    # event to win, so bound handlers run as if the user clicked it.
    evt_id = {
        CT.GenericTreeItem : CT.wxEVT_TREE_ITEM_CHECKED,
        wx.CheckBox : wx.EVT_CHECKBOX.typeId,
        wx.ToggleButton : wx.EVT_TOGGLEBUTTON.typeId,
        wx.Button : wx.EVT_BUTTON.typeId,
    }.get( type(obj) )
    if evt_id == CT.wxEVT_TREE_ITEM_CHECKED:
        # tree items carry the item, not an event object
        evt = CT.TreeEvent( evt_id, win.GetId() )
        evt.SetItem(obj)
    else:
        evt = wx.PyCommandEvent( evt_id, obj.GetId() )
        evt.SetEventObject(obj)
    set_val(obj, v)
    wx.PostEvent(win, evt)
def button_color_change(btn, v=None):
    # Recolour a button for its pressed/enabled state: pressed buttons get
    # a light-on-dark scheme, everything else reverts to defaults.
    if v is None and type(btn) is wx.ToggleButton:
        v = btn.GetValue()
    key = ( v , btn.IsEnabled() )
    dic = { (True,True):('#F9F9F8','#8B8BB9'), (True,False):('#F9F9F8','#E0E0F0') }
    (fcol, bcol) = dic.get(key, (wx.NullColour, wx.NullColour))
    btn.SetForegroundColour(fcol)
    btn.SetBackgroundColour(bcol)
def OnButtonColorHdr(event):
    # Shared handler that recolours buttons: mouse-down shows the pressed
    # colour, mouse-up restores, toggle events use the toggle state (None).
    btn = event.GetEventObject()
    dic = { wx.EVT_TOGGLEBUTTON.typeId : None,
        wx.EVT_LEFT_DOWN.typeId : True,
        wx.EVT_LEFT_UP.typeId : False }
    v = dic.get(event.GetEventType(), '?')
    if v != '?':
        button_color_change(btn, v)
    event.Skip()
# Cached background colour a wx.Button reports after
# SetBackgroundColour(wx.NullColour); lazily filled by is_btn_null_bgcol().
btn_null_bgcol = None
def is_btn_null_bgcol(btn):
    # True when btn still has the default (Null) background colour, i.e.
    # has not been custom-coloured. Probes once to learn what the default
    # colour looks like, restoring btn's colour if it differed.
    global btn_null_bgcol
    bak = btn.GetBackgroundColour()
    if btn_null_bgcol is None:
        btn.SetBackgroundColour(wx.NullColour)
        btn_null_bgcol = btn.GetBackgroundColour()
        if bak != btn_null_bgcol:
            btn.SetBackgroundColour(bak)
    return bak == btn_null_bgcol
def button_color_hdr_setup(btn):
    # Bind the colour handler: toggle buttons on their toggle event, plain
    # buttons (only if still default-coloured) on mouse down/up.
    hdr = OnButtonColorHdr
    if type(btn) is wx.ToggleButton:
        btn.Bind(wx.EVT_TOGGLEBUTTON, hdr)
    elif type(btn) is wx.Button and is_btn_null_bgcol(btn):
        btn.Bind(wx.EVT_LEFT_DOWN, hdr)
        btn.Bind(wx.EVT_LEFT_UP, hdr)
def buttons_color_hdr_setup(frm_obj):
    # Install press/toggle colour handlers on every 'button_*' attribute
    # of a frame or dialog.
    for nm in dir(frm_obj):
        if nm.startswith('button_'):
            button_color_hdr_setup(getattr(frm_obj, nm))
def show_modal(dlg):
    # ShowModal with the standard button colour handlers installed first.
    buttons_color_hdr_setup(dlg)
    return dlg.ShowModal()
def load_yaml(filename, def_ret=None):
    # Load <rtmgr src dir>/<filename> as YAML, returning def_ret when the
    # file does not exist. The with-statement closes the file even when
    # yaml parsing raises (the old explicit close() leaked the handle on
    # error).
    path = rtmgr_src_dir() + filename
    if not os.path.isfile(path):
        return def_ret
    print('loading ' + filename)
    # NOTE(review): yaml.load without an explicit Loader can execute
    # arbitrary constructors on untrusted input; these are local config
    # files, so the call is kept as-is for behavior compatibility.
    with open(path, 'r') as f:
        return yaml.load(f)
def terminate_children(proc, sigint=False):
    # Recursively terminate proc's child processes, deepest first.
    for child in get_proc_children(proc):
        terminate_children(child, sigint)
        terminate(child, sigint)
def terminate(proc, sigint=False):
    # Stop a child process; SIGINT lets well-behaved tools (e.g. rosbag)
    # shut down and flush cleanly, otherwise use terminate()/SIGTERM.
    if sigint:
        proc.send_signal(signal.SIGINT)
    else:
        proc.terminate()
def proc_wait_thread(ev, proc, obj):
    # Thread body: wait for proc to exit, then re-enable obj on the GUI
    # thread and tear this thread down via th_end().
    proc.wait()
    wx.CallAfter(enables_set, obj, 'proc_wait', True)
    th_end((None, ev))
def th_start(target, kwargs=None):
    # Launch target as a daemon thread, injecting a threading.Event under
    # kwargs['ev'] so the target can be asked to stop (see th_end()).
    # Returns (thread, event).
    # Fixed: the default was a shared mutable dict ({}), which this
    # function mutates via kwargs['ev'] — every default-argument call
    # wrote into the same dict. A caller-supplied dict is still updated
    # in place, as before.
    if kwargs is None:
        kwargs = {}
    ev = threading.Event()
    kwargs['ev'] = ev
    th = threading.Thread(target=target, kwargs=kwargs)
    th.daemon = True
    th.start()
    return (th, ev)
# NOTE: tuple parameter unpacking in the signature is Python 2-only syntax.
def th_end((th, ev)):
    # Stop a th_start() thread: signal its event and join it. When called
    # with th=None from inside the thread itself, reschedule on a Timer so
    # the join happens from a different thread (a thread cannot join itself).
    if not th:
        th = threading.current_thread()
        threading.Timer( 1.0, th_end, ((th, ev),) ).start()
        return
    ev.set()
    th.join()
def que_clear(que):
    # Empty a Queue.Queue in place, holding its internal mutex so
    # concurrent producers/consumers see a consistent state.
    with que.mutex:
        que.queue.clear()
def append_tc_limit(tc, s, rm_chars=0):
    # Append s to a text ctrl, first trimming rm_chars from the front to
    # bound the ctrl's total size.
    if rm_chars > 0:
        tc.Remove(0, rm_chars)
    tc.AppendText(s)
def cut_esc(s):
    # Strip ANSI escape sequences of the form ESC...m (terminal colour
    # codes) from s; an unterminated sequence is left untouched.
    out = s
    while True:
        start = out.find(chr(27))
        if start < 0:
            return out
        end = out.find('m', start)
        if end < 0:
            return out
        out = out[:start] + out[end + 1:]
def change_font_point_by_rate(obj, rate=1.0):
    # Scale obj's font point size by rate (truncated to int) and apply it.
    font = obj.GetFont()
    font.SetPointSize(int(font.GetPointSize() * rate))
    obj.SetFont(font)
def fix_link_color(obj):
    # Make visited hyperlinks render in the unvisited colour so links do
    # not change appearance after being clicked.
    t = type(obj)
    if t is CT.GenericTreeItem or t is CT.CustomTreeCtrl:
        obj.SetHyperTextVisitedColour(obj.GetHyperTextNewColour())
    elif t is wx.HyperlinkCtrl:
        obj.SetVisitedColour(obj.GetNormalColour())
def get_tooltip(dic):
    # Single tooltip text ('desc') from a var definition, or None.
    return dic.get('desc', None)
def get_tooltips(dic):
    # Per-item tooltip list ('descs') from a var definition, or [].
    return dic.get('descs', [])
def set_tooltip(obj, dic):
    # Apply the var definition's single tooltip ('desc') to a widget.
    set_tooltip_str(obj, get_tooltip(dic))
def set_tooltip_str(obj, s):
    # Set tooltip text when both the text and the widget's tooltip API
    # are present (classic-wx SetToolTipString).
    if not s:
        return
    setter = getattr(obj, 'SetToolTipString', None)
    if setter:
        setter(s)
def set_tooltips(obj, dic):
    # Apply per-item tooltips ('descs') to widgets that support
    # SetItemToolTip (e.g. radio boxes), by item index.
    lst = get_tooltips(dic)
    if lst and getattr(obj, 'SetItemToolTip', None):
        for (ix, s) in enumerate(lst):
            obj.SetItemToolTip(ix, s)
def get_tooltip_obj(obj):
    # Tooltip text currently set on a widget, or None when the widget has
    # no tooltip API or no tooltip.
    getter = getattr(obj, 'GetToolTip', None)
    if not getter:
        return None
    tip = getter()
    return tip.GetTip() if tip else None
def scaled_bitmap(bm, scale):
    # Return bm resized by scale via a high-quality image rescale.
    # (ImageFromBitmap/BitmapFromImage are the classic-wx API names.)
    (w, h) = bm.GetSize()
    img = wx.ImageFromBitmap(bm)
    img = img.Scale(w * scale, h * scale, wx.IMAGE_QUALITY_HIGH)
    return wx.BitmapFromImage(img)
def sizer_wrap(add_objs, orient=wx.VERTICAL, prop=0, flag=0, border=0, parent=None):
    # Wrap widgets in a BoxSizer (same prop/flag/border for all) and
    # optionally install it on parent. Returns the sizer.
    szr = wx.BoxSizer(orient)
    for obj in add_objs:
        szr.Add(obj, prop, flag, border)
    if parent:
        parent.SetSizer(szr)
    return szr
def static_box_sizer(parent, s, orient=wx.VERTICAL):
    # A StaticBoxSizer labeled s; Lower() keeps the box behind its
    # children in z-order.
    sb = wx.StaticBox(parent, wx.ID_ANY, s)
    sb.Lower()
    return wx.StaticBoxSizer(sb, orient)
def wx_flag_get(flags):
    # Map flag-name strings to wx sizer/style flags and combine them;
    # unknown names are ignored. sum() replaces the old reduce() (a
    # Python 2 builtin, NameError under Python 3); the wx flags here are
    # disjoint bit masks, so integer addition equals bitwise OR.
    dic = { 'top' : wx.TOP, 'bottom' : wx.BOTTOM, 'left' : wx.LEFT, 'right' : wx.RIGHT,
        'all' : wx.ALL, 'expand' : wx.EXPAND, 'fixed_minsize' : wx.FIXED_MINSIZE,
        'center_v' : wx.ALIGN_CENTER_VERTICAL, 'center_h' : wx.ALIGN_CENTER_HORIZONTAL,
        'passwd' : wx.TE_PASSWORD }
    return sum( dic.get(f) for f in flags if f in dic )
def msg_path_to_obj_attr(msg, path):
    # Walk a dotted path ('a.b.c') through msg, returning the object that
    # owns the final attribute together with that attribute's name.
    # Missing intermediate attributes yield (None, leaf).
    parts = path.split('.')
    obj = msg
    for name in parts[:-1]:
        obj = getattr(obj, name, None)
    return (obj, parts[-1])
def str_to_rosval(s, type_str, def_ret=None):
    # Convert string s to the Python value for a ROS primitive type name;
    # def_ret for unknown types. NOTE: `long` is Python 2-only; this
    # table raises NameError under Python 3.
    cvt_dic = {
        'int8':int , 'int16':int , 'int32':int ,
        'uint8':int , 'uint16':int , 'uint32':int ,
        'int64':long , 'uint64':long,
        'float32':float, 'float64':float,
    }
    t = cvt_dic.get(type_str)
    # accept a comma as the decimal separator for float types
    s = s.replace(',','.') if t is float and type(s) is str else s
    return t(s) if t else def_ret
def str_to_float(s):
    # float() that also accepts a comma as the decimal separator.
    return float(s.replace(',', '.'))
def set_path(tc, v):
    # Put a path into a text ctrl with the caret at the end, so the
    # filename tail stays visible.
    tc.SetValue(v)
    tc.SetInsertionPointEnd()
def set_val(obj, v):
    # Generic value setter: SetValue when available, else Check (menu
    # items); refreshes the widget and recolours toggle buttons.
    func = getattr(obj, 'SetValue', getattr(obj, 'Check', None))
    if func:
        func(v)
        obj_refresh(obj)
    if type(obj) is wx.ToggleButton:
        button_color_change(obj)
def enables_set(obj, k, en):
    # Per-key enable votes: obj is enabled only when every recorded key is
    # True. The dict lives on the widget under 'enabLes' (odd spelling —
    # presumably chosen to avoid clashing with a wx attribute; confirm
    # before renaming).
    if hasattr(obj, 'enables_proxy'):
        # some widgets delegate their enable state to another (obj, key)
        (obj, k) = obj.enables_proxy
    d = attr_getset(obj, 'enabLes', {})
    d[k] = en
    d['last_key'] = k
    if hasattr(obj, 'Enable'):
        obj.Enable( all( d.values() ) )
        obj_refresh(obj)
    if isinstance(obj, wx.HyperlinkCtrl):
        # grey out hyperlinks manually; Enable() alone does not recolour them
        if not hasattr(obj, 'coLor'):
            obj.coLor = { True:obj.GetNormalColour(), False:'#808080' }
        c = obj.coLor.get(obj.IsEnabled())
        obj.SetNormalColour(c)
        obj.SetVisitedColour(c)
def enables_get(obj, k, def_ret=None):
    # Read one enable vote recorded by enables_set(); def_ret when unset.
    return attr_getset(obj, 'enabLes', {}).get(k, def_ret)
def enables_get_last(obj):
    # The most recently set enable key and its value, as (key, value).
    k = enables_get(obj, 'last_key')
    return (k, enables_get(obj, k))
def obj_refresh(obj):
    # Repaint a widget; tree items repaint via their tree control, found
    # by walking to the root item whose data holds the tree.
    if type(obj) is CT.GenericTreeItem:
        while obj.GetParent():
            obj = obj.GetParent()
        tree = obj.GetData()
        tree.Refresh()
# dic_list util (push, pop, get)
def dic_list_push(dic, key, v):
    # Append v to the list stored at key, creating the list on first use.
    dic_getset(dic, key, []).append(v)
def dic_list_pop(dic, key):
    # Pop (and discard) the last element of the list at key; a missing
    # key pops from a throwaway [None], i.e. is a no-op.
    dic.get(key, [None]).pop()
def dic_list_get(dic, key, def_ret=None):
    # Last element of the list stored at key; def_ret when key is absent.
    stack = dic.get(key, [def_ret])
    return stack[-1]
def bak_stk_push(dic, key):
    # Save dic[key]'s current value on its backup stack (stored under
    # key + '_bak_str'; the '_str' suffix must match bak_stk_pop's).
    if key in dic:
        k = key + '_bak_str'
        dic_getset(dic, k, []).append( dic.get(key) )
def bak_stk_pop(dic, key):
    # Restore dic[key] from its backup stack; with no backup left the key
    # is deleted entirely (raises KeyError if it never existed).
    k = key + '_bak_str'
    stk = dic.get(k, [])
    if len(stk) > 0:
        dic[key] = stk.pop()
    else:
        del dic[key]
def bak_stk_set(dic, key, v):
    # Save the current value of dic[key] on its backup stack, then set v.
    # Fixed: this called bak_str_push(), which does not exist anywhere in
    # the file (NameError at runtime); the helper is named bak_stk_push().
    bak_stk_push(dic, key)
    dic[key] = v
def attr_getset(obj, name, def_ret):
    # getattr with lazy initialisation: when the attribute is missing,
    # store def_ret first, then return whatever is now there.
    try:
        return getattr(obj, name)
    except AttributeError:
        setattr(obj, name, def_ret)
        return def_ret
def dic_getset(dic, key, def_ret):
    # dict lookup with lazy initialisation — exactly dict.setdefault:
    # store def_ret when key is missing, return the stored entry.
    return dic.setdefault(key, def_ret)
def lst_append_once(lst, v):
    # Append v only when absent; returns whether it was already present.
    already = v in lst
    if not already:
        lst.append(v)
    return already
def lst_remove_once(lst, v):
    # Remove the first occurrence of v when present; returns whether a
    # removal happened.
    present = v in lst
    if present:
        lst.remove(v)
    return present
def get_top(lst, def_ret=None):
    # First element of lst, or def_ret when lst is empty.
    return lst[0] if lst else def_ret
def adjust_num_str(s):
    # Trim trailing zeros (and a dangling '.') from a decimal string:
    # '1.500' -> '1.5', '2.0' -> '2'. Strings without a '.' pass through
    # untouched, so integer strings like '10' keep their zeros.
    if '.' not in s:
        return s
    s = s.rstrip('0')
    if s.endswith('.'):
        s = s[:-1]
    return s
def rtmgr_src_dir():
    # Absolute directory containing this module, with a trailing slash.
    return os.path.dirname(os.path.abspath(__file__)) + "/"
def path_expand_cmd(path):
    # Expand a leading '$(cmd args)' path component by running the command
    # and substituting its stripped stdout, e.g. '$(rospack find pkg)/cfg'.
    # Paths without that prefix pass through unchanged.
    parts = path.split('/')
    head = parts[0]
    if head[:2] == '$(' and head.endswith(')'):
        cmd = head[2:-1].split(' ')
        parts[0] = subprocess.check_output(cmd).strip()
        path = '/'.join(parts)
    return path
def eval_if_str(self, v):
    # Evaluate v as a Python expression when it is a string; pass any
    # other type through unchanged. NOTE(review): eval() on config-sourced
    # strings is a code-injection risk if the yaml files are untrusted.
    if type(v) is str:
        return eval(v)
    return v
def dic_eval_if_str(self, dic, key, def_ret=None):
    # dic[key] with string values eval'd as Python expressions (see
    # eval_if_str); def_ret when the key is absent.
    return eval_if_str( self, dic.get(key, def_ret) )
def prn_dict(dic):
    # Debug helper: print each key/value pair. (Under Python 2 the
    # parenthesized print emits a tuple; under Python 3, three arguments.)
    for (k,v) in dic.items():
        print (k, ':', v)
def send_to_proc_manager(order):
    # Send an order dict (YAML-serialized) to the privileged process
    # manager over its unix socket. Returns True when the manager replies
    # 0, False on a nonzero reply, -1 when the connection fails.
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        sock.connect(PROC_MANAGER_SOCK)
    except socket.error:
        print('Failed connect to {}'.format(PROC_MANAGER_SOCK))
        return -1
    sock.send(yaml.dump(order))
    ret = sock.recv(1024)
    sock.close()
    return int(ret) == 0
def set_process_nice(proc, value):
    # Ask the process manager to renice proc's pid.
    order = {
        "name": "nice",
        "pid": proc.pid,
        "nice": value
    }
    return send_to_proc_manager(order)
def set_process_cpu_affinity(proc, cpus):
    # Ask the process manager to pin proc's pid to the given CPU list.
    order = {
        "name": "cpu_affinity",
        "pid": proc.pid,
        "cpus": cpus,
    }
    return send_to_proc_manager(order)
def shutdown_proc_manager():
    # Ask the process manager daemon to exit.
    order = {
        "name": "shutdown",
    }
    return send_to_proc_manager(order)
def set_scheduling_policy(proc, policy, priority):
    # Ask the process manager to set proc's scheduler policy/priority
    # (requires the manager's elevated privileges).
    order = {
        "name": "scheduling_policy",
        "pid": proc.pid,
        "policy": policy,
        "priority": priority,
    }
    return send_to_proc_manager(order)
# psutil 3.x to 1.x backward compatibility
def get_cpu_count():
    # CPU count across psutil versions: NUM_CPUS (1.x) else cpu_count() (3.x).
    try:
        return psutil.NUM_CPUS
    except AttributeError:
        return psutil.cpu_count()
def get_proc_children(proc, r=False):
    # Child processes across psutil versions: get_children (1.x) else
    # children (3.x); r = recursive.
    try:
        return proc.get_children(recursive=r)
    except AttributeError:
        return proc.children(recursive=r)
def get_proc_nice(proc):
    # Nice value across psutil versions: get_nice (1.x) else nice() (3.x).
    try:
        return proc.get_nice()
    except AttributeError:
        return proc.nice()
def get_proc_cpu_affinity(proc):
    # CPU affinity across psutil versions: get_cpu_affinity (1.x) else
    # cpu_affinity() (3.x).
    try:
        return proc.get_cpu_affinity()
    except AttributeError:
        return proc.cpu_affinity()
if __name__ == "__main__":
    # install _() for wxGlade-generated label translation, then run the GUI
    gettext.install("app")
    app = MyApp(0)
    app.MainLoop()
# EOF
|
test.py | import time
from threading import Thread
def run1():
    # Worker A: simulate two seconds of work, then report.
    time.sleep(2)
    print('run1')
def run2():
    # Worker B: simulate two seconds of work, then report.
    time.sleep(2)
    print('run2')
if __name__ == '__main__':
    # Run both workers concurrently and wait for both; total runtime is
    # ~2s (not 4s) because the sleeps overlap.
    t1 = Thread(target=run1)
    t2 = Thread(target=run2)
    t1.start()
    t2.start()
    t1.join()
    t2.join()
    print('main')
test.py | #!/usr/bin/env python
#
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import imp
import logging
import optparse
import os
import platform
import re
import signal
import subprocess
import sys
import tempfile
import time
import threading
import utils
import multiprocessing
import errno
from os.path import join, dirname, abspath, basename, isdir, exists
from datetime import datetime
from Queue import Queue, Empty
logger = logging.getLogger('testrunner')
skip_regex = re.compile(r'# SKIP\S*\s+(.*)', re.IGNORECASE)
VERBOSE = False
# ---------------------------------------------
# --- P r o g r e s s I n d i c a t o r s ---
# ---------------------------------------------
class ProgressIndicator(object):
  # Base test runner: distributes cases over parallel/sequential queues,
  # runs them on worker threads, and tracks pass/fail/crash counts.
  # Subclasses implement Starting/Done/AboutToRun/HasRun for their output
  # style. (Python 2 code: print statements, `except X, e`, xrange.)
  def __init__(self, cases, flaky_tests_mode):
    self.cases = cases
    self.flaky_tests_mode = flaky_tests_mode
    self.parallel_queue = Queue(len(cases))
    self.sequential_queue = Queue(len(cases))
    for case in cases:
      if case.parallel:
        self.parallel_queue.put_nowait(case)
      else:
        self.sequential_queue.put_nowait(case)
    self.succeeded = 0
    self.remaining = len(cases)
    self.total = len(cases)
    self.failed = [ ]
    self.flaky_failed = [ ]
    self.crashed = 0
    self.flaky_crashed = 0
    self.lock = threading.Lock()             # guards counters and HasRun output
    self.shutdown_event = threading.Event()  # set on ctrl-c/error to stop workers
  def PrintFailureHeader(self, test):
    if test.IsNegative():
      negative_marker = '[negative] '
    else:
      negative_marker = ''
    print "=== %(label)s %(negative)s===" % {
      'label': test.GetLabel(),
      'negative': negative_marker
    }
    print "Path: %s" % "/".join(test.path)
  def Run(self, tasks):
    # Run all queued cases on `tasks` threads; returns True when none failed.
    self.Starting()
    threads = []
    # Spawn N-1 threads and then use this thread as the last one.
    # That way -j1 avoids threading altogether which is a nice fallback
    # in case of threading problems.
    for i in xrange(tasks - 1):
      thread = threading.Thread(target=self.RunSingle, args=[True, i + 1])
      threads.append(thread)
      thread.start()
    try:
      self.RunSingle(False, 0)
      # Wait for the remaining threads
      for thread in threads:
        # Use a timeout so that signals (ctrl-c) will be processed.
        thread.join(timeout=10000000)
    except (KeyboardInterrupt, SystemExit), e:
      self.shutdown_event.set()
    except Exception, e:
      # If there's an exception we schedule an interruption for any
      # remaining threads.
      self.shutdown_event.set()
      # ...and then reraise the exception to bail out
      raise
    self.Done()
    return not self.failed
  def RunSingle(self, parallel, thread_id):
    # Worker loop: drain the parallel queue; only the main thread
    # (parallel=False) falls through to the sequential queue.
    while not self.shutdown_event.is_set():
      try:
        test = self.parallel_queue.get_nowait()
      except Empty:
        if parallel:
          return
        try:
          test = self.sequential_queue.get_nowait()
        except Empty:
          return
      case = test.case
      case.thread_id = thread_id
      self.lock.acquire()
      self.AboutToRun(case)
      self.lock.release()
      try:
        start = datetime.now()
        output = case.Run()
        # SmartOS has a bug that causes unexpected ECONNREFUSED errors.
        # See https://smartos.org/bugview/OS-2767
        # If ECONNREFUSED on SmartOS, retry the test one time.
        if (output.UnexpectedOutput() and
            sys.platform == 'sunos5' and
            'ECONNREFUSED' in output.output.stderr):
          output = case.Run()
          output.diagnostic.append('ECONNREFUSED received, test retried')
        case.duration = (datetime.now() - start)
      except IOError, e:
        return
      if self.shutdown_event.is_set():
        return
      self.lock.acquire()
      if output.UnexpectedOutput():
        # known-flaky failures are tallied separately in DONTCARE mode
        if FLAKY in output.test.outcomes and self.flaky_tests_mode == DONTCARE:
          self.flaky_failed.append(output)
          if output.HasCrashed():
            self.flaky_crashed += 1
        else:
          self.failed.append(output)
          if output.HasCrashed():
            self.crashed += 1
      else:
        self.succeeded += 1
      self.remaining -= 1
      self.HasRun(output)
      self.lock.release()
def EscapeCommand(command):
  # Render an argv list as a single display string, double-quoting any
  # part containing a space. (Quoting is minimal; other shell
  # metacharacters are not escaped — this is for log output, not a shell.)
  rendered = []
  for part in command:
    if ' ' in part:
      rendered.append('"%s"' % part)
    else:
      rendered.append(part)
  return " ".join(rendered)
class SimpleProgressIndicator(ProgressIndicator):
  # Plain-text reporter: announces the count up front and dumps each
  # failure's stdout/stderr/command plus a summary at the end.
  def Starting(self):
    print 'Running %i tests' % len(self.cases)
  def Done(self):
    print
    for failed in self.failed:
      self.PrintFailureHeader(failed.test)
      if failed.output.stderr:
        print "--- stderr ---"
        print failed.output.stderr.strip()
      if failed.output.stdout:
        print "--- stdout ---"
        print failed.output.stdout.strip()
      print "Command: %s" % EscapeCommand(failed.command)
      if failed.HasCrashed():
        print "--- CRASHED ---"
      if failed.HasTimedOut():
        print "--- TIMEOUT ---"
    if len(self.failed) == 0:
      print "==="
      print "=== All tests succeeded"
      print "==="
    else:
      print
      print "==="
      print "=== %i tests failed" % len(self.failed)
      if self.crashed > 0:
        print "=== %i tests CRASHED" % self.crashed
      print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
  # One line per test: announce before running, report outcome after.
  def AboutToRun(self, case):
    print 'Starting %s...' % case.GetLabel()
    sys.stdout.flush()
  def HasRun(self, output):
    if output.UnexpectedOutput():
      if output.HasCrashed():
        outcome = 'CRASH'
      else:
        outcome = 'FAIL'
    else:
      outcome = 'pass'
    print 'Done running %s: %s' % (output.test.GetLabel(), outcome)
class DotsProgressIndicator(SimpleProgressIndicator):
  # One character per finished test: '.' pass, 'F' fail, 'C' crash,
  # 'T' timeout; the line wraps every 50 results.
  def AboutToRun(self, case):
    pass
  def HasRun(self, output):
    total = self.succeeded + len(self.failed)
    if total > 1 and total % 50 == 1:
      sys.stdout.write('\n')
    if not output.UnexpectedOutput():
      mark = '.'
    elif output.HasCrashed():
      mark = 'C'
    elif output.HasTimedOut():
      mark = 'T'
    else:
      mark = 'F'
    sys.stdout.write(mark)
    sys.stdout.flush()
class TapProgressIndicator(SimpleProgressIndicator):
  # TAP (Test Anything Protocol) reporter: '1..N' plan, then one
  # 'ok'/'not ok' line per test with diagnostics and duration.
  def _printDiagnostic(self, messages):
    # TAP diagnostics are '# '-prefixed lines
    for l in messages.splitlines():
      logger.info('# ' + l)
  def Starting(self):
    logger.info('1..%i' % len(self.cases))
    self._done = 0
  def AboutToRun(self, case):
    pass
  def HasRun(self, output):
    self._done += 1
    # Print test name as (for example) "parallel/test-assert". Tests that are
    # scraped from the addons documentation are all named test.js, making it
    # hard to decipher what test is running when only the filename is printed.
    prefix = abspath(join(dirname(__file__), '../test')) + os.sep
    command = output.command[-1]
    if command.endswith('.js'): command = command[:-3]
    if command.startswith(prefix): command = command[len(prefix):]
    command = command.replace('\\', '/')
    if output.UnexpectedOutput():
      status_line = 'not ok %i %s' % (self._done, command)
      if FLAKY in output.test.outcomes and self.flaky_tests_mode == DONTCARE:
        status_line = status_line + ' # TODO : Fix flaky test'
      logger.info(status_line)
      self._printDiagnostic("\n".join(output.diagnostic))
      if output.HasTimedOut():
        self._printDiagnostic('TIMEOUT')
      self._printDiagnostic(output.output.stderr)
      self._printDiagnostic(output.output.stdout)
    else:
      # a '# SKIP' marker in stdout turns a pass into a TAP skip
      skip = skip_regex.search(output.output.stdout)
      if skip:
        logger.info(
          'ok %i %s # skip %s' % (self._done, command, skip.group(1)))
      else:
        status_line = 'ok %i %s' % (self._done, command)
        if FLAKY in output.test.outcomes:
          status_line = status_line + ' # TODO : Fix flaky test'
        logger.info(status_line)
      self._printDiagnostic("\n".join(output.diagnostic))
    duration = output.test.duration
    # total_seconds() was added in 2.7
    total_seconds = (duration.microseconds +
      (duration.seconds + duration.days * 24 * 3600) * 10**6) / 10**6
    logger.info('  ---')
    logger.info('  duration: %d.%ds' % (total_seconds, duration.microseconds / 1000))
    logger.info('  ...')
  def Done(self):
    pass
class CompactProgressIndicator(ProgressIndicator):
  """Single-line, in-place progress display; subclasses provide the
  format strings (and any ANSI color codes) via `templates`."""
  def __init__(self, cases, flaky_tests_mode, templates):
    super(CompactProgressIndicator, self).__init__(cases, flaky_tests_mode)
    # Expected keys: 'status_line', 'stdout', 'stderr'.
    self.templates = templates
    # Width of the last printed status line, so it can be blanked out.
    self.last_status_length = 0
    self.start_time = time.time()
  def Starting(self):
    pass
  def Done(self):
    self.PrintProgress('Done')
  def AboutToRun(self, case):
    self.PrintProgress(case.GetLabel())
  def HasRun(self, output):
    if output.UnexpectedOutput():
      # Clear the progress line before dumping the failure details.
      self.ClearLine(self.last_status_length)
      self.PrintFailureHeader(output.test)
      stdout = output.output.stdout.strip()
      if len(stdout):
        print self.templates['stdout'] % stdout
      stderr = output.output.stderr.strip()
      if len(stderr):
        print self.templates['stderr'] % stderr
      print "Command: %s" % EscapeCommand(output.command)
      if output.HasCrashed():
        print "--- CRASHED ---"
      if output.HasTimedOut():
        print "--- TIMEOUT ---"
  def Truncate(self, str, length):
    # Clip to `length` characters including the ellipsis; a falsy
    # length means "no limit".
    if length and (len(str) > (length - 3)):
      return str[:(length-3)] + "..."
    else:
      return str
  def PrintProgress(self, name):
    self.ClearLine(self.last_status_length)
    elapsed = time.time() - self.start_time
    status = self.templates['status_line'] % {
      'passed': self.succeeded,
      # NOTE(review): despite the key name this is the percentage of
      # tests already dispatched, not a remaining count — confirm.
      'remaining': (((self.total - self.remaining) * 100) // self.total),
      'failed': len(self.failed),
      'test': name,
      # Python 2 integer division keeps these integral.
      'mins': int(elapsed) / 60,
      'secs': int(elapsed) % 60
    }
    status = self.Truncate(status, 78)
    self.last_status_length = len(status)
    # Trailing comma: no newline, so the next update overwrites this line.
    print status,
    sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
  """Compact indicator with ANSI colors (blue %, green +, red -)."""
  def __init__(self, cases, flaky_tests_mode):
    templates = {
      'status_line': "[%(mins)02i:%(secs)02i|\033[34m%%%(remaining) 4d\033[0m|\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s",
      'stdout': "\033[1m%s\033[0m",
      'stderr': "\033[31m%s\033[0m",
    }
    super(ColorProgressIndicator, self).__init__(cases, flaky_tests_mode, templates)
  def ClearLine(self, last_line_length):
    # ANSI: erase to start of line, then carriage-return the cursor.
    print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
  """Compact indicator without ANSI escapes, for plain terminals."""
  def __init__(self, cases, flaky_tests_mode):
    templates = {
      'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s",
      'stdout': '%s',
      'stderr': '%s',
      # NOTE(review): 'clear' and 'max_length' appear unused here;
      # ClearLine below reimplements the blanking inline — confirm.
      'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"),
      'max_length': 78
    }
    super(MonochromeProgressIndicator, self).__init__(cases, flaky_tests_mode, templates)
  def ClearLine(self, last_line_length):
    # Overwrite the previous status line with spaces.
    print ("\r" + (" " * last_line_length) + "\r"),
# Maps each --progress option value to the indicator class that
# implements it (see BuildOptions / RunTestCases).
PROGRESS_INDICATORS = {
  'verbose': VerboseProgressIndicator,
  'dots': DotsProgressIndicator,
  'color': ColorProgressIndicator,
  'tap': TapProgressIndicator,
  'mono': MonochromeProgressIndicator
}
# -------------------------
# --- F r a m e w o r k ---
# -------------------------
class CommandOutput(object):
  """Raw result of one executed command: exit status, a timed-out flag
  and the captured stdout/stderr text."""
  def __init__(self, exit_code, timed_out, stdout, stderr):
    # Exit status and whether the command was killed for timing out.
    self.exit_code, self.timed_out = exit_code, timed_out
    # Captured output streams.
    self.stdout, self.stderr = stdout, stderr
    # Tri-state failure cache (None = not judged yet); filled in
    # lazily by TestCase.DidFail.
    self.failed = None
class TestCase(object):
  """One runnable test: knows its path and mode, builds its command
  line (via subclasses) and judges its output."""
  def __init__(self, context, path, arch, mode):
    self.path = path
    self.context = context
    # Filled in by the runner once the test has executed.
    self.duration = None
    self.arch = arch
    self.mode = mode
    self.parallel = False
    self.thread_id = 0
  def IsNegative(self):
    # True when the harness expects this test to fail.
    return self.context.expect_fail
  def CompareTime(self, other):
    # Python 2 cmp-style comparator: longest duration sorts first.
    return cmp(other.duration, self.duration)
  def DidFail(self, output):
    # Cache the verdict on the output object so it is computed once.
    if output.failed is None:
      output.failed = self.IsFailureOutput(output)
    return output.failed
  def IsFailureOutput(self, output):
    # Default policy: any non-zero exit code is a failure.
    return output.exit_code != 0
  def GetSource(self):
    return "(no source available)"
  def RunCommand(self, command, env):
    # `processor` may rewrite the command line (see --special-command).
    full_command = self.context.processor(command)
    output = Execute(full_command,
                     self.context,
                     self.context.GetTimeout(self.mode),
                     env)
    self.Cleanup()
    return TestOutput(self,
                      full_command,
                      output,
                      self.context.store_unexpected_output)
  def BeforeRun(self):
    pass
  def AfterRun(self, result):
    pass
  def Run(self):
    self.BeforeRun()
    try:
      result = self.RunCommand(self.GetCommand(), {
        "TEST_THREAD_ID": "%d" % self.thread_id
      })
    finally:
      # Tests can leave the tty in non-blocking mode. If the test runner
      # tries to print to stdout/stderr after that and the tty buffer is
      # full, it'll die with a EAGAIN OSError. Ergo, put the tty back in
      # blocking mode before proceeding.
      if sys.platform != 'win32':
        from fcntl import fcntl, F_GETFL, F_SETFL
        from os import O_NONBLOCK
        for fd in 0,1,2: fcntl(fd, F_SETFL, ~O_NONBLOCK & fcntl(fd, F_GETFL))
    self.AfterRun(result)
    return result
  def Cleanup(self):
    return
class TestOutput(object):
  """A TestCase paired with the CommandOutput its execution produced."""
  def __init__(self, test, command, output, store_unexpected_output):
    self.test = test
    self.command = command
    self.output = output
    self.store_unexpected_output = store_unexpected_output
    # Extra diagnostic lines accumulated by the runner (used by TAP).
    self.diagnostic = []
  def UnexpectedOutput(self):
    # An outcome is unexpected when the status file does not allow it.
    if self.HasCrashed():
      outcome = CRASH
    elif self.HasTimedOut():
      outcome = TIMEOUT
    elif self.HasFailed():
      outcome = FAIL
    else:
      outcome = PASS
    return not outcome in self.test.outcomes
  def HasPreciousOutput(self):
    # Temp files worth keeping for post-mortem inspection.
    return self.UnexpectedOutput() and self.store_unexpected_output
  def HasCrashed(self):
    if utils.IsWindows():
      # High bit set => NTSTATUS failure code, unless the 0x3FFFFF00
      # bits indicate an ordinary status.
      return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
    else:
      # Timed out tests will have exit_code -signal.SIGTERM.
      if self.output.timed_out:
        return False
      # Negative exit code means death by signal; SIGABRT is treated
      # as an ordinary failure rather than a crash.
      return self.output.exit_code < 0 and \
             self.output.exit_code != -signal.SIGABRT
  def HasTimedOut(self):
    return self.output.timed_out;
  def HasFailed(self):
    # For negative tests a successful execution counts as the failure.
    execution_failed = self.test.DidFail(self.output)
    if self.test.IsNegative():
      return not execution_failed
    else:
      return execution_failed
def KillProcessWithID(pid):
  """Forcefully terminate process `pid` (and, on Windows, its children)."""
  if utils.IsWindows():
    # /T kills the whole process tree; /F forces termination.
    os.popen('taskkill /T /F /PID %d' % pid)
  else:
    os.kill(pid, signal.SIGTERM)
# Poll-loop tuning for RunProcess: start with a very short sleep and
# back off geometrically up to MAX_SLEEP_TIME seconds.
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.0001
SLEEP_TIME_FACTOR = 1.25
# Sentinel meaning "SetErrorMode was not / could not be called".
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def Win32SetErrorMode(mode):
  """Set the Windows error mode via kernel32.SetErrorMode.

  Returns the previous error mode, or SEM_INVALID_VALUE when the call
  was not possible.  Guarded so it is safe on any platform: the
  original only caught ImportError, but on non-Windows hosts `ctypes`
  imports fine and `ctypes.windll` raises AttributeError instead —
  catch that too.
  """
  prev_error_mode = SEM_INVALID_VALUE
  try:
    import ctypes
    prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode);
  except (ImportError, AttributeError):
    pass
  return prev_error_mode
def RunProcess(context, timeout, args, **rest):
if context.verbose: print "#", " ".join(args)
popen_args = args
prev_error_mode = SEM_INVALID_VALUE;
if utils.IsWindows():
if context.suppress_dialogs:
# Try to change the error mode to avoid dialogs on fatal errors. Don't
# touch any existing error mode flags by merging the existing error mode.
# See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
error_mode = SEM_NOGPFAULTERRORBOX;
prev_error_mode = Win32SetErrorMode(error_mode);
Win32SetErrorMode(error_mode | prev_error_mode);
faketty = rest.pop('faketty', False)
pty_out = rest.pop('pty_out')
process = subprocess.Popen(
shell = utils.IsWindows(),
args = popen_args,
**rest
)
if faketty:
os.close(rest['stdout'])
if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE:
Win32SetErrorMode(prev_error_mode)
# Compute the end time - if the process crosses this limit we
# consider it timed out.
if timeout is None: end_time = None
else: end_time = time.time() + timeout
timed_out = False
# Repeatedly check the exit code from the process in a
# loop and keep track of whether or not it times out.
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
output = ''
if faketty:
while True:
if time.time() >= end_time:
# Kill the process and wait for it to exit.
KillProcessWithID(process.pid)
exit_code = process.wait()
timed_out = True
break
# source: http://stackoverflow.com/a/12471855/1903116
# related: http://stackoverflow.com/q/11165521/1903116
try:
data = os.read(pty_out, 9999)
except OSError as e:
if e.errno != errno.EIO:
raise
break # EIO means EOF on some systems
else:
if not data: # EOF
break
output += data
while exit_code is None:
if (not end_time is None) and (time.time() >= end_time):
# Kill the process and wait for it to exit.
KillProcessWithID(process.pid)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
return (process, exit_code, timed_out, output)
def PrintError(str):
  """Write `str` to stderr, followed by a newline."""
  sys.stderr.write('%s\n' % str)
def CheckedUnlink(name):
  """Delete `name`, retrying on Windows sharing violations.

  On any other error the failure is reported via PrintError and the
  function gives up (the file is left behind).
  """
  while True:
    try:
      os.unlink(name)
    except OSError, e:
      # On Windows unlink() fails if another process (typically a virus scanner
      # or the indexing service) has the file open. Those processes keep a
      # file open for a short time only, so yield and try again; it'll succeed.
      if sys.platform == 'win32' and e.errno == errno.EACCES:
        time.sleep(0)
        continue
      PrintError("os.unlink() " + str(e))
    break
def Execute(args, context, timeout=None, env=None, faketty=False):
  """Run `args` and capture its output as a CommandOutput.

  In faketty mode the child runs under a pseudo-terminal and its
  stdout/stderr come back interleaved in `stdout` (stderr is empty);
  otherwise both streams are captured through temp files.

  `env` holds extra environment variables layered over os.environ.
  The default was a mutable `{}` (a shared-state hazard); None now
  means "no extras" with identical behavior.  The temp-file reads
  previously used bare `file(name).read()`, leaking the file objects
  until garbage collection — they are now closed explicitly.
  """
  if faketty:
    import pty
    (out_master, fd_out) = pty.openpty()
    fd_err = fd_out
    pty_out = out_master
  else:
    (fd_out, outname) = tempfile.mkstemp()
    (fd_err, errname) = tempfile.mkstemp()
    pty_out = None
  # Extend environment
  env_copy = os.environ.copy()
  for key, value in (env or {}).iteritems():
    env_copy[key] = value
  (process, exit_code, timed_out, output) = RunProcess(
    context,
    timeout,
    args = args,
    stdout = fd_out,
    stderr = fd_err,
    env = env_copy,
    faketty = faketty,
    pty_out = pty_out
  )
  if faketty:
    os.close(out_master)
    errors = ''
  else:
    os.close(fd_out)
    os.close(fd_err)
    out_file = open(outname)
    try:
      output = out_file.read()
    finally:
      out_file.close()
    err_file = open(errname)
    try:
      errors = err_file.read()
    finally:
      err_file.close()
    CheckedUnlink(outname)
    CheckedUnlink(errname)
  return CommandOutput(exit_code, timed_out, output, errors)
def ExecuteNoCapture(args, context, timeout=None):
  """Run `args` letting its output flow to the parent's own streams.

  Bug fix: RunProcess returns a 4-tuple (process, exit_code,
  timed_out, output) and unconditionally pops 'pty_out' from its
  kwargs, so the old 3-way unpacking raised ValueError (and, before
  that, the missing pty_out raised KeyError) on every call.  The
  return value deliberately keeps the historical shape: timed_out is
  reported as False and no output is captured.
  """
  (process, exit_code, timed_out, output) = RunProcess(
    context,
    timeout,
    args = args,
    pty_out = None,
  )
  return CommandOutput(exit_code, False, "", "")
def CarCdr(path):
  """Split a path list into (head, tail); head is None when empty."""
  if path:
    return (path[0], path[1:])
  return (None, [ ])
class TestConfiguration(object):
  """Base class for the per-suite configuration objects produced by
  each suite's testcfg.py."""
  def __init__(self, context, root):
    self.context = context
    self.root = root
  def Contains(self, path, file):
    # Prefix match: every Pattern in `path` must match the
    # corresponding component of `file`.
    if len(path) > len(file):
      return False
    for i in xrange(len(path)):
      if not path[i].match(file[i]):
        return False
    return True
  def GetTestStatus(self, sections, defs):
    # Overridden by suites that ship a status file.
    pass
class TestSuite(object):
  """Named collection of tests; base class for test repositories."""
  def __init__(self, name):
    self.name = name
  def GetName(self):
    return self.name
# Use this to run several variants of the tests, e.g.:
# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
# Each inner list is one set of extra flags; every test is listed once
# per entry.  The default runs a single, flagless variant.
VARIANT_FLAGS = [[]]
class TestRepository(TestSuite):
  """A suite backed by an on-disk directory containing a testcfg.py."""
  def __init__(self, path):
    normalized_path = abspath(path)
    super(TestRepository, self).__init__(basename(normalized_path))
    self.path = normalized_path
    self.is_loaded = False
    self.config = None
  def GetConfiguration(self, context):
    # Import and cache this suite's testcfg.py configuration object.
    if self.is_loaded:
      return self.config
    self.is_loaded = True
    file = None
    try:
      # Python 2 imp-based module loading; `file` shadows the builtin.
      (file, pathname, description) = imp.find_module('testcfg', [ self.path ])
      module = imp.load_module('testcfg', file, pathname, description)
      self.config = module.GetConfiguration(context, self.path)
      # Fold the runner's --node-args into the suite's own flags.
      if hasattr(self.config, 'additional_flags'):
        self.config.additional_flags += context.node_args
      else:
        self.config.additional_flags = context.node_args
    finally:
      if file:
        file.close()
    return self.config
  def GetBuildRequirements(self, path, context):
    return self.GetConfiguration(context).GetBuildRequirements()
  def AddTestsToList(self, result, current_path, path, context, arch, mode):
    # One copy of each test per variant, repeated --repeat times.
    for v in VARIANT_FLAGS:
      tests = self.GetConfiguration(context).ListTests(current_path, path,
                                                       arch, mode)
      for t in tests: t.variant_flags = v
      result += tests * context.repeat
  def GetTestStatus(self, context, sections, defs):
    self.GetConfiguration(context).GetTestStatus(sections, defs)
class LiteralTestSuite(TestSuite):
  """The root suite: a fixed list of child suites addressed by the
  first component of a test path."""
  def __init__(self, tests):
    super(LiteralTestSuite, self).__init__('root')
    self.tests = tests
  def GetBuildRequirements(self, path, context):
    # `name` is a Pattern selecting child suites (None matches all).
    (name, rest) = CarCdr(path)
    result = [ ]
    for test in self.tests:
      if not name or name.match(test.GetName()):
        result += test.GetBuildRequirements(rest, context)
    return result
  def ListTests(self, current_path, path, context, arch, mode):
    (name, rest) = CarCdr(path)
    result = [ ]
    for test in self.tests:
      test_name = test.GetName()
      if not name or name.match(test_name):
        full_path = current_path + [test_name]
        test.AddTestsToList(result, full_path, path, context, arch, mode)
    # Python 2 cmp-style sort by test label.
    result.sort(cmp=lambda a, b: cmp(a.GetName(), b.GetName()))
    return result
  def GetTestStatus(self, context, sections, defs):
    for test in self.tests:
      test.GetTestStatus(context, sections, defs)
# Filename suffix of build products for each mode.
SUFFIX = {
  'debug' : '_g',
  'release' : '' }
# Extra VM flags added for each mode.
FLAGS = {
  'debug' : ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
  'release' : []}
# Timeout multipliers, indexed by architecture then mode; slower
# buildbots get proportionally more time.
TIMEOUT_SCALEFACTOR = {
  'armv6' : { 'debug' : 12, 'release' : 3, 'seaduk': 8 }, # The ARM buildbots are slow.
  'arm' : { 'debug' : 8, 'release' : 2, 'seaduk': 8 },
  'ia32' : { 'debug' : 4, 'release' : 1, 'seaduk': 8 },
  'ppc' : { 'debug' : 4, 'release' : 1, 'seaduk': 8 },
  's390' : { 'debug' : 4, 'release' : 1, 'seaduk': 8 } }
class Context(object):
  """Run-wide settings shared by every test (paths, flags, timeouts)."""
  def __init__(self, workspace, buildspace, verbose, vm, args, expect_fail,
               timeout, processor, suppress_dialogs,
               store_unexpected_output, repeat):
    self.workspace = workspace
    self.buildspace = buildspace
    self.verbose = verbose
    self.vm_root = vm
    self.node_args = args
    self.expect_fail = expect_fail
    self.timeout = timeout
    self.processor = processor
    self.suppress_dialogs = suppress_dialogs
    self.store_unexpected_output = store_unexpected_output
    self.repeat = repeat
  def GetVm(self, arch, mode):
    # Path of the VM binary to test for the given mode.
    name = 'implementations/%s/target/nucleus' % (mode)
    # Currently GYP does not support output_dir for MSVS.
    # http://code.google.com/p/gyp/issues/detail?id=40
    # It will put the builds into Release/node.exe or Debug/node.exe
    if utils.IsWindows():
      out_dir = os.path.join(dirname(__file__), "..", "out")
      if not exists(out_dir):
        if mode == 'debug':
          name = os.path.abspath('Debug/node.exe')
        else:
          name = os.path.abspath('Release/node.exe')
      else:
        name = os.path.abspath(name + '.exe')
    return name
  def GetVmFlags(self, testcase, mode):
    # Per-test variant flags plus the mode's fixed flags.
    return testcase.variant_flags + FLAGS[mode]
  def GetTimeout(self, mode):
    # Scale the base timeout by the host architecture's factor.
    return self.timeout * TIMEOUT_SCALEFACTOR[ARCH_GUESS or 'ia32'][mode]
def RunTestCases(cases_to_run, progress, tasks, flaky_tests_mode):
  """Run `cases_to_run` on `tasks` threads, reporting through the
  progress indicator named by `progress`."""
  indicator_class = PROGRESS_INDICATORS[progress]
  return indicator_class(cases_to_run, flaky_tests_mode).Run(tasks)
def BuildRequirements(context, requirements, mode, scons_flags):
  """Build the requested targets with scons; returns True on success.

  NOTE(review): ExecuteNoCapture mis-unpacks RunProcess's return
  value as written, so this path looks broken — confirm whether the
  scons build step is still exercised.
  """
  command_line = (['scons', '-Y', context.workspace, 'mode=' + ",".join(mode)]
                  + requirements
                  + scons_flags)
  output = ExecuteNoCapture(command_line, context)
  return output.exit_code == 0
# -------------------------------------------
# --- T e s t C o n f i g u r a t i o n ---
# -------------------------------------------
# Canonical outcome / annotation names used in status-file expressions.
SKIP = 'skip'
FAIL = 'fail'
PASS = 'pass'
OKAY = 'okay'
TIMEOUT = 'timeout'
CRASH = 'crash'
SLOW = 'slow'
FLAKY = 'flaky'
# --flaky-tests mode: run flaky tests but do not let them fail the run.
DONTCARE = 'dontcare'
class Expression(object):
  """Abstract base for parsed status-file expression nodes."""
  pass
class Constant(Expression):
  """Literal boolean value inside a status-file expression."""
  def __init__(self, value):
    self.value = value
  def Evaluate(self, env, defs):
    """Return the wrapped value; `env` and `defs` are ignored."""
    return self.value
class Variable(Expression):
  """Reference to a $variable bound in the evaluation environment."""
  def __init__(self, name):
    self.name = name
  def GetOutcomes(self, env, defs):
    """Outcome set for this variable: its bound value, else nothing."""
    if self.name not in env:
      return Nothing()
    return ListSet([env[self.name]])
class Outcome(Expression):
  """A bare word: either a macro defined via `def`, or a literal outcome."""
  def __init__(self, name):
    self.name = name
  def GetOutcomes(self, env, defs):
    """Expand through `defs` when the name is a macro, else the literal."""
    definition = defs.get(self.name)
    if definition is not None:
      return definition.GetOutcomes(env, defs)
    return ListSet([self.name])
class Set(object):
  """Abstract outcome-set; see ListSet, Everything and Nothing."""
  pass
class ListSet(Set):
  """Finite outcome set backed by a plain list (order-preserving)."""
  def __init__(self, elms):
    self.elms = elms
  def __str__(self):
    return "ListSet%s" % str(self.elms)
  def Intersect(self, that):
    # Delegate to the other operand when it is Everything/Nothing.
    if not isinstance(that, ListSet):
      return that.Intersect(self)
    common = [ x for x in self.elms if x in that.elms ]
    return ListSet(common)
  def Union(self, that):
    if not isinstance(that, ListSet):
      return that.Union(self)
    extras = [ x for x in that.elms if x not in self.elms ]
    return ListSet(self.elms + extras)
  def IsEmpty(self):
    return not self.elms
class Everything(Set):
  """The universal set: absorbs unions, passes intersections through."""
  def Intersect(self, that):
    return that
  def Union(self, that):
    return self
  def IsEmpty(self):
    return False
class Nothing(Set):
  """The empty set: absorbs intersections, passes unions through."""
  def Intersect(self, that):
    return self
  def Union(self, that):
    return that
  def IsEmpty(self):
    return True
class Operation(Expression):
  """Binary expression node: '||' / ',' (or), '&&' (and), '==' and the
  conditional 'if' operator."""
  def __init__(self, left, op, right):
    self.left = left
    self.op = op
    self.right = right
  def Evaluate(self, env, defs):
    """Evaluate this node to a boolean."""
    op = self.op
    if op in ('||', ','):
      return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
    if op == 'if':
      # A bare 'if' clause never evaluates true by itself.
      return False
    if op == '==':
      left_set = self.left.GetOutcomes(env, defs)
      right_set = self.right.GetOutcomes(env, defs)
      return not left_set.Intersect(right_set).IsEmpty()
    assert op == '&&'
    return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
  def GetOutcomes(self, env, defs):
    """Return the outcome set produced by this node."""
    op = self.op
    if op in ('||', ','):
      return self.left.GetOutcomes(env, defs).Union(self.right.GetOutcomes(env, defs))
    if op == 'if':
      if self.right.Evaluate(env, defs):
        return self.left.GetOutcomes(env, defs)
      return Nothing()
    assert op == '&&'
    return self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
def IsAlpha(str):
  """True when every character is a letter, a digit or an underscore
  (vacuously true for the empty string)."""
  return all(ch.isalpha() or ch.isdigit() or ch == '_' for ch in str)
class Tokenizer(object):
  """A simple string tokenizer that chops expressions into variables,
  parens and operators"""
  def __init__(self, expr):
    self.index = 0
    self.expr = expr
    self.length = len(expr)
    self.tokens = None
  def Current(self, length = 1):
    """Return the next `length` characters, or "" if fewer remain."""
    if not self.HasMore(length): return ""
    return self.expr[self.index:self.index+length]
  def HasMore(self, length = 1):
    """True when at least `length` unread characters remain.

    Bug fix: the old test `index < total + (length - 1)` was always
    true for length=2 whenever any character remained, so Current(2)
    near end-of-input returned a 1-character string instead of "".
    (Tokenize was unaffected in practice because a 1-character tail
    never equals a 2-character operator; for length=1 old and new
    conditions are identical.)
    """
    return self.index + length <= self.length
  def Advance(self, count = 1):
    self.index = self.index + count
  def AddToken(self, token):
    self.tokens.append(token)
  def SkipSpaces(self):
    while self.HasMore() and self.Current().isspace():
      self.Advance()
  def Tokenize(self):
    """Split the expression into tokens; returns None on a bad char."""
    self.tokens = [ ]
    while self.HasMore():
      self.SkipSpaces()
      if not self.HasMore():
        return None
      if self.Current() == '(':
        self.AddToken('(')
        self.Advance()
      elif self.Current() == ')':
        self.AddToken(')')
        self.Advance()
      elif self.Current() == '$':
        self.AddToken('$')
        self.Advance()
      elif self.Current() == ',':
        self.AddToken(',')
        self.Advance()
      elif IsAlpha(self.Current()):
        # Accumulate a whole word (letters, digits, underscores).
        buf = ""
        while self.HasMore() and IsAlpha(self.Current()):
          buf += self.Current()
          self.Advance()
        self.AddToken(buf)
      elif self.Current(2) == '&&':
        self.AddToken('&&')
        self.Advance(2)
      elif self.Current(2) == '||':
        self.AddToken('||')
        self.Advance(2)
      elif self.Current(2) == '==':
        self.AddToken('==')
        self.Advance(2)
      else:
        return None
    return self.tokens
class Scanner(object):
  """A simple scanner that can serve out tokens from a given list"""
  def __init__(self, tokens):
    self.tokens = tokens
    self.length = len(tokens)
    self.index = 0
  def HasMore(self):
    """True while unconsumed tokens remain."""
    return self.index < self.length
  def Current(self):
    """Return the token under the cursor without consuming it."""
    return self.tokens[self.index]
  def Advance(self):
    """Consume one token."""
    self.index += 1
def ParseAtomicExpression(scan):
  """Parse a literal (true/false), outcome word, $variable or
  parenthesized sub-expression; returns None on a parse error.

  NOTE(review): Scanner.Current() indexes without a bounds check, so a
  truncated input (e.g. a trailing '$') raises IndexError rather than
  returning None — confirm inputs are always well-formed.
  """
  if scan.Current() == "true":
    scan.Advance()
    return Constant(True)
  elif scan.Current() == "false":
    scan.Advance()
    return Constant(False)
  elif IsAlpha(scan.Current()):
    # Outcome names are case-insensitive.
    name = scan.Current()
    scan.Advance()
    return Outcome(name.lower())
  elif scan.Current() == '$':
    scan.Advance()
    if not IsAlpha(scan.Current()):
      return None
    name = scan.Current()
    scan.Advance()
    return Variable(name.lower())
  elif scan.Current() == '(':
    scan.Advance()
    result = ParseLogicalExpression(scan)
    if (not result) or (scan.Current() != ')'):
      return None
    scan.Advance()
    return result
  else:
    return None
# Binary comparison operators handled at this precedence level.
BINARIES = ['==']
def ParseOperatorExpression(scan):
  """Parse '==' comparisons over atomic expressions; None on error."""
  left = ParseAtomicExpression(scan)
  if not left: return None
  while scan.HasMore() and (scan.Current() in BINARIES):
    op = scan.Current()
    scan.Advance()
    right = ParseOperatorExpression(scan)
    if not right:
      return None
    left = Operation(left, op, right)
  return left
def ParseConditionalExpression(scan):
  """Parse 'A if B' clauses on top of operator expressions."""
  left = ParseOperatorExpression(scan)
  if not left: return None
  while scan.HasMore() and (scan.Current() == 'if'):
    scan.Advance()
    right = ParseOperatorExpression(scan)
    if not right:
      return None
    left= Operation(left, 'if', right)
  return left
# Lowest-precedence connectives; ',' is treated like '||'.
LOGICALS = ["&&", "||", ","]
def ParseLogicalExpression(scan):
  """Parse &&/||/',' chains; entry point for whole expressions."""
  left = ParseConditionalExpression(scan)
  if not left: return None
  while scan.HasMore() and (scan.Current() in LOGICALS):
    op = scan.Current()
    scan.Advance()
    right = ParseConditionalExpression(scan)
    if not right:
      return None
    left = Operation(left, op, right)
  return left
def ParseCondition(expr):
  """Parses a logical expression into an Expression object"""
  tokens = Tokenizer(expr).Tokenize()
  if not tokens:
    print "Malformed expression: '%s'" % expr
    return None
  scan = Scanner(tokens)
  ast = ParseLogicalExpression(scan)
  if not ast:
    print "Malformed expression: '%s'" % expr
    return None
  # Leftover tokens mean the expression did not parse completely.
  if scan.HasMore():
    print "Malformed expression: '%s'" % expr
    return None
  return ast
class ClassifiedTest(object):
  """A test case paired with the outcomes its status file allows."""
  def __init__(self, case, outcomes):
    self.case = case
    self.outcomes = outcomes
    # Mirror the case's parallel flag for convenient scheduling checks.
    self.parallel = case.parallel
class Configuration(object):
  """The parsed contents of a configuration file"""
  def __init__(self, sections, defs):
    self.sections = sections
    self.defs = defs
  def ClassifyTests(self, cases, env):
    # Annotate each case with its allowed outcomes; returns
    # (classified tests, rules that matched no test, union of outcomes).
    # Only sections whose [condition] holds under `env` apply.
    sections = [s for s in self.sections if s.condition.Evaluate(env, self.defs)]
    # `reduce` is a builtin in Python 2.
    all_rules = reduce(list.__add__, [s.rules for s in sections], [])
    unused_rules = set(all_rules)
    result = [ ]
    all_outcomes = set([])
    for case in cases:
      matches = [ r for r in all_rules if r.Contains(case.path) ]
      outcomes = set([])
      for rule in matches:
        outcomes = outcomes.union(rule.GetOutcomes(env, self.defs))
        unused_rules.discard(rule)
      if not outcomes:
        # No rule matched: the test is simply expected to pass.
        outcomes = [PASS]
      case.outcomes = outcomes
      all_outcomes = all_outcomes.union(outcomes)
      result.append(ClassifiedTest(case, outcomes))
    return (result, list(unused_rules), all_outcomes)
class Section(object):
  """A section of the configuration file. Sections are enabled or
  disabled prior to running the tests, based on their conditions"""
  def __init__(self, condition):
    self.condition = condition
    self.rules = [ ]
  def AddRule(self, rule):
    """Append `rule` to this section's rule list."""
    self.rules.append(rule)
class Rule(object):
  """A single rule that specifies the expected outcome for a single
  test."""
  def __init__(self, raw_path, path, value):
    self.raw_path = raw_path
    self.path = path
    self.value = value
  def GetOutcomes(self, env, defs):
    # Evaluate the rule's expression to a concrete outcome list.
    # (Local name `set` shadows the builtin of the same name.)
    set = self.value.GetOutcomes(env, defs)
    assert isinstance(set, ListSet)
    return set.elms
  def Contains(self, path):
    # Prefix match: every pattern component must match in order.
    if len(self.path) > len(path):
      return False
    for i in xrange(len(self.path)):
      if not self.path[i].match(path[i]):
        return False
    return True
# Status-file line formats: "[condition]", "path : expression",
# "def name=expression" and "prefix path".
HEADER_PATTERN = re.compile(r'\[([^]]+)\]')
RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')
DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')
PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w\_\.\-\/]+)$')
def ReadConfigurationInto(path, sections, defs):
  """Parse the status file at `path` into `sections` and `defs`;
  returns True on success, False (after printing the offending line)
  on a parse error."""
  # Rules before any [header] land in an always-enabled section.
  current_section = Section(Constant(True))
  sections.append(current_section)
  prefix = []
  for line in utils.ReadLinesFrom(path):
    header_match = HEADER_PATTERN.match(line)
    if header_match:
      # "[condition]": start a new section gated on the condition.
      condition_str = header_match.group(1).strip()
      condition = ParseCondition(condition_str)
      new_section = Section(condition)
      sections.append(new_section)
      current_section = new_section
      continue
    rule_match = RULE_PATTERN.match(line)
    if rule_match:
      # "path : expression": a rule added to the current section.
      path = prefix + SplitPath(rule_match.group(1).strip())
      value_str = rule_match.group(2).strip()
      value = ParseCondition(value_str)
      if not value:
        return False
      current_section.AddRule(Rule(rule_match.group(1), path, value))
      continue
    def_match = DEF_PATTERN.match(line)
    if def_match:
      # "def name=expression": a named macro usable in later rules.
      name = def_match.group(1).lower()
      value = ParseCondition(def_match.group(2).strip())
      if not value:
        return False
      defs[name] = value
      continue
    prefix_match = PREFIX_PATTERN.match(line)
    if prefix_match:
      # "prefix path": prepended to subsequent rule paths.
      prefix = SplitPath(prefix_match.group(1).strip())
      continue
    print "Malformed line: '%s'." % line
    return False
  return True
# ---------------
# --- M a i n ---
# ---------------
# Best-effort guess of the host architecture (see tools/utils.py).
ARCH_GUESS = utils.GuessArchitecture()
def BuildOptions():
  """Construct the optparse parser for the runner's command line."""
  result = optparse.OptionParser()
  result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)",
      default='release')
  result.add_option("-v", "--verbose", help="Verbose output",
      default=False, action="store_true")
  result.add_option('--logfile', dest='logfile',
      help='write test output to file. NOTE: this only applies the tap progress indicator')
  result.add_option("-S", dest="scons_flags", help="Flag to pass through to scons",
      default=[], action="append")
  result.add_option("-p", "--progress",
      help="The style of progress indicator (verbose, dots, color, mono, tap)",
      choices=PROGRESS_INDICATORS.keys(), default="mono")
  # NOTE(review): default=True with action="store_true" means this flag
  # can never become False, so the build step is always skipped —
  # confirm whether that is intentional.
  result.add_option("--no-build", help="Don't build requirements",
      default=True, action="store_true")
  result.add_option("--build-only", help="Only build requirements, don't run the tests",
      default=False, action="store_true")
  result.add_option("--report", help="Print a summary of the tests to be run",
      default=False, action="store_true")
  result.add_option("-s", "--suite", help="A test suite",
      default=[], action="append")
  result.add_option("-t", "--timeout", help="Timeout in seconds",
      default=60, type="int")
  result.add_option("--arch", help='The architecture to run tests for',
      default='none')
  result.add_option("--snapshot", help="Run the tests with snapshot turned on",
      default=False, action="store_true")
  result.add_option("--special-command", default=None)
  result.add_option("--node-args", dest="node_args", help="Args to pass through to Node",
      default=[], action="append")
  result.add_option("--expect-fail", dest="expect_fail",
      help="Expect test cases to fail", default=False, action="store_true")
  result.add_option("--valgrind", help="Run tests through valgrind",
      default=False, action="store_true")
  result.add_option("--cat", help="Print the source of the tests",
      default=False, action="store_true")
  result.add_option("--flaky-tests",
      help="Regard tests marked as flaky (run|skip|dontcare)",
      default="run")
  result.add_option("--warn-unused", help="Report unused rules",
      default=False, action="store_true")
  result.add_option("-j", help="The number of parallel tasks to run",
      default=1, type="int")
  result.add_option("-J", help="Run tasks in parallel on all cores",
      default=False, action="store_true")
  result.add_option("--time", help="Print timing information after running",
      default=False, action="store_true")
  result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
      dest="suppress_dialogs", default=True, action="store_true")
  result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
      dest="suppress_dialogs", action="store_false")
  result.add_option("--shell", help="Path to V8 shell", default="shell")
  result.add_option("--store-unexpected-output",
      help="Store the temporary JS files from tests that fails",
      dest="store_unexpected_output", default=True, action="store_true")
  result.add_option("--no-store-unexpected-output",
      help="Deletes the temporary JS files from tests that fails",
      dest="store_unexpected_output", action="store_false")
  result.add_option("-r", "--run",
      help="Divide the tests in m groups (interleaved) and run tests from group n (--run=n,m with n < m)",
      default="")
  result.add_option('--temp-dir',
      help='Optional path to change directory used for tests', default=False)
  result.add_option('--repeat',
      help='Number of times to repeat given tests',
      default=1, type="int")
  return result
def ProcessOptions(options):
  """Validate and normalize parsed options; returns False on bad input."""
  global VERBOSE
  VERBOSE = options.verbose
  # Comma-separated option strings become Python lists.
  options.arch = options.arch.split(',')
  options.mode = options.mode.split(',')
  options.run = options.run.split(',')
  if options.run == [""]:
    options.run = None
  elif len(options.run) != 2:
    print "The run argument must be two comma-separated integers."
    return False
  else:
    try:
      # Python 2 map() returns a list.
      options.run = map(int, options.run)
    except ValueError:
      print "Could not parse the integers from the run argument."
      return False
    if options.run[0] < 0 or options.run[1] < 0:
      print "The run argument cannot have negative integers."
      return False
    if options.run[0] >= options.run[1]:
      print "The test group to run (n) must be smaller than number of groups (m)."
      return False
  if options.J:
    # inherit JOBS from environment if provided. some virtualised systems
    # tends to exaggerate the number of available cpus/cores.
    cores = os.environ.get('JOBS')
    options.j = int(cores) if cores is not None else multiprocessing.cpu_count()
  if options.flaky_tests not in ["run", "skip", "dontcare"]:
    print "Unknown flaky-tests mode %s" % options.flaky_tests
    return False
  return True
REPORT_TEMPLATE = """\
Total: %(total)i tests
* %(skipped)4d tests will be skipped
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
* %(fail)4d tests are expected to fail that we should fix\
"""
def PrintReport(cases):
  """Print how many tests will be skipped / are expected to pass or fail."""
  def IsFailOk(o):
    # Exactly {FAIL, OKAY}: expected to fail, and that is acceptable.
    return (len(o) == 2) and (FAIL in o) and (OKAY in o)
  unskipped = [c for c in cases if not SKIP in c.outcomes]
  print REPORT_TEMPLATE % {
    'total': len(cases),
    'skipped': len(cases) - len(unskipped),
    'pass': len([t for t in unskipped if list(t.outcomes) == [PASS]]),
    'fail_ok': len([t for t in unskipped if IsFailOk(t.outcomes)]),
    'fail': len([t for t in unskipped if list(t.outcomes) == [FAIL]])
  }
class Pattern(object):
  """Glob-style pattern for one path component; '*' matches anything."""
  def __init__(self, pattern):
    self.pattern = pattern
    # Compiled lazily on first match().
    self.compiled = None
  def match(self, str):
    """Match `str` against the whole pattern (re match object or None)."""
    if self.compiled is None:
      regex = "^%s$" % self.pattern.replace('*', '.*')
      self.compiled = re.compile(regex)
    return self.compiled.match(str)
  def __str__(self):
    return self.pattern
def SplitPath(s):
  """Split 'a/b/*' into a list of Patterns, dropping empty components.
  (The original comprehension shadowed `s`; behavior is unchanged.)"""
  parts = [ piece.strip() for piece in s.split('/') ]
  return [ Pattern(part) for part in parts if part ]
def GetSpecialCommandProcessor(value):
  """Build a function that rewrites a test command line.

  `value` has the URL-quoted form "prefix@suffix", where '@' stands
  for the original command; without a value (or without an '@') the
  identity function is returned.
  """
  if (not value) or (value.find('@') == -1):
    def ExpandCommand(args):
      return args
    return ExpandCommand
  else:
    pos = value.find('@')
    # Python 2 urllib.unquote.
    import urllib
    prefix = urllib.unquote(value[:pos]).split()
    suffix = urllib.unquote(value[pos+1:]).split()
    def ExpandCommand(args):
      return prefix + args + suffix
    return ExpandCommand
# Suites run by default when no test names are given on the command line.
BUILT_IN_TESTS = [
  'sequential',
  'parallel',
  'pummel',
  'message',
  'internet',
  'addons',
  'gc',
  'debugger',
  'doctool',
]
def GetSuites(test_root):
  """List subdirectories of `test_root` that contain a testcfg.py."""
  def IsSuite(path):
    return isdir(path) and exists(join(path, 'testcfg.py'))
  return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ]
def FormatTime(d):
  """Format a duration `d` (in seconds) as 'MM:SS.mmm'."""
  fractional_millis = round(d * 1000) % 1000
  mins_secs = time.strftime("%M:%S.", time.gmtime(d))
  return "%s%03i" % (mins_secs, fractional_millis)
def Main():
  """Entry point: parse options, build targets, classify and run the tests.

  Returns a process exit code: 0 on success, 1 on failure/interrupt.
  """
  parser = BuildOptions()
  (options, args) = parser.parse_args()
  if not ProcessOptions(options):
    parser.print_help()
    return 1
  # Mirror all log output to stdout (and optionally a logfile).
  ch = logging.StreamHandler(sys.stdout)
  logger.addHandler(ch)
  logger.setLevel(logging.INFO)
  if options.logfile:
    fh = logging.FileHandler(options.logfile, mode='wb')
    logger.addHandler(fh)
  # Workspace is the repository root (parent of the tools/ dir this lives in).
  workspace = abspath(join(dirname(sys.argv[0]), '..'))
  suites = GetSuites(join(workspace, 'test'))
  repositories = [TestRepository(join(workspace, 'test', name)) for name in suites]
  repositories += [TestRepository(a) for a in options.suite]
  root = LiteralTestSuite(repositories)
  # With no positional args, run the built-in suite list.
  if len(args) == 0:
    paths = [SplitPath(t) for t in BUILT_IN_TESTS]
  else:
    paths = [ ]
    for arg in args:
      path = SplitPath(arg)
      paths.append(path)
  # Check for --valgrind option. If enabled, we overwrite the special
  # command flag with a command that uses the run-valgrind.py script.
  if options.valgrind:
    run_valgrind = join(workspace, "tools", "run-valgrind.py")
    options.special_command = "python -u " + run_valgrind + " @"
  shell = abspath(options.shell)
  buildspace = dirname(shell)
  processor = GetSpecialCommandProcessor(options.special_command)
  context = Context(workspace,
                    buildspace,
                    VERBOSE,
                    shell,
                    options.node_args,
                    options.expect_fail,
                    options.timeout,
                    processor,
                    options.suppress_dialogs,
                    options.store_unexpected_output,
                    options.repeat)
  # First build the required targets
  if not options.no_build:
    reqs = [ ]
    for path in paths:
      reqs += root.GetBuildRequirements(path, context)
    reqs = list(set(reqs))
    if len(reqs) > 0:
      if options.j != 1:
        options.scons_flags += ['-j', str(options.j)]
      if not BuildRequirements(context, reqs, options.mode, options.scons_flags):
        return 1
  # Just return if we are only building the targets for running the tests.
  if options.build_only:
    return 0
  # Get status for tests
  sections = [ ]
  defs = { }
  root.GetTestStatus(context, sections, defs)
  config = Configuration(sections, defs)
  # List the tests
  all_cases = [ ]
  all_unused = [ ]
  unclassified_tests = [ ]
  globally_unused_rules = None
  # Classify every (path, arch, mode) combination against the status rules.
  for path in paths:
    for arch in options.arch:
      for mode in options.mode:
        vm = context.GetVm(arch, mode)
        if not exists(vm):
          print "Can't find shell executable: '%s'" % vm
          continue
        # archEngineContext = Execute([vm, "-p", "process.arch"], context)
        # vmArch = archEngineContext.stdout.rstrip()
        # if archEngineContext.exit_code is not 0 or vmArch == "undefined":
        #   print "Can't determine the arch of: '%s'" % vm
        #   print archEngineContext.stderr.rstrip()
        #   continue
        # NOTE(review): arch detection above is disabled; arch is hard-coded.
        vmArch = 'ia32'
        env = {
          'mode': mode,
          'system': utils.GuessOS(),
          'arch': vmArch,
        }
        test_list = root.ListTests([], path, context, arch, mode)
        unclassified_tests += test_list
        (cases, unused_rules, all_outcomes) = (
          config.ClassifyTests(test_list, env))
        # A rule is "globally unused" only if unused in every configuration.
        if globally_unused_rules is None:
          globally_unused_rules = set(unused_rules)
        else:
          globally_unused_rules = (
            globally_unused_rules.intersection(unused_rules))
        all_cases += cases
        all_unused.append(unused_rules)
  # --cat: dump test sources instead of running them.
  if options.cat:
    visited = set()
    for test in unclassified_tests:
      key = tuple(test.path)
      if key in visited:
        continue
      visited.add(key)
      print "--- begin source: %s ---" % test.GetLabel()
      source = test.GetSource().strip()
      print source
      print "--- end source: %s ---" % test.GetLabel()
    return 0
  if options.warn_unused:
    for rule in globally_unused_rules:
      print "Rule for '%s' was not used." % '/'.join([str(s) for s in rule.path])
  tempdir = os.environ.get('NODE_TEST_DIR') or options.temp_dir
  if tempdir:
    try:
      os.makedirs(tempdir)
      os.environ['NODE_TEST_DIR'] = tempdir
    except OSError as exception:
      # An already-existing directory is fine; anything else is fatal.
      if exception.errno != errno.EEXIST:
        print "Could not create the temporary directory", options.temp_dir
        sys.exit(1)
  if options.report:
    PrintReport(all_cases)
  result = None
  def DoSkip(case):
    # Skip explicitly-skipped and slow tests, plus flaky ones when requested.
    if SKIP in case.outcomes or SLOW in case.outcomes:
      return True
    return FLAKY in case.outcomes and options.flaky_tests == SKIP
  cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
  if options.run is not None:
    # Must ensure the list of tests is sorted before selecting, to avoid
    # silent errors if this file is changed to list the tests in a way that
    # can be different in different machines
    cases_to_run.sort(key=lambda c: (c.case.arch, c.case.mode, c.case.file))
    # --run=N,M selects every M-th test starting at index N (sharding).
    cases_to_run = [ cases_to_run[i] for i
                     in xrange(options.run[0],
                               len(cases_to_run),
                               options.run[1]) ]
  if len(cases_to_run) == 0:
    print "No tests to run."
    return 1
  else:
    try:
      start = time.time()
      if RunTestCases(cases_to_run, options.progress, options.j, options.flaky_tests):
        result = 0
      else:
        result = 1
      duration = time.time() - start
    except KeyboardInterrupt:
      print "Interrupted"
      return 1
  if options.time:
    # Write the times to stderr to make it easy to separate from the
    # test output.
    print
    sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
    timed_tests = [ t.case for t in cases_to_run if not t.case.duration is None ]
    timed_tests.sort(lambda a, b: a.CompareTime(b))
    index = 1
    # Report the 20 slowest tests.
    for entry in timed_tests[:20]:
      t = FormatTime(entry.duration)
      sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
      index += 1
  return result
# Script entry point: propagate Main()'s exit code to the shell.
if __name__ == '__main__':
  sys.exit(Main())
|
pc-client.py | '''
Visualization for compressed image coefficients
Program will obtain coefficient data from TCP and display the decompressed image
Author: Brytni Richards
Last modified: 2021-03-31
Required:
pip install matplotlib
python -m pip install windows-curses
'''
# ============================================================================
# IMPORTS
# ============================================================================
import curses
import logging
import queue
import socket
import threading
import struct
import copy
import numpy as np
import matplotlib
#matplotlib.use('Qt4Agg') # For Mac
import matplotlib.pyplot as plt
import time
from dct_alg_util import get_decompressed_block
# TCP FPGA mirror server port and IP address.
host = '1.1.5.2'
tcp_port = 7
# For curses writing synchronization
lock = threading.Lock()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Image size requirements: the reconstructed image is img_size x img_size.
img_size = 256
img = np.zeros((img_size,img_size))
# ============================================================================
# SOCKET AND MESSAGE CLASSES
# ============================================================================
# rx_q carries image-data messages; rx_q_telem carries telemetry messages.
rx_q = queue.Queue(maxsize = 2048)
rx_q_telem = queue.Queue(maxsize = 2048)
class rx_data_msg:
    """One image-data message received over TCP, queued for decompression.

    Attributes:
        msg_type: integer message type code (0 for image data).
        msg_length: number of payload bytes actually used.
        data: deep copy of the decoded coefficient payload.
    """

    def __init__(self, msg_type, msg_length, data):
        self.msg_type = msg_type
        self.msg_length = msg_length
        # Deep-copy so the queued message is independent of the caller's buffer.
        self.data = copy.deepcopy(data)
class rx_telem_msg:
    """One telemetry message received over TCP.

    Attributes:
        msg_type: telemetry type (1 = bandwidth, 2 = compression ratio).
        msg_length: number of payload bytes actually used.
        data: telemetry value as an integer.
    """

    def __init__(self, msg_type, msg_length, data):
        self.msg_type = msg_type
        self.msg_length = msg_length
        self.data = data

    def msg_str(self):
        """Return a human-readable prefix describing this message's purpose."""
        labels = {1: 'Bandwidth Telemetry: ', 2: 'Compression Ratio: '}
        try:
            return labels[self.msg_type]
        except KeyError:
            return 'Invalid Telemetry Message: ' + str(self.msg_type)
def main_rx(rx_port):
    '''Main entrypoint for the python RX.

    Connects to the FPGA mirror server, then loops: sends a one-byte "S"
    request, reads one 130-byte framed message, and pushes the decoded
    payload onto rx_q (image data, type 0) or rx_q_telem (telemetry,
    types 1/2).

    Args:
        rx_port (int): Port to open on the socket
    '''
    global rx_q
    # socket setup
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((host, rx_port))
        logger.info("Connected to host: "+str(host)+" port: "+str(rx_port)+"\r")
        while True:
            # Required handshaking prompt
            logger.info("Sending request...")
            s.sendall(b"S")
            logger.info('Listening for message ...\r')
            # Receive data from server
            # first 2 bytes are message type
            try:
                type_bytes = s.recv(2)
            # NOTE(review): bare except also swallows KeyboardInterrupt —
            # consider `except OSError`.
            except:
                lock.acquire()
                logger.info('TCP socket closed\r')
                lock.release()
                break
            # Lock is held for the whole decode/enqueue of this message so
            # log lines don't interleave with the curses display thread.
            lock.acquire()
            if not type_bytes:
                logger.info('TCP socket closed\r')
                lock.release()
                break
            # Second byte of the big-endian pair carries the type code.
            msg_type = struct.unpack('>BB', type_bytes)
            msg_type = msg_type[1]
            logger.info("Received message type "+str(msg_type)+"\r")
            # collect the entire message
            recv_msg = b''
            if (msg_type == 0) or (msg_type == 1) or (msg_type == 2):
                # second 2 bytes are message length
                [msg_len,] = struct.unpack('>H', s.recv(2))
                logger.info("Received message length "+str(msg_len)+"\r")
                while True:
                    # Each TCP message has a total of 130 bytes, but only msg_len bytes are used
                    recv = s.recv(130 - len(recv_msg))
                    if not recv:
                        logger.info('TCP socket closed earlier than expected\r')
                        break
                    recv_msg += recv
                    if len(recv_msg) == 130:
                        # Store image data as halfwords
                        if (msg_type == 0):
                            num_halfwords = int(len(recv_msg)/2)
                            form_str = '>' + 'H'*num_halfwords
                        # Store telemetry data
                        else:
                            form_str = '>'+'B'*130
                        recv_msg = struct.unpack(form_str, recv_msg)
                        logger.info("Data message: "+str(recv_msg)+"\r")
                        break
                # Add data into queue for processing.
                # NOTE(review): put(..., timeout=5) can raise queue.Full while
                # the lock is held, which would leave it locked forever —
                # confirm the consumer always drains fast enough.
                if (msg_type == 0):
                    data_msg = rx_data_msg(msg_type, msg_len, recv_msg[0:int(msg_len/2)])
                    rx_q.put(data_msg, block=True, timeout=5)
                # Add telemetry data into queue
                if (msg_type == 1) or (msg_type == 2):
                    # Concatenate telemetry byte data into one integer
                    int_data = int.from_bytes(recv_msg[0:msg_len], "little")
                    telem_msg = rx_telem_msg(msg_type, msg_len, int_data)
                    rx_q_telem.put(telem_msg, block=True, timeout=5)
            lock.release()
def main_display(stdscr):
    """Curses loop: print each telemetry message from rx_q_telem as it arrives."""
    # For color
    curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)
    stdscr.bkgd(' ', curses.color_pair(1) | curses.A_BOLD)
    stdscr.clear()
    stdscr.addstr("Showing Telemetry Data\n", curses.color_pair(1))
    stdscr.addstr("-"*80 + "\n\n\r")
    stdscr.refresh()
    while True:
        # Wait for telemetry data (blocks until a message is queued).
        rx_msg = rx_q_telem.get()
        # Lock shared with the RX thread so output does not interleave.
        lock.acquire()
        # Show new telemetry data
        # NOTE(review): addstr raises curses.error once the window scrolls
        # past its bottom — confirm intended run length.
        display_msg = str(rx_msg.msg_str()) + str(rx_msg.data) + '\n\r'
        stdscr.addstr(display_msg, curses.color_pair(1))
        stdscr.refresh()
        lock.release()
        curses.napms(100)
def main_display_no_curses():
    """Plain-terminal fallback for main_display: print telemetry as it arrives."""
    print("Showing Telemetry Data\n")
    print("-"*80 + "\n\n\r")
    while True:
        # Block until the RX thread queues a telemetry message.
        msg = rx_q_telem.get()
        with lock:
            # Serialize output against the RX thread's logging.
            print(str(msg.msg_str()) + str(msg.data) + '\n\r')
        time.sleep(1)
def main():
    """Start RX and display threads, then rebuild and show the image.

    Runs the matplotlib reconstruction on the main thread (matplotlib does
    not work inside threads); exits the loop once every 8x8 block of the
    256x256 image has been filled.
    """
    global rx_q
    global rx_q_telem
    # Thread to obtain TCP data
    rx_t = threading.Thread(target=main_rx, args=(tcp_port,))
    rx_t.start()
    # Thread to run curses terminal - comment these lines to stop using curses
    disp_t = threading.Thread(target=curses.wrapper, args=(main_display,))
    disp_t.start()
    # Uncomment these lines to use the terminal with no curses library instead
    # disp_t = threading.Thread(target=main_display_no_curses)
    # disp_t.start()
    # Show coefficient visuals - matplotlib doesn't work inside threads
    i = 0
    block_size = 8
    total_rows = int(img_size/block_size)
    while True:
        rx_msg = rx_q.get()
        # process the coefficient into decompressed image data
        decomp_img_data = get_decompressed_block(rx_msg.data)
        # reconstruct image from decompressed data: blocks fill column-major,
        # top-to-bottom then left-to-right.
        y = int((i%total_rows)*block_size)
        x = int(int(i/total_rows)*block_size)
        img[y:(y+block_size), x:(x+block_size)] = decomp_img_data
        i += 1
        if (i == total_rows*total_rows):
            break
    # Show decompressed image
    plt.title("Decompressed image")
    plt.imshow(img)
    plt.show()
# Script entry point.
if __name__ == "__main__":
    main()
|
invoker.py | #
# (C) Copyright IBM Corp. 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import json
import pika
import time
import logging
import random
from threading import Thread
from types import SimpleNamespace
from multiprocessing import Process, Queue, Value
from concurrent.futures import ThreadPoolExecutor
from lithops.compute import Compute
from lithops.version import __version__
from lithops.future import ResponseFuture
from lithops.config import extract_storage_config, extract_compute_config
from lithops.utils import version_str, is_lithops_function, is_unix_system
logger = logging.getLogger(__name__)

# Memory (MB) allocated to the remote invoker function, and number of local
# invoker processes/threads that drain the pending-calls queue.
REMOTE_INVOKER_MEMORY = 2048
INVOKER_PROCESSES = 2
class FunctionInvoker:
    """
    Module responsible to perform the invocations against the compute backend
    """

    def __init__(self, config, executor_id, internal_storage):
        """Build compute handlers and the token-bucket invocation machinery."""
        # Console output is printed only when logging would otherwise hide it.
        self.log_active = logger.getEffectiveLevel() != logging.WARNING
        self.config = config
        self.executor_id = executor_id
        self.storage_config = extract_storage_config(self.config)
        self.internal_storage = internal_storage
        self.compute_config = extract_compute_config(self.config)
        self.is_lithops_function = is_lithops_function()
        self.invokers = []
        self.remote_invoker = self.config['lithops'].get('remote_invoker', False)
        self.workers = self.config['lithops'].get('workers')
        logger.debug('ExecutorID {} - Total available workers: {}'
                     .format(self.executor_id, self.workers))
        self.compute_handlers = []
        cb = self.compute_config['backend']
        regions = self.compute_config[cb].get('region')
        if regions and type(regions) == list:
            # One compute handler per configured region.
            # NOTE(review): .copy() is shallow, so compute_config[cb] is shared
            # across iterations; each assignment mutates the same nested dict —
            # confirm Compute() snapshots the region at construction time.
            for region in regions:
                compute_config = self.compute_config.copy()
                compute_config[cb]['region'] = region
                compute_handler = Compute(compute_config)
                self.compute_handlers.append(compute_handler)
        else:
            compute_handler = Compute(self.compute_config)
            self.compute_handlers.append(compute_handler)
        logger.debug('ExecutorID {} - Creating function invoker'.format(self.executor_id))
        # token_bucket_q holds one token per free worker slot;
        # pending_calls_q holds (job, call_id) pairs awaiting invocation.
        self.token_bucket_q = Queue()
        self.pending_calls_q = Queue()
        self.running_flag = Value('i', 0)
        self.ongoing_activations = 0
        self.job_monitor = JobMonitor(self.config, self.internal_storage, self.token_bucket_q)

    def dismantle(self):
        """Tear down every compute backend handler."""
        for compute_handler in self.compute_handlers:
            compute_handler.dismantle()

    def select_runtime(self, job_id, runtime_memory):
        """
        Auxiliary method that selects the runtime to use. To do so it gets the
        runtime metadata from the storage. This metadata contains the preinstalled
        python modules needed to serialize the local function. If the .metadata
        file does not exists in the storage, this means that the runtime is not
        installed, so this method will proceed to install it.
        """
        runtime_name = self.config['lithops']['runtime']
        if runtime_memory is None:
            runtime_memory = self.config['lithops']['runtime_memory']
        if runtime_memory:
            runtime_memory = int(runtime_memory)
            log_msg = ('ExecutorID {} | JobID {} - Selected Runtime: {} - {}MB'
                       .format(self.executor_id, job_id, runtime_name, runtime_memory))
        else:
            log_msg = ('ExecutorID {} | JobID {} - Selected Runtime: {}'
                       .format(self.executor_id, job_id, runtime_name))
        logger.info(log_msg)
        if not self.log_active:
            print(log_msg, end=' ')
        installing = False
        for compute_handler in self.compute_handlers:
            runtime_key = compute_handler.get_runtime_key(runtime_name, runtime_memory)
            runtime_deployed = True
            try:
                # Missing metadata means the runtime was never installed.
                runtime_meta = self.internal_storage.get_runtime_meta(runtime_key)
            except Exception:
                runtime_deployed = False
            if not runtime_deployed:
                logger.debug('ExecutorID {} | JobID {} - Runtime {} with {}MB is not yet '
                             'installed'.format(self.executor_id, job_id, runtime_name, runtime_memory))
                if not self.log_active and not installing:
                    installing = True
                    print('(Installing...)')
                timeout = self.config['lithops']['runtime_timeout']
                logger.debug('Creating runtime: {}, memory: {}MB'.format(runtime_name, runtime_memory))
                runtime_meta = compute_handler.create_runtime(runtime_name, runtime_memory, timeout=timeout)
                self.internal_storage.put_runtime_meta(runtime_key, runtime_meta)
            # The remote runtime must run the same Python version as the host.
            py_local_version = version_str(sys.version_info)
            py_remote_version = runtime_meta['python_ver']
            if py_local_version != py_remote_version:
                raise Exception(("The indicated runtime '{}' is running Python {} and it "
                                 "is not compatible with the local Python version {}")
                                .format(runtime_name, py_remote_version, py_local_version))
        if not self.log_active and runtime_deployed:
            print()
        return runtime_meta

    def _start_invoker_process(self):
        """
        Starts the invoker process responsible to spawn pending calls in background
        """
        # Inside a lithops function (or on non-unix systems) real processes are
        # unavailable, so fall back to threads.
        if self.is_lithops_function or not is_unix_system():
            for inv_id in range(INVOKER_PROCESSES):
                p = Thread(target=self._run_invoker_process, args=(inv_id, ))
                self.invokers.append(p)
                p.daemon = True
                p.start()
        else:
            for inv_id in range(INVOKER_PROCESSES):
                p = Process(target=self._run_invoker_process, args=(inv_id, ))
                self.invokers.append(p)
                p.daemon = True
                p.start()

    def _run_invoker_process(self, inv_id):
        """
        Run process that implements token bucket scheduling approach
        """
        logger.debug('ExecutorID {} - Invoker process {} started'.format(self.executor_id, inv_id))
        with ThreadPoolExecutor(max_workers=250) as executor:
            while True:
                try:
                    # Block until a worker slot frees up, then take a call.
                    self.token_bucket_q.get()
                    job, call_id = self.pending_calls_q.get()
                except KeyboardInterrupt:
                    break
                if self.running_flag.value:
                    executor.submit(self._invoke, job, call_id)
                else:
                    break
        logger.debug('ExecutorID {} - Invoker process {} finished'.format(self.executor_id, inv_id))

    def stop(self):
        """
        Stop the invoker process
        """
        if self.invokers:
            logger.debug('ExecutorID {} - Stopping invoker'.format(self.executor_id))
            self.running_flag.value = 0
            # Unblock each invoker: a token plus a (None, None) sentinel call.
            for invoker in self.invokers:
                self.token_bucket_q.put('#')
                self.pending_calls_q.put((None, None))
            # Drain anything left so no stale calls survive a restart.
            while not self.pending_calls_q.empty():
                try:
                    self.pending_calls_q.get(False)
                except Exception:
                    pass
            self.invokers = []

    def _invoke(self, job, call_id):
        """
        Method used to perform the actual invocation against the Compute Backend
        """
        payload = {'config': self.config,
                   'log_level': logging.getLevelName(logger.getEffectiveLevel()),
                   'func_key': job.func_key,
                   'data_key': job.data_key,
                   'extra_env': job.extra_env,
                   'execution_timeout': job.execution_timeout,
                   'data_byte_range': job.data_ranges[int(call_id)],
                   'executor_id': job.executor_id,
                   'job_id': job.job_id,
                   'call_id': call_id,
                   'host_submit_tstamp': time.time(),
                   'lithops_version': __version__,
                   'runtime_name': job.runtime_name,
                   'runtime_memory': job.runtime_memory}
        # do the invocation
        start = time.time()
        compute_handler = random.choice(self.compute_handlers)
        activation_id = compute_handler.invoke(job.runtime_name, job.runtime_memory, payload)
        roundtrip = time.time() - start
        resp_time = format(round(roundtrip, 3), '.3f')
        if not activation_id:
            # reached quota limit: back off, requeue the call, and return the
            # worker token so another invoker can retry it.
            time.sleep(random.randint(0, 5))
            self.pending_calls_q.put((job, call_id))
            self.token_bucket_q.put('#')
            return
        logger.info('ExecutorID {} | JobID {} - Function call {} done! ({}s) - Activation'
                    ' ID: {}'.format(job.executor_id, job.job_id, call_id, resp_time, activation_id))
        return call_id

    def _invoke_remote(self, job_description):
        """
        Method used to send a job_description to the remote invoker
        """
        start = time.time()
        compute_handler = random.choice(self.compute_handlers)
        job = SimpleNamespace(**job_description)
        payload = {'config': self.config,
                   'log_level': logging.getLevelName(logger.getEffectiveLevel()),
                   'executor_id': job.executor_id,
                   'job_id': job.job_id,
                   'job_description': job_description,
                   'remote_invoker': True,
                   'invokers': 4,
                   'lithops_version': __version__}
        activation_id = compute_handler.invoke(job.runtime_name, REMOTE_INVOKER_MEMORY, payload)
        roundtrip = time.time() - start
        resp_time = format(round(roundtrip, 3), '.3f')
        if activation_id:
            logger.info('ExecutorID {} | JobID {} - Remote invoker call done! ({}s) - Activation'
                        ' ID: {}'.format(job.executor_id, job.job_id, resp_time, activation_id))
        else:
            raise Exception('Unable to spawn remote invoker')

    def run(self, job_description):
        """
        Run a job described in job_description
        """
        job = SimpleNamespace(**job_description)
        # Drain leftover tokens from a previous job so the worker count is fresh.
        try:
            while True:
                self.token_bucket_q.get_nowait()
                self.ongoing_activations -= 1
        except Exception:
            pass
        if self.remote_invoker:
            # Silence select_runtime's console output for the remote path.
            old_stdout = sys.stdout
            sys.stdout = open(os.devnull, 'w')
            self.select_runtime(job.job_id, REMOTE_INVOKER_MEMORY)
            sys.stdout = old_stdout
            log_msg = ('ExecutorID {} | JobID {} - Starting remote function invocation: {}() '
                       '- Total: {} activations'.format(job.executor_id, job.job_id,
                                                        job.function_name, job.total_calls))
            logger.info(log_msg)
            if not self.log_active:
                print(log_msg)
            th = Thread(target=self._invoke_remote, args=(job_description,))
            th.daemon = True
            th.start()
            time.sleep(0.1)
        else:
            try:
                if self.running_flag.value == 0:
                    self.ongoing_activations = 0
                    self.running_flag.value = 1
                    self._start_invoker_process()
                log_msg = ('ExecutorID {} | JobID {} - Starting function invocation: {}() - Total: {} '
                           'activations'.format(job.executor_id, job.job_id, job.function_name, job.total_calls))
                logger.info(log_msg)
                if not self.log_active:
                    print(log_msg)
                if self.ongoing_activations < self.workers:
                    # Invoke directly up to the number of free workers; queue the rest.
                    callids = range(job.total_calls)
                    total_direct = self.workers-self.ongoing_activations
                    callids_to_invoke_direct = callids[:total_direct]
                    callids_to_invoke_nondirect = callids[total_direct:]
                    self.ongoing_activations += len(callids_to_invoke_direct)
                    logger.debug('ExecutorID {} | JobID {} - Free workers: {} - Going to invoke {} function activations'
                                 .format(job.executor_id, job.job_id, total_direct, len(callids_to_invoke_direct)))
                    call_futures = []
                    executor = ThreadPoolExecutor(max_workers=job.invoke_pool_threads)
                    for i in callids_to_invoke_direct:
                        call_id = "{:05d}".format(i)
                        future = executor.submit(self._invoke, job, call_id)
                        call_futures.append(future)
                        time.sleep(0.1)
                    # Put into the queue the rest of the callids to invoke within the process
                    if callids_to_invoke_nondirect:
                        logger.debug('ExecutorID {} | JobID {} - Putting remaining {} function invocations into pending queue'
                                     .format(job.executor_id, job.job_id, len(callids_to_invoke_nondirect)))
                        for i in callids_to_invoke_nondirect:
                            call_id = "{:05d}".format(i)
                            self.pending_calls_q.put((job, call_id))
                else:
                    # No free workers: everything goes through the pending queue.
                    logger.debug('ExecutorID {} | JobID {} - Ongoing activations reached {} workers, '
                                 'putting {} function invocations into pending queue'
                                 .format(job.executor_id, job.job_id, self.workers, job.total_calls))
                    for i in range(job.total_calls):
                        call_id = "{:05d}".format(i)
                        self.pending_calls_q.put((job, call_id))
                self.job_monitor.start_job_monitoring(job)
            except (KeyboardInterrupt, Exception) as e:
                self.stop()
                raise e
        # Create all futures
        futures = []
        for i in range(job.total_calls):
            call_id = "{:05d}".format(i)
            fut = ResponseFuture(call_id, job_description, job.metadata.copy(), self.storage_config)
            fut._set_state(ResponseFuture.State.Invoked)
            futures.append(fut)
        return futures
class JobMonitor:
    """Tracks job completion and releases one worker token per finished call."""

    def __init__(self, lithops_config, internal_storage, token_bucket_q):
        # token_bucket_q is shared with FunctionInvoker: one '#' token is put
        # per completed call, freeing a worker slot.
        self.config = lithops_config
        self.internal_storage = internal_storage
        self.token_bucket_q = token_bucket_q
        self.is_lithops_function = is_lithops_function()
        self.monitors = []
        self.rabbitmq_monitor = self.config['lithops'].get('rabbitmq_monitor', False)
        if self.rabbitmq_monitor:
            self.rabbit_amqp_url = self.config['rabbitmq'].get('amqp_url')

    def get_active_jobs(self):
        """Return how many monitoring threads are still alive."""
        active_jobs = 0
        for job_monitor_th in self.monitors:
            if job_monitor_th.is_alive():
                active_jobs += 1
        return active_jobs

    def start_job_monitoring(self, job):
        """Spawn a background thread that tracks completion of the given job."""
        logger.debug('ExecutorID {} | JobID {} - Starting job monitoring'.format(job.executor_id, job.job_id))
        if self.rabbitmq_monitor:
            th = Thread(target=self._job_monitoring_rabbitmq, args=(job,))
        else:
            th = Thread(target=self._job_monitoring_os, args=(job,))
        # Only daemonize on the host; inside a lithops function the thread
        # must be allowed to keep the worker alive.
        if not self.is_lithops_function:
            th.daemon = True
        th.start()
        self.monitors.append(th)

    def _job_monitoring_os(self, job):
        """Poll object storage for finished calls, releasing one token each."""
        total_callids_done_in_job = 0
        time.sleep(1)
        while total_callids_done_in_job < job.total_calls:
            callids_running_in_job, callids_done_in_job = self.internal_storage.get_job_status(job.executor_id, job.job_id)
            total_new_tokens = len(callids_done_in_job) - total_callids_done_in_job
            total_callids_done_in_job = total_callids_done_in_job + total_new_tokens
            for i in range(total_new_tokens):
                self.token_bucket_q.put('#')
            time.sleep(0.3)

    def _job_monitoring_rabbitmq(self, job):
        """Consume per-call status messages from RabbitMQ until the job completes."""
        total_callids_done_in_job = 0
        exchange = 'lithops-{}-{}'.format(job.executor_id, job.job_id)
        queue_1 = '{}-1'.format(exchange)
        params = pika.URLParameters(self.rabbit_amqp_url)
        connection = pika.BlockingConnection(params)
        channel = connection.channel()

        def callback(ch, method, properties, body):
            nonlocal total_callids_done_in_job
            call_status = json.loads(body.decode("utf-8"))
            if call_status['type'] == '__end__':
                self.token_bucket_q.put('#')
                total_callids_done_in_job += 1
            if total_callids_done_in_job == job.total_calls:
                ch.stop_consuming()

        # NOTE(review): positional callback + no_ack matches pika < 1.0;
        # pika >= 1.0 renamed these (on_message_callback=, auto_ack=) —
        # confirm the pinned pika version.
        channel.basic_consume(callback, queue=queue_1, no_ack=True)
        channel.start_consuming()
|
detector_utils.py | # Utilities for object detector.
import numpy as np
import sys
import tensorflow as tf
import os
from threading import Thread
from datetime import datetime
import cv2
from utils import label_map_util
from collections import defaultdict
# Module-level graph placeholder (load_inference_graph builds its own).
detection_graph = tf.Graph()
sys.path.append("..")
# score threshold for showing bounding boxes.
_score_thresh = 0.27
#MODEL_NAME = 'hand_inference_graph'
MODEL_NAME = 'handtracking/hand_inference_graph'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join(os.getcwd(), MODEL_NAME, 'hand_label_map.pbtxt')
#PATH_TO_LABELS = os.path.join(MODEL_NAME, 'hand_label_map.pbtxt')
NUM_CLASSES = 1
# load label map (runs at import time; raises if the label file is missing)
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the frozen inference graph into memory.
def load_inference_graph():
    """Deserialize the frozen hand-detection model from PATH_TO_CKPT.

    Returns:
        (graph, sess): the tf.Graph holding the frozen model and a
        tf.Session bound to it, ready for detect_objects().
    """
    print("> ====== loading HAND frozen graph into memory")
    graph = tf.Graph()
    with graph.as_default():
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            graph_def.ParseFromString(fid.read())
        tf.import_graph_def(graph_def, name='')
        sess = tf.Session(graph=graph)
    print("> ====== Hand Inference graph loaded.")
    return graph, sess
# Draw the detected bounding boxes on the image.
# You can modify this to also draw a label.
def draw_box_on_image(num_hands_detect, score_thresh, scores, boxes, im_width, im_height, image_np):
    """Draw a rectangle on image_np for each detection scoring above score_thresh.

    Box coordinates appear normalized to [0, 1] (they are scaled by the image
    size) in [top, left, bottom, right] order — confirm against the model.
    """
    for idx in range(num_hands_detect):
        if scores[idx] <= score_thresh:
            continue
        box = boxes[idx]
        top_left = (int(box[1] * im_width), int(box[0] * im_height))
        bottom_right = (int(box[3] * im_width), int(box[2] * im_height))
        cv2.rectangle(image_np, top_left, bottom_right, (77, 255, 9), 3, 1)
# Show fps value on image.
def draw_fps_on_image(fps, image_np):
    """Render the fps string near the top-left corner of image_np."""
    origin = (20, 50)
    color = (77, 255, 9)
    cv2.putText(image_np, fps, origin, cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2)
# Actual detection: generate scores and bounding boxes given an image.
def detect_objects(image_np, detection_graph, sess):
    """Run one inference pass and return (boxes, scores) with batch dim removed."""
    get_tensor = detection_graph.get_tensor_by_name
    # Input placeholder and the frozen graph's output tensors.
    image_tensor = get_tensor('image_tensor:0')
    # Each box represents a part of the image where a particular object was
    # detected; each score is the confidence for that box.
    detection_boxes = get_tensor('detection_boxes:0')
    detection_scores = get_tensor('detection_scores:0')
    detection_classes = get_tensor('detection_classes:0')
    num_detections = get_tensor('num_detections:0')
    # The model expects a batch: add a leading axis of size 1.
    batched = np.expand_dims(image_np, axis=0)
    boxes, scores, _classes, _num = sess.run(
        [detection_boxes, detection_scores,
         detection_classes, num_detections],
        feed_dict={image_tensor: batched})
    return np.squeeze(boxes), np.squeeze(scores)
# Code to thread reading camera input.
# Source : Adrian Rosebrock
# https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/
class WebcamVideoStream:
    """Read frames from a cv2.VideoCapture on a background thread.

    The latest frame is kept in self.frame so read() never blocks on the
    camera.
    """

    def __init__(self, src, width, height):
        """Open the capture device, request a frame size, and grab one frame."""
        self.stream = cv2.VideoCapture(src)
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        (self.grabbed, self.frame) = self.stream.read()
        # Flag polled by the background loop to know when to exit.
        self.stopped = False

    def start(self):
        """Spawn the frame-reading thread; returns self for chaining."""
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        """Continuously overwrite self.frame until stop() is called."""
        while not self.stopped:
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        """Return the most recently captured frame."""
        return self.frame

    def size(self):
        """Return (width, height) as reported by the capture device."""
        return self.stream.get(3), self.stream.get(4)

    def stop(self):
        """Ask the background thread to exit."""
        self.stopped = True
|
statistic.py | from multiprocessing import Process
def send_statistic(token, uid, message, intent, user_type=None):
    """Record one chat-message statistics event (branches are stub placeholders).

    Args:
        token: token for the statistics backend.
        uid: identifier of the chat user.
        message: the message text the event refers to.
        intent: recognized intent name, or 'unknown'.
        user_type: non-None when the message came from the bot.

    Returns:
        '' on success; None if an error was swallowed.
    """
    try:
        if user_type is not None:
            # Message from the bot.
            pass
        else:
            # Message from the user.
            if intent != 'unknown':
                pass
            else:
                pass
        return ''
    except Exception:
        # Best-effort: statistics must never crash the caller. Narrowed from
        # the original bare `except:` so KeyboardInterrupt/SystemExit propagate.
        pass
def track(token, uid, message, intent, user_type=None):
    """Fire-and-forget: record the event via send_statistic in a child process."""
    worker = Process(target=send_statistic,
                     args=(token, uid, message, intent, user_type))
    worker.start()
|
test_kudu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from kudu.schema import (
BOOL,
DOUBLE,
FLOAT,
INT16,
INT32,
INT64,
INT8,
SchemaBuilder,
STRING,
BINARY,
UNIXTIME_MICROS)
from kudu.client import Partitioning
import logging
import pytest
import random
import textwrap
import threading
import time
from datetime import datetime
from pytz import utc
from tests.common.kudu_test_suite import KuduTestSuite
from tests.common.impala_cluster import ImpalaCluster
from tests.common.skip import SkipIfNotHdfsMinicluster
from tests.verifiers.metric_verifier import MetricVerifier
# Kudu master address(es), supplied via the --kudu_master_hosts pytest option.
KUDU_MASTER_HOSTS = pytest.config.option.kudu_master_hosts
LOG = logging.getLogger(__name__)
class TestKuduOperations(KuduTestSuite):
"""
This suite tests the different modification operations when using a kudu table.
"""
  def test_out_of_range_timestamps(self, vector, cursor, kudu_client, unique_database):
    """Test timestamp values that are outside of Impala's supported date range."""
    cursor.execute("""CREATE TABLE %s.times (a INT PRIMARY KEY, ts TIMESTAMP)
        PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
    assert kudu_client.table_exists(
      KuduTestSuite.to_kudu_table_name(unique_database, "times"))
    # Insert directly through the Kudu client so values Impala cannot
    # represent still land in the table.
    table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "times"))
    session = kudu_client.new_session()
    session.apply(table.new_insert((0, datetime(1987, 5, 19, 0, 0, tzinfo=utc))))
    # Add a date before 1400
    session.apply(table.new_insert((1, datetime(1300, 1, 1, 0, 0, tzinfo=utc))))
    # TODO: Add a date after 9999. There isn't a way to represent a date greater than
    # 9999 in Python datetime.
    #session.apply(table.new_insert((2, datetime(12000, 1, 1, 0, 0, tzinfo=utc))))
    session.flush()
    # TODO: The test driver should have a way to specify query options in an 'options'
    # section rather than having to split abort_on_error cases into separate files.
    vector.get_value('exec_option')['abort_on_error'] = 0
    self.run_test_case('QueryTest/kudu-overflow-ts', vector,
        use_db=unique_database)
    vector.get_value('exec_option')['abort_on_error'] = 1
    self.run_test_case('QueryTest/kudu-overflow-ts-abort-on-error', vector,
        use_db=unique_database)
def test_kudu_scan_node(self, vector, unique_database):
self.run_test_case('QueryTest/kudu-scan-node', vector, use_db=unique_database)
def test_kudu_insert(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_insert', vector, use_db=unique_database)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
def test_kudu_insert_mem_limit(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_insert_mem_limit', vector, use_db=unique_database)
def test_kudu_update(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_update', vector, use_db=unique_database)
def test_kudu_upsert(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_upsert', vector, use_db=unique_database)
def test_kudu_delete(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_delete', vector, use_db=unique_database)
def test_kudu_partition_ddl(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_partition_ddl', vector, use_db=unique_database)
@pytest.mark.skipif(pytest.config.option.testing_remote_cluster,
reason="Test references hardcoded hostnames: IMPALA-4873")
@pytest.mark.execute_serially
def test_kudu_alter_table(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_alter', vector, use_db=unique_database)
def test_kudu_stats(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_stats', vector, use_db=unique_database)
def test_kudu_describe(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_describe', vector, use_db=unique_database)
def test_kudu_limit(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_limit', vector, use_db=unique_database)
def test_kudu_column_options(self, cursor, kudu_client, unique_database):
"""Test Kudu column options"""
encodings = ["ENCODING PLAIN_ENCODING", ""]
compressions = ["COMPRESSION SNAPPY", ""]
nullability = ["NOT NULL", "NULL", ""]
defaults = ["DEFAULT 1", ""]
blocksizes = ["BLOCK_SIZE 32768", ""]
indx = 1
for encoding in encodings:
for compression in compressions:
for default in defaults:
for blocksize in blocksizes:
for nullable in nullability:
impala_tbl_name = "test_column_options_%s" % str(indx)
cursor.execute("""CREATE TABLE %s.%s (a INT PRIMARY KEY
%s %s %s %s, b INT %s %s %s %s %s) PARTITION BY HASH (a)
PARTITIONS 3 STORED AS KUDU""" % (unique_database, impala_tbl_name,
encoding, compression, default, blocksize, nullable, encoding,
compression, default, blocksize))
indx = indx + 1
assert kudu_client.table_exists(
KuduTestSuite.to_kudu_table_name(unique_database, impala_tbl_name))
def test_kudu_col_changed(self, cursor, kudu_client, unique_database):
"""Test changing a Kudu column outside of Impala results in a failure on read with
outdated metadata (IMPALA-4828)."""
cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY, s STRING)
PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
assert kudu_client.table_exists(
KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
# Force metadata to be loaded on impalads
cursor.execute("select * from %s.foo" % (unique_database))
# Load the table via the Kudu client and change col 's' to be a different type.
table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
alterer = kudu_client.new_table_alterer(table)
alterer.drop_column("s")
table = alterer.alter()
alterer = kudu_client.new_table_alterer(table)
alterer.add_column("s", "int32")
table = alterer.alter()
# Add some rows
session = kudu_client.new_session()
for i in range(100):
op = table.new_insert((i, i))
session.apply(op)
session.flush()
# Scanning should result in an error
try:
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
assert False
except Exception as e:
expected_error = "Column 's' is type INT but Impala expected STRING. The table "\
"metadata in Impala may be outdated and need to be refreshed."
assert expected_error in str(e)
# After a REFRESH the scan should succeed
cursor.execute("REFRESH %s.foo" % (unique_database))
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
assert len(cursor.fetchall()) == 100
def test_kudu_col_not_null_changed(self, cursor, kudu_client, unique_database):
"""Test changing a NOT NULL Kudu column outside of Impala results in a failure
on read with outdated metadata (IMPALA-4828)."""
cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY, s STRING NOT NULL)
PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
assert kudu_client.table_exists(
KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
# Force metadata to be loaded on impalads
cursor.execute("select * from %s.foo" % (unique_database))
# Load the table via the Kudu client and change col 's' to be a different type.
table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
alterer = kudu_client.new_table_alterer(table)
alterer.drop_column("s")
table = alterer.alter()
alterer = kudu_client.new_table_alterer(table)
alterer.add_column("s", "string", nullable=True)
table = alterer.alter()
# Add some rows
session = kudu_client.new_session()
for i in range(100):
op = table.new_insert((i, None))
session.apply(op)
session.flush()
# Scanning should result in an error
try:
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
assert False
except Exception as e:
expected_error = "Column 's' is nullable but Impala expected it to be "\
"not nullable. The table metadata in Impala may be outdated and need to be "\
"refreshed."
assert expected_error in str(e)
# After a REFRESH the scan should succeed
cursor.execute("REFRESH %s.foo" % (unique_database))
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
assert len(cursor.fetchall()) == 100
def test_kudu_col_null_changed(self, cursor, kudu_client, unique_database):
"""Test changing a NULL Kudu column outside of Impala results in a failure
on read with outdated metadata (IMPALA-4828)."""
cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY, s STRING NULL)
PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
assert kudu_client.table_exists(
KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
# Force metadata to be loaded on impalads
cursor.execute("select * from %s.foo" % (unique_database))
# Load the table via the Kudu client and change col 's' to be a different type.
table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
alterer = kudu_client.new_table_alterer(table)
alterer.drop_column("s")
table = alterer.alter()
alterer = kudu_client.new_table_alterer(table)
alterer.add_column("s", "string", nullable=False, default="bar")
table = alterer.alter()
# Add some rows
session = kudu_client.new_session()
for i in range(100):
op = table.new_insert((i, "foo"))
session.apply(op)
session.flush()
# Scanning should result in an error
try:
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
assert False
except Exception as e:
expected_error = "Column 's' is not nullable but Impala expected it to be "\
"nullable. The table metadata in Impala may be outdated and need to be "\
"refreshed."
assert expected_error in str(e)
# After a REFRESH the scan should succeed
cursor.execute("REFRESH %s.foo" % (unique_database))
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
assert len(cursor.fetchall()) == 100
def test_kudu_col_added(self, cursor, kudu_client, unique_database):
"""Test adding a Kudu column outside of Impala."""
cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY)
PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
assert kudu_client.table_exists(
KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
# Force metadata to be loaded on impalads
cursor.execute("select * from %s.foo" % (unique_database))
# Load the table via the Kudu client and add a new col
table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
alterer = kudu_client.new_table_alterer(table)
alterer.add_column("b", "int32")
table = alterer.alter()
# Add some rows
session = kudu_client.new_session()
op = table.new_insert((0, 0))
session.apply(op)
session.flush()
# Only the first col is visible to Impala. Impala will not know about the missing
# column, so '*' is expanded to known columns. This doesn't have a separate check
# because the query can proceed and checking would need to fetch metadata from the
# Kudu master, which is what REFRESH is for.
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
assert cursor.fetchall() == [(0, )]
# After a REFRESH both cols should be visible
cursor.execute("REFRESH %s.foo" % (unique_database))
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
assert cursor.fetchall() == [(0, 0)]
def test_kudu_col_removed(self, cursor, kudu_client, unique_database):
"""Test removing a Kudu column outside of Impala."""
cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY, s STRING)
PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
assert kudu_client.table_exists(
KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
# Force metadata to be loaded on impalads
cursor.execute("select * from %s.foo" % (unique_database))
cursor.execute("insert into %s.foo values (0, 'foo')" % (unique_database))
# Load the table via the Kudu client and change col 's' to be a different type.
table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
alterer = kudu_client.new_table_alterer(table)
alterer.drop_column("s")
table = alterer.alter()
# Scanning should result in an error
try:
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
except Exception as e:
expected_error = "Column 's' not found in kudu table impala::test_kudu_col_removed"
assert expected_error in str(e)
# After a REFRESH the scan should succeed
cursor.execute("REFRESH %s.foo" % (unique_database))
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
assert cursor.fetchall() == [(0, )]
def test_kudu_show_unbounded_range_partition(self, cursor, kudu_client,
unique_database):
"""Check that a single unbounded range partition gets printed correctly."""
schema_builder = SchemaBuilder()
column_spec = schema_builder.add_column("id", INT64)
column_spec.nullable(False)
schema_builder.set_primary_keys(["id"])
schema = schema_builder.build()
name = unique_database + ".unbounded_range_table"
try:
kudu_client.create_table(name, schema,
partitioning=Partitioning().set_range_partition_columns(["id"]))
kudu_table = kudu_client.table(name)
impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
props))
with self.drop_impala_table_after_context(cursor, impala_table_name):
cursor.execute("SHOW RANGE PARTITIONS %s" % impala_table_name)
assert cursor.description == [
('RANGE (id)', 'STRING', None, None, None, None, None)]
assert cursor.fetchall() == [('UNBOUNDED',)]
finally:
if kudu_client.table_exists(name):
kudu_client.delete_table(name)
def test_column_storage_attributes(self, cursor, unique_database):
"""Tests that for every valid combination of column type, encoding, and compression,
we can insert a value and scan it back from Kudu."""
# This test takes about 2min and is unlikely to break, so only run it in exhaustive.
if self.exploration_strategy() != 'exhaustive':
pytest.skip("Only runs in exhaustive to reduce core time.")
table_name = "%s.storage_attrs" % unique_database
types = ['boolean', 'tinyint', 'smallint', 'int', 'bigint', 'float', 'double', \
'string', 'timestamp', 'decimal']
create_query = "create table %s (id int primary key" % table_name
for t in types:
create_query += ", %s_col %s" % (t, t)
create_query += ") partition by hash(id) partitions 16 stored as kudu"
cursor.execute(create_query)
encodings = ['AUTO_ENCODING', 'PLAIN_ENCODING', 'PREFIX_ENCODING', 'GROUP_VARINT', \
'RLE', 'DICT_ENCODING', 'BIT_SHUFFLE']
compressions = ['DEFAULT_COMPRESSION', 'NO_COMPRESSION', 'SNAPPY', 'LZ4', 'ZLIB']
i = 0
for e in encodings:
for c in compressions:
for t in types:
try:
cursor.execute("""alter table %s alter column %s_col
set encoding %s compression %s""" % (table_name, t, e, c))
except Exception as err:
assert "encoding %s not supported for type" % e in str(err)
cursor.execute("""insert into %s values (%s, true, 0, 0, 0, 0, 0, 0, '0',
cast('2009-01-01' as timestamp), cast(0 as decimal))""" % (table_name, i))
cursor.execute("select * from %s where id = %s" % (table_name, i))
assert cursor.fetchall() == \
[(i, True, 0, 0, 0, 0, 0.0, 0.0, '0', datetime(2009, 1, 1, 0, 0), 0)]
i += 1
cursor.execute("select count(*) from %s" % table_name)
print cursor.fetchall() == [(i, )]
def test_concurrent_schema_change(self, cursor, unique_database):
"""Tests that an insert into a Kudu table with a concurrent schema change either
succeeds or fails gracefully."""
table_name = "%s.test_schema_change" % unique_database
cursor.execute("""create table %s (col0 bigint primary key, col1 bigint)
partition by hash(col0) partitions 16 stored as kudu""" % table_name)
iters = 5
def insert_values():
threading.current_thread().errors = []
client = self.create_impala_client()
for i in range(0, iters):
time.sleep(random.random()) # sleeps for up to one second
try:
client.execute("insert into %s values (0, 0), (1, 1)" % table_name)
except Exception as e:
threading.current_thread().errors.append(e)
insert_thread = threading.Thread(target=insert_values)
insert_thread.start()
for i in range(0, iters):
time.sleep(random.random()) # sleeps for up to one second
cursor.execute("alter table %s drop column col1" % table_name)
if i % 2 == 0:
cursor.execute("alter table %s add columns (col1 string)" % table_name)
else:
cursor.execute("alter table %s add columns (col1 bigint)" % table_name)
insert_thread.join()
for error in insert_thread.errors:
msg = str(error)
# The first two are AnalysisExceptions, the next two come from KuduTableSink::Open()
# if the schema has changed since analysis, the last comes from the Kudu server if
# the schema changes between KuduTableSink::Open() and when the write ops are sent.
assert "has fewer columns (1) than the SELECT / VALUES clause returns (2)" in msg \
or "(type: TINYINT) is not compatible with column 'col1' (type: STRING)" in msg \
or "has fewer columns than expected." in msg \
or "Column col1 has unexpected type." in msg \
or "Client provided column col1[int64 NULLABLE] not present in tablet" in msg
class TestCreateExternalTable(KuduTestSuite):
  """Tests CREATE EXTERNAL TABLE ... STORED AS KUDU against tables that were
  created directly through the Kudu client rather than through Impala."""

  def test_external_timestamp_default_value(self, cursor, kudu_client, unique_database):
    """Checks that a Kudu table created outside Impala with a default value on a
       UNIXTIME_MICROS column can be loaded by Impala, and validates the DESCRIBE
       output is correct."""
    schema_builder = SchemaBuilder()
    column_spec = schema_builder.add_column("id", INT64)
    column_spec.nullable(False)
    column_spec = schema_builder.add_column("ts", UNIXTIME_MICROS)
    # 2009-01-01 00:00:00 UTC == 1230768000000000 micros since the epoch.
    column_spec.default(datetime(2009, 1, 1, 0, 0, tzinfo=utc))
    schema_builder.set_primary_keys(["id"])
    schema = schema_builder.build()
    name = unique_database + ".tsdefault"

    try:
      kudu_client.create_table(name, schema,
          partitioning=Partitioning().set_range_partition_columns(["id"]))
      kudu_table = kudu_client.table(name)
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
          props))
      with self.drop_impala_table_after_context(cursor, impala_table_name):
        cursor.execute("DESCRIBE %s" % impala_table_name)
        table_desc = [[col.strip() if col else col for col in row] for row in cursor]
        # Pytest shows truncated output on failure, so print the details just in case.
        LOG.info(table_desc)
        assert ["ts", "timestamp", "", "false", "true", "1230768000000000", \
            "AUTO_ENCODING", "DEFAULT_COMPRESSION", "0"] in table_desc
    finally:
      if kudu_client.table_exists(name):
        kudu_client.delete_table(name)

  def test_implicit_table_props(self, cursor, kudu_client):
    """Check that table properties added internally during table creation are as
       expected.
    """
    with self.temp_kudu_table(kudu_client, [STRING, INT8, BOOL], num_key_cols=2) \
        as kudu_table:
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
          props))
      with self.drop_impala_table_after_context(cursor, impala_table_name):
        cursor.execute("DESCRIBE FORMATTED %s" % impala_table_name)
        table_desc = [[col.strip() if col else col for col in row] for row in cursor]
        LOG.info(table_desc)
        # Pytest shows truncated output on failure, so print the details just in case.
        assert ["", "EXTERNAL", "TRUE"] in table_desc
        assert ["", "kudu.master_addresses", KUDU_MASTER_HOSTS] in table_desc
        assert ["", "kudu.table_name", kudu_table.name] in table_desc
        assert ["", "storage_handler", "com.cloudera.kudu.hive.KuduStorageHandler"] \
            in table_desc

  def test_col_types(self, cursor, kudu_client):
    """Check that a table can be created using all available column types."""
    # TODO: Add DECIMAL when the Kudu python client supports decimal
    kudu_types = [STRING, BOOL, DOUBLE, FLOAT, INT16, INT32, INT64, INT8]
    with self.temp_kudu_table(kudu_client, kudu_types) as kudu_table:
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
          props))
      with self.drop_impala_table_after_context(cursor, impala_table_name):
        cursor.execute("DESCRIBE %s" % impala_table_name)
        kudu_schema = kudu_table.schema
        # DESCRIBE rows are 9-tuples; only name and type are checked here.
        for i, (col_name, col_type, _, _, _, _, _, _, _) in enumerate(cursor):
          kudu_col = kudu_schema[i]
          assert col_name == kudu_col.name
          assert col_type.upper() == \
              self.kudu_col_type_to_impala_col_type(kudu_col.type.type)

  def test_unsupported_binary_col(self, cursor, kudu_client):
    """Check that external tables with BINARY columns fail gracefully.
    """
    with self.temp_kudu_table(kudu_client, [INT32, BINARY]) as kudu_table:
      impala_table_name = self.random_table_name()
      try:
        cursor.execute("""
            CREATE EXTERNAL TABLE %s
            STORED AS KUDU
            TBLPROPERTIES('kudu.table_name' = '%s')""" % (impala_table_name,
                kudu_table.name))
        assert False
      except Exception as e:
        assert "Kudu type 'binary' is not supported in Impala" in str(e)

  def test_drop_external_table(self, cursor, kudu_client):
    """Check that dropping an external table only affects the catalog and does not delete
       the table in Kudu.
    """
    with self.temp_kudu_table(kudu_client, [INT32]) as kudu_table:
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
          props))
      with self.drop_impala_table_after_context(cursor, impala_table_name):
        cursor.execute("SELECT COUNT(*) FROM %s" % impala_table_name)
        assert cursor.fetchall() == [(0, )]
      # The context dropped the Impala table: further queries must fail ...
      try:
        cursor.execute("SELECT COUNT(*) FROM %s" % impala_table_name)
        assert False
      except Exception as e:
        assert "Could not resolve table reference" in str(e)
      # ... but the underlying Kudu table must still exist.
      assert kudu_client.table_exists(kudu_table.name)

  def test_explicit_name(self, cursor, kudu_client):
    """Check that a Kudu table can be specified using a table property."""
    with self.temp_kudu_table(kudu_client, [INT32]) as kudu_table:
      table_name = self.random_table_name()
      cursor.execute("""
          CREATE EXTERNAL TABLE %s
          STORED AS KUDU
          TBLPROPERTIES('kudu.table_name' = '%s')""" % (table_name, kudu_table.name))
      with self.drop_impala_table_after_context(cursor, table_name):
        cursor.execute("SELECT * FROM %s" % table_name)
        assert len(cursor.fetchall()) == 0

  def test_explicit_name_preference(self, cursor, kudu_client):
    """Check that the table name from a table property is used when a table of the
       implied name also exists.
    """
    with self.temp_kudu_table(kudu_client, [INT64]) as preferred_kudu_table:
      with self.temp_kudu_table(kudu_client, [INT8]) as other_kudu_table:
        impala_table_name = self.get_kudu_table_base_name(other_kudu_table.name)
        cursor.execute("""
            CREATE EXTERNAL TABLE %s
            STORED AS KUDU
            TBLPROPERTIES('kudu.table_name' = '%s')""" % (
                impala_table_name, preferred_kudu_table.name))
        with self.drop_impala_table_after_context(cursor, impala_table_name):
          cursor.execute("DESCRIBE %s" % impala_table_name)
          # 'bigint' proves the INT64 (preferred) table was bound, not the INT8 one.
          assert cursor.fetchall() == \
              [("a", "bigint", "", "true", "false", "", "AUTO_ENCODING",
                "DEFAULT_COMPRESSION", "0")]

  def test_explicit_name_doesnt_exist(self, cursor, kudu_client):
    """Check that binding to a non-existent Kudu table name fails cleanly."""
    kudu_table_name = self.random_table_name()
    try:
      cursor.execute("""
          CREATE EXTERNAL TABLE %s
          STORED AS KUDU
          TBLPROPERTIES('kudu.table_name' = '%s')""" % (
              self.random_table_name(), kudu_table_name))
      assert False
    except Exception as e:
      assert "Table does not exist in Kudu: '%s'" % kudu_table_name in str(e)

  def test_explicit_name_doesnt_exist_but_implicit_does(self, cursor, kudu_client):
    """Check that when an explicit table name is given but that table doesn't exist,
       there is no fall-through to an existing implicit table.
    """
    with self.temp_kudu_table(kudu_client, [INT32]) as kudu_table:
      table_name = self.random_table_name()
      try:
        cursor.execute("""
            CREATE EXTERNAL TABLE %s
            STORED AS KUDU
            TBLPROPERTIES('kudu.table_name' = '%s')""" % (
                self.get_kudu_table_base_name(kudu_table.name), table_name))
        assert False
      except Exception as e:
        assert "Table does not exist in Kudu: '%s'" % table_name in str(e)

  def test_table_without_partitioning(self, cursor, kudu_client, unique_database):
    """Test a Kudu table created without partitioning (i.e. equivalent to a single
       unbounded partition). It is not possible to create such a table in Impala, but
       it can be created directly in Kudu and then loaded as an external table.
       Regression test for IMPALA-5154."""
    schema_builder = SchemaBuilder()
    column_spec = schema_builder.add_column("id", INT64)
    column_spec.nullable(False)
    schema_builder.set_primary_keys(["id"])
    schema = schema_builder.build()
    partitioning = Partitioning().set_range_partition_columns([])
    name = "%s.one_big_unbounded_partition" % unique_database

    try:
      kudu_client.create_table(name, schema, partitioning=partitioning)
      kudu_table = kudu_client.table(name)

      props = "TBLPROPERTIES('kudu.table_name'='%s')" % name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (name, props))
      with self.drop_impala_table_after_context(cursor, name):
        cursor.execute("INSERT INTO %s VALUES (1), (2), (3)" % name)
        cursor.execute("SELECT COUNT(*) FROM %s" % name)
        assert cursor.fetchall() == [(3, )]
        # SHOW RANGE PARTITIONS is rejected because the table has no range partitions.
        try:
          cursor.execute("SHOW RANGE PARTITIONS %s" % name)
          assert False
        except Exception as e:
          assert "AnalysisException: SHOW RANGE PARTITIONS requested but table does "\
              "not have range partitions" in str(e)
    finally:
      if kudu_client.table_exists(name):
        kudu_client.delete_table(name)

  def test_column_name_case(self, cursor, kudu_client, unique_database):
    """IMPALA-5286: Tests that an external Kudu table that was created with a column name
       containing upper case letters is handled correctly."""
    table_name = '%s.kudu_external_test' % unique_database
    if kudu_client.table_exists(table_name):
      kudu_client.delete_table(table_name)

    schema_builder = SchemaBuilder()
    key_col = 'Key'
    schema_builder.add_column(key_col, INT64).nullable(False).primary_key()
    schema = schema_builder.build()
    partitioning = Partitioning().set_range_partition_columns([key_col])\
        .add_range_partition([1], [10])

    try:
      kudu_client.create_table(table_name, schema, partitioning)

      props = "tblproperties('kudu.table_name' = '%s')" % table_name
      cursor.execute("create external table %s stored as kudu %s" % (table_name, props))

      # Perform a variety of operations on the table.
      cursor.execute("insert into %s (kEy) values (5), (1), (4)" % table_name)
      cursor.execute("select keY from %s where KeY %% 2 = 0" % table_name)
      assert cursor.fetchall() == [(4, )]
      cursor.execute("select * from %s order by kEY" % (table_name))
      assert cursor.fetchall() == [(1, ), (4, ), (5, )]

      # Do a join with a runtime filter targeting the column.
      cursor.execute("select count(*) from %s a, %s b where a.key = b.key" %
          (table_name, table_name))
      assert cursor.fetchall() == [(3, )]

      cursor.execute("alter table %s add range partition 11 < values < 20" % table_name)

      new_key = "KEY2"
      cursor.execute("alter table %s change KEy %s bigint" % (table_name, new_key))
      val_col = "vaL"
      cursor.execute("alter table %s add columns (%s bigint)" % (table_name, val_col))

      cursor.execute("describe %s" % table_name)
      results = cursor.fetchall()
      # 'describe' should print the column name in lower case.
      assert new_key.lower() in results[0]
      assert val_col.lower() in results[1]

      cursor.execute("alter table %s drop column Val" % table_name);
      cursor.execute("describe %s" % table_name)
      assert len(cursor.fetchall()) == 1

      cursor.execute("alter table %s drop range partition 11 < values < 20" % table_name)
    finally:
      if kudu_client.table_exists(table_name):
        kudu_client.delete_table(table_name)

  def test_conflicting_column_name(self, cursor, kudu_client, unique_database):
    """IMPALA-5283: Tests that loading an external Kudu table that was created with column
       names that differ only in case results in an error."""
    table_name = '%s.kudu_external_test' % unique_database
    if kudu_client.table_exists(table_name):
      kudu_client.delete_table(table_name)

    schema_builder = SchemaBuilder()
    col0 = 'col'
    schema_builder.add_column(col0, INT64).nullable(False).primary_key()
    col1 = 'COL'
    schema_builder.add_column(col1, INT64)
    schema = schema_builder.build()
    partitioning = Partitioning().set_range_partition_columns([col0])\
        .add_range_partition([1], [10])

    try:
      kudu_client.create_table(table_name, schema, partitioning)

      props = "tblproperties('kudu.table_name' = '%s')" % table_name
      cursor.execute("create external table %s stored as kudu %s" % (table_name, props))
      assert False, 'create table should have resulted in an exception'
    except Exception as e:
      assert 'Error loading Kudu table: Impala does not support column names that ' \
          + 'differ only in casing' in str(e)
    finally:
      if kudu_client.table_exists(table_name):
        kudu_client.delete_table(table_name)
class TestShowCreateTable(KuduTestSuite):
def assert_show_create_equals(self, cursor, create_sql, show_create_sql):
"""Executes 'create_sql' to create a table, then runs "SHOW CREATE TABLE" and checks
that the output is the same as 'show_create_sql'. 'create_sql' and
'show_create_sql' can be templates that can be used with str.format(). format()
will be called with 'table' and 'db' as keyword args.
"""
format_args = {"table": self.random_table_name(), "db": cursor.conn.db_name}
cursor.execute(create_sql.format(**format_args))
cursor.execute("SHOW CREATE TABLE {table}".format(**format_args))
assert cursor.fetchall()[0][0] == \
textwrap.dedent(show_create_sql.format(**format_args)).strip()
def test_primary_key_and_distribution(self, cursor):
# TODO: Add test cases with column comments once KUDU-1711 is fixed.
# TODO: Add case with BLOCK_SIZE
self.assert_show_create_equals(cursor,
"""
CREATE TABLE {table} (c INT PRIMARY KEY)
PARTITION BY HASH (c) PARTITIONS 3 STORED AS KUDU""",
"""
CREATE TABLE {db}.{{table}} (
c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
PRIMARY KEY (c)
)
PARTITION BY HASH (c) PARTITIONS 3
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
self.assert_show_create_equals(cursor,
"""
CREATE TABLE {table} (c INT PRIMARY KEY, d STRING NULL)
PARTITION BY HASH (c) PARTITIONS 3, RANGE (c)
(PARTITION VALUES <= 1, PARTITION 1 < VALUES <= 2,
PARTITION 2 < VALUES) STORED AS KUDU""",
"""
CREATE TABLE {db}.{{table}} (
c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
d STRING NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
PRIMARY KEY (c)
)
PARTITION BY HASH (c) PARTITIONS 3, RANGE (c) (...)
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
self.assert_show_create_equals(cursor,
"""
CREATE TABLE {table} (c INT ENCODING PLAIN_ENCODING, PRIMARY KEY (c))
PARTITION BY HASH (c) PARTITIONS 3 STORED AS KUDU""",
"""
CREATE TABLE {db}.{{table}} (
c INT NOT NULL ENCODING PLAIN_ENCODING COMPRESSION DEFAULT_COMPRESSION,
PRIMARY KEY (c)
)
PARTITION BY HASH (c) PARTITIONS 3
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
self.assert_show_create_equals(cursor,
"""
CREATE TABLE {table} (c INT COMPRESSION LZ4, d STRING, PRIMARY KEY(c, d))
PARTITION BY HASH (c) PARTITIONS 3, HASH (d) PARTITIONS 3,
RANGE (c, d) (PARTITION VALUE = (1, 'aaa'), PARTITION VALUE = (2, 'bbb'))
STORED AS KUDU""",
"""
CREATE TABLE {db}.{{table}} (
c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION LZ4,
d STRING NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
PRIMARY KEY (c, d)
)
PARTITION BY HASH (c) PARTITIONS 3, HASH (d) PARTITIONS 3, RANGE (c, d) (...)
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
self.assert_show_create_equals(cursor,
"""
CREATE TABLE {table} (c INT, d STRING, e INT NULL DEFAULT 10, PRIMARY KEY(c, d))
PARTITION BY RANGE (c) (PARTITION VALUES <= 1, PARTITION 1 < VALUES <= 2,
PARTITION 2 < VALUES <= 3, PARTITION 3 < VALUES) STORED AS KUDU""",
"""
CREATE TABLE {db}.{{table}} (
c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
d STRING NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
e INT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION DEFAULT 10,
PRIMARY KEY (c, d)
)
PARTITION BY RANGE (c) (...)
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
self.assert_show_create_equals(cursor,
"""
CREATE TABLE {table} (c INT PRIMARY KEY) STORED AS KUDU""",
"""
CREATE TABLE {db}.{{table}} (
c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
PRIMARY KEY (c)
)
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
def test_timestamp_default_value(self, cursor):
create_sql_fmt = """
CREATE TABLE {table} (c INT, d TIMESTAMP,
e TIMESTAMP NULL DEFAULT CAST('%s' AS TIMESTAMP),
PRIMARY KEY(c, d))
PARTITION BY HASH(c) PARTITIONS 3
STORED AS KUDU"""
# Long lines are unfortunate, but extra newlines will break the test.
show_create_sql_fmt = """
CREATE TABLE {db}.{{table}} (
c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
d TIMESTAMP NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
e TIMESTAMP NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION DEFAULT unix_micros_to_utc_timestamp(%s),
PRIMARY KEY (c, d)
)
PARTITION BY HASH (c) PARTITIONS 3
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS)
self.assert_show_create_equals(cursor,
create_sql_fmt % ("2009-01-01 00:00:00.000001000"),
show_create_sql_fmt % ("1230768000000001"))
self.assert_show_create_equals(cursor,
create_sql_fmt % ("2009-01-01 00:00:00.000001001"),
show_create_sql_fmt % ("1230768000000001"))
self.assert_show_create_equals(cursor,
create_sql_fmt % ("2009-01-01 00:00:00.000000999"),
show_create_sql_fmt % ("1230768000000001"))
  def test_external_kudu_table_name_with_show_create(self, cursor, kudu_client,
                                                     unique_database):
    """Check that the generated kudu.table_name tblproperty is present with
    show create table with external Kudu tables.
    """
    # Build a single-column, range-partitioned Kudu table directly through the
    # Kudu client (i.e. outside of Impala).
    schema_builder = SchemaBuilder()
    column_spec = schema_builder.add_column("id", INT64)
    column_spec.nullable(False)
    schema_builder.set_primary_keys(["id"])
    partitioning = Partitioning().set_range_partition_columns(["id"])
    schema = schema_builder.build()
    kudu_table_name = self.random_table_name()
    try:
      kudu_client.create_table(kudu_table_name, schema, partitioning)
      kudu_table = kudu_client.table(kudu_table_name)
      table_name_prop = "'kudu.table_name'='%s'" % kudu_table.name
      # The external table's SHOW CREATE output must echo the kudu.table_name
      # property alongside the master addresses.
      self.assert_show_create_equals(cursor,
        """
CREATE EXTERNAL TABLE {{table}} STORED AS KUDU
TBLPROPERTIES({props})""".format(
          props=table_name_prop),
        """
CREATE EXTERNAL TABLE {db}.{{table}}
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}', {kudu_table})""".format(
          db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS,
          kudu_table=table_name_prop))
    finally:
      # Always clean up the table created outside of Impala.
      if kudu_client.table_exists(kudu_table_name):
        kudu_client.delete_table(kudu_table_name)
  def test_managed_kudu_table_name_with_show_create(self, cursor):
    """Check that the generated kudu.table_name tblproperty is not present with
    show create table with managed Kudu tables.
    """
    # Only kudu.master_addresses should appear in TBLPROPERTIES below.
    self.assert_show_create_equals(cursor,
      """
CREATE TABLE {table} (c INT PRIMARY KEY)
PARTITION BY HASH (c) PARTITIONS 3
STORED AS KUDU""",
      """
CREATE TABLE {db}.{{table}} (
c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
PRIMARY KEY (c)
)
PARTITION BY HASH (c) PARTITIONS 3
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
        db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
class TestDropDb(KuduTestSuite):
  """Tests DROP DATABASE semantics for databases that contain Kudu tables."""

  def test_drop_non_empty_db(self, unique_cursor, kudu_client):
    """Check that an attempt to drop a database will fail if Kudu tables are present
    and that the tables remain.
    """
    db_name = unique_cursor.conn.db_name
    with self.temp_kudu_table(kudu_client, [INT32], db_name=db_name) as kudu_table:
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      unique_cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (
          impala_table_name, props))
      # Switch out of the database so the DROP is not rejected for being in use.
      unique_cursor.execute("USE DEFAULT")
      try:
        unique_cursor.execute("DROP DATABASE %s" % db_name)
        assert False
      except Exception as e:
        assert "One or more tables exist" in str(e)
      # The table must still be queryable after the failed DROP.
      unique_cursor.execute("SELECT COUNT(*) FROM %s.%s" % (db_name, impala_table_name))
      assert unique_cursor.fetchall() == [(0, )]

  def test_drop_db_cascade(self, unique_cursor, kudu_client):
    """Check that an attempt to drop a database will succeed even if Kudu tables are
    present and that the managed tables are removed.
    """
    db_name = unique_cursor.conn.db_name
    with self.temp_kudu_table(kudu_client, [INT32], db_name=db_name) as kudu_table:
      # Create an external Kudu table
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      unique_cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (
          impala_table_name, props))
      # Create a managed Kudu table
      managed_table_name = self.random_table_name()
      unique_cursor.execute("""
CREATE TABLE %s (a INT PRIMARY KEY) PARTITION BY HASH (a) PARTITIONS 3
STORED AS KUDU""" % managed_table_name)
      kudu_table_name = "impala::" + db_name + "." + managed_table_name
      assert kudu_client.table_exists(kudu_table_name)
      # Create a table in HDFS
      hdfs_table_name = self.random_table_name()
      unique_cursor.execute("""
CREATE TABLE %s (a INT) PARTITIONED BY (x INT)""" % (hdfs_table_name))
      unique_cursor.execute("USE DEFAULT")
      unique_cursor.execute("DROP DATABASE %s CASCADE" % db_name)
      # The db is gone; the external table's underlying Kudu table survives,
      # while the managed table's Kudu table is dropped with the database.
      unique_cursor.execute("SHOW DATABASES")
      assert (db_name, '') not in unique_cursor.fetchall()
      assert kudu_client.table_exists(kudu_table.name)
      assert not kudu_client.table_exists(managed_table_name)
class TestImpalaKuduIntegration(KuduTestSuite):
  """Covers cases where the underlying Kudu table is changed or dropped out
  from under Impala via the Kudu client."""

  def test_replace_kudu_table(self, cursor, kudu_client):
    """Check that an external Kudu table is accessible if the underlying Kudu table is
    modified using the Kudu client.
    """
    # Create an external Kudu table
    col_names = ['a']
    with self.temp_kudu_table(kudu_client, [INT32], col_names=col_names) as kudu_table:
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (
          impala_table_name, props))
      cursor.execute("DESCRIBE %s" % (impala_table_name))
      assert cursor.fetchall() == \
          [("a", "int", "", "true", "false", "", "AUTO_ENCODING",
            "DEFAULT_COMPRESSION", "0")]
      # Drop the underlying Kudu table and replace it with another Kudu table that has
      # the same name but different schema
      kudu_client.delete_table(kudu_table.name)
      assert not kudu_client.table_exists(kudu_table.name)
      new_col_names = ['b', 'c']
      name_parts = kudu_table.name.split(".")
      assert len(name_parts) == 2
      with self.temp_kudu_table(kudu_client, [STRING, STRING], col_names=new_col_names,
          db_name=name_parts[0], name= name_parts[1]) as new_kudu_table:
        assert kudu_client.table_exists(new_kudu_table.name)
        # Refresh the external table and verify that the new schema is loaded from
        # Kudu.
        cursor.execute("REFRESH %s" % (impala_table_name))
        cursor.execute("DESCRIBE %s" % (impala_table_name))
        assert cursor.fetchall() == \
            [("b", "string", "", "true", "false", "", "AUTO_ENCODING",
              "DEFAULT_COMPRESSION", "0"),
             ("c", "string", "", "false", "true", "", "AUTO_ENCODING",
              "DEFAULT_COMPRESSION", "0")]

  def test_delete_external_kudu_table(self, cursor, kudu_client):
    """Check that Impala can recover from the case where the underlying Kudu table of
    an external table is dropped using the Kudu client.
    """
    with self.temp_kudu_table(kudu_client, [INT32]) as kudu_table:
      # Create an external Kudu table
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (
          impala_table_name, props))
      cursor.execute("DESCRIBE %s" % (impala_table_name))
      assert cursor.fetchall() == \
          [("a", "int", "", "true", "false", "", "AUTO_ENCODING",
            "DEFAULT_COMPRESSION", "0")]
      # Drop the underlying Kudu table
      kudu_client.delete_table(kudu_table.name)
      assert not kudu_client.table_exists(kudu_table.name)
      err_msg = 'The table does not exist: table_name: "%s"' % (kudu_table.name)
      # REFRESH surfaces the missing Kudu table, but DROP must still work so
      # the dangling metadata can be cleaned up.
      try:
        cursor.execute("REFRESH %s" % (impala_table_name))
      except Exception as e:
        assert err_msg in str(e)
      cursor.execute("DROP TABLE %s" % (impala_table_name))
      cursor.execute("SHOW TABLES")
      assert (impala_table_name,) not in cursor.fetchall()

  def test_delete_managed_kudu_table(self, cursor, kudu_client, unique_database):
    """Check that dropping a managed Kudu table works even if the underlying Kudu table
    has been dropped externally."""
    impala_tbl_name = "foo"
    cursor.execute("""CREATE TABLE %s.%s (a INT PRIMARY KEY) PARTITION BY HASH (a)
PARTITIONS 3 STORED AS KUDU""" % (unique_database, impala_tbl_name))
    kudu_tbl_name = KuduTestSuite.to_kudu_table_name(unique_database, impala_tbl_name)
    assert kudu_client.table_exists(kudu_tbl_name)
    kudu_client.delete_table(kudu_tbl_name)
    assert not kudu_client.table_exists(kudu_tbl_name)
    cursor.execute("DROP TABLE %s.%s" % (unique_database, impala_tbl_name))
    cursor.execute("SHOW TABLES IN %s" % unique_database)
    assert (impala_tbl_name,) not in cursor.fetchall()
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
class TestKuduMemLimits(KuduTestSuite):
  """Runs Kudu scans under tight mem_limits: each query must either succeed or
  fail cleanly with a "Memory limit exceeded" error."""

  QUERIES = ["select * from tpch_kudu.lineitem where l_orderkey = -1",
             "select * from tpch_kudu.lineitem where l_commitdate like '%cheese'",
             "select * from tpch_kudu.lineitem limit 90"]

  # The value indicates the minimum memory requirements for the queries above, the first
  # memory limit corresponds to the first query
  QUERY_MEM_LIMITS = [1, 1, 10]

  @pytest.mark.execute_serially
  @pytest.mark.parametrize("mem_limit", [1, 10, 0])
  def test_low_mem_limit_low_selectivity_scan(self, cursor, mem_limit, vector):
    """Tests that the queries specified in this test suite run under the given
    memory limits."""
    # Copy the vector's exec options, stringified, and apply this run's limit.
    exec_options = dict((k, str(v)) for k, v
        in vector.get_value('exec_option').iteritems())
    exec_options['mem_limit'] = "{0}m".format(mem_limit)
    for i, q in enumerate(self.QUERIES):
      try:
        cursor.execute(q, configuration=exec_options)
        cursor.fetchall()
      except Exception as e:
        # A failure is only acceptable when the limit is at or below the
        # query's known minimum requirement, and it must be a mem-limit error.
        if (mem_limit > self.QUERY_MEM_LIMITS[i]):
          raise
        assert "Memory limit exceeded" in str(e)

    # IMPALA-4654: Validate the fix for a bug where LimitReached() wasn't respected in
    # the KuduScanner and the limit query above would result in a fragment running an
    # additional minute. This ensures that the num fragments 'in flight' reaches 0 in
    # less time than IMPALA-4654 was reproducing (~60sec) but yet still enough time that
    # this test won't be flaky.
    verifiers = [ MetricVerifier(i.service) for i in ImpalaCluster().impalads ]
    for v in verifiers:
      v.wait_for_metric("impala-server.num-fragments-in-flight", 0, timeout=30)
|
showing_terminal.py | #!/usr/bin/python
"""
- read output from a subprocess in a background thread
- show the output in the GUI
"""
import sys
from itertools import islice
from subprocess import Popen, PIPE
from textwrap import dedent
from threading import Thread
try:
import Tkinter as tk
from Queue import Queue, Empty
except ImportError:
import tkinter as tk # Python 3
from queue import Queue, Empty # Python 3
def iter_except(function, exception):
    """Works like builtin 2-argument `iter()`, but stops on `exception`."""
    while True:
        try:
            yield function()
        except exception:
            return
class DisplaySubprocessOutputDemo:
    """Show the live stdout of a child process in a Tk label.

    A daemon reader thread moves lines from the child's pipe into a bounded
    queue; the Tk event loop polls the queue every 40 ms and displays at most
    one line per poll. A ``None`` sentinel in the queue signals EOF.
    """

    def __init__(self, root):
        self.root = root

        # start dummy subprocess to generate some output
        # (-u keeps the child's stdout unbuffered so lines arrive promptly)
        self.process = Popen([sys.executable, "-u", "-c", dedent("""
            import itertools, time
            for i in itertools.count():
                print("%d.%d" % divmod(i, 10))
                time.sleep(0.1)
            """)], stdout=PIPE)

        # launch thread to read the subprocess output
        # (put the subprocess output into the queue in a background thread,
        # get output from the queue in the GUI thread.
        # Output chain: process.readline -> queue -> label)
        q = Queue(maxsize=1024)  # limit output buffering (may stall subprocess)
        t = Thread(target=self.reader_thread, args=[q])
        t.daemon = True  # close pipe if GUI process exits
        t.start()

        # show subprocess' stdout in GUI
        self.label = tk.Label(root, text="  ", font=(None, 200))
        self.label.pack(ipadx=4, padx=4, ipady=4, pady=4, fill='both')
        self.update(q)  # start update loop

    def reader_thread(self, q):
        """Read subprocess output and put it into the queue."""
        try:
            with self.process.stdout as pipe:
                for line in iter(pipe.readline, b''):
                    q.put(line)
        finally:
            q.put(None)  # sentinel: tell the GUI thread the pipe is closed

    def update(self, q):
        """Update GUI with items from the queue."""
        for line in iter_except(q.get_nowait, Empty):  # display all content
            if line is None:
                self.quit()
                return
            else:
                self.label['text'] = line  # update GUI
                break  # display no more than one line per 40 milliseconds
        self.root.after(40, self.update, q)  # schedule next update

    def quit(self):
        self.process.kill()  # exit subprocess if GUI is closed (zombie!)
        self.root.destroy()
# --- application entry point ---
root = tk.Tk()
app = DisplaySubprocessOutputDemo(root)
# Kill the child process (not just the window) when the user closes the GUI.
root.protocol("WM_DELETE_WINDOW", app.quit)

# center window
root.eval('tk::PlaceWindow %s center' % root.winfo_pathname(root.winfo_id()))
root.mainloop()
test_mysqlx_connection.py | # -*- coding: utf-8 -*-
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Unittests for mysqlx.connection
"""
import logging
import os
import platform
import unittest
import sys
import tests
import time
import socket
import struct
import mysqlx
from threading import Thread
from time import sleep
from . import check_tls_versions_support
from mysqlx.connection import SocketStream, TLS_V1_3_SUPPORTED, HAVE_DNSPYTHON
from mysqlx.compat import STRING_TYPES
from mysqlx.errors import InterfaceError, OperationalError, ProgrammingError
from mysqlx.protocol import Message, MessageReaderWriter, Protocol
from mysqlx.protobuf import HAVE_MYSQLXPB_CEXT, mysqlxpb_enum, Protobuf
from mysql.connector.utils import linux_distribution
from mysql.connector.version import VERSION, LICENSE
if mysqlx.compat.PY3:
from urllib.parse import quote_plus, quote
else:
from urllib import quote_plus, quote
from .test_mysqlx_crud import drop_table
LOGGER = logging.getLogger(tests.LOGGER_NAME)

# URI parsing fixtures: (uri, expected settings) pairs for
# mysqlx._get_connection_settings(). An expected result of None means the URI
# is invalid and parsing must raise mysqlx.Error.
_URI_TEST_RESULTS = (  # (uri, result)
    ("127.0.0.1", None),
    ("localhost", None),
    ("domain.com", None),
    ("user:password@127.0.0.1", {"schema": "", "host": "127.0.0.1",
                                 "password": "password", "port": 33060,
                                 "user": "user"}),
    ("user:password@127.0.0.1:33061", {"schema": "", "host": "127.0.0.1",
                                       "password": "password", "port": 33061,
                                       "user": "user"}),
    ("user:@127.0.0.1", {"schema": "", "host": "127.0.0.1", "password": "",
                         "port": 33060, "user": "user"}),
    ("user:@127.0.0.1/schema", {"schema": "schema", "host": "127.0.0.1",
                                "password": "", "port": 33060,
                                "user": "user"}),
    ("user:@127.0.0.1/schema?use_pure=true", {"schema": "schema",
                                              "host": "127.0.0.1",
                                              "password": "", "port": 33060,
                                              "user": "user",
                                              "use-pure": True}),
    ("user{0}:password{0}@127.0.0.1/schema?use_pure=true"
     "".format(quote("?!@#$%/:")), {"schema": "schema", "host": "127.0.0.1",
                                    "port": 33060, "user": "user?!@#$%/:",
                                    "password": "password?!@#$%/:",
                                    "use-pure": True}),
    ("mysqlx://user:@127.0.0.1", {"schema": "", "host": "127.0.0.1",
                                  "password": "", "port": 33060,
                                  "user": "user"}),
    ("mysqlx://user:@127.0.0.1:33060/schema",
     {"schema": "schema", "host": "127.0.0.1", "password": "", "port": 33060,
      "user": "user"}),
    ("mysqlx://user@[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1", None),
    ("mysqlx://user:password@[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1",
     {"schema": "", "host": "2001:db8:85a3:8d3:1319:8a2e:370:7348",
      "password": "password", "port": 1, "user": "user"}),
    ("mysqlx://user:password@[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1/schema",
     {"schema": "schema", "host": "2001:db8:85a3:8d3:1319:8a2e:370:7348",
      "password": "password", "port": 1, "user": "user"}),
    ("áé'í'óú:unicode@127.0.0.1",
     {"schema": "", "host": "127.0.0.1", "password": "unicode",
      "port": 33060, "user": "áé'í'óú"}),
    ("unicode:áé'í'óú@127.0.0.1",
     {"schema": "", "host": "127.0.0.1", "password": "áé'í'óú",
      "port": 33060, "user": "unicode"}),
    ("root:@[localhost, 127.0.0.1:88, [::]:99, [a1:b1::]]",
     {"routers": [{"host": "localhost", "port": 33060},
                  {"host": "127.0.0.1", "port": 88},
                  {"host": "::", "port": 99},
                  {"host": "a1:b1::", "port": 33060}],
      "user": "root", "password": "", "schema": ""}),
    ("root:@[a1:a2:a3:a4:a5:a6:a7:a8]]",
     {"host": "a1:a2:a3:a4:a5:a6:a7:a8", "schema": "",
      "port": 33060, "user": "root", "password": ""}),
    ("root:@localhost", {"user": "root", "password": "",
                         "host": "localhost", "port": 33060, "schema": ""}),
    ("root:@[a1:b1::]", {"user": "root", "password": "",
                         "host": "a1:b1::", "port": 33060, "schema": ""}),
    ("root:@[a1:b1::]:88", {"user": "root", "password": "",
                            "host": "a1:b1::", "port": 88, "schema": ""}),
    ("root:@[[a1:b1::]:88]", {"user": "root", "password": "",
                              "routers": [{"host": "a1:b1::", "port": 88}],
                              "schema": ""}),
    ("root:@[(address=localhost:99, priority=99)]",
     {"user": "root", "password": "", "schema": "",
      "routers": [{"host": "localhost", "port": 99, "priority": 99}]})
)

# Multi-host (router list) URIs and their expected parse results.
_ROUTER_LIST_RESULTS = (  # (uri, result)
    ("áé'í'óú:unicode@127.0.0.1", {"schema": "", "host": "127.0.0.1",
                                   "port": 33060, "password": "unicode",
                                   "user": "áé'í'óú"}),
    ("unicode:áé'í'óú@127.0.0.1", {"schema": "", "host": "127.0.0.1",
                                   "port": 33060, "password": "áé'í'óú",
                                   "user": "unicode"}),
    ("user:password@[127.0.0.1, localhost]",
     {"schema": "", "routers": [{"host": "127.0.0.1", "port": 33060},
                                {"host": "localhost", "port": 33060}],
      "password": "password", "user": "user"}),
    ("user:password@[(address=127.0.0.1, priority=99), (address=localhost,"
     "priority=98)]",
     {"schema": "", "routers": [{"host": "127.0.0.1", "port": 33060,
                                 "priority": 99},
                                {"host": "localhost", "port": 33060,
                                 "priority": 98}],
      "password": "password", "user": "user"}),
)

# Lists the prepared statements owned by the current connection, via
# performance_schema.
_PREP_STMT_QUERY = (
    "SELECT p.sql_text, p.count_execute "
    "FROM performance_schema.prepared_statements_instances AS p "
    "JOIN performance_schema.threads AS t ON p.owner_thread_id = t.thread_id "
    "AND t.processlist_id = @@pseudo_thread_id")
def file_uri(path, brackets=True):
    """Format a socket *path* for use in a mysqlx URI.

    With ``brackets=True`` the path is percent-encoded (the leading character
    is kept as-is); with ``brackets=False`` it is wrapped in parentheses.
    """
    if not brackets:
        return "({0})".format(path)
    return "{0}{1}".format(path[0], quote_plus(path[1:]))
def build_uri(**kwargs):
    """Build a mysqlx connection URI from keyword settings.

    Required keys: "user" and "password", plus either "host" (optionally with
    "port") or "routers" (a list of dicts with "host" and optional
    "port"/"priority"). Optional keys ("schema", "ssl_mode", "ssl_ca",
    "ssl_cert", "ssl_key", "use_pure", "connect_timeout",
    "connection_attributes", "tls-versions", "tls-ciphersuites") are appended
    to the URI query string.

    Returns:
        str: the assembled "mysqlx://..." URI.

    Raises:
        ProgrammingError: if neither "host" nor "routers" is given.
    """
    uri = "mysqlx://{0}:{1}".format(kwargs["user"], kwargs["password"])

    if "host" in kwargs:
        # IPv6 literals must be wrapped in brackets.
        host = "[{0}]".format(kwargs["host"]) \
            if ":" in kwargs["host"] else kwargs["host"]
        uri = "{0}@{1}".format(uri, host)
    elif "routers" in kwargs:
        routers = []
        for router in kwargs["routers"]:
            fmt = "(address={host}{port}, priority={priority})" \
                if "priority" in router else "{host}{port}"
            host = "[{0}]".format(router["host"]) if ":" in router["host"] \
                else router["host"]
            port = ":{0}".format(router["port"]) if "port" in router else ""
            routers.append(fmt.format(host=host, port=port,
                                      priority=router.get("priority", None)))
        uri = "{0}@[{1}]".format(uri, ",".join(routers))
    else:
        raise ProgrammingError("host or routers required.")

    if "port" in kwargs:
        uri = "{0}:{1}".format(uri, kwargs["port"])
    if "schema" in kwargs:
        uri = "{0}/{1}".format(uri, kwargs["schema"])

    query = []
    if "ssl_mode" in kwargs:
        query.append("ssl-mode={0}".format(kwargs["ssl_mode"]))
    if "ssl_ca" in kwargs:
        query.append("ssl-ca={0}".format(kwargs["ssl_ca"]))
    if "ssl_cert" in kwargs:
        query.append("ssl-cert={0}".format(kwargs["ssl_cert"]))
    if "ssl_key" in kwargs:
        query.append("ssl-key={0}".format(kwargs["ssl_key"]))
    if "use_pure" in kwargs:
        query.append("use-pure={0}".format(kwargs["use_pure"]))
    if "connect_timeout" in kwargs:
        query.append("connect-timeout={0}".format(kwargs["connect_timeout"]))

    if "connection_attributes" in kwargs:
        conn_attrs = kwargs["connection_attributes"]
        if isinstance(conn_attrs, STRING_TYPES) and \
                not (conn_attrs.startswith("[") and conn_attrs.endswith("]")):
            query.append("connection-attributes={}"
                         "".format(kwargs["connection_attributes"]))
        else:
            # NOTE(review): this branch assumes a dict; a bracket-wrapped
            # string would be indexed character by character — confirm callers
            # never pass one.
            attr_list = []
            for key in conn_attrs:
                attr_list.append("{}={}".format(key, conn_attrs[key]))
            query.append("connection-attributes={0}"
                         "".format("[{}]".format(",".join(attr_list))))

    if "tls-versions" in kwargs:
        tls_versions = kwargs["tls-versions"]
        if isinstance(tls_versions, STRING_TYPES) and \
                not (tls_versions.startswith("[") and
                     tls_versions.endswith("]")):
            query.append("tls-versions=[{}]"
                         "".format(kwargs["tls-versions"]))
        else:
            # NOTE(review): assumes a list of version names; a bracket-wrapped
            # string would be joined character by character.
            query.append("tls-versions=[{}]".format(",".join(tls_versions)))

    if "tls-ciphersuites" in kwargs:
        tls_ciphers = kwargs["tls-ciphersuites"]
        if isinstance(tls_ciphers, STRING_TYPES) and \
                not (tls_ciphers.startswith("[") and
                     tls_ciphers.endswith("]")):
            # Bug fix: this previously built the value with
            # ",".format(tls_ciphers); a format string without replacement
            # fields ignores its arguments, so the result was always
            # "tls-ciphersuites=[,]" and the requested ciphers were silently
            # dropped. Mirror the tls-versions branch and emit the string.
            query.append("tls-ciphersuites=[{}]".format(tls_ciphers))
        else:
            query.append("tls-ciphersuites=[{}]".format(",".join(tls_ciphers)))

    if len(query) > 0:
        uri = "{0}?{1}".format(uri, "&".join(query))
    return uri
class ServerSocketStream(SocketStream):
    """SocketStream variant that acts as the *server* end of a connection,
    used to fake an X Protocol server in tests."""

    def __init__(self):
        # SocketStream.__init__ is intentionally not called; the socket is
        # obtained lazily in start_receive() by accepting a connection.
        self._socket = None

    def start_receive(self, host, port):
        """Opens a socket to communicate with the given host, port

        Args:
            host (str): host name.
            port (int): host port.

        Returns:
            address of the communication channel
        """
        my_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        my_sock.bind((host, port))
        # Starting receiving...
        if sys.version_info > (3, 5):
            my_sock.listen()
        else:
            my_sock.listen(1)
        # Block until a client connects, then use that connection as our
        # stream socket.
        self._socket, addr = my_sock.accept()
        return addr
class ServerProtocol(Protocol):
    """Protocol helper able to emit *server-side* authentication messages,
    for driving the client through a fake authentication exchange."""

    def __init__(self, reader_writer):
        super(ServerProtocol, self).__init__(reader_writer)

    def send_auth_continue_server(self, auth_data):
        """Send Server authenticate continue.

        Args:
            auth_data (str): Authentication data.
        """
        msg = Message("Mysqlx.Session.AuthenticateContinue",
                      auth_data=auth_data)
        self._writer.write_message(mysqlxpb_enum(
            "Mysqlx.ServerMessages.Type.SESS_AUTHENTICATE_CONTINUE"), msg)

    def send_auth_ok(self):
        """Send authenticate OK."""
        msg = Message("Mysqlx.Session.AuthenticateOk")
        self._writer.write_message(mysqlxpb_enum(
            "Mysqlx.ServerMessages.Type.SESS_AUTHENTICATE_OK"), msg)
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 14), "XPlugin not compatible")
class MySQLxSessionTests(tests.MySQLxTests):
    def setUp(self):
        """Open a session and precompute platform/license strings used when
        checking connection attributes."""
        self.connect_kwargs = tests.get_mysqlx_config()
        self.schema_name = self.connect_kwargs["schema"]
        try:
            self.session = mysqlx.get_session(self.connect_kwargs)
        except mysqlx.Error as err:
            self.fail("{0}".format(err))

        # Derive the architecture / OS-version strings the connector reports.
        if os.name == "nt":
            if "64" in platform.architecture()[0]:
                self.platform_arch = "x86_64"
            elif "32" in platform.architecture()[0]:
                self.platform_arch = "i386"
            else:
                self.platform_arch = platform.architecture()
            self.os_ver = "Windows-{}".format(platform.win32_ver()[1])
        else:
            self.platform_arch = platform.machine()
            if platform.system() == "Darwin":
                self.os_ver = "{}-{}".format("macOS", platform.mac_ver()[0])
            else:
                self.os_ver = "-".join(linux_distribution()[0:2])

        # Map the connector's license string to the reported license id.
        license_chunks = LICENSE.split(" ")
        if license_chunks[0] == "GPLv2":
            self.client_license = "GPL-2.0"
        else:
            self.client_license = "Commercial"
    def test___init__(self):
        """Exercise session-creation failure modes: bad hosts, router
        priorities (missing, out-of-range, non-numeric), invalid options and
        invalid URI schemes."""
        bad_config = {
            "host": "bad_host",
            "port": "",
            "username": "root",
            "password": ""
        }
        self.assertRaises(InterfaceError, mysqlx.Session, bad_config)

        host = self.connect_kwargs["host"]
        port = self.connect_kwargs["port"]
        user = self.connect_kwargs["user"]
        password = self.connect_kwargs["password"]

        # Session to a farm using one of many routers (prios)
        # Loop during connect because of network error (succeed)
        routers = [{"host": "bad_host","priority": 100},
                   {"host": host, "port": port, "priority": 98}]
        uri = build_uri(user=user, password=password, routers=routers)
        session = mysqlx.get_session(uri)
        session.close()

        # Session to a farm using one of many routers (incomplete prios)
        routers = [{"host": "bad_host", "priority": 100},
                   {"host": host, "port": port}]
        uri = build_uri(user=user, password=password, routers=routers)
        self.assertRaises(ProgrammingError, mysqlx.get_session, uri)
        try:
            session = mysqlx.get_session(uri)
        except ProgrammingError as err:
            self.assertEqual(4000, err.errno)

        # Session to a farm using invalid priorities (out of range)
        routers = [{"host": "bad_host", "priority": 100},
                   {"host": host, "port": port, "priority": 101}]
        uri = build_uri(user=user, password=password, routers=routers)
        self.assertRaises(ProgrammingError, mysqlx.get_session, uri)
        try:
            session = mysqlx.get_session(uri)
        except ProgrammingError as err:
            self.assertEqual(4007, err.errno)

        # Non-numeric priority given in a URI
        routers = [{"host": "bad_host", "priority": 100},
                   {"host": host, "port": port, "priority": "A"}]
        uri = build_uri(user=user, password=password, routers=routers)
        self.assertRaises(ProgrammingError, mysqlx.get_session, uri)
        try:
            session = mysqlx.get_session(uri)
        except ProgrammingError as err:
            self.assertEqual(4002, err.errno)

        # Negative priority given via settings kwargs
        routers = [{"host": "bad_host", "priority": 100},
                   {"host": host, "port": port, "priority": -101}]
        settings = {"user": user, "password": password, "routers": routers}
        self.assertRaises(ProgrammingError, mysqlx.get_session, **settings)
        try:
            session = mysqlx.get_session(**settings)
        except ProgrammingError as err:
            self.assertEqual(4007, err.errno)

        # Non-numeric priority given via settings kwargs
        routers = [{"host": "bad_host", "priority": 100},
                   {"host": host, "port": port, "priority": "A"}]
        settings = {"user": user, "password": password, "routers": routers}
        self.assertRaises(ProgrammingError, mysqlx.get_session, **settings)
        try:
            session = mysqlx.get_session(**settings)
        except ProgrammingError as err:
            self.assertEqual(4007, err.errno)

        # Establish an Session to a farm using one of many routers (no prios)
        routers = [{"host": "bad_host"}, {"host": host, "port": port}]
        uri = build_uri(user=user, password=password, routers=routers)
        session = mysqlx.get_session(uri)
        session.close()

        # Break loop during connect (non-network error)
        uri = build_uri(user=user, password="bad_pass", routers=routers)
        self.assertRaises(InterfaceError, mysqlx.get_session, uri)

        # Break loop during connect (none left)
        uri = "mysqlx://{0}:{1}@[bad_host, another_bad_host]".format(user, password)
        self.assertRaises(InterfaceError, mysqlx.get_session, uri)
        try:
            session = mysqlx.get_session(uri)
        except InterfaceError as err:
            self.assertEqual(4001, err.errno)

        # Invalid option with URI
        uri = "mysqlx://{0}:{1}@{2}:{3}?invalid=option" \
              "".format(user, password, host, port)
        self.assertRaises(InterfaceError, mysqlx.get_session, uri)
        # user/password may not be repeated as query options
        uri = "mysqlx://{0}:{1}@{2}:{3}?user=root" \
              "".format(user, password, host, port)
        self.assertRaises(InterfaceError, mysqlx.get_session, uri)
        uri = "mysqlx://{0}:{1}@{2}:{3}?password=secret" \
              "".format(user, password, host, port)
        self.assertRaises(InterfaceError, mysqlx.get_session, uri)

        # Invalid scheme
        uri = "mysqlx+invalid://{0}:{1}@{2}:{3}" \
              "".format(user, password, host, port)
        self.assertRaises(InterfaceError, mysqlx.get_session, uri)

        # Invalid option with dict
        config = {
            "user": user,
            "password": password,
            "host": host,
            "port": port,
            "invalid": "option"
        }
        self.assertRaises(InterfaceError, mysqlx.get_session, config)

        # Invalid option with kwargs
        self.assertRaises(InterfaceError, mysqlx.get_session, **config)

        # SocketSteam.is_socket()
        session = mysqlx.get_session(user=user, password=password,
                                     host=host, port=port)
        self.assertFalse(session._connection.stream.is_socket())
    def test_auth(self):
        """Verify PLAIN and MYSQL41 authentication against native-password and
        sha256-password accounts."""
        sess = mysqlx.get_session(self.connect_kwargs)
        sess.sql("CREATE USER 'native'@'%' IDENTIFIED WITH "
                 "mysql_native_password BY 'test'").execute()
        sess.sql("CREATE USER 'sha256'@'%' IDENTIFIED WITH "
                 "sha256_password BY 'sha256'").execute()

        config = {'host': self.connect_kwargs['host'],
                  'port': self.connect_kwargs['port']}

        # mysql_native_password accepts both PLAIN and MYSQL41.
        config['user'] = 'native'
        config['password'] = 'test'
        config['auth'] = 'plain'
        mysqlx.get_session(config)
        config['auth'] = 'mysql41'
        mysqlx.get_session(config)

        # sha256_password: PLAIN works on 8.0.1+, MYSQL41 must be rejected.
        config['user'] = 'sha256'
        config['password'] = 'sha256'
        if tests.MYSQL_VERSION >= (8, 0, 1):
            config['auth'] = 'plain'
            mysqlx.get_session(config)
        config['auth'] = 'mysql41'
        self.assertRaises(InterfaceError, mysqlx.get_session, config)

        sess.sql("DROP USER 'native'@'%'").execute()
        sess.sql("DROP USER 'sha256'@'%'").execute()
        sess.close()
    @unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 5),
                     "SHA256_MEMORY authentation mechanism not available")
    def test_auth_sha265_memory(self):
        """Verify SHA256_MEMORY auth: it must fail before the server has the
        password cached and succeed after a PLAIN-over-TLS login primes it.

        NOTE(review): "sha265" in the method name and "authentation" in the
        skip reason look like typos, but renaming would change the test id.
        """
        sess = mysqlx.get_session(self.connect_kwargs)
        sess.sql("CREATE USER 'caching'@'%' IDENTIFIED WITH "
                 "caching_sha2_password BY 'caching'").execute()
        config = {
            "user": "caching",
            "password": "caching",
            "host": self.connect_kwargs["host"],
            "port": self.connect_kwargs["port"]
        }

        # Session creation is not possible with SSL disabled
        config["ssl-mode"] = mysqlx.SSLMode.DISABLED
        self.assertRaises(InterfaceError, mysqlx.get_session, config)
        config["auth"] = mysqlx.Auth.SHA256_MEMORY
        self.assertRaises(InterfaceError, mysqlx.get_session, config)

        # Session creation is possible with SSL enabled
        config["ssl-mode"] = mysqlx.SSLMode.REQUIRED
        config["auth"] = mysqlx.Auth.PLAIN
        mysqlx.get_session(config)

        # Disable SSL
        config["ssl-mode"] = mysqlx.SSLMode.DISABLED

        # Password is in cache will, session creation is possible
        config["auth"] = mysqlx.Auth.SHA256_MEMORY
        mysqlx.get_session(config)

        sess.sql("DROP USER 'caching'@'%'").execute()
        sess.close()
    @unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 15), "--mysqlx-socket option"
                     " tests not available for this MySQL version")
    @unittest.skipIf(os.name == 'nt', "sockets not available"
                     " on windows")
    def test_mysqlx_socket(self):
        """Connect over a Unix socket and exercise socket-path URI parsing."""
        # Connect with unix socket
        uri = "mysqlx://{user}:{password}@({socket})".format(
            user=self.connect_kwargs["user"],
            password=self.connect_kwargs["password"],
            socket=self.connect_kwargs["socket"])

        session = mysqlx.get_session(uri)

        # No SSL with Unix Sockets
        res = mysqlx.statement.SqlStatement(session._connection,
            "SHOW STATUS LIKE 'Mysqlx_ssl_active'").execute().fetch_all()
        self.assertEqual("OFF", res[0][1])

        session.close()

        # Socket parsing tests: both the "(path)" form and the
        # percent-encoded form must yield the same socket/schema settings.
        conn = mysqlx._get_connection_settings("root:@(/path/to/sock)")
        self.assertEqual("/path/to/sock", conn["socket"])
        self.assertEqual("", conn["schema"])

        conn = mysqlx._get_connection_settings("root:@(/path/to/sock)/schema")
        self.assertEqual("/path/to/sock", conn["socket"])
        self.assertEqual("schema", conn["schema"])

        conn = mysqlx._get_connection_settings("root:@/path%2Fto%2Fsock")
        self.assertEqual("/path/to/sock", conn["socket"])
        self.assertEqual("", conn["schema"])

        conn = mysqlx._get_connection_settings("root:@/path%2Fto%2Fsock/schema")
        self.assertEqual("/path/to/sock", conn["socket"])
        self.assertEqual("schema", conn["schema"])

        # Relative socket paths ("./" and "../") must also round-trip.
        conn = mysqlx._get_connection_settings("root:@.%2Fpath%2Fto%2Fsock")
        self.assertEqual("./path/to/sock", conn["socket"])
        self.assertEqual("", conn["schema"])

        conn = mysqlx._get_connection_settings("root:@.%2Fpath%2Fto%2Fsock"
                                               "/schema")
        self.assertEqual("./path/to/sock", conn["socket"])
        self.assertEqual("schema", conn["schema"])

        conn = mysqlx._get_connection_settings("root:@..%2Fpath%2Fto%2Fsock")
        self.assertEqual("../path/to/sock", conn["socket"])
        self.assertEqual("", conn["schema"])

        conn = mysqlx._get_connection_settings("root:@..%2Fpath%2Fto%2Fsock"
                                               "/schema")
        self.assertEqual("../path/to/sock", conn["socket"])
        self.assertEqual("schema", conn["schema"])
    @unittest.skipIf(HAVE_MYSQLXPB_CEXT == False, "C Extension not available")
    def test_connection_uri(self):
        """Create a session from a build_uri() URI and validate the URI parser
        against the expected-results tables at module scope."""
        uri = build_uri(user=self.connect_kwargs["user"],
                        password=self.connect_kwargs["password"],
                        host=self.connect_kwargs["host"],
                        port=self.connect_kwargs["port"],
                        schema=self.connect_kwargs["schema"],
                        use_pure=False)
        session = mysqlx.get_session(uri)
        self.assertIsInstance(session, mysqlx.Session)

        # Test URI parser function
        for uri, res in _URI_TEST_RESULTS:
            try:
                settings = mysqlx._get_connection_settings(uri)
                self.assertEqual(res, settings)
            except mysqlx.Error:
                # An expected result of None means parsing must fail.
                self.assertEqual(res, None)

        # Test URI parser function
        for uri, res in _ROUTER_LIST_RESULTS:
            try:
                settings = mysqlx._get_connection_settings(uri)
                self.assertEqual(res, settings)
            except mysqlx.Error:
                self.assertEqual(res, None)
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 13),
"MySQL 8.0.13+ is required for connect timeout")
def test_connect_timeout(self):
config = self.connect_kwargs.copy()
# 0 ms disables timouts on socket connections
config["connect-timeout"] = 0
session = mysqlx.get_session(config)
session.close()
# 10000 ms should be time enough to connect
config["connect-timeout"] = 10000
session = mysqlx.get_session(config)
session.close()
# Use connect timeout in URI
session = mysqlx.get_session(build_uri(**config))
session.close()
# Timeout for an unreachable host
# https://en.wikipedia.org/wiki/IPv4#Special-use_addresses
hosts = [
"198.51.100.255",
"192.0.2.255",
"10.255.255.1",
"192.0.2.0",
"203.0.113.255",
"10.255.255.255",
"192.168.255.255",
"203.0.113.4",
"192.168.0.0",
"172.16.0.0",
"10.255.255.251",
"172.31.255.255",
"198.51.100.23",
"172.16.255.255",
"198.51.100.8",
"192.0.2.254",
]
unreach_hosts = []
config["connect-timeout"] = 2000
# Find two unreachable hosts for testing
for host in hosts:
try:
config["host"] = host
mysqlx.get_session(config)
except mysqlx.TimeoutError:
unreach_hosts.append(host)
if len(unreach_hosts) == 2:
break # We just need 2 unreachable hosts
except:
pass
total_unreach_hosts = len(unreach_hosts)
self.assertEqual(total_unreach_hosts, 2,
"Two unreachable hosts are needed, {0} found"
"".format(total_unreach_hosts))
# Multi-host scenarios
# Connect to a secondary host if the primary fails
routers = [
{"host": unreach_hosts[0], "port": config["port"], "priority": 100},
{"host": "127.0.0.1", "port": config["port"], "priority": 90}
]
uri = build_uri(user=config["user"], password=config["password"],
connect_timeout=2000, routers=routers)
session = mysqlx.get_session(uri)
session.close()
# Fail to connect to all hosts
routers = [
{"host": unreach_hosts[0], "port": config["port"], "priority": 100},
{"host": unreach_hosts[1], "port": config["port"], "priority": 90}
]
uri = build_uri(user=config["user"], password=config["password"],
connect_timeout=2000, routers=routers)
try:
mysqlx.get_session(uri)
self.fail("It should not connect to any unreachable host")
except mysqlx.TimeoutError as err:
self.assertEqual(err.msg,
"All server connection attempts were aborted. "
"Timeout of 2000 ms was exceeded for each "
"selected server")
except mysqlx.InterfaceError as err:
self.assertEqual(err.msg, "Unable to connect to any of the target hosts")
# Trying to establish a connection with a wrong password should not
# wait for timeout
config["host"] = "127.0.0.1"
config["password"] = "invalid_password"
config["connect-timeout"] = 2000
time_start = time.time()
self.assertRaises(InterfaceError, mysqlx.get_session, config)
time_elapsed = time.time() - time_start
session.close()
if time_elapsed >= config["connect-timeout"]:
self.fail("Trying to establish a connection with a wrong password "
"should not wait for timeout")
# The connection timeout value must be a positive integer
config["connect-timeout"] = -1
self.assertRaises(TypeError, mysqlx.get_session, config)
config["connect-timeout"] = 10.0983
self.assertRaises(TypeError, mysqlx.get_session, config)
config["connect-timeout"] = "abc"
self.assertRaises(TypeError, mysqlx.get_session, config)
def test_get_schemas(self):
schema_name = "test_get_schemas"
self.session.create_schema(schema_name)
schemas = self.session.get_schemas()
self.assertIsInstance(schemas, list)
self.assertTrue(schema_name in schemas)
self.session.drop_schema(schema_name)
def test_get_schema(self):
schema = self.session.get_schema(self.schema_name)
self.assertTrue(schema, mysqlx.Schema)
self.assertEqual(schema.get_name(), self.schema_name)
def test_get_default_schema(self):
schema = self.session.get_default_schema()
self.assertTrue(schema, mysqlx.Schema)
self.assertEqual(schema.get_name(), self.connect_kwargs["schema"])
self.assertTrue(schema.exists_in_database())
# Test None value is returned if no schema name is specified
settings = self.connect_kwargs.copy()
settings.pop("schema")
session = mysqlx.get_session(settings)
schema = session.get_default_schema()
self.assertIsNone(schema,
"None value was expected but got '{}'".format(schema))
session.close()
# Test SQL statements not fully qualified, which must not raise error:
# mysqlx.errors.OperationalError: No database selected
self.session.sql('CREATE DATABASE my_test_schema').execute()
self.session.sql('CREATE TABLE my_test_schema.pets(name VARCHAR(20))'
).execute()
settings = self.connect_kwargs.copy()
settings["schema"] = "my_test_schema"
session = mysqlx.get_session(settings)
schema = session.get_default_schema()
self.assertTrue(schema, mysqlx.Schema)
self.assertEqual(schema.get_name(),
"my_test_schema")
result = session.sql('SHOW TABLES').execute().fetch_all()
self.assertEqual("pets", result[0][0])
self.session.sql('DROP DATABASE my_test_schema').execute()
self.assertFalse(schema.exists_in_database())
self.assertRaises(mysqlx.ProgrammingError, session.get_default_schema)
session.close()
# Test without default schema configured at connect time (passing None)
settings = self.connect_kwargs.copy()
settings["schema"] = None
build_uri(**settings)
session = mysqlx.get_session(settings)
schema = session.get_default_schema()
self.assertIsNone(schema,
"None value was expected but got '{}'".format(schema))
session.close()
# Test not existing default schema at get_session raise error
settings = self.connect_kwargs.copy()
settings["schema"] = "nonexistent"
self.assertRaises(InterfaceError, mysqlx.get_session, settings)
# Test BUG#28942938: 'ACCESS DENIED' error for unauthorized user tries
# to use the default schema if not exists at get_session
self.session.sql("DROP USER IF EXISTS 'def_schema'@'%'").execute()
self.session.sql("CREATE USER 'def_schema'@'%' IDENTIFIED WITH "
"mysql_native_password BY 'test'").execute()
settings = self.connect_kwargs.copy()
settings['user'] = 'def_schema'
settings['password'] = 'test'
settings["schema"] = "nonexistent"
# a) Test with no Granted privileges
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(settings)
# Access denied for this user
self.assertEqual(1044, context.exception.errno)
# Grant privilege to one unrelated schema
self.session.sql("GRANT ALL PRIVILEGES ON nonexistent.* TO "
"'def_schema'@'%'").execute()
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(settings)
# Schema does not exist
self.assertNotEqual(1044, context.exception.errno)
def test_drop_schema(self):
test_schema = 'mysql_session_test_drop_schema'
schema = self.session.create_schema(test_schema)
self.session.drop_schema(test_schema)
self.assertFalse(schema.exists_in_database())
def test_create_schema(self):
schema = self.session.create_schema(self.schema_name)
self.assertTrue(schema.exists_in_database())
def test_sql(self):
statement = self.session.sql("SELECT VERSION()")
self.assertTrue(isinstance(statement, mysqlx.Statement))
# SQL statements should be strings
statement = self.session.sql(123)
self.assertRaises(mysqlx.ProgrammingError, statement.execute)
# Test unicode statements
statement = self.session.sql(u"SELECT VERSION()").execute()
self.assertTrue(isinstance(statement, mysqlx.SqlResult))
def test_rollback(self):
table_name = "t2"
schema = self.session.get_schema(self.schema_name)
if not schema.exists_in_database():
self.session.create_schema(self.schema_name)
stmt = "CREATE TABLE {0}.{1}(_id INT)"
self.session.sql(stmt.format(self.schema_name, table_name)).execute()
table = schema.get_table(table_name)
self.session.start_transaction()
table.insert("_id").values(1).execute()
self.assertEqual(table.count(), 1)
self.session.rollback()
self.assertEqual(table.count(), 0)
drop_table(schema, table_name)
def test_commit(self):
table_name = "t2"
schema = self.session.get_schema(self.schema_name)
if not schema.exists_in_database():
self.session.create_schema(self.schema_name)
stmt = "CREATE TABLE {0}.{1}(_id INT)"
self.session.sql(stmt.format(self.schema_name, table_name)).execute()
table = schema.get_table(table_name)
self.session.start_transaction()
table.insert("_id").values(1).execute()
self.assertEqual(table.count(), 1)
self.session.commit()
self.assertEqual(table.count(), 1)
drop_table(schema, table_name)
    def test_savepoint(self):
        """Exercise transaction savepoints.

        Covers: name validation, named and unnamed savepoint creation,
        rollback_to(), release_savepoint(), and the OperationalError raised
        when referring to a savepoint that no longer exists.
        """
        collection_name = "collection_test"
        schema = self.session.get_schema(self.schema_name)
        # The savepoint name should be a valid string
        self.assertRaises(mysqlx.errors.ProgrammingError,
                          self.session.set_savepoint, 123)
        # The savepoint name should not be an empty string
        self.assertRaises(mysqlx.errors.ProgrammingError,
                          self.session.set_savepoint, "")
        # The savepoint name should not be a white space
        self.assertRaises(mysqlx.errors.ProgrammingError,
                          self.session.set_savepoint, " ")
        # Invalid rollback savepoint without a started transaction
        sp1 = self.session.set_savepoint("sp1")
        self.assertRaises(mysqlx.errors.OperationalError,
                          self.session.rollback_to, sp1)
        collection = schema.create_collection(collection_name)
        self.session.start_transaction()
        collection.add({"_id": "1", "name": "Fred", "age": 21}).execute()
        self.assertEqual(1, collection.count())
        # Create a savepoint named 'sp2'; set_savepoint returns the name
        sp2 = self.session.set_savepoint("sp2")
        self.assertEqual(sp2, "sp2")
        collection.add({"_id": "2", "name": "Wilma", "age": 33}).execute()
        self.assertEqual(2, collection.count())
        # Create a savepoint named 'sp3'
        sp3 = self.session.set_savepoint("sp3")
        collection.add({"_id": "3", "name": "Betty", "age": 67}).execute()
        self.assertEqual(3, collection.count())
        # Rollback to 'sp3' savepoint: undoes only the third insert
        self.session.rollback_to(sp3)
        self.assertEqual(2, collection.count())
        # Rollback to 'sp2' savepoint: undoes the second insert too
        self.session.rollback_to(sp2)
        self.assertEqual(1, collection.count())
        # The 'sp3' savepoint should not exist at this point (rolling back
        # past it discarded it)
        self.assertRaises(mysqlx.errors.OperationalError,
                          self.session.rollback_to, sp3)
        collection.add({"_id": "4", "name": "Barney", "age": 42}).execute()
        self.assertEqual(2, collection.count())
        # Create an unnamed savepoint (server generates the name)
        sp4 = self.session.set_savepoint()
        collection.add({"_id": "3", "name": "Wilma", "age": 33}).execute()
        self.assertEqual(3, collection.count())
        # Release unnamed savepoint; data is unaffected by a release
        self.session.release_savepoint(sp4)
        self.assertEqual(3, collection.count())
        # The 'sp4' savepoint should not exist at this point
        self.assertRaises(mysqlx.errors.OperationalError,
                          self.session.rollback_to, sp4)
        self.session.commit()
        schema.drop_collection(collection_name)
def test_close(self):
session = mysqlx.get_session(self.connect_kwargs)
schema = session.get_schema(self.schema_name)
session.close()
self.assertRaises(mysqlx.OperationalError, schema.exists_in_database)
@unittest.skipIf(sys.version_info < (2, 7, 9), "The support for SSL is "
"not available for Python versions < 2.7.9.")
def test_ssl_connection(self):
config = {}
config.update(self.connect_kwargs)
socket = config.pop("socket")
# Secure by default
session = mysqlx.get_session(config)
res = mysqlx.statement.SqlStatement(session._connection,
"SHOW STATUS LIKE 'Mysqlx_ssl_active'").execute().fetch_all()
self.assertEqual("ON", res[0][1])
res = mysqlx.statement.SqlStatement(session._connection,
"SHOW STATUS LIKE 'Mysqlx_ssl_version'").execute().fetch_all()
self.assertTrue("TLS" in res[0][1])
session.close()
# Error on setting Client key without Client Certificate
config["ssl-key"] = tests.SSL_KEY
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Error on settings CRL without setting CA Certificate
config["ssl-crl"] = "/dummy/path"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
config.pop("ssl-crl")
# Error on setting SSL Mode to disabled with any SSL option
config["ssl-mode"] = "disabled"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Error on setting SSL Mode to verify_* without ssl_ca
config["ssl-mode"] = "verify_ca"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
config["ssl-mode"] = "verify_identity"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Error on SSL Mode set to required with CA set
config["ssl-ca"] = tests.SSL_CA
config["ssl-cert"] = tests.SSL_CERT
config["ssl-mode"] = "required"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Connection with ssl parameters
# Setting an invalid host name against a server certificate
config["host"] = "127.0.0.1"
# Should connect with ssl_mode=False
config["ssl-mode"] = "verify_ca"
session = mysqlx.get_session(config)
res = session.sql(
"SHOW STATUS LIKE 'Mysqlx_ssl_active'").execute().fetch_all()
self.assertEqual("ON", res[0][1])
# Should fail to connect with verify_identity
config["ssl-mode"] = "verify_identity"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Should connect with verify_identitythe and correct host name
config["host"] = "localhost"
config["ssl-mode"] = "verify_identity"
session = mysqlx.get_session(config)
res = session.sql(
"SHOW STATUS LIKE 'Mysqlx_ssl_active'").execute().fetch_all()
self.assertEqual("ON", res[0][1])
res = session.sql(
"SHOW STATUS LIKE 'Mysqlx_ssl_version'").execute().fetch_all()
self.assertTrue("TLS" in res[0][1])
session.close()
# Error if ssl-mode=disabled and ssl_* set
extra = [("ssl_mode", "disabled"),
("ssl_ca", "({0})".format(tests.SSL_CA))]
uri = build_uri(**dict(list(self.connect_kwargs.items()) + extra))
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
# Error if invalid ssl-mode
extra = [("ssl_mode", "invalid")]
uri = build_uri(**dict(list(self.connect_kwargs.items()) + extra))
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
# Parsing SSL Certificates
extra = [("ssl_mode", "verify_ca"),
("ssl_ca", file_uri(tests.SSL_CA, False)),
("ssl_key", file_uri(tests.SSL_KEY, False)),
("ssl_cert", file_uri(tests.SSL_CERT, False))]
uri = build_uri(**dict(list(self.connect_kwargs.items()) + extra))
session = mysqlx.get_session(uri)
extra = [("ssl_mode", "verify_ca"),
("ssl_ca", file_uri(tests.SSL_CA)),
("ssl_key", file_uri(tests.SSL_KEY)),
("ssl_cert", file_uri(tests.SSL_CERT))]
uri = build_uri(**dict(list(self.connect_kwargs.items()) + extra))
session = mysqlx.get_session(uri)
    @unittest.skipIf(sys.version_info < (2, 7, 9), "The support for SSL is "
                     "not available for Python versions < 2.7.9.")
    @unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 40), "TLSv1.1 incompatible")
    def test_get_session_with_tls_version(self):
        """Validate the 'tls-versions' and 'tls-ciphersuites' options.

        Checks the error paths for empty, invalid and duplicated values
        (both as dict settings and as URI options) and verifies that the
        negotiated TLS version and cipher suite match the requested ones.
        """
        # Base settings without schema or socket (forces TCP, no default db)
        settings = self.connect_kwargs.copy()
        settings.pop("schema")
        settings.pop("socket")
        # Dictionary connection settings tests using dict settings
        # Empty tls_version list
        settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
        settings["tls-versions"] = []
        with self.assertRaises(InterfaceError) as context:
            _ = mysqlx.get_session(settings)
        self.assertTrue(("At least one" in context.exception.msg), "Unexpected "
                        "exception message found: {}"
                        "".format(context.exception.msg))
        # Empty tls_ciphersuites list using dict settings
        settings["tls-ciphersuites"] = []
        settings["tls-versions"] = ["TLSv1"]
        with self.assertRaises(InterfaceError) as context:
            _ = mysqlx.get_session(settings)
        self.assertTrue(("No valid cipher suite" in context.exception.msg),
                        "Unexpected exception message found: {}"
                        "".format(context.exception.msg))
        # Given tls-version not in ["TLSv1.1", "TLSv1.2", "TLSv1.3"]
        settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
        settings["tls-versions"] = ["TLSv0.2", "TLSv1.7", "TLSv10.2"]
        with self.assertRaises(InterfaceError) as context:
            _ = mysqlx.get_session(settings)
        # Repeated values in tls-versions on dict settings
        settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
        settings["tls-versions"] = ["TLSv1.2", "TLSv1.1", "TLSv1.2"]
        with self.assertRaises(InterfaceError) as context:
            _ = mysqlx.get_session(settings)
        # Empty tls-versions on dict settings
        settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
        settings["tls-versions"] = []
        with self.assertRaises(InterfaceError) as context:
            _ = mysqlx.get_session(settings)
        self.assertTrue(("At least one TLS" in context.exception.msg),
                        "Unexpected exception message found: {}"
                        "".format(context.exception.msg))
        # Verify unknown cipher suite case
        settings["tls-ciphersuites"] = ["NOT-KNOWN"]
        settings["tls-versions"] = ["TLSv1.2"]
        with self.assertRaises(InterfaceError) as context:
            _ = mysqlx.get_session(settings)
        # URI string connection settings tests
        # Empty tls_version list on URI
        settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
        settings["tls-versions"] = []
        uri_settings = build_uri(**settings)
        with self.assertRaises(InterfaceError) as context:
            _ = mysqlx.get_session(uri_settings)
        self.assertTrue(("At least one" in context.exception.msg), "Unexpected "
                        "exception message found: {}"
                        "".format(context.exception.msg))
        # Empty tls_ciphersuites list without tls-versions
        settings["tls-ciphersuites"] = []
        settings.pop("tls-versions")
        uri_settings = build_uri(**settings)
        with self.assertRaises(InterfaceError) as context:
            _ = mysqlx.get_session(uri_settings)
        self.assertTrue(("No valid cipher suite" in context.exception.msg),
                        "Unexpected exception message found: {}"
                        "".format(context.exception.msg))
        # Invalid cipher suite given as a list, without tls-versions
        settings["tls-ciphersuites"] = ["INVALID"]
        uri_settings = build_uri(**settings)
        with self.assertRaises(InterfaceError) as context:
            _ = mysqlx.get_session(uri_settings)
        self.assertTrue(("value 'INVALID' in cipher" in context.exception.msg),
                        "Unexpected exception message found: {}"
                        "".format(context.exception.msg))
        # Invalid cipher suite given as a bare string
        settings["tls-ciphersuites"] = "INVALID"
        uri_settings = build_uri(**settings)
        with self.assertRaises(InterfaceError) as context:
            _ = mysqlx.get_session(uri_settings)
        self.assertTrue(("No valid cipher suite" in context.exception.msg),
                        "Unexpected exception message found: {}"
                        "".format(context.exception.msg))
        # Invalid value on tls_version list on URI
        settings.pop("tls-ciphersuites")
        settings["tls-versions"] = "INVALID"
        uri_settings = build_uri(**settings)
        with self.assertRaises(InterfaceError) as context:
            _ = mysqlx.get_session(uri_settings)
        self.assertTrue(("tls-version: 'INVALID' is" in context.exception.msg),
                        "Unexpected exception message found: {}"
                        "".format(context.exception.msg))
        # Empty tls_ciphersuites list
        settings["tls-ciphersuites"] = []
        settings["tls-versions"] = ["TLSv1"]
        uri_settings = build_uri(**settings)
        with self.assertRaises(InterfaceError) as context:
            _ = mysqlx.get_session(uri_settings)
        self.assertTrue(("No valid cipher suite" in context.exception.msg),
                        "Unexpected exception message found: {}"
                        "".format(context.exception.msg))
        # Given tls-version not in ["TLSv1.1", "TLSv1.2", "TLSv1.3"]
        settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
        settings["tls-versions"] = ["TLSv0.2", "TLSv1.7", "TLSv10.2"]
        uri_settings = build_uri(**settings)
        with self.assertRaises(InterfaceError) as context:
            _ = mysqlx.get_session(uri_settings)
        # Empty tls-versions list
        settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
        settings["tls-versions"] = []
        uri_settings = build_uri(**settings)
        with self.assertRaises(InterfaceError) as context:
            _ = mysqlx.get_session(uri_settings)
        self.assertTrue(("At least one TLS" in context.exception.msg),
                        "Unexpected exception message found: {}"
                        "".format(context.exception.msg))
        # Repeated values in tls-versions on URI
        settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
        settings["tls-versions"] = ["TLSv1.2", "TLSv1.1", "TLSv1.2"]
        uri_settings = build_uri(**settings)
        with self.assertRaises(InterfaceError) as context:
            _ = mysqlx.get_session(uri_settings)
        # Repeated tls-versions *option* on the URI (duplicate option name)
        settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
        settings["tls-versions"] = ["TLSv1.2", "TLSv1.3"]
        uri_settings = build_uri(**settings)
        uri_settings = "{}&{}".format(uri_settings,
                                      "tls-versions=[TLSv1.1,TLSv1.2]")
        with self.assertRaises(InterfaceError) as context:
            _ = mysqlx.get_session(uri_settings)
        self.assertTrue(("Duplicate option" in context.exception.msg),
                        "Unexpected exception message found: {}"
                        "".format(context.exception.msg))
        # Verify InterfaceError exception is raised with invalid TLS version
        settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
        settings["tls-versions"] = ["TLSv8"]
        uri_settings = build_uri(**settings)
        with self.assertRaises(InterfaceError) as context:
            _ = mysqlx.get_session(uri_settings)
        self.assertTrue(("not recognized" in context.exception.msg),
                        "Unexpected exception message found: {}"
                        "".format(context.exception.msg))
        # Verify unknown cipher suite case on URI
        settings["tls-ciphersuites"] = ["NOT-KNOWN"]
        settings["tls-versions"] = ["TLSv1.2"]
        uri_settings = build_uri(**settings)
        with self.assertRaises(InterfaceError) as context:
            _ = mysqlx.get_session(uri_settings)
        # Verify that TLSv1.3 version is accepted (connection success)
        # even if it's unsupported.
        settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
        settings["tls-versions"] = ["TLSv1.3", "TLSv1.2"]
        uri_settings = build_uri(**settings)
        # Connection must be successful by including another TLS version
        _ = mysqlx.get_session(uri_settings)
        supported_tls = check_tls_versions_support(
            ["TLSv1.2", "TLSv1.1", "TLSv1"])
        if not supported_tls:
            self.fail("No TLS version to test: {}".format(supported_tls))
        if len(supported_tls) > 1:
            # Verify given TLS version is used
            settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
            for tes_ver in supported_tls:
                settings["tls-versions"] = [tes_ver]
                uri_settings = build_uri(**settings)
                session = mysqlx.get_session(uri_settings)
                status = session.sql("SHOW STATUS LIKE 'Mysqlx_ssl_version%'"
                                     ).execute().fetch_all()
                for row in status:
                    if row.get_string("Variable_name") == 'Mysqlx_ssl_version':
                        self.assertEqual(row.get_string("Value"), tes_ver,
                                         "Unexpected TLS version found: {} for: {}"
                                         "".format(row.get_string("Value"),
                                                   tes_ver))
        # Following tests requires TLSv1.2
        if tests.MYSQL_VERSION < (8, 0, 17):
            return
        if "TLSv1.1" in supported_tls:
            # Verify the newest TLS version is used from the given list
            exp_res = ["TLSv1.2", "TLSv1.1", "TLSv1.2"]
            test_vers = [["TLSv1", "TLSv1.2", "TLSv1.1"], ["TLSv1", "TLSv1.1"],
                         ["TLSv1.2", "TLSv1"]]
            for tes_ver, exp_ver in zip(test_vers, exp_res):
                settings["tls-versions"] = tes_ver
                uri_settings = build_uri(**settings)
                session = mysqlx.get_session(uri_settings)
                status = session.sql("SHOW STATUS LIKE 'Mysqlx_ssl_version%'"
                                     ).execute().fetch_all()
                for row in status:
                    if row.get_string('Variable_name') == 'Mysqlx_ssl_version':
                        self.assertEqual(row.get_string('Value'), exp_ver,
                                         "Unexpected TLS version found: {}"
                                         "".format(row.get_string('Value')))
        # Verify given TLS cipher suite is used (IANA names must map to
        # their OpenSSL equivalents)
        exp_res = ["DHE-RSA-AES256-SHA256", "DHE-RSA-AES256-SHA256",
                   "DHE-RSA-AES128-GCM-SHA256"]
        test_ciphers = [["TLS_DHE_RSA_WITH_AES_256_CBC_SHA256"],
                        ["DHE-RSA-AES256-SHA256"],
                        ["TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"]]
        settings["tls-versions"] = "TLSv1.2"
        for test_cipher, exp_ver in zip(test_ciphers, exp_res):
            settings["tls-ciphersuites"] = test_cipher
            uri_settings = build_uri(**settings)
            session = mysqlx.get_session(uri_settings)
            status = session.sql("SHOW STATUS LIKE 'Mysqlx_ssl_cipher%'"
                                 ).execute().fetch_all()
            for row in status:
                if row.get_string("Variable_name") == "Mysqlx_ssl_cipher":
                    self.assertEqual(row.get_string("Value"), exp_ver,
                                     "Unexpected TLS version found: {} for: {}"
                                     "".format(row.get_string("Value"),
                                               test_cipher))
        # Verify one of TLS cipher suite is used from the given list
        exp_res = ["DHE-RSA-AES256-SHA256", "DHE-RSA-AES256-SHA256",
                   "DHE-RSA-AES128-GCM-SHA256"]
        test_ciphers = ["TLS_DHE_RSA_WITH_AES_256_CBC_SHA256",
                        "DHE-RSA-AES256-SHA256",
                        "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"]
        settings["tls-ciphersuites"] = test_ciphers
        settings["tls-versions"] = "TLSv1.2"
        uri_settings = build_uri(**settings)
        session = mysqlx.get_session(uri_settings)
        status = session.sql("SHOW STATUS LIKE 'Mysqlx_ssl_cipher%'"
                             ).execute().fetch_all()
        for row in status:
            if row.get_string("Variable_name") == "Mysqlx_ssl_cipher":
                self.assertIn(row.get_string("Value"), exp_res,
                              "Unexpected TLS version found: {} not in {}"
                              "".format(row.get_string('Value'), exp_res))
        if "TLSv1.1" in supported_tls:
            # Verify behavior when "TLSv1.3" is not supported.
            if TLS_V1_3_SUPPORTED:
                exp_tls_ver = "TLSv1.3"
            else:
                exp_tls_ver = "TLSv1.2"
            # connection success with secondary given TLS version.
            settings["tls-ciphersuites"] = ["DHE-RSA-AES256-SHA"]
            settings["tls-versions"] = ["TLSv1.3", "TLSv1.2"]
            settings_n = 0
            # Run the same check with the dict settings and the URI form
            for settings_case in [settings, build_uri(**settings)]:
                settings_n +=1
                session = mysqlx.get_session(settings_case)
                status = session.sql("SHOW STATUS LIKE 'Mysqlx_ssl_version%'"
                                     ).execute().fetch_all()
                for row in status:
                    if row.get_string('Variable_name') == 'Mysqlx_ssl_version':
                        self.assertEqual(row.get_string('Value'), exp_tls_ver,
                                         "Unexpected TLS version {} while using settings#{}"
                                         ": {}".format(row.get_string('Value'),
                                                       settings_n, settings_case))
            # Verify error when TLSv1.3 is not supported.
            if not TLS_V1_3_SUPPORTED:
                settings["tls-versions"] = ["TLSv1.3"]
                for settings_case in [settings, build_uri(**settings)]:
                    with self.assertRaises(InterfaceError) as context:
                        _ = mysqlx.get_session(settings_case)
def test_disabled_x_protocol(self):
session = mysqlx.get_session(self.connect_kwargs)
res = session.sql("SHOW VARIABLES WHERE Variable_name = 'port'") \
.execute().fetch_all()
settings = self.connect_kwargs.copy()
settings["port"] = res[0][1] # Lets use the MySQL classic port
session.close()
self.assertRaises(ProgrammingError, mysqlx.get_session, settings)
@unittest.skipIf(HAVE_MYSQLXPB_CEXT == False, "C Extension not available")
def test_use_pure(self):
settings = self.connect_kwargs.copy()
settings["use-pure"] = False
session = mysqlx.get_session(settings)
self.assertFalse(session.use_pure)
self.assertEqual(Protobuf.mysqlxpb.__name__, "_mysqlxpb")
session.use_pure = True
self.assertTrue(session.use_pure)
self.assertEqual(Protobuf.mysqlxpb.__name__, "_mysqlxpb_pure")
# 'use_pure' should be a bool type
self.assertRaises(ProgrammingError, setattr, session, "use_pure", -1)
session.close()
    @unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 16), "XPlugin not compatible")
    def test_connection_attributes(self):
        """Validate the 'connection-attributes' option.

        Covers rejection of malformed user-defined attributes (URI and dict
        forms), duplicated names, reserved '_'-prefixed names, the 32/1024
        character limits, accepted generic values, and that accepted
        attributes are visible in performance_schema on the server.
        """
        # Validate an error is raised if URL user defined connection attributes
        # given in a list are invalid
        invalid_conn_attrs = [2, 1.2, "[_='13']", '[_="1"]', '[_=23]', "[_2.3]",
                              "[_invalid]", "[valid=0,_]", "[valid=0,_nvalid]",
                              "[_invalid,valid=0]"]
        uri = build_uri(user=self.connect_kwargs["user"],
                        password=self.connect_kwargs["password"],
                        host=self.connect_kwargs["host"],
                        port=self.connect_kwargs["port"],
                        schema=self.connect_kwargs["schema"])
        for invalid_attr in invalid_conn_attrs:
            uri_test = "{}?connection_attributes={}".format(uri, invalid_attr)
            with self.assertRaises(InterfaceError) as _:
                mysqlx.get_session(uri_test)
                # Only reached when no error was raised (then assertRaises
                # fails on exit of the 'with' block)
                LOGGER.error("InterfaceError not raised while testing "
                             "invalid attribute: {}".format(invalid_attr))
        # Validate an error is raised if URL user defined connection attributes
        # are not a list or a bool type
        invalid_conn_attrs = ["[incompleteL", "incompleteL]", "A", "invalid",
                              "_invalid", "2", "2.3", "{}", "{invalid=0}",
                              "{[invalid=0]}", "_", 2, 0.2]
        for invalid_attr in invalid_conn_attrs:
            uri_test = "{}?connection_attributes={}".format(uri, invalid_attr)
            with self.assertRaises(InterfaceError) as _:
                mysqlx.get_session(uri_test)
                LOGGER.error("InterfaceError not raised while testing "
                             "invalid attribute: {}".format(invalid_attr))
        # Validate an error is raised if URL user defined connection attributes
        # through a connection URL when a name is duplicated
        connection_attributes = {
            "foo": "bar",
            "repeated": "attribute",
            "baz": "zoom",
        }
        uri = build_uri(user=self.connect_kwargs["user"],
                        password=self.connect_kwargs["password"],
                        host=self.connect_kwargs["host"],
                        port=self.connect_kwargs["port"],
                        schema=self.connect_kwargs["schema"],
                        connection_attributes=connection_attributes)
        # Inject a duplicate of the 'repeated' key before the closing ']'
        uri = "{},repeated=duplicate_attribute]".format(uri[0:-1])
        with self.assertRaises(InterfaceError) as context:
            mysqlx.get_session(uri)
            LOGGER.error("InterfaceError not raised while testing "
                         "uri: {}".format(uri))
        self.assertTrue("Duplicate key 'repeated' used in "
                        "connection-attributes" in context.exception.msg)
        # Test error is raised for attribute name starting with '_'
        connection_attributes = [
            {"foo": "bar", "_baz": "zoom"},
            {"_baz": "zoom"},
            {"foo": "bar", "_baz": "zoom", "puuuuum": "kaplot"}
        ]
        for conn_attr in connection_attributes:
            connect_kwargs = self.connect_kwargs.copy()
            connect_kwargs["connection_attributes"] = conn_attr
            with self.assertRaises(InterfaceError) as context:
                mysqlx.get_session(connect_kwargs)
                LOGGER.error("InterfaceError not raised while testing "
                             "connect_kwargs: {}".format(connect_kwargs))
            self.assertTrue("connection-attributes" in
                            context.exception.msg)
            self.assertTrue("cannot start with '_'" in context.exception.msg)
        # Test error is raised for attribute name size exceeds 32 characters
        connection_attributes = [
            {"foo": "bar", "p{}w".format("o"*31): "kaplot"},
            {"p{}w".format("o"*31): "kaplot"},
            {"baz": "zoom", "p{}w".format("o"*31): "kaplot", "a": "b"}
        ]
        for conn_attr in connection_attributes:
            connect_kwargs = self.connect_kwargs.copy()
            connect_kwargs["connection_attributes"] = conn_attr
            with self.assertRaises(InterfaceError) as context:
                mysqlx.get_session(connect_kwargs)
                LOGGER.error("InterfaceError not raised while testing "
                             "connection_attributes: {}".format(conn_attr))
            self.assertTrue("exceeds 32 characters limit size" in
                            context.exception.msg)
        # Test error is raised for attribute value size exceeds 1024 characters
        connection_attributes = [
            {"foo": "bar", "pum": "kr{}nk".format("u"*1024)},
            {"pum": "kr{}nk".format("u"*1024)},
            {"baz": "zoom", "pum": "kr{}nk".format("u"*1024), "a": "b"}
        ]
        for conn_attr in connection_attributes:
            connect_kwargs = self.connect_kwargs.copy()
            connect_kwargs["connection-attributes"] = conn_attr
            with self.assertRaises(InterfaceError) as context:
                mysqlx.get_session(connect_kwargs)
                LOGGER.error("InterfaceError not raised while testing "
                             "connection_attributes: {}".format(conn_attr))
            self.assertTrue("exceeds 1024 characters limit size" in
                            context.exception.msg)
        # Test valid generic values for the connection-attributes on URI
        valid_conn_attrs = ["[]", "False", "True", "false", "true", "[valid]",
                            "[valid=0]", "[valid,valid2=0]", '["_valid=0]',
                            "[valid2='0']", "[valid=,valid2=0]", "['_valid=0]",
                            "[[_valid=0]]"]
        uri = build_uri(user=self.connect_kwargs["user"],
                        password=self.connect_kwargs["password"],
                        host=self.connect_kwargs["host"],
                        port=self.connect_kwargs["port"],
                        schema=self.connect_kwargs["schema"])
        for valid_attr in valid_conn_attrs:
            uri_test = "{}?connection_attributes={}".format(uri, valid_attr)
            mysqlx.get_session(uri_test)
        # Test valid generic values when passing a dict with connection data
        valid_conn_attrs = [{}, "False", "True", "false", "true", {"valid": ""},
                            {"valid": None}, {"valid1": 1}, True, False, 1, 0,
                            [], ['a1=2', 'a3'], {"valid"}, {"foo", "bar"}]
        for conn_attr in valid_conn_attrs:
            connect_kwargs = self.connect_kwargs.copy()
            connect_kwargs["connection_attributes"] = conn_attr
            mysqlx.get_session(connect_kwargs)
        # Test invalid generic values when passing a dict with connection data
        invalid_conn_attrs = [{1:"1"}, {1:2}, {"_invalid":""}, {"_": ""},
                              123, 123.456, None, {"_invalid"}, ['_a1=2',]]
        for conn_attr in invalid_conn_attrs:
            connect_kwargs = self.connect_kwargs.copy()
            connect_kwargs["connection_attributes"] = conn_attr
            with self.assertRaises(InterfaceError) as context:
                mysqlx.get_session(connect_kwargs)
                LOGGER.error("InterfaceError not raised while testing "
                             "connection_attributes: {}".format(conn_attr))
        # Validate the user defined attributes are created in the server
        # Test user defined connection attributes through a connection URL
        connection_attributes = {
            "foo": "bar",
            "baz": "zoom",
            "quash": "",
            "puuuuum": "kaplot"
        }
        uri = build_uri(user=self.connect_kwargs["user"],
                        password=self.connect_kwargs["password"],
                        host=self.connect_kwargs["host"],
                        port=self.connect_kwargs["port"],
                        schema=self.connect_kwargs["schema"],
                        connection_attributes=connection_attributes)
        # Verify user defined session-connection-attributes are in the server
        my_session = mysqlx.get_session(uri)
        # pseudo_thread_id identifies this session's row in performance_schema
        row = my_session.sql("SHOW VARIABLES LIKE \"pseudo_thread_id\"").\
            execute().fetch_all()[0]
        get_attrs = ("SELECT ATTR_NAME, ATTR_VALUE FROM "
                     "performance_schema.session_account_connect_attrs "
                     "where PROCESSLIST_ID = \"{}\"")
        rows = my_session.sql(get_attrs.format(row.get_string('Value'))).\
            execute().fetch_all()
        # The server also records the connector's built-in attributes
        expected_attrs = connection_attributes.copy()
        expected_attrs.update({
            "_pid": str(os.getpid()),
            "_platform": self.platform_arch,
            "_source_host": socket.gethostname(),
            "_client_name": "mysql-connector-python",
            "_client_license": self.client_license,
            "_client_version": ".".join([str(x) for x in VERSION[0:3]]),
            "_os": self.os_ver
        })
        # Note that for an empty string "" value the server stores a Null value
        expected_attrs["quash"] = "None"
        for row in rows:
            self.assertEqual(expected_attrs[row.get_string('ATTR_NAME')],
                             row.get_string('ATTR_VALUE'),
                             "Attribute {} with value {} differs of {}".format(
                                 row.get_string('ATTR_NAME'),
                                 row.get_string('ATTR_VALUE'),
                                 expected_attrs[row.get_string('ATTR_NAME')]))
        # Verify connection-attributes can be skipped (not set on the server)
        # by passing "connection_attributes"=false in the URI
        uri = build_uri(user=self.connect_kwargs["user"],
                        password=self.connect_kwargs["password"],
                        host=self.connect_kwargs["host"],
                        port=self.connect_kwargs["port"],
                        schema=self.connect_kwargs["schema"],
                        connection_attributes="false")
        my_session = mysqlx.get_session(uri)
        row = my_session.sql("SHOW VARIABLES LIKE \"pseudo_thread_id\"").\
            execute().fetch_all()[0]
        get_attrs = ("SELECT ATTR_NAME, ATTR_VALUE FROM "
                     "performance_schema.session_account_connect_attrs "
                     "where PROCESSLIST_ID = \"{}\"")
        rows = my_session.sql(get_attrs.format(row.get_string('Value'))).\
            execute().fetch_all()
        self.assertEqual(len(rows), 0, "connection attributes where created "
                         "while was specified to not do so: {}".format(rows))
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 19),
"MySQL 8.0.19+ is required for DNS SRV")
@unittest.skipIf(not HAVE_DNSPYTHON,
"dnspython module is required for DNS SRV")
def test_dns_srv(self):
# The value of 'dns-srv' must be a boolean
uri = "root:@localhost/myschema?dns-srv=invalid"
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
config = {"host": "localhost", "user": "root", "dns-srv": 0}
self.assertRaises(InterfaceError, mysqlx.get_session, config)
config = {"host": "localhost", "user": "root", "dns-srv": 1}
self.assertRaises(InterfaceError, mysqlx.get_session, config)
config = {"host": "localhost", "user": "root", "dns-srv": None}
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Using Unix domain sockets with DNS SRV lookup is not allowed
uri = "mysqlx+srv://root:@localhost/myschema?socket=/tmp/mysql.sock"
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
# Specifying a port number with DNS SRV lookup is not allowed
uri = "mysqlx+srv://root:@localhost:33060/myschema"
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
# Specifying multiple hostnames with DNS SRV look up is not allowed
uri = "mysqlx+srv://root:@[host1, host2, host3]/myschema"
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
# The option 'dns-srv' is now allowed in connection string options
uri = "mysqlx+srv://root:@localhost/myschema?dns-srv=true"
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 14), "XPlugin not compatible")
class MySQLxInnitialNoticeTests(tests.MySQLxTests):
def setUp(self):
self.connect_kwargs = tests.get_mysqlx_config()
self.settings = {
"user": "root",
"password": "",
"host": "localhost",
"ssl-mode": "disabled",
"use_pure": True
}
def _server_thread(self, host="localhost", port=33061, notice=1):
stream = ServerSocketStream()
stream.start_receive(host, port)
reader_writer = MessageReaderWriter(stream)
protocol = ServerProtocol(reader_writer)
# Read message header
hdr = stream.read(5)
msg_len, msg_type = struct.unpack("<LB", hdr)
self.assertEqual(msg_type, 4)
# Read payload
_ = stream.read(msg_len - 1)
# send handshake
if notice == 1:
# send empty notice
stream.sendall(b"\x01\x00\x00\x00\x0b")
else:
# send notice frame with explicit default
stream.sendall(b"\x03\x00\x00\x00\x0b\x08\x01")
#stream.sendall(b"\x01\x00\x00\x00\x0b")
# send auth start")
protocol.send_auth_continue_server("00000000000000000000")
# Capabilities are not check for ssl-mode: disabled
# Reading auth_continue from client
hdr = stream.read(5)
msg_len, msg_type = struct.unpack("<LB", hdr)
self.assertEqual(msg_type, 5)
# Read payload
_ = stream.read(msg_len - 1)
# Send auth_ok
protocol.send_auth_ok()
# Read message
hdr = stream.read(5)
msg_len, msg_type = struct.unpack("<LB", hdr)
self.assertEqual(msg_type, 12)
# Read payload
_ = stream.read(msg_len - 1)
# send empty notice
if notice == 1:
# send empty notice
stream.sendall(b"\x01\x00\x00\x00\x0b")
else:
# send notice frame with explicit default
stream.sendall(b"\x03\x00\x00\x00\x0b\x08\x01")
# msg_type: 12 Mysqlx.Resultset.ColumnMetaData
stream.sendall(b"\x31\x00\x00\x00\x0c"
b"\x08\x07\x12\x08\x44\x61\x74\x61\x62\x61\x73"
b"\x65\x1a\x08\x44\x61\x74\x61\x62\x61\x73\x65"
b"\x22\x08\x53\x43\x48\x45\x4d\x41\x54\x41\x2a"
b"\x00\x32\x00\x3a\x03\x64\x65\x66\x40\x4c\x50"
b"\xc0\x01\x58\x10")
# send unexpected notice
if notice == 1:
# send empty notice
stream.sendall(b"\x01\x00\x00\x00\x0b")
else:
# send notice frame with explicit default
stream.sendall(b"\x03\x00\x00\x00\x0b\x08\x01")
# msg_type: 13 Mysqlx.Resultset.Row
stream.sendall(b"\x16\x00\x00\x00\x0d"
b"\x0a\x13\x69\x6e\x66\x6f\x72\x6d\x61\x74\x69"
b"\x6f\x6e\x5f\x73\x63\x68\x65\x6d\x61\x00")
# msg_type: 14 Mysqlx.Resultset.FetchDone
stream.sendall(b"\x01\x00\x00\x00\x0e")
# msg_type: 11 Mysqlx.Notice.Frame
stream.sendall(b"\x0f\x00\x00\x00\x0b\x08\x03\x10\x02\x1a\x08\x08"
b"\x04\x12\x04\x08\x02\x18\x00")
# send unexpected notice
if notice == 1:
# send empty notice
stream.sendall(b"\x01\x00\x00\x00\x0b")
else:
# send notice frame with explicit default
stream.sendall(b"\x03\x00\x00\x00\x0b\x08\x01")
# msg_type: 17 Mysqlx.Sql.StmtExecuteOk
stream.sendall(b"\x01\x00\x00\x00\x11")
# Read message
hdr = stream.read(5)
msg_len, msg_type = struct.unpack("<LB", hdr)
self.assertEqual(msg_type, 7)
# Read payload
_ = stream.read(msg_len - 1)
stream.sendall(b"\x07\x00\x00\x00\x00\n\x04bye!")
# Close socket
stream.close()
@unittest.skipIf(HAVE_MYSQLXPB_CEXT == False, "C Extension not available")
def test_initial_empty_notice_cext(self):
connect_kwargs = self.connect_kwargs.copy()
host = "localhost"
port = connect_kwargs["port"] + 10
worker1 = Thread(target=self._server_thread, args=[host, port, 1])
worker1.daemon = True
worker1.start()
sleep(1)
settings = self.settings.copy()
settings["port"] = port
settings["use_pure"] = False
session = mysqlx.get_session(settings)
rows = session.sql("show databases").execute().fetch_all()
self.assertEqual(rows[0][0], "information_schema")
session.close()
def test_initial_empty_notice_pure(self):
connect_kwargs = self.connect_kwargs.copy()
host = "localhost"
port = connect_kwargs["port"] + 20
worker2 = Thread(target=self._server_thread, args=[host, port, 1])
worker2.daemon = True
worker2.start()
sleep(2)
settings = self.settings.copy()
settings["port"] = port
settings["use_pure"] = True
session = mysqlx.get_session(settings)
rows = session.sql("show databases").execute().fetch_all()
self.assertEqual(rows[0][0], "information_schema")
session.close()
@unittest.skipIf(HAVE_MYSQLXPB_CEXT == False, "C Extension not available")
def test_initial_notice_cext(self):
connect_kwargs = self.connect_kwargs.copy()
host = "localhost"
port = connect_kwargs["port"] + 11
worker1 = Thread(target=self._server_thread, args=[host, port, 2])
worker1.daemon = True
worker1.start()
sleep(1)
settings = self.settings.copy()
settings["port"] = port
settings["use_pure"] = False
session = mysqlx.get_session(settings)
rows = session.sql("show databases").execute().fetch_all()
self.assertEqual(rows[0][0], "information_schema")
session.close()
def test_initial_notice_pure(self):
connect_kwargs = self.connect_kwargs.copy()
host = "localhost"
port = connect_kwargs["port"] + 21
worker2 = Thread(target=self._server_thread, args=[host, port, 2])
worker2.daemon = True
worker2.start()
sleep(2)
settings = self.settings.copy()
settings["port"] = port
settings["use_pure"] = True
session = mysqlx.get_session(settings)
rows = session.sql("show databases").execute().fetch_all()
self.assertEqual(rows[0][0], "information_schema")
session.close()
|
Test (1).py | #!/usr/bin/env python3
# coding:utf-8
import cv2
import math
import numpy as np
import threading
import time
import datetime
import CMDcontrol
robot_IP = "192.168.1.102"
camera_out = "chest"
stream_pic = True
action_DEBUG = False
#################################################初始化#########################################################
if stream_pic:
stream_head = "http://" + robot_IP + ":8082/?action=stream?dummy=param.mjpg"
cap_head = cv2.VideoCapture(stream_head)
stream_chest = "http://" + robot_IP + ":8080/?action=stream?dummy=param.mjpg"
cap_chest = cv2.VideoCapture(stream_chest)
else:
cap_chest = cv2.VideoCapture(0)
cap_head = cv2.VideoCapture(2)
box_debug = False
debug = False
img_debug = False
state = 1
step = 0
state_sel = 'hole'
reset = 0
skip = 0
chest_ret = False # 读取图像标志位
ret = False # 读取图像标志位
ChestOrg_img = None # 原始图像更新
HeadOrg_img = None # 原始图像更新
ChestOrg_copy = None
HeadOrg_copy = None
r_width = 480
r_height = 640
chest_r_width = 480
chest_r_height = 640
head_r_width = 640
head_r_height = 480
################################################读取图像线程#################################################
def get_img():
global ChestOrg_img, HeadOrg_img, HeadOrg_img, chest_ret
global ret
global cap_chest
while True:
if cap_chest.isOpened():
chest_ret, ChestOrg_img = cap_chest.read()
ret, HeadOrg_img = cap_head.read()
if (chest_ret == False) or (ret == False):
print("ret faile ------------------")
if HeadOrg_img is None:
print("HeadOrg_img error")
if ChestOrg_img is None:
print("ChestOrg_img error")
else:
time.sleep(1)
ret = True
print("pic error ")
# 读取图像线程
th1 = threading.Thread(target=get_img)
th1.setDaemon(True)
th1.start()
################################################动作执行线程#################################################
def move_action():
global org_img
global step, level
global golf_angle_hole
global golf_angle_ball, golf_angle
global golf_dis, golf_dis_y
global golf_angle_flag, golf_dis_flag
global golf_angle_start, golf_dis_start
global golf_ok
global golf_hole, golf_ball
CMDcontrol.CMD_transfer()
# 动作执行线程
th2 = threading.Thread(target=move_action)
th2.setDaemon(True)
th2.start()
acted_name = ""
def action_append(act_name):
global acted_name
# print("please enter to continue...")
# cv2.waitKey(0)
if action_DEBUG == False:
if act_name == "forwardSlow0403" and (acted_name == "Forwalk02RL" or acted_name == "Forwalk02L"):
acted_name = "Forwalk02LR"
elif act_name == "forwardSlow0403" and (acted_name == "Forwalk02LR" or acted_name == "Forwalk02R"):
acted_name = "Forwalk02RL"
elif act_name != "forwardSlow0403" and (acted_name == "Forwalk02LR" or acted_name == "Forwalk02R"):
# CMDcontrol.action_list.append("Forwalk02RS")
# acted_name = act_name
print(act_name, "动作未执行 执行 Stand")
acted_name = "Forwalk02RS"
elif act_name != "forwardSlow0403" and (acted_name == "Forwalk02RL" or acted_name == "Forwalk02L"):
# CMDcontrol.action_list.append("Forwalk02LS")
# acted_name = act_name
print(act_name, "动作未执行 执行 Stand")
acted_name = "Forwalk02LS"
elif act_name == "forwardSlow0403":
acted_name = "Forwalk02R"
else:
acted_name = act_name
CMDcontrol.actionComplete = False
if len(CMDcontrol.action_list) > 0:
print("队列超过一个动作")
CMDcontrol.action_list.append(acted_name)
else:
CMDcontrol.action_list.append(acted_name)
CMDcontrol.action_wait()
else:
print("-----------------------执行动作名:", act_name)
time.sleep(2)
color_range = {
'yellow_door': [(20, 140, 60), (40, 240, 150)],
'black_door': [(25, 25, 10), (110, 150, 30)],
'black_gap': [(0, 0, 0), (180, 255, 70)],
'yellow_hole': [(20, 120, 95), (30, 250, 190)],
'black_hole': [(5, 80, 20), (40, 255, 100)],
'chest_red_floor': [(0, 40, 60), (20, 200, 190)],
'chest_red_floor1': [(0, 100, 60), (20, 200, 210)],
'chest_red_floor2': [(110, 100, 60), (180, 200, 210)],
'green_bridge': [(50, 75, 70), (80, 240, 210)],
'grey_ground': [(30, 0, 0), (180, 100, 150)],
'blue': [(100, 80, 46), (124, 255, 255)],
'white': [(0, 0, 221), (180, 30, 255)],
}
color_dist = {'red': {'Lower': np.array([0, 160, 100]), 'Upper': np.array([180, 255, 250])},
'black_dir': {'Lower': np.array([0, 0, 10]), 'Upper': np.array([170, 170, 45])},
'black_line': {'Lower': np.array([0, 0, 20]), 'Upper': np.array([100, 160, 80])},
'blue': {'Lower': np.array([100, 80, 46]), 'Upper': np.array([124, 255, 255])},
'ball_red': {'Lower': np.array([160, 100, 70]), 'Upper': np.array([190, 215, 145])},
'blue_hole': {'Lower': np.array([100, 130, 80]), 'Upper': np.array([130, 255, 150])},
}
###############得到线形的总的轮廓###############
# 这个比值适应调整 handling
# 排除掉肩部黑色
def getLine_SumContour(contours, area=1):
global handling
contours_sum = None
for c in contours: # 初始化 contours_sum
area_temp = math.fabs(cv2.contourArea(c))
rect = cv2.minAreaRect(c) # 最小外接矩形
box = np.int0(cv2.boxPoints(rect)) # 最小外接矩形的四个顶点
edge1 = math.sqrt(math.pow(box[3, 1] - box[0, 1], 2) + math.pow(box[3, 0] - box[0, 0], 2))
edge2 = math.sqrt(math.pow(box[3, 1] - box[2, 1], 2) + math.pow(box[3, 0] - box[2, 0], 2))
ratio = edge1 / edge2 # 长与宽的比值大于3认为是条线
center_y = (box[0, 1] + box[1, 1] + box[2, 1] + box[3, 1]) / 4
if (area_temp > area) and (ratio > 3 or ratio < 0.33) and center_y > 240:
contours_sum = c
break
for c in contours:
area_temp = math.fabs(cv2.contourArea(c))
rect = cv2.minAreaRect(c) # 最小外接矩形
box = np.int0(cv2.boxPoints(rect)) # 最小外接矩形的四个顶点
edge1 = math.sqrt(math.pow(box[3, 1] - box[0, 1], 2) + math.pow(box[3, 0] - box[0, 0], 2))
edge2 = math.sqrt(math.pow(box[3, 1] - box[2, 1], 2) + math.pow(box[3, 0] - box[2, 0], 2))
ratio = edge1 / edge2
# print("ratio:",ratio,"area_temp:",area_temp)
if (area_temp > area) and (ratio > 3 or ratio < 0.33): # 满足面积条件 长宽比条件
rect = cv2.minAreaRect(c) # 最小外接矩形
box = np.int0(cv2.boxPoints(rect)) # 最小外接矩形的四个顶点
center_x = (box[0, 0] + box[1, 0] + box[2, 0] + box[3, 0]) / 4
center_y = (box[0, 1] + box[1, 1] + box[2, 1] + box[3, 1]) / 4
if center_y > 240: # 满足中心点坐标条件
contours_sum = np.concatenate((contours_sum, c), axis=0) # 将所有轮廓点拼接到一起
if box_debug:
cv2.drawContours(handling, [box], -1, (0, 255, 0), 5)
cv2.imshow('handling', handling)
cv2.waitKey(10)
else:
if box_debug:
cv2.drawContours(handling, [box], -1, (0, 0, 255), 5)
cv2.imshow('handling', handling)
cv2.waitKey(10)
else: # 弃
rect = cv2.minAreaRect(c) # 最小外接矩形
box = np.int0(cv2.boxPoints(rect)) # 最小外接矩形的四个顶点
if box_debug:
cv2.drawContours(handling, [box], -1, (0, 0, 255), 5)
cv2.imshow('handling', handling)
cv2.waitKey(10)
return contours_sum
# 得到最大轮廓和对应的最大面积
def getAreaMaxContour1(contours): # 返回轮廓 和 轮廓面积
contour_area_temp = 0
contour_area_max = 0
area_max_contour = None
for c in contours: # 历遍所有轮廓
contour_area_temp = math.fabs(cv2.contourArea(c)) # 计算轮廓面积
if contour_area_temp > contour_area_max:
contour_area_max = contour_area_temp
if contour_area_temp > 25: # 只有在面积大于25时,最大面积的轮廓才是有效的,以过滤干扰
area_max_contour = c
return area_max_contour, contour_area_max # 返回最大的轮廓
########得到最大轮廓############
def getAreaMaxContour2(contours, area=1):
contour_area_max = 0
area_max_contour = None
for c in contours:
contour_area_temp = math.fabs(cv2.contourArea(c))
if contour_area_temp > contour_area_max:
contour_area_max = contour_area_temp
if contour_area_temp > area: # 面积大于1
area_max_contour = c
return area_max_contour
# 将所有面积大于1的轮廓点拼接到一起
def getSumContour(contours, area=1):
contours_sum = None
# print(len(contours))
for c in contours: # 初始化contours
area_temp = math.fabs(cv2.contourArea(c))
if (area_temp > area):
contours_sum = c
break
for c in contours:
area_temp = math.fabs(cv2.contourArea(c))
if (area_temp > area):
contours_sum = np.concatenate((contours_sum, c), axis=0) # 将所有面积大于1的轮廓点拼接到一起
return contours_sum
######### 得到所有轮廓的面积##########
def getAreaSumContour(contours):
contour_area_sum = 0
for c in contours: # 历遍所有轮廓
contour_area_sum += math.fabs(cv2.contourArea(c)) # 计算轮廓面积
return contour_area_sum # 返回最大的面积
# 通过两边的黑线,调整左右位置 和 角度
def head_angle_dis():
global HeadOrg_img, chest_copy, reset, skip
global handling
angle_ok_flag = False
angle = 90
dis = 0
bottom_centreX = 0
bottom_centreY = 0
see = False
dis_ok_count = 0
headTURN = 0
step = 1
print("/-/-/-/-/-/-/-/-/-head*angle*dis")
while True:
OrgFrame = HeadOrg_img.copy()
x_start = 260
blobs = OrgFrame[int(0):int(480), int(x_start):int(380)] # 只对中间部分识别处理 Y , X
# cv2.rectangle(blobs,(0,460),(120,480),(255,255,255),-1) # 涂白
handling = blobs.copy()
frame_mask = blobs.copy()
# 获取图像中心点坐标x, y
center = []
# 开始处理图像
hsv = cv2.cvtColor(frame_mask, cv2.COLOR_BGR2HSV)
hsv = cv2.GaussianBlur(hsv, (3, 3), 0)
Imask = cv2.inRange(hsv, color_range['grey_ground'][0], color_range['grey_ground'][1])
# Imask = cv2.erode(Imask, None, iterations=1)
Imask = cv2.dilate(Imask, np.ones((3, 3), np.uint8), iterations=2)
_, cnts, hierarchy = cv2.findContours(Imask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1) # 找出所有轮廓
# print("327L len:",len(cnts))
cnt_sum = getLine_SumContour(cnts, area=300)
# 初始化
L_R_angle = 0
blackLine_L = [0, 0]
blackLine_R = [0, 0]
if cnt_sum is not None:
see = True
rect = cv2.minAreaRect(cnt_sum) # 最小外接矩形
box = np.int0(cv2.boxPoints(rect)) # 最小外接矩形的四个顶点
# cv2.drawContours(OrgFrame, [box], 0, (0, 255, 0), 2) # 将大矩形画在图上
if math.sqrt(math.pow(box[3, 1] - box[0, 1], 2) + math.pow(box[3, 0] - box[0, 0], 2)) > math.sqrt(
math.pow(box[3, 1] - box[2, 1], 2) + math.pow(box[3, 0] - box[2, 0], 2)):
if box[3, 0] - box[0, 0] == 0:
angle = 90
else:
angle = - math.atan((box[3, 1] - box[0, 1]) / (box[3, 0] - box[0, 0])) * 180.0 / math.pi
if box[3, 1] + box[0, 1] > box[2, 1] + box[1, 1]:
Ycenter = int((box[2, 1] + box[1, 1]) / 2)
Xcenter = int((box[2, 0] + box[1, 0]) / 2)
if box[2, 1] > box[1, 1]:
blackLine_L = [box[2, 0], box[2, 1]]
blackLine_R = [box[1, 0], box[1, 1]]
else:
blackLine_L = [box[1, 0], box[1, 1]]
blackLine_R = [box[2, 0], box[2, 1]]
cv2.circle(OrgFrame, (Xcenter + x_start, Ycenter), 10, (255, 255, 0), -1) # 画出中心点
else:
Ycenter = int((box[3, 1] + box[0, 1]) / 2)
Xcenter = int((box[3, 0] + box[0, 0]) / 2)
if box[3, 1] > box[0, 1]:
blackLine_L = [box[3, 0], box[3, 1]]
blackLine_R = [box[0, 0], box[0, 1]]
else:
blackLine_L = [box[0, 0], box[0, 1]]
blackLine_R = [box[3, 0], box[3, 1]]
cv2.circle(OrgFrame, (Xcenter + x_start, Ycenter), 10, (255, 255, 0), -1) # 画出中心点
else:
if box[3, 0] - box[2, 0] == 0:
angle = 90
else:
angle = - math.atan(
(box[3, 1] - box[2, 1]) / (box[3, 0] - box[2, 0])) * 180.0 / math.pi # 负号是因为坐标原点的问题
if box[3, 1] + box[2, 1] > box[0, 1] + box[1, 1]:
Ycenter = int((box[1, 1] + box[0, 1]) / 2)
Xcenter = int((box[1, 0] + box[0, 0]) / 2)
if box[0, 1] > box[1, 1]:
blackLine_L = [box[0, 0], box[0, 1]]
blackLine_R = [box[1, 0], box[1, 1]]
else:
blackLine_L = [box[1, 0], box[1, 1]]
blackLine_R = [box[0, 0], box[0, 1]]
cv2.circle(OrgFrame, (Xcenter + x_start, Ycenter), 10, (255, 255, 0), -1) # 画出中心点
else:
Ycenter = int((box[2, 1] + box[3, 1]) / 2)
Xcenter = int((box[2, 0] + box[3, 0]) / 2)
if box[3, 1] > box[2, 1]:
blackLine_L = [box[3, 0], box[3, 1]]
blackLine_R = [box[2, 0], box[2, 1]]
else:
blackLine_L = [box[2, 0], box[2, 1]]
blackLine_R = [box[3, 0], box[3, 1]]
cv2.circle(OrgFrame, (Xcenter + x_start, Ycenter), 10, (255, 255, 0), -1) # 画出中心点
if blackLine_L[0] == blackLine_R[0]:
L_R_angle = 0
else:
L_R_angle = -math.atan(
(blackLine_L[1] - blackLine_R[1]) / (blackLine_L[0] - blackLine_R[0])) * 180.0 / math.pi
if img_debug:
cv2.circle(OrgFrame, (blackLine_L[0] + x_start, blackLine_L[1]), 5, [0, 255, 255], 2)
cv2.circle(OrgFrame, (blackLine_R[0] + x_start, blackLine_R[1]), 5, [255, 0, 255], 2)
cv2.line(OrgFrame, (blackLine_R[0] + x_start, blackLine_R[1]),
(blackLine_L[0] + x_start, blackLine_L[1]), (0, 255, 255), thickness=2)
cv2.putText(OrgFrame, "L_R_angle:" + str(L_R_angle), (10, OrgFrame.shape[0] - 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
cv2.putText(OrgFrame, "Xcenter:" + str(Xcenter + x_start), (10, OrgFrame.shape[0] - 50),
cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
cv2.putText(OrgFrame, "Ycenter:" + str(Ycenter), (200, OrgFrame.shape[0] - 50),
cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
# cv2.drawContours(frame_mask, cnt_sum, -1, (255, 0, 255), 3)
# cv2.imshow('frame_mask', frame_mask)
cv2.imshow('black', Imask)
cv2.imshow('OrgFrame', OrgFrame)
cv2.waitKey(10)
else:
see = False
# 决策执行动作
if step == 1:
print("157L 向右看 HeadTurn015")
action_append("HeadTurn015")
time.sleep(1) # timefftest
step = 2
elif step == 2:
if not see: # not see the edge
print("276L 右侧看不到黑线 左侧移 Left3move")
action_append("Left3move")
headTURN += 1
if headTURN > 3:
headTURN = 0
print("276L 右侧看不到黑线 转为左看 waitKey")
step = 3
else: # 0
headTURN = 0
if L_R_angle > 2:
if L_R_angle > 7:
print("416L 左da旋转 turn001L ", L_R_angle)
action_append("turn001L")
# elif L_R_angle > 5:
# print("419L 左da旋转 turn001L ",L_R_angle)
# action_append("turn001L")
else:
print("422L 左旋转 turn000L ", L_R_angle)
action_append("turn000L")
# time.sleep(1) # timefftest
elif L_R_angle < -2:
if L_R_angle < -7:
print("434L 右da旋转 turn001R ", L_R_angle)
action_append("turn001R")
# elif L_R_angle < -5:
# print("437L 右da旋转 turn001R ",L_R_angle)
# action_append("turn001R")
else:
print("461L 右旋转 turn000R ", L_R_angle)
action_append("turn000R")
# time.sleep(1) # timefftest
elif Ycenter >= 430:
if Ycenter > 450:
print("451L 左da侧移 Left3move >440 ", Ycenter)
action_append("Left3move")
else:
print("439L 左侧移 Left02move > 365 ", Ycenter)
action_append("Left02move")
elif Ycenter < 390:
if Ycenter < 370:
print("474L 右da侧移 Right3move <380 ", Ycenter)
action_append("Right3move")
else:
print("448L 右侧移 Right02move <400 ", Ycenter)
action_append("Right02move")
else:
dis_ok_count
print("444L 右看 X位置ok")
cv2.destroyAllWindows()
break
elif step == 3:
print("157L 向左看 HeadTurn180")
action_append("HeadTurn180")
time.sleep(1) # timefftest
step = 4
elif step == 4:
if not see: # not see the edge
print("294L 左侧 看不到黑线 转为右看")
headTURN += 1
if headTURN > 5:
headTURN = 0
print("error 两侧都看不到 右侧移 Right3move")
action_append("Right3move")
else: # 0 +-1
headTURN = 0
if L_R_angle > 3:
if L_R_angle > 8:
print("304L 左da旋转 turn001L ", L_R_angle)
action_append("turn001L")
else:
print("304L 左旋转 turn000L ", L_R_angle)
action_append("turn000L")
# time.sleep(1) # timefftest
elif L_R_angle < -3:
if L_R_angle < -8:
print("307L 右da旋转 turn001R ", L_R_angle)
action_append("turn001R")
else:
print("307L 右旋转 turn000R ", L_R_angle)
action_append("turn000R")
# time.sleep(1) # timefftest
elif Ycenter >= 430:
if Ycenter > 450:
print("498L 右da侧移 Right3move ", L_R_angle)
action_append("Right3move")
else:
print("501L 右侧移 Right02move ", L_R_angle)
action_append("Right02move")
elif Ycenter < 390:
if Ycenter < 370:
print("497L 左da侧移 Left3move ", L_R_angle)
action_append("Left02move")
else:
print("500L 左侧移 Left02move ", L_R_angle)
action_append("Left02move")
else:
dis_ok_count
print("495L 左看 X位置ok")
cv2.destroyAllWindows()
break
#################################################第二关:台阶##########################################
def floor():
global org_img, state, state_sel, step, reset, skip, debug
global camera_out
if (state == 2 or state == 6 or state == 8) and state_sel == 'floor': # 初始化
print("/-/-/-/-/-/-/-/-/-进入floor")
step = 0
r_w = chest_r_width
r_h = chest_r_height
top_angle = 0
T_B_angle = 0
topcenter_x = 0.5 * r_w
topcenter_y = 0
bottomcenter_x = 0.5 * r_w
bottomcenter_y = 0
state_sel = 'floor'
while state_sel == 'floor':
# 分析图像 # chest
if True: # 上下边沿
Corg_img = ChestOrg_img.copy()
Corg_img = np.rot90(Corg_img)
OrgFrame = Corg_img.copy()
# 初始化 bottom_right bottom_left
bottom_right = (480, 0)
bottom_left = (0, 0)
top_right = (480, 0) # 右上角点坐标
top_left = (0, 0) # 左上角点坐标
frame = cv2.resize(OrgFrame, (chest_r_width, chest_r_height), interpolation=cv2.INTER_LINEAR)
frame_copy = frame.copy()
# 获取图像中心点坐标x, y
center = []
# 开始处理图像
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
hsv = cv2.GaussianBlur(hsv, (3, 3), 0)
Imask = cv2.inRange(hsv, color_range['chest_red_floor1'][0],
color_range['chest_red_floor1'][1]) # 对原图像和掩模(颜色的字典)进行位运算
# opened = cv2.morphologyEx(Imask, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8)) # 开运算 去噪点
# Imask = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8)) # 闭运算 封闭连接
# Imask = cv2.erode(Imask, None, iterations=2)
Imask = cv2.dilate(Imask, np.ones((3, 3), np.uint8), iterations=2)
_, cnts, hierarchy = cv2.findContours(Imask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1) # 找出所有轮廓
cnt_sum, area_max = getAreaMaxContour1(cnts) # 找出最大轮廓
C_percent = round(area_max * 100 / (r_w * r_h), 2) # 最大轮廓百分比
cv2.drawContours(frame, cnt_sum, -1, (255, 0, 255), 3)
if cnt_sum is not None:
see = True
rect = cv2.minAreaRect(cnt_sum) # 最小外接矩形
box = np.int0(cv2.boxPoints(rect)) # 最小外接矩形的四个顶点
bottom_right = cnt_sum[0][0] # 右下角点坐标
bottom_left = cnt_sum[0][0] # 左下角点坐标
top_right = cnt_sum[0][0] # 右上角点坐标
top_left = cnt_sum[0][0] # 左上角点坐标
for c in cnt_sum:
if c[0][0] + 1 * (r_h - c[0][1]) < bottom_left[0] + 1 * (r_h - bottom_left[1]):
bottom_left = c[0]
if c[0][0] + 1 * c[0][1] > bottom_right[0] + 1 * bottom_right[1]:
bottom_right = c[0]
if c[0][0] + 3 * c[0][1] < top_left[0] + 3 * top_left[1]:
top_left = c[0]
if (r_w - c[0][0]) + 3 * c[0][1] < (r_w - top_right[0]) + 3 * top_right[1]:
top_right = c[0]
# if debug:
# handling = ChestOrg_img.copy()
# cv2.circle(handling, (c[0][0], c[0][1]), 5, [0, 255, 0], 2)
# cv2.circle(handling, (bottom_left[0], bottom_left[1]), 5, [255, 255, 0], 2)
# cv2.circle(handling, (bottom_right[0], bottom_right[1]), 5, [255, 0, 255], 2)
# cv2.imshow('handling', handling) # 显示图像
# cv2.waitKey(2)
bottomcenter_x = (bottom_left[0] + bottom_right[0]) / 2 # 得到bottom中心坐标
bottomcenter_y = (bottom_left[1] + bottom_right[1]) / 2
topcenter_x = (top_right[0] + top_left[0]) / 2 # 得到top中心坐标
topcenter_y = (top_left[1] + top_right[1]) / 2
bottom_angle = -math.atan(
(bottom_right[1] - bottom_left[1]) / (bottom_right[0] - bottom_left[0])) * 180.0 / math.pi
top_angle = -math.atan((top_right[1] - top_left[1]) / (top_right[0] - top_left[0])) * 180.0 / math.pi
if math.fabs(topcenter_x - bottomcenter_x) <= 1: # 得到连线的角度
T_B_angle = 90
else:
T_B_angle = - math.atan(
(topcenter_y - bottomcenter_y) / (topcenter_x - bottomcenter_x)) * 180.0 / math.pi
if img_debug:
cv2.drawContours(frame_copy, [box], 0, (0, 255, 0), 2) # 将大矩形画在图上
cv2.line(frame_copy, (bottom_left[0], bottom_left[1]), (bottom_right[0], bottom_right[1]),
(255, 255, 0), thickness=2)
cv2.line(frame_copy, (top_left[0], top_left[1]), (top_right[0], top_right[1]), (255, 255, 0),
thickness=2)
cv2.line(frame_copy, (int(bottomcenter_x), int(bottomcenter_y)),
(int(topcenter_x), int(topcenter_y)), (255, 255, 255), thickness=2) # T_B_line
cv2.putText(frame_copy, "bottom_angle:" + str(bottom_angle), (30, 450), cv2.FONT_HERSHEY_SIMPLEX,
0.65, (0, 0, 0), 2) # (0, 0, 255)BGR
cv2.putText(frame_copy, "top_angle:" + str(top_angle), (30, 150), cv2.FONT_HERSHEY_SIMPLEX, 0.65,
(0, 0, 0), 2)
cv2.putText(frame_copy, "T_B_angle:" + str(T_B_angle), (30, 400), cv2.FONT_HERSHEY_SIMPLEX, 0.65,
(0, 0, 255), 2)
cv2.putText(frame_copy, "bottomcenter_x:" + str(bottomcenter_x), (30, 480),
cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2) # (0, 0, 255)BGR
cv2.putText(frame_copy, "y:" + str(int(bottomcenter_y)), (300, 480), cv2.FONT_HERSHEY_SIMPLEX, 0.65,
(0, 0, 0), 2) # (0, 0, 255)BGR
cv2.putText(frame_copy, "topcenter_x:" + str(topcenter_x), (30, 180), cv2.FONT_HERSHEY_SIMPLEX,
0.65, (0, 0, 0), 2) # (0, 0, 255)BGR
cv2.putText(frame_copy, "topcenter_y:" + str(int(topcenter_y)), (230, 180),
cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2) # (0, 0, 255)BGR
cv2.putText(frame_copy, 'C_percent:' + str(C_percent) + '%', (30, 100), cv2.FONT_HERSHEY_SIMPLEX,
0.65, (0, 0, 0), 2)
cv2.putText(frame_copy, "step:" + str(step), (30, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0),
2) # (0, 0, 255)BGR
cv2.circle(frame_copy, (int(topcenter_x), int(topcenter_y)), 5, [255, 0, 255], 2)
cv2.circle(frame_copy, (int(bottomcenter_x), int(bottomcenter_y)), 5, [255, 0, 255], 2)
cv2.circle(frame_copy, (top_right[0], top_right[1]), 5, [0, 255, 255], 2)
cv2.circle(frame_copy, (top_left[0], top_left[1]), 5, [0, 255, 255], 2)
cv2.circle(frame_copy, (bottom_right[0], bottom_right[1]), 5, [0, 255, 255], 2)
cv2.circle(frame_copy, (bottom_left[0], bottom_left[1]), 5, [0, 255, 255], 2)
cv2.imshow('Chest_Camera', frame_copy) # 显示图像
cv2.imshow('chest_red_mask', Imask)
cv2.waitKey(100)
else:
print("chest NONE")
# 决策执行动作
angle_ok_flag = False
if step == 0: # 前进依据chest 调整大致位置,方向 看底边线调整角度
if C_percent > 1 and bottomcenter_y < 380:
print("676L 继续前行 forwardSlow0403", bottomcenter_y)
action_append("forwardSlow0403")
elif 380 <= bottomcenter_y < 430:
if bottom_angle > 3: # 需要左转
if bottom_angle > 6:
print("725L 大左转一下 turn001L ", bottom_angle)
action_append("turn001L")
else:
print("728L bottom_angle > 3 需要小左转 turn001L ", bottom_angle)
action_append("turn001L")
elif bottom_angle < -3: # 需要右转
if bottom_angle < -6:
print("732L 右da旋转 turn001R < -6 ", Head_L_R_angle)
action_append("turn001R")
else:
print("735L bottom_angle < -3 需要小右转 turn001R ", bottom_angle)
action_append("turn001R")
elif -3 <= bottom_angle <= 3: # 角度正确
print("448L 角度合适")
angle_ok_flag = True
if angle_ok_flag:
if bottomcenter_x < 200:
print("431L 向左侧移 Left02move ", bottomcenter_x)
action_append("Left02move")
elif bottomcenter_x > 260:
print("433L 向右侧移 Right02move ", bottomcenter_x)
action_append("Right02move")
else:
print("483L 变小步继续前行 Forwalk01", bottomcenter_y)
action_append("Forwalk01")
elif 430 <= bottomcenter_y <= 540:
if bottom_angle > 4: # 需要左转
if bottom_angle > 6:
print("746L 大左转一下 turn001L ", bottom_angle)
action_append("turn001L")
else:
print("749L bottom_angle > 4 需要小左转 turn001L ", bottom_angle)
action_append("turn001L")
elif bottom_angle < -4: # 需要右转
if bottom_angle < -6:
print("338L 右da旋转 turn001R < -6 ", bottom_angle)
action_append("turn001R")
else:
print("746L bottom_angle < -4 需要小右转 turn001R ", bottom_angle)
action_append("turn001R")
elif -3 <= bottom_angle <= 3: # 角度正确
print("448L 角度合适")
angle_ok_flag = True
if angle_ok_flag:
if bottomcenter_x < 200:
print("431L 向左侧移 Left1move ", bottomcenter_x)
action_append("Left1move")
elif bottomcenter_x > 260:
print("433L 向右侧移 Right1move ", bottomcenter_x)
action_append("Right1move")
else:
print("486L 到达上台阶边沿,变前挪动 Forwalk00 bottomcenter_y:", bottomcenter_y)
action_append("Forwalk00")
elif bottomcenter_y > 540:
print("然后开始第二步------")
step = 1
angle_ok_flag = False
else: # C_percent < 1 and bottomcenter_y < 380
print("error769L 前进")
elif step == 1: # 看中线调整角度
print("719L 上台阶 上台阶 UpBridge")
action_append("UpBridge")
step = 2
elif step == 2: # 已经上台阶 调整方向 快走三步 看上方顶点边线
if 0 < T_B_angle < 86: # 右转
print("730L 右转 turn001R T_B_angle:", T_B_angle)
action_append("turn001R")
# time.sleep(1) # timefftest
elif -86 < T_B_angle < 0: # 左转
print("359L 左转 turn001L T_B_angle:", T_B_angle)
action_append("turn001L")
# time.sleep(1) # timefftest
elif T_B_angle <= -86 or T_B_angle >= 86: # 角度正确
print("738L T_B_angle 角度合适 ")
if topcenter_x < 200:
print("740L <210 向左侧移 Left1move ", topcenter_x)
action_append("Left1move")
elif topcenter_x > 240:
print("743L >260 向右侧移 Right1move ", topcenter_x)
action_append("Right1move")
elif topcenter_y < 360:
print("516L 上台阶后,快走 Forwalk05 topcenter_y:", topcenter_y)
action_append("Forwalk05")
print("step 3333 ,", topcenter_y)
step = 3
elif step == 3: # 快走结束
if topcenter_y < 510 and C_percent > 6:
if top_angle > 1.5: # 需要左转
if top_angle <= 3:
print("468L 3 < < 1.5 需要小左转 turn000L ", top_angle)
action_append("turn000L")
else:
print("468L > 3 需要小左转 turn001L ", top_angle)
action_append("turn001L")
elif top_angle < -1.5: # 需要右转
if top_angle > -3:
print("470L -3 < < -1.5 需要小右转 turn000R ", top_angle)
action_append("turn000R")
else:
print("470L < -3 需要小右转 turn001R ", top_angle)
action_append("turn001R")
elif -1.5 <= top_angle <= 1.5: # 角度正确
print("474L top_angle 角度合适 ")
if topcenter_x < 190:
print("456L <210 向左侧移 Left1move ", topcenter_x)
action_append("Left1move")
elif topcenter_x > 260:
print("458L >260 向右侧移 Right1move ", topcenter_x)
action_append("Right1move")
else:
print("590L <topcenter_y<510 Forwalk01 ", topcenter_y)
action_append("Forwalk01")
else: # > 510
print("step 4444 ,", topcenter_y)
step = 4
elif step == 4: # 调整角度
if topcenter_y > 550 and C_percent < 6:
if C_percent >= 2:
print("823L 下台阶前前进一点点 Forwalk00")
action_append("Forwalk00")
print("487L 下台阶 下台阶 DownBridge")
action_append("DownBridge")
step = 5
else:
print("566L 微微前挪 y:", topcenter_y, " C_percent:", C_percent)
action_append("Forwalk00")
elif step == 5:
print("899L 完成bridge")
cv2.destroyAllWindows()
break
#################################################第三关:雷阵#############################################
head_flag = "MM"
Head_L_R_angle = 0
see_flag = False
Bbox_centerY = 0
head_step = 1
# 通过两边的黑线,仅仅调整角度
def obstacle():
    """Stage 3 (mine field): dodge black 'mines' using the chest camera.

    Loops over ``ChestOrg_img`` frames. A large blue blob low in the frame
    (the blue threshold rail) ends the stage: the robot stands, sets
    ``blue_rail`` and ``state = 4``, and breaks out. Otherwise the nearest
    in-range black blob (``Big_battle``) is avoided by queueing side-step /
    forward motions through ``action_append()``.

    Reads/writes the module globals listed below; returns ``None``.
    """
    global state, HeadOrg_img, step, reset, skip
    global Head_L_R_angle, Bbox_centerY, blue_rail
    state = 3
    blue_rail = False
    if state == 3:
        print("/-/-/-/-/-/-/-/-/-进入obscle")
        count = 0
        step = 1
        k = 1
        isgo = False
        straight = False  # go-straight signal
        left = False  # shift-left signal
        left2 = False  # at the right edge with an obstacle ahead
        right = False  # shift-right signal
        right2 = False  # at the left edge with an obstacle ahead
    else:
        return
    # initialize delta to a zero timedelta
    delta = datetime.datetime.now()
    delta = delta - delta
    while state == 3:
        if True:
            Corg_img = ChestOrg_img.copy()
            Corg_img = np.rot90(Corg_img)
            Corg_img = Corg_img.copy()
            # cv2.rectangle(Corg_img,(0,630),(480,640),(255,255,255),-1)
            hsv = cv2.cvtColor(Corg_img, cv2.COLOR_BGR2HSV)
            hsv = cv2.GaussianBlur(hsv, (3, 3), 0)
            # blue: analyse the image to decide whether the stage-ending rail is reached
            Bumask = cv2.inRange(hsv, color_dist['blue']['Lower'], color_dist['blue']['Upper'])
            Bumask = cv2.erode(Bumask, None, iterations=2)
            Bumask = cv2.dilate(Bumask, np.ones((3, 3), np.uint8), iterations=2)
            # cv2.imshow('Bluemask', Bumask)
            _, cntsblue, hierarchy = cv2.findContours(Bumask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)  # find contours (3-value OpenCV 3.x API)
            if cntsblue is not None:
                cnt_large = getAreaMaxContour2(cntsblue)  # take the largest contour
            else:
                print("1135L cnt_large is None")
                continue
            if cnt_large is not None:
                rect_blue = cv2.minAreaRect(cnt_large)
                box_blue = np.int0(cv2.boxPoints(rect_blue))  # corner coordinates
                Bbox_centerX = int((box_blue[3, 0] + box_blue[2, 0] + box_blue[1, 0] + box_blue[0, 0]) / 4)
                Bbox_centerY = int((box_blue[3, 1] + box_blue[2, 1] + box_blue[1, 1] + box_blue[0, 1]) / 4)
                Bbox_center = [Bbox_centerX, Bbox_centerY]
                cv2.circle(Corg_img, (Bbox_center[0], Bbox_center[1]), 7, (0, 0, 255), -1)  # dot marker
                cv2.drawContours(Corg_img, [box_blue], -1, (255, 0, 0), 3)
                obscle_area_blue = 0
                # stop when the blue threshold rail is reached (large, low blue area)
                for c in cntsblue:
                    obscle_area_blue += math.fabs(cv2.contourArea(c))
                if Bbox_centerY >= 280 and obscle_area_blue > 0.05 * 640 * 480:  # and go_up: # 320 obscle_area_blue > 0.05 * 640 * 480 and
                    state = 4
                    if img_debug:
                        cv2.imshow('Corg_img', Corg_img)
                        cv2.waitKey(10)
                    print("遇到蓝色门槛-----*-----*-----*-----* Bbox_center Y:", Bbox_centerY)
                    action_append("Stand")
                    blue_rail = True
                    cv2.destroyAllWindows()
                    break
            # black: analyse the image and pick the obstacle to avoid
            Imask = cv2.inRange(hsv, color_dist['black_dir']['Lower'], color_dist['black_dir']['Upper'])
            Imask = cv2.erode(Imask, None, iterations=3)
            Imask = cv2.dilate(Imask, np.ones((3, 3), np.uint8), iterations=2)
            # cv2.imshow('black', Imask)
            _, contours, hierarchy = cv2.findContours(Imask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1)  # find all contours
            # print("contours lens:",len(contours))
            cv2.drawContours(Corg_img, contours, -1, (255, 0, 255), 2)
            left_point = [640, 0]
            right_point = [0, 0]
            if len(contours) != 0:
                Big_battle = [0, 0]
                for c in contours:
                    rect = cv2.minAreaRect(c)  # minimum-area bounding rectangle
                    box = cv2.boxPoints(rect)  # the rectangle's 4 corner points, obtained via cv2.boxPoints()
                    box = np.intp(box)  # four vertices of the min-area rect
                    box_Ax, box_Ay = box[0, 0], box[0, 1]
                    box_Bx, box_By = box[1, 0], box[1, 1]
                    box_Cx, box_Cy = box[2, 0], box[2, 1]
                    box_Dx, box_Dy = box[3, 0], box[3, 1]
                    box_centerX = int((box_Ax + box_Bx + box_Cx + box_Dx) / 4)
                    box_centerY = int((box_Ay + box_By + box_Cy + box_Dy) / 4)
                    box_center = [box_centerX, box_centerY]
                    # cv2.circle(Corg_img, (box_centerX,box_centerY), 7, (0, 255, 0), -1)  # comparison point, green dot marker
                    # cv2.drawContours(Corg_img, [box], -1, (255,0,0), 3)
                    # discard points in the upper part and at the very bottom of the image
                    if box_centerY < 250 or box_centerY > 610:
                        continue
                    # debug: circle every candidate point
                    if box_debug:
                        cv2.circle(Corg_img, (box_centerX, box_centerY), 8, (0, 0, 255), 2)  # mark detected black point
                        cv2.imshow('Corg_img', Corg_img)
                        cv2.waitKey(1)
                    # track the leftmost and rightmost candidate points
                    if box_centerX < left_point[0]:
                        left_point = box_center
                    if box_centerX > right_point[0]:
                        right_point = box_center
                    if box_centerX <= 80 or box_centerX >= 400:  # exclude left/right border points; keep in sync with the box_centerX bands below (240 centre)
                        continue
                    # keep the black point closest to the robot's feet (image point (240, 640))
                    if math.pow(box_centerX - 240, 2) + math.pow(box_centerY - 640, 2) < math.pow(Big_battle[0] - 240,
                                                                                                  2) + math.pow(
                        Big_battle[1] - 640, 2):
                        Big_battle = box_center  # this is the black point to dodge
                    # print("1272L go_up False ",Big_battle[0],Big_battle[1])
                # debug display
                if img_debug:
                    cv2.circle(Corg_img, (left_point[0], left_point[1]), 7, (0, 255, 0), -1)  # dot marker
                    cv2.circle(Corg_img, (right_point[0], right_point[1]), 7, (0, 255, 255), -1)  # dot marker
                    cv2.circle(Corg_img, (Big_battle[0], Big_battle[1]), 7, (255, 255, 0), -1)  # dot marker
                    cv2.putText(Corg_img, "Head_L_R_angle:" + str(int(Head_L_R_angle)), (230, 400),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2)  # (0, 0, 255)BGR
                    cv2.putText(Corg_img, "see_flag:" + str(int(see_flag)), (230, 440), cv2.FONT_HERSHEY_SIMPLEX, 0.65,
                                (0, 0, 0), 2)  # (0, 0, 255)BGR
                    cv2.putText(Corg_img, "Bbox_centerY:" + str(int(Bbox_centerY)), (230, 460),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2)  # (0, 0, 255)BGR
                    cv2.putText(Corg_img, "Big_battle x,y:" + str(int(Big_battle[0])) + ', ' + str(int(Big_battle[1])),
                                (230, 480), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2)  # (0, 0, 255)BGR
                    cv2.line(Corg_img, (Big_battle[0], Big_battle[1]), (240, 640), (0, 255, 255), thickness=2)
                    # the y=500 reference line
                    cv2.line(Corg_img, (0, 500), (480, 500), (255, 255, 255), thickness=2)
                    # cv2.imshow('handling', handling)
                    cv2.imshow('Corg_img', Corg_img)
                    k = cv2.waitKey(100)
                    if k == 27:
                        cv2.destroyWindow('closed_pic')
                        cv2.destroyWindow('org_img_copy')
                        break
                    elif k == ord('s'):
                        print("save picture123")
                        cv2.imwrite("picture123.jpg", HeadOrg_img)  # save a snapshot
                # black: decide and execute the avoidance action
                if Big_battle[1] <= 370:
                    print("608L 前进靠近 forwardSlow0403 ", Big_battle[1])
                    action_append("forwardSlow0403")
                    # action_append("forwardSlow0403")
                # elif Big_battle[1] <= 430:
                #     print("565L 前进靠近 forwardSlow0403 ",Big_battle[1])
                #     action_append("forwardSlow0403")
                # elif Big_battle[1] < 500 and (Big_battle[0] <= (100+(640-Big_battle[1])*0.15) or Big_battle[0] >= (380-(640-Big_battle[1])*0.15)) :
                #     print("568L 前进靠近 forwardSlow0403 ",Big_battle[1])
                #     action_append("forwardSlow0403")
                elif Big_battle[1] < 460:
                    print("571L 慢慢前进靠近 Forwalk01 ", Big_battle[1])
                    action_append("Forwalk01")
                # x bands 80---140---*240*---340---400; keep in sync with the box_centerX filter above
                # elif Big_battle == right_point and (Big_battle != left_point):
                #     print("277L 右平移两步 Right3move")
                #     action_append("Right3move")
                #     action_append("Right3move")
                # elif Big_battle == left_point and (Big_battle != right_point):
                #     print("279L 向左平移两步 Left3move")
                #     action_append("Left3move")
                #     action_append("Left3move")
                elif (80 <= Big_battle[0] and Big_battle[0] < 140):
                    print("275L 右平移一点点 Right02move")
                    action_append("Right02move")
                    action_append("Right02move")
                elif (140 <= Big_battle[0] and Big_battle[0] < 240):
                    print("277L 右平移一步 Right3move")
                    action_append("Right3move")
                elif (240 <= Big_battle[0] and Big_battle[0] < 340):
                    print("279L 向左平移一步 Left3move")
                    action_append("Left3move")
                elif (340 <= Big_battle[0] < 400):
                    print("281L 向左平移一点点 Left02move")
                    action_append("Left02move")
                    action_append("Left02move")
                else:
                    print("1321L error 不在范围")
            else:
                # no black contour at all: path is clear, keep walking forward
                print("287L 无障碍,可前进")
                action_append("forwardSlow0403")
                Big_battle = [0, 0]
                if img_debug:
                    cv2.circle(Corg_img, (left_point[0], left_point[1]), 7, (0, 255, 0), -1)  # dot marker
                    cv2.circle(Corg_img, (right_point[0], right_point[1]), 7, (0, 255, 255), -1)  # dot marker
                    cv2.circle(Corg_img, (Big_battle[0], Big_battle[1]), 7, (255, 255, 0), -1)  # dot marker
                    cv2.putText(Corg_img, "Head_L_R_angle:" + str(int(Head_L_R_angle)), (230, 400),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2)  # (0, 0, 255)BGR
                    cv2.putText(Corg_img, "see_flag:" + str(int(see_flag)), (230, 440), cv2.FONT_HERSHEY_SIMPLEX, 0.65,
                                (0, 0, 0), 2)  # (0, 0, 255)BGR
                    cv2.putText(Corg_img, "Bbox_centerY:" + str(int(Bbox_centerY)), (230, 460),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2)  # (0, 0, 255)BGR
                    cv2.line(Corg_img, (Big_battle[0], Big_battle[1]), (240, 640), (0, 255, 255), thickness=2)
                    # the y=500 reference line
                    cv2.line(Corg_img, (0, 500), (480, 500), (255, 255, 255), thickness=2)
                    # NOTE(review): 'handling' is only assigned inside baffle(); if this branch runs first it may be undefined -- confirm
                    cv2.imshow('handling', handling)
                    cv2.imshow('Corg_img', Corg_img)
                    k = cv2.waitKey(100)
                    if k == 27:
                        cv2.destroyWindow('closed_pic')
                        cv2.destroyWindow('org_img_copy')
                        break
                    elif k == ord('s'):
                        print("save picture123")
                        cv2.imwrite("picture123.jpg", HeadOrg_img)  # save a snapshot
#################################################第四关:挡板###########################################
def baffle():
    """Stage 4 (baffle/rail): approach the blue board, square up, roll over it.

    State machine on ``step``:
      0 -> stride forward until the blue board is close (baffle_dis_Y > 250),
      1 -> coarse angle alignment into +-5 degrees,
      2 -> close the remaining distance,
      3 -> fine angle alignment, then
      4 -> Stand + RollRail + three left turns, and break out of the loop.
    Reads ``ChestOrg_img``; queues motions through ``action_append()``.
    """
    global state, org_img, step, reset, skip
    global handling
    state = 4
    if state == 4:
        print("/-/-/-/-/-/-/-/-/-进入baffle")
        step = 0
        baffle_dis_Y_flag = False
        baffle_angle = 0
        notok = True
        see = False
        finish = False
        angle = 45
        dis = 0
        dis_flag = False
        angle_flag = False
    else:
        return
    while state == 4:
        if True:
            Corg_img = ChestOrg_img.copy()
            Corg_img = np.rot90(Corg_img)
            OrgFrame = Corg_img.copy()
            handling = Corg_img.copy()
            frame = Corg_img.copy()
            center = []
            # start image processing
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            hsv = cv2.GaussianBlur(hsv, (3, 3), 0)
            Imask = cv2.inRange(hsv, color_dist['blue']['Lower'], color_dist['blue']['Upper'])
            Imask = cv2.erode(Imask, None, iterations=2)
            Imask = cv2.dilate(Imask, np.ones((3, 3), np.uint8), iterations=2)
            # cv2.imshow('BLcolor', Imask)
            _, cnts, hieracy = cv2.findContours(Imask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1)  # find all contours
            # print("cnts len:",len(cnts))
            if cnts is not None:
                cnt_large = getAreaMaxContour2(cnts, area=1000)
            else:
                print("1135L cnt_large is None")
                continue
            blue_bottom_Y = 0
            if cnt_large is not None:
                rect = cv2.minAreaRect(cnt_large)  # minimum-area bounding rectangle
                box = np.int0(cv2.boxPoints(rect))  # four vertices of the min-area rect
                Ax = box[0, 0]
                Ay = box[0, 1]
                Bx = box[1, 0]
                By = box[1, 1]
                Cx = box[2, 0]
                Cy = box[2, 1]
                Dx = box[3, 0]
                Dy = box[3, 1]
                pt1_x, pt1_y = box[0, 0], box[0, 1]
                pt3_x, pt3_y = box[2, 0], box[2, 1]
                center_x = int((pt1_x + pt3_x) / 2)
                center_y = int((pt1_y + pt3_y) / 2)
                center.append([center_x, center_y])
                cv2.drawContours(OrgFrame, [box], -1, [0, 0, 255, 255], 3)
                cv2.circle(OrgFrame, (center_x, center_y), 10, (0, 0, 255), -1)  # draw the centre point
                # rotation angle of the big rectangle: the `if` picks the longer edge,
                # because the vertex order returned by boxPoints is not fixed
                if math.sqrt(math.pow(box[3, 1] - box[0, 1], 2) + math.pow(box[3, 0] - box[0, 0], 2)) > math.sqrt(
                        math.pow(box[3, 1] - box[2, 1], 2) + math.pow(box[3, 0] - box[2, 0], 2)):
                    baffle_angle = - math.atan((box[3, 1] - box[0, 1]) / (box[3, 0] - box[0, 0])) * 180.0 / math.pi
                else:
                    baffle_angle = - math.atan(
                        (box[3, 1] - box[2, 1]) / (box[3, 0] - box[2, 0])) * 180.0 / math.pi  # negated because of the image origin convention
                if center_y > blue_bottom_Y:
                    blue_bottom_Y = center_y
                baffle_dis_Y = blue_bottom_Y
                if baffle_dis_Y > 240:
                    baffle_dis_Y_flag = True
                if img_debug:
                    cv2.putText(OrgFrame, "baffle_dis_Y:" + str(baffle_dis_Y),
                                (10, OrgFrame.shape[0] - 35), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                    cv2.putText(OrgFrame, "baffle_dis_Y_flag:" + str(baffle_dis_Y_flag),
                                (10, OrgFrame.shape[0] - 55), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                    cv2.putText(OrgFrame, "baffle_angle:" + str(baffle_angle),
                                (10, OrgFrame.shape[0] - 75), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                    cv2.putText(OrgFrame, "step:" + str(step), (30, OrgFrame.shape[0] - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.65,
                                (0, 0, 0), 2)  # (0, 0, 255)BGR
                    cv2.imshow('OrgFrame', OrgFrame)
                    k = cv2.waitKey(10)
                    if k == 27:
                        cv2.destroyWindow('closed_pic')
                        cv2.destroyWindow('org_img_copy')
                        break
                    elif k == ord('s'):
                        print("save picture123")
                        cv2.imwrite("picture123.jpg", org_img)  # save a snapshot
            # decide and execute the next action
            # NOTE(review): baffle_dis_Y is only assigned when a contour was found; if
            # getAreaMaxContour2 returns None on the first frame this raises NameError -- confirm
            if step == 0:
                if baffle_dis_Y <= 250:
                    print("294L 大步前进 Forwalk02")
                    action_append("Forwalk02")
                elif baffle_dis_Y > 250:
                    step = 1
            elif step == 1:  # bring the angle into the -5 .. 5 degree band
                if baffle_angle > 5:
                    if baffle_angle > 8:
                        print("1471L 大左转一下 turn001L baffle_angle:", baffle_angle)
                        action_append("turn001L")
                    else:
                        print("1474L 左转 turn000L baffle_angle:", baffle_angle)
                        action_append("turn000L")
                elif baffle_angle < -5:
                    if baffle_angle < -8:
                        print("1478L 大右转一下 turn001R baffle_angle:", baffle_angle)
                        action_append("turn001R")
                    else:
                        print("1481L 右转 turn000R baffle_angle:", baffle_angle)
                        action_append("turn000R")
                else:
                    step = 2
            elif step == 2:  # adjust forward distance / lateral position
                if baffle_dis_Y < 390:
                    print("318L 大一步前进 forwardSlow0403")
                    action_append("forwardSlow0403")
                elif 390 < baffle_dis_Y < 460:
                    print("320L 向前挪动 Forwalk00")
                    action_append("Forwalk00")
                elif 460 < baffle_dis_Y:
                    step = 3
            elif step == 3:  # fine angle adjustment before the roll
                if baffle_angle > 2:
                    if baffle_angle > 5:
                        print("316L 大左转一下 turn001L ", baffle_angle)
                        action_append("turn001L")
                    else:
                        print("318L 左转 turn001L")
                        action_append("turn001L")
                elif baffle_angle < -2:
                    if baffle_angle < -5:
                        print("321L 大右转一下 turn001R ", baffle_angle)
                        action_append("turn001R")
                    else:
                        print("323L 右转 turn001R ", baffle_angle)
                        action_append("turn001R")
                elif baffle_dis_Y_flag:
                    step = 4
            elif step == 4:  # roll over the rail, then re-orient
                print("342L 前挪一点点")
                print("326L 翻栏杆 翻栏杆 RollRail")
                action_append("Stand")
                action_append("RollRail")
                print("step step step 444 ")
                action_append("turn004L")
                action_append("turn004L")
                action_append("turn004L")
                # action_append("turn004L")
                # action_append("turn004L")
                cv2.destroyAllWindows()
                step = 5
                break
################################################第六关:绿独木桥##########################################
def Greenbridge():
    """Stage 6 (single-plank bridge): cross the narrow green bridge.

    Segments the bridge in the (rotated) chest camera image, finds its four
    extreme corner points, and drives a ``step`` state machine via
    ``action_append()``:
      0 -> approach using the bottom edge and its angle,
      1 -> line up with the bridge edge,
      2 -> walk the bridge, correcting angle/lateral offset via the midline,
      3 -> near the far edge: watch the top edge and adjust,
      4 -> leave the bridge (step = 100) and exit the loop.

    NOTE(review): ``green`` is hard-coded to 1 just below, so the second
    (blue-mask) branch is currently unreachable duplicated code; the two
    branches differ only in colour key and a few print labels.
    """
    global state_sel, org_img, step, reset, skip, debug, chest_ret
    r_w = chest_r_width
    r_h = chest_r_height
    step = 0
    state = 6
    green = 1
    print("/-/-/-/-/-/-/-/-/-进入Greenbridge")
    if green == 1:
        while (state == 6):  # initialization done above; green-mask main loop
            # start image processing
            chest_copy = np.rot90(ChestOrg_img)
            chest_copy = chest_copy.copy()
            # chest
            cv2.rectangle(chest_copy, (0, 0), (480, 150), (255, 255, 255), -1)
            border = cv2.copyMakeBorder(chest_copy, 12, 12, 16, 16, borderType=cv2.BORDER_CONSTANT,
                                        value=(255, 255, 255))  # pad with a white border so the edge stays detectable
            Chest_img_copy = cv2.resize(border, (r_w, r_h), interpolation=cv2.INTER_CUBIC)  # resize the image
            Chest_frame_gauss = cv2.GaussianBlur(Chest_img_copy, (3, 3), 0)  # Gaussian blur
            Chest_frame_hsv = cv2.cvtColor(Chest_frame_gauss, cv2.COLOR_BGR2HSV)  # convert to HSV space
            Chest_frame_green = cv2.inRange(Chest_frame_hsv, color_range['green_bridge'][0],
                                            color_range['green_bridge'][1])  # mask pixels inside the colour range
            Chest_opened = cv2.morphologyEx(Chest_frame_green, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))  # opening: remove noise
            Chest_closed = cv2.morphologyEx(Chest_opened, cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8))  # closing: fill gaps
            (_, Chest_contours, hierarchy) = cv2.findContours(Chest_closed, cv2.RETR_LIST,
                                                              cv2.CHAIN_APPROX_NONE)  # find contours (OpenCV 3.x API)
            # print("Chest_contours len:",len(Chest_contours))
            Chest_areaMaxContour, Chest_area_max = getAreaMaxContour1(Chest_contours)  # find the largest contour
            Chest_percent = round(Chest_area_max * 100 / (r_w * r_h), 2)
            if Chest_areaMaxContour is not None:
                Chest_rect = cv2.minAreaRect(Chest_areaMaxContour)
                # center, w_h, Head_angle = rect  # centre point, width/height, rotation angle
                Chest_box = np.int0(cv2.boxPoints(Chest_rect))  # corner coordinates
                # initialize the four extreme corner points
                Chest_top_left = Chest_areaMaxContour[0][0]
                Chest_top_right = Chest_areaMaxContour[0][0]
                Chest_bottom_left = Chest_areaMaxContour[0][0]
                Chest_bottom_right = Chest_areaMaxContour[0][0]
                for c in Chest_areaMaxContour:  # scan the contour for the four corners
                    if c[0][0] + 1.5 * c[0][1] < Chest_top_left[0] + 1.5 * Chest_top_left[1]:
                        Chest_top_left = c[0]
                    if (r_w - c[0][0]) + 1.5 * c[0][1] < (r_w - Chest_top_right[0]) + 1.5 * Chest_top_right[1]:
                        Chest_top_right = c[0]
                    if c[0][0] + 1.5 * (r_h - c[0][1]) < Chest_bottom_left[0] + 1.5 * (r_h - Chest_bottom_left[1]):
                        Chest_bottom_left = c[0]
                    if c[0][0] + 1.5 * c[0][1] > Chest_bottom_right[0] + 1.5 * Chest_bottom_right[1]:
                        Chest_bottom_right = c[0]
                angle_top = - math.atan(
                    (Chest_top_right[1] - Chest_top_left[1]) / (
                            Chest_top_right[0] - Chest_top_left[0])) * 180.0 / math.pi
                angle_bottom = - math.atan((Chest_bottom_right[1] - Chest_bottom_left[1]) / (
                        Chest_bottom_right[0] - Chest_bottom_left[0])) * 180.0 / math.pi
                Chest_top_center_x = int((Chest_top_right[0] + Chest_top_left[0]) / 2)
                Chest_top_center_y = int((Chest_top_right[1] + Chest_top_left[1]) / 2)
                Chest_bottom_center_x = int((Chest_bottom_right[0] + Chest_bottom_left[0]) / 2)
                Chest_bottom_center_y = int((Chest_bottom_right[1] + Chest_bottom_left[1]) / 2)
                Chest_center_x = int((Chest_top_center_x + Chest_bottom_center_x) / 2)
                Chest_center_y = int((Chest_top_center_y + Chest_bottom_center_y) / 2)
                if img_debug:
                    cv2.drawContours(Chest_img_copy, [Chest_box], 0, (0, 0, 255), 2)  # draw the big rectangle on the image
                    cv2.circle(Chest_img_copy, (Chest_top_right[0], Chest_top_right[1]), 5, [0, 255, 255], 2)
                    cv2.circle(Chest_img_copy, (Chest_top_left[0], Chest_top_left[1]), 5, [0, 255, 255], 2)
                    cv2.circle(Chest_img_copy, (Chest_bottom_right[0], Chest_bottom_right[1]), 5, [0, 255, 255], 2)
                    cv2.circle(Chest_img_copy, (Chest_bottom_left[0], Chest_bottom_left[1]), 5, [0, 255, 255], 2)
                    cv2.circle(Chest_img_copy, (Chest_top_center_x, Chest_top_center_y), 5, [0, 255, 255], 2)
                    cv2.circle(Chest_img_copy, (Chest_bottom_center_x, Chest_bottom_center_y), 5, [0, 255, 255], 2)
                    cv2.circle(Chest_img_copy, (Chest_center_x, Chest_center_y), 7, [255, 255, 255], 2)
                    cv2.line(Chest_img_copy, (Chest_top_center_x, Chest_top_center_y),
                             (Chest_bottom_center_x, Chest_bottom_center_y), [0, 255, 255], 2)  # line joining top/bottom midpoints
                if math.fabs(Chest_top_center_x - Chest_bottom_center_x) <= 1:  # angle of that midline
                    Chest_angle = 90
                else:
                    Chest_angle = - math.atan((Chest_top_center_y - Chest_bottom_center_y) / (
                            Chest_top_center_x - Chest_bottom_center_x)) * 180.0 / math.pi
            else:
                Chest_angle = 90
                # center_x = 0.5*r_w
                Chest_center_x = 0
                Chest_bottom_center_x = 0
                Chest_bottom_center_y = 0
                Chest_top_center_x = 0
                Chest_top_center_y = 0
                angle_top = 90
                angle_bottom = 90
            if img_debug:
                cv2.drawContours(Chest_img_copy, Chest_contours, -1, (255, 0, 255), 1)
                cv2.putText(Chest_img_copy, 'Chest_percent:' + str(Chest_percent) + '%', (30, 140),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2)  # (0, 0, 255)BGR
                cv2.putText(Chest_img_copy, "Chest_angle:" + str(int(Chest_angle)), (30, 170), cv2.FONT_HERSHEY_SIMPLEX,
                            0.65, (0, 0, 0), 2)  # (0, 0, 255)BGR
                cv2.putText(Chest_img_copy,
                            "Chest_bottom_center(x,y): " + str(int(Chest_bottom_center_x)) + " , " + str(
                                int(Chest_bottom_center_y)), (30, 240), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0),
                            2)  # (0, 0, 255)BGR
                cv2.putText(Chest_img_copy,
                            "Chest_top_center(x,y): " + str(int(Chest_top_center_x)) + " , " + str(
                                int(Chest_top_center_y)),
                            (30, 220), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2)  # (0, 0, 255)BGR
                cv2.putText(Chest_img_copy, "angle_top:" + str(int(angle_top)), (30, 260), cv2.FONT_HERSHEY_SIMPLEX,
                            0.65,
                            (0, 0, 0), 2)  # (0, 0, 255)BGR
                cv2.putText(Chest_img_copy, "angle_bottom:" + str(int(angle_bottom)), (30, 280),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.65, (0, 0, 0), 2)  # (0, 0, 255)BGR
                cv2.putText(Chest_img_copy, "step :" + str(int(step)), (30, 300), cv2.FONT_HERSHEY_SIMPLEX, 0.65,
                            (0, 0, 0),
                            2)  # (0, 0, 255)BGR
                cv2.imshow('Chest_Camera', Chest_img_copy)  # display the image
                # cv2.imshow('chest_green_mask', Chest_closed)  # display the mask
                cv2.waitKey(100)
            # decide and execute the next action
            if step == 0:  # approach: watch the bottom edge and its angle; Chest_percent > 5
                if Chest_bottom_center_y < 260:
                    # print("296L y<360 大步前进 两步 forwardSlow0403")
                    # action_append("forwardSlow0403")
                    # action_append("forwardSlow0403")
                    print("1694L 快速前进 ", Chest_bottom_center_y)
                    action_append("fastForward04")
                elif Chest_bottom_center_y > 460:  # 450
                    step = 1
                # 260 < Chest_bottom_center_y < 460
                elif angle_bottom > 5:
                    if angle_bottom > 8:
                        print("1658L 大左转一下 > 8 turn001L ", angle_bottom)
                        action_append("turn001L")
                    else:
                        print("1661L 小左转 turn001L ", angle_bottom)
                        action_append("turn001L")
                    # time.sleep(1)
                elif angle_bottom < -5:
                    if angle_bottom < -8:
                        print("1666L 大右转一下 < -8 turn001R ", angle_bottom)
                        action_append("turn001R")
                    else:
                        print("1669L 小右转 turn001R ", angle_bottom)
                        action_append("turn001R")
                    # time.sleep(1)
                elif Chest_bottom_center_x > 260:  # shift right
                    print("161 向右移 Right02move x>250")
                    action_append("Right02move")
                elif Chest_bottom_center_x < 220:  # shift left
                    print("160 向左移 Left02move x<220")
                    action_append("Left02move")
                elif 220 <= Chest_bottom_center_x <= 260:  # Chest_bottom_center_y < 450
                    # print("239 前进两步 forwardSlow0403")
                    # action_append("forwardSlow0403")
                    print("1705L 快走333 fastForward03")
                    action_append("Forwalk02")
            elif step == 1:  # at the bridge edge: line up with the bridge
                if Chest_bottom_center_y > 580:
                    step = 2
                elif angle_bottom > 3:
                    if angle_bottom > 6:
                        print("1678L 大左转一下 > 6 turn001L ", angle_bottom)
                        action_append("turn001L")
                    else:
                        print("1690L 小左转 turn000L ", angle_bottom)
                        action_append("turn000L")
                    # time.sleep(1)
                elif angle_bottom < -3:
                    if angle_bottom < -6:
                        print("1695L 大右转一下 < -6 turn001R ", angle_bottom)
                        action_append("turn001R")
                    else:
                        print("1698L 小右转 turn000R ", angle_bottom)
                        action_append("turn000R")
                    # time.sleep(1)
                elif Chest_bottom_center_x > 260:  # shift right
                    print("1702L 向右移 Right02move x>250")
                    action_append("Right02move")
                elif Chest_bottom_center_x < 220:  # shift left
                    print("1705L 向左移 Left02move x<220")
                    action_append("Left02move")
                elif 220 <= Chest_bottom_center_x <= 260:
                    print("1708L 对准 快走 fastForward03")
                    action_append("Forwalk02")
            elif step == 2:  # on the bridge: walk it, correcting angle/position via the midline
                if Chest_percent > 2 and Chest_top_center_y > 360:
                    step = 3
                elif Chest_percent > 2 and Chest_top_center_y > 100:
                    # adjust angle / lateral position
                    if Chest_bottom_center_x >= 250:  # shift right
                        print("1767L 向右移 Right02move >250 ,", Chest_bottom_center_x)
                        action_append("Right02move")
                    elif Chest_bottom_center_x <= 230:  # shift left
                        print("1770L 向左移 Left02move <230 ,", Chest_bottom_center_x)
                        action_append("Left02move")
                    elif 230 < Chest_bottom_center_x < 250:
                        if 0 < Chest_angle < 86:  # turn right
                            print("1775L 右转 turn001R Chest_angle:", Chest_angle)
                            action_append("turn001R")
                            # time.sleep(1)  # timefftest
                        elif -86 < Chest_angle < 0:  # turn left
                            print("1779L 左转 turn001L Chest_angle:", Chest_angle)
                            action_append("turn001L")
                            # time.sleep(1)  # timefftest
                        else:  # walk on
                            # print("337L 前进一步 forwardSlow0403")
                            # action_append("forwardSlow0403")
                            print("1753L 上桥后,快走 fastForward03 Ccenter_y:", Chest_center_x)
                            action_append("Forwalk02")
                            time.sleep(1)  # pause after the fast walk (timing test)
                else:
                    # print("341L 没有看到绿桥向前直行 forwardSlow0403")
                    # action_append("forwardSlow0403")
                    print("1741L 已经下桥")
                    step = 3
            elif step == 3:  # near the far edge: watch the top edge, adjust the angle; Chest_percent > 5
                if Chest_percent < 1 or Chest_top_center_y > 500:
                    # print("297L 接近桥终点 直行两步离开桥 forwardSlow0403")
                    # action_append("forwardSlow0403")
                    # action_append("forwardSlow0403")
                    # action_append("forwardSlow0403")
                    # action_append("Stand")
                    print("1778LL 接近桥终点 快走离开桥 Forwalk02")
                    action_append("Forwalk02")
                    step = 4
                elif angle_top > 3:
                    if angle_top > 6:
                        print("298L 大左转一下 turn001L")
                        action_append("turn001L")
                    else:
                        print("1727L 左转 turn001L")
                        action_append("turn001L")
                elif angle_top < -3:
                    if angle_top < -6:
                        print("303L 大右转一下 turn001R")
                        action_append("turn001R")
                    else:
                        print("305L 右转 turn001R")
                        action_append("turn001R")
                elif Chest_top_center_x > 250:  # shift right
                    print("363L 向右移 >250")
                    action_append("Right1move")
                elif Chest_top_center_x < 220:  # shift left
                    print("366L 向左移 <220")
                    action_append("Left1move")
                elif 220 <= Chest_top_center_x <= 250:
                    # print("1802L 前进一步 forwardSlow0403")
                    # action_append("forwardSlow0403")
                    print("1804L 快走 Forwalk02")
                    action_append("Forwalk02")
            elif step == 4:  # leaving the bridge; next stage (kick) follows
                print("623L 离开桥")
                print("过桥结束,step = -1 下一关 踢球")
                cv2.destroyAllWindows()
                step = 100
                print("--continue---")
                break
    else:  # unreachable with green == 1: identical loop using the blue mask
        while (state == 6):  # start image processing
            chest_copy = np.rot90(ChestOrg_img)
            chest_copy = chest_copy.copy()
            # chest
            cv2.rectangle(chest_copy, (0, 0), (480, 150), (255, 255, 255), -1)
            border = cv2.copyMakeBorder(chest_copy, 12, 12, 16, 16, borderType=cv2.BORDER_CONSTANT,
                                        value=(255, 255, 255))  # pad with a white border so the edge stays detectable
            Chest_img_copy = cv2.resize(border, (r_w, r_h), interpolation=cv2.INTER_CUBIC)  # resize the image
            Chest_frame_gauss = cv2.GaussianBlur(Chest_img_copy, (3, 3), 0)  # Gaussian blur
            Chest_frame_hsv = cv2.cvtColor(Chest_frame_gauss, cv2.COLOR_BGR2HSV)  # convert to HSV space
            Chest_frame_blue = cv2.inRange(Chest_frame_hsv, color_range['blue'][0],
                                           color_range['blue'][1])  # mask pixels inside the colour range
            Chest_opened = cv2.morphologyEx(Chest_frame_blue, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))  # opening: remove noise
            Chest_closed = cv2.morphologyEx(Chest_opened, cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8))  # closing: fill gaps
            (_, Chest_contours, hierarchy) = cv2.findContours(Chest_closed, cv2.RETR_LIST,
                                                              cv2.CHAIN_APPROX_NONE)  # find contours (OpenCV 3.x API)
            # print("Chest_contours len:",len(Chest_contours))
            Chest_areaMaxContour, Chest_area_max = getAreaMaxContour1(Chest_contours)  # find the largest contour
            Chest_percent = round(Chest_area_max * 100 / (r_w * r_h), 2)
            if Chest_areaMaxContour is not None:
                Chest_rect = cv2.minAreaRect(Chest_areaMaxContour)
                # center, w_h, Head_angle = rect  # centre point, width/height, rotation angle
                Chest_box = np.int0(cv2.boxPoints(Chest_rect))  # corner coordinates
                # initialize the four extreme corner points
                Chest_top_left = Chest_areaMaxContour[0][0]
                Chest_top_right = Chest_areaMaxContour[0][0]
                Chest_bottom_left = Chest_areaMaxContour[0][0]
                Chest_bottom_right = Chest_areaMaxContour[0][0]
                for c in Chest_areaMaxContour:  # scan the contour for the four corners
                    if c[0][0] + 1.5 * c[0][1] < Chest_top_left[0] + 1.5 * Chest_top_left[1]:
                        Chest_top_left = c[0]
                    if (r_w - c[0][0]) + 1.5 * c[0][1] < (r_w - Chest_top_right[0]) + 1.5 * Chest_top_right[1]:
                        Chest_top_right = c[0]
                    if c[0][0] + 1.5 * (r_h - c[0][1]) < Chest_bottom_left[0] + 1.5 * (r_h - Chest_bottom_left[1]):
                        Chest_bottom_left = c[0]
                    if c[0][0] + 1.5 * c[0][1] > Chest_bottom_right[0] + 1.5 * Chest_bottom_right[1]:
                        Chest_bottom_right = c[0]
                angle_top = - math.atan(
                    (Chest_top_right[1] - Chest_top_left[1]) / (
                            Chest_top_right[0] - Chest_top_left[0])) * 180.0 / math.pi
                angle_bottom = - math.atan((Chest_bottom_right[1] - Chest_bottom_left[1]) / (
                        Chest_bottom_right[0] - Chest_bottom_left[0])) * 180.0 / math.pi
                Chest_top_center_x = int((Chest_top_right[0] + Chest_top_left[0]) / 2)
                Chest_top_center_y = int((Chest_top_right[1] + Chest_top_left[1]) / 2)
                Chest_bottom_center_x = int((Chest_bottom_right[0] + Chest_bottom_left[0]) / 2)
                Chest_bottom_center_y = int((Chest_bottom_right[1] + Chest_bottom_left[1]) / 2)
                Chest_center_x = int((Chest_top_center_x + Chest_bottom_center_x) / 2)
                Chest_center_y = int((Chest_top_center_y + Chest_bottom_center_y) / 2)
                if img_debug:
                    cv2.drawContours(Chest_img_copy, [Chest_box], 0, (0, 0, 255), 2)  # draw the big rectangle on the image
                    cv2.circle(Chest_img_copy, (Chest_top_right[0], Chest_top_right[1]), 5, [0, 255, 255], 2)
                    cv2.circle(Chest_img_copy, (Chest_top_left[0], Chest_top_left[1]), 5, [0, 255, 255], 2)
                    cv2.circle(Chest_img_copy, (Chest_bottom_right[0], Chest_bottom_right[1]), 5, [0, 255, 255], 2)
                    cv2.circle(Chest_img_copy, (Chest_bottom_left[0], Chest_bottom_left[1]), 5, [0, 255, 255], 2)
                    cv2.circle(Chest_img_copy, (Chest_top_center_x, Chest_top_center_y), 5, [0, 255, 255], 2)
                    cv2.circle(Chest_img_copy, (Chest_bottom_center_x, Chest_bottom_center_y), 5, [0, 255, 255], 2)
                    cv2.circle(Chest_img_copy, (Chest_center_x, Chest_center_y), 7, [255, 255, 255], 2)
                    cv2.line(Chest_img_copy, (Chest_top_center_x, Chest_top_center_y),
                             (Chest_bottom_center_x, Chest_bottom_center_y), [0, 255, 255], 2)  # line joining top/bottom midpoints
                if math.fabs(Chest_top_center_x - Chest_bottom_center_x) <= 1:  # angle of that midline
                    Chest_angle = 90
                else:
                    Chest_angle = - math.atan((Chest_top_center_y - Chest_bottom_center_y) / (
                            Chest_top_center_x - Chest_bottom_center_x)) * 180.0 / math.pi
            else:
                Chest_angle = 90
                # center_x = 0.5*r_w
                Chest_center_x = 0
                Chest_bottom_center_x = 0
                Chest_bottom_center_y = 0
                Chest_top_center_x = 0
                Chest_top_center_y = 0
                angle_top = 90
                angle_bottom = 90
            if img_debug:
                cv2.drawContours(Chest_img_copy, Chest_contours, -1, (255, 0, 255), 1)
                cv2.putText(Chest_img_copy, 'Chest_percent:' + str(Chest_percent) + '%', (30, 140),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2)  # (0, 0, 255)BGR
                cv2.putText(Chest_img_copy, "Chest_angle:" + str(int(Chest_angle)), (30, 170), cv2.FONT_HERSHEY_SIMPLEX,
                            0.65, (0, 0, 0), 2)  # (0, 0, 255)BGR
                cv2.putText(Chest_img_copy,
                            "Chest_bottom_center(x,y): " + str(int(Chest_bottom_center_x)) + " , " + str(
                                int(Chest_bottom_center_y)), (30, 240), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0),
                            2)  # (0, 0, 255)BGR
                cv2.putText(Chest_img_copy,
                            "Chest_top_center(x,y): " + str(int(Chest_top_center_x)) + " , " + str(
                                int(Chest_top_center_y)),
                            (30, 220), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2)  # (0, 0, 255)BGR
                cv2.putText(Chest_img_copy, "angle_top:" + str(int(angle_top)), (30, 260), cv2.FONT_HERSHEY_SIMPLEX,
                            0.65,
                            (0, 0, 0), 2)  # (0, 0, 255)BGR
                cv2.putText(Chest_img_copy, "angle_bottom:" + str(int(angle_bottom)), (30, 280),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.65, (0, 0, 0), 2)  # (0, 0, 255)BGR
                cv2.putText(Chest_img_copy, "step :" + str(int(step)), (30, 300), cv2.FONT_HERSHEY_SIMPLEX, 0.65,
                            (0, 0, 0),
                            2)  # (0, 0, 255)BGR
                cv2.imshow('Chest_Camera', Chest_img_copy)  # display the image
                # cv2.imshow('chest_green_mask', Chest_closed)  # display the mask
                cv2.waitKey(100)
            # decide and execute the next action
            if step == 0:  # approach: watch the bottom edge and its angle; Chest_percent > 5
                if Chest_bottom_center_y < 260:
                    # print("296L y<360 大步前进 两步 forwardSlow0403")
                    # action_append("forwardSlow0403")
                    # action_append("forwardSlow0403")
                    print("1694L 快速前进 ", Chest_bottom_center_y)
                    action_append("fastForward04")
                elif Chest_bottom_center_y > 460:  # 450
                    step = 1
                # 260 < Chest_bottom_center_y < 460
                elif angle_bottom > 5:
                    if angle_bottom > 8:
                        print("1658L 大左转一下 > 8 turn001L ", angle_bottom)
                        action_append("turn001L")
                    else:
                        print("1661L 小左转 turn001L ", angle_bottom)
                        action_append("turn001L")
                    # time.sleep(1)
                elif angle_bottom < -5:
                    if angle_bottom < -8:
                        print("1666L 大右转一下 < -8 turn001R ", angle_bottom)
                        action_append("turn001R")
                    else:
                        print("1669L 小右转 turn001R ", angle_bottom)
                        action_append("turn001R")
                    # time.sleep(1)
                elif Chest_bottom_center_x > 260:  # shift right
                    print("161 向右移 Right02move x>250")
                    action_append("Right02move")
                elif Chest_bottom_center_x < 220:  # shift left
                    print("160 向左移 Left02move x<220")
                    action_append("Left02move")
                elif 220 <= Chest_bottom_center_x <= 260:  # Chest_bottom_center_y < 450
                    # print("239 前进两步 forwardSlow0403")
                    # action_append("forwardSlow0403")
                    print("1705L 快走333 Forwalk02")
                    action_append("Forwalk02")
            elif step == 1:  # at the bridge edge: line up with the bridge
                if Chest_bottom_center_y > 580:
                    step = 2
                elif angle_bottom > 3:
                    if angle_bottom > 6:
                        print("1678L 大左转一下 > 6 turn001L ", angle_bottom)
                        action_append("turn001L")
                    else:
                        print("1690L 小左转 turn000L ", angle_bottom)
                        action_append("turn000L")
                    # time.sleep(1)
                elif angle_bottom < -3:
                    if angle_bottom < -6:
                        print("1695L 大右转一下 < -6 turn001R ", angle_bottom)
                        action_append("turn001R")
                    else:
                        print("1698L 小右转 turn000R ", angle_bottom)
                        action_append("turn000R")
                    # time.sleep(1)
                elif Chest_bottom_center_x > 260:  # shift right
                    print("1702L 向右移 Right02move x>250")
                    action_append("Right02move")
                elif Chest_bottom_center_x < 220:  # shift left
                    print("1705L 向左移 Left02move x<220")
                    action_append("Left02move")
                elif 220 <= Chest_bottom_center_x <= 260:
                    print("1708L 对准 快走 Forwalk02")
                    action_append("Forwalk02")
            elif step == 2:  # on the bridge: walk it, correcting angle/position via the midline
                if Chest_percent > 2 and Chest_top_center_y > 360:
                    step = 3
                elif Chest_percent > 2 and Chest_top_center_y > 100:
                    # adjust angle / lateral position
                    if Chest_bottom_center_x >= 250:  # shift right
                        print("1767L 向右移 Right02move >250 ,", Chest_bottom_center_x)
                        action_append("Right02move")
                    elif Chest_bottom_center_x <= 230:  # shift left
                        print("1770L 向左移 Left02move <230 ,", Chest_bottom_center_x)
                        action_append("Left02move")
                    elif 230 < Chest_bottom_center_x < 250:
                        if 0 < Chest_angle < 86:  # turn right
                            print("1775L 右转 turn001R Chest_angle:", Chest_angle)
                            action_append("turn001R")
                            # time.sleep(1)  # timefftest
                        elif -86 < Chest_angle < 0:  # turn left
                            print("1779L 左转 turn001L Chest_angle:", Chest_angle)
                            action_append("turn001L")
                            # time.sleep(1)  # timefftest
                        else:  # walk on
                            # print("337L 前进一步 forwardSlow0403")
                            # action_append("forwardSlow0403")
                            print("1753L 上桥后,快走 Forwalk02 Ccenter_y:", Chest_center_x)
                            action_append("Forwalk02")
                            time.sleep(1)  # pause after the fast walk (timing test)
                else:
                    # print("341L 没有看到绿桥向前直行 forwardSlow0403")
                    # action_append("forwardSlow0403")
                    print("1741L 已经下桥")
                    step = 3
            elif step == 3:  # near the far edge: watch the top edge, adjust the angle; Chest_percent > 5
                if Chest_percent < 1 or Chest_top_center_y > 500:
                    # print("297L 接近桥终点 直行两步离开桥 forwardSlow0403")
                    # action_append("forwardSlow0403")
                    # action_append("forwardSlow0403")
                    # action_append("forwardSlow0403")
                    # action_append("Stand")
                    print("1778LL 接近桥终点 快走离开桥 Forwalk02")
                    action_append("Forwalk02")
                    step = 4
                elif angle_top > 3:
                    if angle_top > 6:
                        print("298L 大左转一下 turn001L")
                        action_append("turn001L")
                    else:
                        print("1727L 左转 turn001L")
                        action_append("turn001L")
                elif angle_top < -3:
                    if angle_top < -6:
                        print("303L 大右转一下 turn001R")
                        action_append("turn001R")
                    else:
                        print("305L 右转 turn001R")
                        action_append("turn001R")
                elif Chest_top_center_x > 250:  # shift right
                    print("363L 向右移 >250")
                    action_append("Right1move")
                elif Chest_top_center_x < 220:  # shift left
                    print("366L 向左移 <220")
                    action_append("Left1move")
                elif 220 <= Chest_top_center_x <= 250:
                    # print("1802L 前进一步 forwardSlow0403")
                    # action_append("forwardSlow0403")
                    print("1804L 快走 Forwalk02")
                    action_append("Forwalk02")
            elif step == 4:  # leaving the bridge; next stage (kick) follows
                print("623L 离开桥")
                print("过桥结束,step = -1 下一关 踢球")
                cv2.destroyAllWindows()
                step = 100
                print("--continue---")
                break
################################################ Stage 6: pass the door ##########################################
def bluedoor():
    """Align the robot with the blue door using the head camera.

    State machine on the local ``step``:
      1 -> announce "look right" (no head-turn action is actually queued), go to 2.
      2 -> while looking right: rotate until ``|L_R_angle| <= 2``, then sidestep
           until ``Ycenter`` falls into [390, 430); when aligned, break out.
      3 -> announce "look left", go to 4.
      4 -> same alignment while looking left; when aligned, break out.

    Reads the globals ``HeadOrg_img`` (latest head frame), ``color_range`` and
    ``img_debug``; writes ``handling``; queues motions via ``action_append``.
    """
    global HeadOrg_img, chest_copy, reset, skip
    global handling
    # Long-edge angle of the detected bar (90 = vertical/unknown).  It is
    # computed by the vision code but the decisions below steer on L_R_angle.
    angle = 90
    see = False          # True while the blue bar is visible in the ROI
    dis_ok_count = 0     # number of times the position check has passed
    headTURN = 0         # consecutive frames without seeing the bar
    step = 1
    print("/-/-/-/-/-/-/-/-/-bluedoor")
    while True:
        OrgFrame = HeadOrg_img.copy()
        x_start = 260
        # Process only a vertical strip: rows 0..480, cols 260..380.
        blobs = OrgFrame[0:480, x_start:380]
        handling = blobs.copy()
        frame_mask = blobs.copy()
        # ---- vision: locate the blue bar ----
        hsv = cv2.cvtColor(frame_mask, cv2.COLOR_BGR2HSV)
        hsv = cv2.GaussianBlur(hsv, (3, 3), 0)
        Imask = cv2.inRange(hsv, color_range['blue'][0], color_range['blue'][1])
        Imask = cv2.dilate(Imask, np.ones((3, 3), np.uint8), iterations=2)
        _, cnts, hierarchy = cv2.findContours(Imask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1)
        cnt_sum = getLine_SumContour(cnts, area=300)
        L_R_angle = 0
        blackLine_L = [0, 0]   # long-edge endpoint with the larger y
        blackLine_R = [0, 0]   # long-edge endpoint with the smaller y
        if cnt_sum is not None:
            see = True
            rect = cv2.minAreaRect(cnt_sum)        # minimum-area bounding rectangle
            box = np.int0(cv2.boxPoints(rect))     # its four corner points
            # Pick the longer rectangle side, compute its slope angle, take the
            # midpoint of the short edge with the smaller y-sum as the tracking
            # point (Xcenter, Ycenter), and split that edge's endpoints into
            # blackLine_L / blackLine_R by their y coordinates.
            if math.sqrt(math.pow(box[3, 1] - box[0, 1], 2) + math.pow(box[3, 0] - box[0, 0], 2)) > math.sqrt(
                    math.pow(box[3, 1] - box[2, 1], 2) + math.pow(box[3, 0] - box[2, 0], 2)):
                if box[3, 0] - box[0, 0] == 0:
                    angle = 90
                else:
                    angle = - math.atan((box[3, 1] - box[0, 1]) / (box[3, 0] - box[0, 0])) * 180.0 / math.pi
                if box[3, 1] + box[0, 1] > box[2, 1] + box[1, 1]:
                    Ycenter = int((box[2, 1] + box[1, 1]) / 2)
                    Xcenter = int((box[2, 0] + box[1, 0]) / 2)
                    if box[2, 1] > box[1, 1]:
                        blackLine_L = [box[2, 0], box[2, 1]]
                        blackLine_R = [box[1, 0], box[1, 1]]
                    else:
                        blackLine_L = [box[1, 0], box[1, 1]]
                        blackLine_R = [box[2, 0], box[2, 1]]
                    cv2.circle(OrgFrame, (Xcenter + x_start, Ycenter), 10, (255, 255, 0), -1)  # mark tracking point
                else:
                    Ycenter = int((box[3, 1] + box[0, 1]) / 2)
                    Xcenter = int((box[3, 0] + box[0, 0]) / 2)
                    if box[3, 1] > box[0, 1]:
                        blackLine_L = [box[3, 0], box[3, 1]]
                        blackLine_R = [box[0, 0], box[0, 1]]
                    else:
                        blackLine_L = [box[0, 0], box[0, 1]]
                        blackLine_R = [box[3, 0], box[3, 1]]
                    cv2.circle(OrgFrame, (Xcenter + x_start, Ycenter), 10, (255, 255, 0), -1)  # mark tracking point
            else:
                if box[3, 0] - box[2, 0] == 0:
                    angle = 90
                else:
                    # Negative because the image origin is the top-left corner.
                    angle = - math.atan(
                        (box[3, 1] - box[2, 1]) / (box[3, 0] - box[2, 0])) * 180.0 / math.pi
                if box[3, 1] + box[2, 1] > box[0, 1] + box[1, 1]:
                    Ycenter = int((box[1, 1] + box[0, 1]) / 2)
                    Xcenter = int((box[1, 0] + box[0, 0]) / 2)
                    if box[0, 1] > box[1, 1]:
                        blackLine_L = [box[0, 0], box[0, 1]]
                        blackLine_R = [box[1, 0], box[1, 1]]
                    else:
                        blackLine_L = [box[1, 0], box[1, 1]]
                        blackLine_R = [box[0, 0], box[0, 1]]
                    cv2.circle(OrgFrame, (Xcenter + x_start, Ycenter), 10, (255, 255, 0), -1)  # mark tracking point
                else:
                    Ycenter = int((box[2, 1] + box[3, 1]) / 2)
                    Xcenter = int((box[2, 0] + box[3, 0]) / 2)
                    if box[3, 1] > box[2, 1]:
                        blackLine_L = [box[3, 0], box[3, 1]]
                        blackLine_R = [box[2, 0], box[2, 1]]
                    else:
                        blackLine_L = [box[2, 0], box[2, 1]]
                        blackLine_R = [box[3, 0], box[3, 1]]
                    cv2.circle(OrgFrame, (Xcenter + x_start, Ycenter), 10, (255, 255, 0), -1)  # mark tracking point
            if blackLine_L[0] == blackLine_R[0]:
                L_R_angle = 0
            else:
                L_R_angle = -math.atan(
                    (blackLine_L[1] - blackLine_R[1]) / (blackLine_L[0] - blackLine_R[0])) * 180.0 / math.pi
            if img_debug:
                cv2.circle(OrgFrame, (blackLine_L[0] + x_start, blackLine_L[1]), 5, [0, 255, 255], 2)
                cv2.circle(OrgFrame, (blackLine_R[0] + x_start, blackLine_R[1]), 5, [255, 0, 255], 2)
                cv2.line(OrgFrame, (blackLine_R[0] + x_start, blackLine_R[1]),
                         (blackLine_L[0] + x_start, blackLine_L[1]), (0, 255, 255), thickness=2)
                cv2.putText(OrgFrame, "L_R_angle:" + str(L_R_angle), (10, OrgFrame.shape[0] - 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                cv2.putText(OrgFrame, "Xcenter:" + str(Xcenter + x_start), (10, OrgFrame.shape[0] - 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                cv2.putText(OrgFrame, "Ycenter:" + str(Ycenter), (200, OrgFrame.shape[0] - 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                cv2.imshow('black', Imask)
                cv2.imshow('OrgFrame', OrgFrame)
                cv2.waitKey(10)
        else:
            see = False
        # ---- decision / motion ----
        if step == 1:
            # NOTE(review): head turn is only announced; no action is queued here.
            print("157L 向右看 HeadTurn015")
            time.sleep(1)
            step = 2
        elif step == 2:  # looking right: search, square up, then sidestep into band
            if not see:
                print("276L 右侧看不到黑线 左侧移 Left3move")
                action_append("Left3move")
                headTURN += 1
                if headTURN > 3:
                    headTURN = 0
                    print("276L 右侧看不到黑线 转为左看 waitKey")
                    step = 3
            else:
                headTURN = 0
                if L_R_angle > 2:
                    if L_R_angle > 7:
                        print("416L 左da旋转 turn001L ", L_R_angle)
                        action_append("turn001L")
                    else:
                        print("422L 左旋转 turn000L ", L_R_angle)
                        action_append("turn000L")
                elif L_R_angle < -2:
                    if L_R_angle < -7:
                        print("434L 右da旋转 turn001R ", L_R_angle)
                        action_append("turn001R")
                    else:
                        print("461L 右旋转 turn000R ", L_R_angle)
                        action_append("turn000R")
                elif Ycenter >= 430:
                    if Ycenter > 450:
                        print("451L 左da侧移 Left3move >440 ", Ycenter)
                        action_append("Left3move")
                    else:
                        print("439L 左侧移 Left02move > 365 ", Ycenter)
                        action_append("Left02move")
                elif Ycenter < 390:
                    if Ycenter < 370:
                        print("474L 右da侧移 Right3move <380 ", Ycenter)
                        action_append("Right3move")
                    else:
                        print("448L 右侧移 Right02move <400 ", Ycenter)
                        action_append("Right02move")
                else:
                    # FIX: the original line was the bare expression
                    # `dis_ok_count` (a no-op); an increment was clearly meant.
                    dis_ok_count += 1
                    print("444L 右看 X位置ok")
                    cv2.destroyAllWindows()
                    break
        elif step == 3:
            # NOTE(review): head turn is only announced; no action is queued here.
            print("157L 向左看 HeadTurn180")
            time.sleep(1)
            step = 4
        elif step == 4:  # looking left: same scheme with mirrored sidesteps
            if not see:
                print("294L 左侧 看不到黑线 转为右看")
                headTURN += 1
                if headTURN > 5:
                    headTURN = 0
                    print("error 两侧都看不到 右侧移 Right3move")
                    action_append("Right3move")
            else:
                headTURN = 0
                if L_R_angle > 3:
                    if L_R_angle > 8:
                        print("304L 左da旋转 turn001L ", L_R_angle)
                        action_append("turn001L")
                    else:
                        print("304L 左旋转 turn000L ", L_R_angle)
                        action_append("turn000L")
                elif L_R_angle < -3:
                    if L_R_angle < -8:
                        print("307L 右da旋转 turn001R ", L_R_angle)
                        action_append("turn001R")
                    else:
                        print("307L 右旋转 turn000R ", L_R_angle)
                        action_append("turn000R")
                elif Ycenter >= 430:
                    if Ycenter > 450:
                        print("498L 右da侧移 Right3move ", L_R_angle)
                        action_append("Right3move")
                    else:
                        print("501L 右侧移 Right02move ", L_R_angle)
                        action_append("Right02move")
                elif Ycenter < 390:
                    if Ycenter < 370:
                        print("497L 左da侧移 Left3move ", L_R_angle)
                        # FIX: the message and the mirrored Right3move branch
                        # above use the big sidestep, but the original queued
                        # "Left02move" here.
                        action_append("Left3move")
                    else:
                        print("500L 左侧移 Left02move ", L_R_angle)
                        action_append("Left02move")
                else:
                    # FIX: was the bare no-op expression `dis_ok_count`.
                    dis_ok_count += 1
                    print("495L 左看 X位置ok")
                    cv2.destroyAllWindows()
                    break
################################################ Stage 6: blue single-plank bridge ##########################################
def Bluebridge():
    """Walk across the blue single-plank bridge using the chest camera.

    State machine on the global ``step``:
      0 -> approach: steer by the plank's bottom edge and close in fast.
      1 -> at the bridge edge: finer alignment before stepping on.
      2 -> on the plank: keep the bottom-edge centre in [230, 250] and the
           centreline near vertical while walking forward.
      3 -> near the far end: steer by the plank's top edge until it leaves view.
      4 -> off the bridge: clean up windows and exit the loop.

    Reads the globals ``ChestOrg_img``, ``color_range``, ``img_debug``,
    ``chest_r_width``/``chest_r_height``; queues motions via ``action_append``.

    NOTE(review): the original function carried a second, nearly identical
    loop thresholding on ``color_range['green_bridge']``, guarded by a local
    ``green`` hard-coded to 0 — that branch was unreachable and was removed;
    the live (blue) behaviour is unchanged.
    """
    global state_sel, org_img, step, reset, skip, debug, chest_ret
    r_w = chest_r_width
    r_h = chest_r_height
    step = 0
    state = 6  # the loop runs until step 4 breaks out
    # FIX: the original logged "进入Greenbridge" here, which is misleading.
    print("/-/-/-/-/-/-/-/-/-进入Bluebridge")
    while state == 6:
        # ---- vision: segment the blue plank in the chest frame ----
        chest_copy = np.rot90(ChestOrg_img)
        chest_copy = chest_copy.copy()  # rot90 returns a view; copy so cv2 can draw in place
        cv2.rectangle(chest_copy, (0, 0), (480, 150), (255, 255, 255), -1)  # blank the top band
        border = cv2.copyMakeBorder(chest_copy, 12, 12, 16, 16, borderType=cv2.BORDER_CONSTANT,
                                    value=(255, 255, 255))  # white margin so edge-touching blobs still close
        Chest_img_copy = cv2.resize(border, (r_w, r_h), interpolation=cv2.INTER_CUBIC)
        Chest_frame_gauss = cv2.GaussianBlur(Chest_img_copy, (3, 3), 0)
        Chest_frame_hsv = cv2.cvtColor(Chest_frame_gauss, cv2.COLOR_BGR2HSV)
        Chest_frame_blue = cv2.inRange(Chest_frame_hsv, color_range['blue'][0],
                                       color_range['blue'][1])
        Chest_opened = cv2.morphologyEx(Chest_frame_blue, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))  # denoise
        Chest_closed = cv2.morphologyEx(Chest_opened, cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8))  # close gaps
        (_, Chest_contours, hierarchy) = cv2.findContours(Chest_closed, cv2.RETR_LIST,
                                                          cv2.CHAIN_APPROX_NONE)
        Chest_areaMaxContour, Chest_area_max = getAreaMaxContour1(Chest_contours)  # largest blob = plank
        Chest_percent = round(Chest_area_max * 100 / (r_w * r_h), 2)  # plank area as % of the frame
        if Chest_areaMaxContour is not None:
            Chest_rect = cv2.minAreaRect(Chest_areaMaxContour)
            Chest_box = np.int0(cv2.boxPoints(Chest_rect))
            # Scan the contour for its four extreme corners using x + 1.5*y
            # style metrics (y weighted heavier than x).
            Chest_top_left = Chest_areaMaxContour[0][0]
            Chest_top_right = Chest_areaMaxContour[0][0]
            Chest_bottom_left = Chest_areaMaxContour[0][0]
            Chest_bottom_right = Chest_areaMaxContour[0][0]
            for c in Chest_areaMaxContour:
                if c[0][0] + 1.5 * c[0][1] < Chest_top_left[0] + 1.5 * Chest_top_left[1]:
                    Chest_top_left = c[0]
                if (r_w - c[0][0]) + 1.5 * c[0][1] < (r_w - Chest_top_right[0]) + 1.5 * Chest_top_right[1]:
                    Chest_top_right = c[0]
                if c[0][0] + 1.5 * (r_h - c[0][1]) < Chest_bottom_left[0] + 1.5 * (r_h - Chest_bottom_left[1]):
                    Chest_bottom_left = c[0]
                if c[0][0] + 1.5 * c[0][1] > Chest_bottom_right[0] + 1.5 * Chest_bottom_right[1]:
                    Chest_bottom_right = c[0]
            # FIX: guard the zero-width cases.  The original divided by the raw
            # x-difference; with a perfectly vertical edge that is a numpy
            # divide-by-zero (RuntimeWarning, infinite slope).  Default to 90,
            # the same convention Chest_angle uses below.
            dx_top = Chest_top_right[0] - Chest_top_left[0]
            if dx_top == 0:
                angle_top = 90
            else:
                angle_top = - math.atan(
                    (Chest_top_right[1] - Chest_top_left[1]) / dx_top) * 180.0 / math.pi
            dx_bottom = Chest_bottom_right[0] - Chest_bottom_left[0]
            if dx_bottom == 0:
                angle_bottom = 90
            else:
                angle_bottom = - math.atan(
                    (Chest_bottom_right[1] - Chest_bottom_left[1]) / dx_bottom) * 180.0 / math.pi
            Chest_top_center_x = int((Chest_top_right[0] + Chest_top_left[0]) / 2)
            Chest_top_center_y = int((Chest_top_right[1] + Chest_top_left[1]) / 2)
            Chest_bottom_center_x = int((Chest_bottom_right[0] + Chest_bottom_left[0]) / 2)
            Chest_bottom_center_y = int((Chest_bottom_right[1] + Chest_bottom_left[1]) / 2)
            Chest_center_x = int((Chest_top_center_x + Chest_bottom_center_x) / 2)
            Chest_center_y = int((Chest_top_center_y + Chest_bottom_center_y) / 2)
            if img_debug:
                cv2.drawContours(Chest_img_copy, [Chest_box], 0, (0, 0, 255), 2)  # bounding box
                cv2.circle(Chest_img_copy, (Chest_top_right[0], Chest_top_right[1]), 5, [0, 255, 255], 2)
                cv2.circle(Chest_img_copy, (Chest_top_left[0], Chest_top_left[1]), 5, [0, 255, 255], 2)
                cv2.circle(Chest_img_copy, (Chest_bottom_right[0], Chest_bottom_right[1]), 5, [0, 255, 255], 2)
                cv2.circle(Chest_img_copy, (Chest_bottom_left[0], Chest_bottom_left[1]), 5, [0, 255, 255], 2)
                cv2.circle(Chest_img_copy, (Chest_top_center_x, Chest_top_center_y), 5, [0, 255, 255], 2)
                cv2.circle(Chest_img_copy, (Chest_bottom_center_x, Chest_bottom_center_y), 5, [0, 255, 255], 2)
                cv2.circle(Chest_img_copy, (Chest_center_x, Chest_center_y), 7, [255, 255, 255], 2)
                cv2.line(Chest_img_copy, (Chest_top_center_x, Chest_top_center_y),
                         (Chest_bottom_center_x, Chest_bottom_center_y), [0, 255, 255], 2)  # centreline
            # Angle of the top-centre -> bottom-centre line (90 == vertical).
            if math.fabs(Chest_top_center_x - Chest_bottom_center_x) <= 1:
                Chest_angle = 90
            else:
                Chest_angle = - math.atan((Chest_top_center_y - Chest_bottom_center_y) / (
                        Chest_top_center_x - Chest_bottom_center_x)) * 180.0 / math.pi
        else:
            # No plank in sight: neutral defaults.
            Chest_angle = 90
            Chest_center_x = 0
            Chest_bottom_center_x = 0
            Chest_bottom_center_y = 0
            Chest_top_center_x = 0
            Chest_top_center_y = 0
            angle_top = 90
            angle_bottom = 90
        if img_debug:
            cv2.drawContours(Chest_img_copy, Chest_contours, -1, (255, 0, 255), 1)
            cv2.putText(Chest_img_copy, 'Chest_percent:' + str(Chest_percent) + '%', (30, 140),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2)
            cv2.putText(Chest_img_copy, "Chest_angle:" + str(int(Chest_angle)), (30, 170), cv2.FONT_HERSHEY_SIMPLEX,
                        0.65, (0, 0, 0), 2)
            cv2.putText(Chest_img_copy,
                        "Chest_bottom_center(x,y): " + str(int(Chest_bottom_center_x)) + " , " + str(
                            int(Chest_bottom_center_y)), (30, 240), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0),
                        2)
            cv2.putText(Chest_img_copy,
                        "Chest_top_center(x,y): " + str(int(Chest_top_center_x)) + " , " + str(
                            int(Chest_top_center_y)),
                        (30, 220), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2)
            cv2.putText(Chest_img_copy, "angle_top:" + str(int(angle_top)), (30, 260), cv2.FONT_HERSHEY_SIMPLEX,
                        0.65, (0, 0, 0), 2)
            cv2.putText(Chest_img_copy, "angle_bottom:" + str(int(angle_bottom)), (30, 280),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2)
            cv2.putText(Chest_img_copy, "step :" + str(int(step)), (30, 300), cv2.FONT_HERSHEY_SIMPLEX, 0.65,
                        (0, 0, 0), 2)
            cv2.imshow('Chest_Camera', Chest_img_copy)
            cv2.waitKey(100)
        # ---- decision / motion ----
        if step == 0:  # approach: advance until the bottom edge is near (y > 460)
            if Chest_bottom_center_y < 260:
                print("1694L 快速前进 ", Chest_bottom_center_y)
                action_append("fastForward04")
            elif Chest_bottom_center_y > 460:
                step = 1
            # 260 <= Chest_bottom_center_y <= 460: square up, then centre, then walk
            elif angle_bottom > 5:
                if angle_bottom > 8:
                    print("1658L 大左转一下 > 8 turn001L ", angle_bottom)
                    action_append("turn001L")
                else:
                    print("1661L 小左转 turn001L ", angle_bottom)
                    action_append("turn001L")
            elif angle_bottom < -5:
                if angle_bottom < -8:
                    print("1666L 大右转一下 < -8 turn001R ", angle_bottom)
                    action_append("turn001R")
                else:
                    print("1669L 小右转 turn001R ", angle_bottom)
                    action_append("turn001R")
            elif Chest_bottom_center_x > 260:
                print("161 向右移 Right02move x>250")
                action_append("Right02move")
            elif Chest_bottom_center_x < 220:
                print("160 向左移 Left02move x<220")
                action_append("Left02move")
            elif 220 <= Chest_bottom_center_x <= 260:
                print("1705L 快走333 Forwalk02")
                action_append("Forwalk02")
        elif step == 1:  # at the bridge edge: finer alignment before stepping on
            if Chest_bottom_center_y > 580:
                step = 2
            elif angle_bottom > 3:
                if angle_bottom > 6:
                    print("1678L 大左转一下 > 6 turn001L ", angle_bottom)
                    action_append("turn001L")
                else:
                    print("1690L 小左转 turn000L ", angle_bottom)
                    action_append("turn000L")
            elif angle_bottom < -3:
                if angle_bottom < -6:
                    print("1695L 大右转一下 < -6 turn001R ", angle_bottom)
                    action_append("turn001R")
                else:
                    print("1698L 小右转 turn000R ", angle_bottom)
                    action_append("turn000R")
            elif Chest_bottom_center_x > 260:
                print("1702L 向右移 Right02move x>250")
                action_append("Right02move")
            elif Chest_bottom_center_x < 220:
                print("1705L 向左移 Left02move x<220")
                action_append("Left02move")
            elif 220 <= Chest_bottom_center_x <= 260:
                print("1708L 对准 快走 Forwalk02")
                action_append("Forwalk02")
        elif step == 2:  # on the plank: keep centred and straight while walking
            if Chest_percent > 2 and Chest_top_center_y > 360:
                step = 3
            elif Chest_percent > 2 and Chest_top_center_y > 100:
                if Chest_bottom_center_x >= 250:
                    print("1767L 向右移 Right02move >250 ,", Chest_bottom_center_x)
                    action_append("Right02move")
                elif Chest_bottom_center_x <= 230:
                    print("1770L 向左移 Left02move <230 ,", Chest_bottom_center_x)
                    action_append("Left02move")
                elif 230 < Chest_bottom_center_x < 250:
                    if 0 < Chest_angle < 86:
                        print("1775L 右转 turn001R Chest_angle:", Chest_angle)
                        action_append("turn001R")
                    elif -86 < Chest_angle < 0:
                        print("1779L 左转 turn001L Chest_angle:", Chest_angle)
                        action_append("turn001L")
                    else:  # centred and straight: walk forward
                        print("1753L 上桥后,快走 Forwalk02 Ccenter_y:", Chest_center_x)
                        action_append("Forwalk02")
                        time.sleep(1)  # pause after the fast walk
            else:
                print("1741L 已经下桥")
                step = 3
        elif step == 3:  # near the far end: steer by the plank's top edge
            if Chest_percent < 1 or Chest_top_center_y > 500:
                print("1778LL 接近桥终点 快走离开桥 Forwalk02")
                action_append("Forwalk02")
                step = 4
            elif angle_top > 3:
                if angle_top > 6:
                    print("298L 大左转一下 turn001L")
                    action_append("turn001L")
                else:
                    print("1727L 左转 turn001L")
                    action_append("turn001L")
            elif angle_top < -3:
                if angle_top < -6:
                    print("303L 大右转一下 turn001R")
                    action_append("turn001R")
                else:
                    print("305L 右转 turn001R")
                    action_append("turn001R")
            elif Chest_top_center_x > 250:
                print("363L 向右移 >250")
                action_append("Right1move")
            elif Chest_top_center_x < 220:
                print("366L 向左移 <220")
                action_append("Left1move")
            elif 220 <= Chest_top_center_x <= 250:
                print("1804L 快走 Forwalk02")
                action_append("Forwalk02")
        elif step == 4:  # off the bridge: clean up and leave the loop
            print("623L 离开桥")
            print("过桥结束,step = -1 下一关 踢球")
            cv2.destroyAllWindows()
            step = 100
            print("--continue---")
            break
################################################ Stage 7: kick the ball into the hole ########################################
# Module-level shared state for the golf stage.  Vision code elsewhere updates
# the measurements; the decision loop act_move() below reads the flags and
# coordinates and toggles the *_start phase switches.
golf_angle_ball = 90  # presumably the head-camera angle to the red ball -- TODO confirm producer
Chest_ball_angle = 90  # presumably the chest-camera angle to the red ball -- TODO confirm producer
hole_Angle = 45  # ball->hole angle, degrees; act_move() turns until it nears +/-86..90
golf_angle = 0
ball_x = 0  # presumably ball position in head-camera pixels -- verify against the producer
ball_y = 0
golf_dis_flag = False  # unused (original note said "未使用")
golf_angle_flag = False
golf_dis_start = True
golf_angle_start = False
golf_ok = False
hole_flag = False  # True while the hole is detected; read by act_move() steps 1-3
Chest_ball_flag = False  # True while the chest camera sees the ball; read by act_move() step 0
Chest_golf_angle = 0
ball_dis_start = True  # act_move() phase toggle: first fix the lateral distance to the ball...
hole_angle_start = False  # ...then fix the angle toward the hole (the two alternate)
head_state = 0  # head yaw state: +90 looking left, -90 looking right, 0 straight ahead
hole_x = 0
hole_y = 0
angle_dis_count = 0  # consecutive "position and angle OK" verdicts before advancing a step
picnum = 0
fast_run = True  # act_move() step 0 starts with the fast-approach gait until close to the ball
def act_move():
global step, state, reset, skip
global hole_Angle, ball_hole
global golf_angle_ball, golf_angle, Chest_ball_angle, Chest_golf_angle
global ball_x, ball_y, Chest_ball_x, Chest_ball_y
global golf_angle_flag, golf_dis_flag # golf_dis_flag未使用
global golf_angle_start, golf_dis_start
global golf_ok
global hole_flag, Chest_ball_flag
global ball_dis_start, hole_angle_start
global head_state, angle_dis_count, fast_run
ball_hole_angle_ok = False
# 由脚底到红球延伸出一条射线,依据球洞与该射线的关系,调整机器人位置
# ball_hole_local()
if True:
if step == 0: # 发现球,发现球洞,记录球与球洞的相对位置
# print("看黑线调整居中")
if Chest_ball_flag == True: # 前进到球跟前
if fast_run:
if Chest_ball_y <= 270: # 340
print("1870L 快走前进 fastForward04 ", Chest_ball_y)
# action_append("forwardSlow0403")
# action_append("forwardSlow0403")
action_append("fastForward04")
head_angle_dis() # headfftest
elif Chest_ball_y <= 290: # 340
print("1902L 快走前进 fastForward03 ", Chest_ball_y)
action_append("fastForward03")
# head_angle_dis() # headfftest
else:
print("1902L 快走完成", Chest_ball_y)
fast_run = False
else:
if Chest_ball_y < 360: # 390
# X
if Chest_ball_x < 140: # 240 - 100
print("159L Chest_ball_x < 180 左侧移 ", Chest_ball_x)
action_append("Left3move")
elif Chest_ball_x > 340: # 240 + 100
print("161L Chest_ball_x > 300 右侧移 ", Chest_ball_x)
action_append("Right3move")
else:
print("168L 前挪一点点 1111111 ", Chest_ball_y)
action_append("forwardSlow0403")
else: # Chest_ball_y>360
print("goto step1 ", Chest_ball_y)
step = 1
else:
print("183L 未发现红球 左右旋转头部摄像头 寻找红球")
print("238L 前进 fastForward03")
action_append("fastForward03") # ffetst
# 目前假设红球在正前方,能看到
# if head_state == 0:
# print("头右转(-60)寻找球")
# head_state = -60
# elif head_state == -60:
# print("头由右转变为左转(+60)寻找球")
# head_state = 60
# elif head_state == 60:
# print("头部 恢复0 向前迈进")
elif step == 1: # 看球调整位置 逐步前进调整至看球洞
if Chest_ball_y <= 350:
print("174L 前挪一点点 Forwalk00 < 380 ", Chest_ball_y)
action_append("Forwalk00")
elif Chest_ball_y > 480:
print("1903L 后一步 Back2Run > 480", Chest_ball_y)
action_append("Back2Run")
elif 350 < Chest_ball_y <= 480:
if hole_flag == True:
if head_state == -60:
print("头右看,看到球洞")
step = 2
# print("172L 头恢复0 向右平移")
# head_state = 0
elif head_state == 60:
print("头左看,看到球洞")
step = 3
# print("172L 头恢复0 向左平移")
# head_state = 0
elif head_state == 0: # 头前看 看到球洞
print("270L step4")
step = 4
else:
print("273error 左右旋转头 寻找球洞 ")
# 目前假设球洞在前方,head能看到
# if head_state == 0:
# print("头右转(-60)寻找球")
# head_state = -60
# elif head_state == -60:
# print("头由右转变为左转(+60)寻找球")
# head_state = 60
# elif head_state == 60:
# print("头部 恢复0 向前迈进")
elif step == 2:
# 头右看,看到球洞
print("22222222222找红球与球洞")
if Chest_ball_y < 160:
print("174L 一大步前进")
elif Chest_ball_y < 360:
print("177L 后挪一点点")
elif 160 < Chest_ball_y < 320:
print("找到了在左边跳第4步,找到了在右边跳第3步")
if hole_flag == True:
if head_state == -60:
print("头右看,看到球")
step = 3
# print("172L 头恢复0 向右平移")
# head_state = 0
elif head_state == 60:
print("头左看,看到球")
step = 4
# print("172L 头恢复0 向左平移")
# head_state = 0
elif head_state == 0: # 头前看 看到球洞
step = 1
else:
print("左右旋转头 寻找球洞")
# 目前假设球洞在前方,head能看到
if head_state == 0:
print("头右转(-60)寻找球")
head_state = -60
elif head_state == -60:
print("头由右转变为左转(+60)寻找球")
head_state = 60
elif head_state == 60:
print("头部 恢复0 向前迈进")
elif step == 3:
# 头左看,看到球洞
print("33333333333左侧移")
if Chest_ball_y > 280:
print("后挪一点点")
elif Chest_ball_y < 150:
print("前挪一点点")
elif Chest_ball_x < 450:
print("左侧移")
if hole_flag == False:
print("右转")
else:
step = 1
ball_dis_start = True
hole_angle_start = False
# 完成左侧移后 右转
# 找球洞
elif step == 4: # 粗略调整朝向 球与球洞大致在一条线
# print("调整红球在左脚正前方不远处,看球洞的位置调整")
if ball_dis_start:
if Chest_ball_x <= 200:
if 240 - Chest_ball_x > 40:
print("373L4 需要左侧移 Left3move", Chest_ball_x)
action_append("Left3move")
else:
print("376L4 需要左侧移 Left02move", Chest_ball_x)
action_append("Left02move")
angle_dis_count = 0
elif Chest_ball_x > 280:
if Chest_ball_x - 240 > 40:
print("359L4 需要右侧移 Right3move", Chest_ball_x)
action_append("Right3move")
else:
print("384L4 需要右侧移 Right02move", Chest_ball_x)
action_append("Right02move")
angle_dis_count = 0
else:
print("388L4 Chest_ball_y---位置ok")
ball_dis_start = False
hole_angle_start = True
if hole_angle_start:
if hole_Angle <= 0:
# angle
if hole_Angle > -86:
if hole_Angle >= -82:
if Chest_ball_y > 480:
print("392L4 需要后挪一点 Back2Run ", Chest_ball_y)
action_append("Back2Run")
angle_dis_count = 0
elif Chest_ball_y < 350:
print("395L4 需要前挪一点 Forwalk00", Chest_ball_y)
action_append("Forwalk00")
angle_dis_count = 0
print("381L4 大左转一下 turn004L ", hole_Angle)
action_append("turn004L")
else:
if Chest_ball_y > 485:
print("386L4 需要后挪一点 Back1Run ", Chest_ball_y)
action_append("Back1Run")
angle_dis_count = 0
elif Chest_ball_y < 350:
print("427L4 需要前挪一点 Forwalk00 ", Chest_ball_y)
action_append("Forwalk00")
angle_dis_count = 0
print("397L4 左转一下 turn001L ", hole_Angle)
action_append("turn001L")
else:
print("401L4 hole_Angle---角度ok")
angle_dis_count = angle_dis_count + 1
ball_dis_start = True
hole_angle_start = False
# ball_dis_start = True
# hole_angle_start = False
if hole_Angle > 0:
# angle
if hole_Angle < 86:
if hole_Angle <= 82:
if Chest_ball_y > 480:
print("409L4 需要后挪一点 Back2Run ", Chest_ball_y)
action_append("Back2Run")
angle_dis_count = 0
elif Chest_ball_y < 350:
print("427L4 需要前挪一点 Forwalk00 ", Chest_ball_y)
action_append("Forwalk00")
angle_dis_count = 0
print("250L4 大右转一下 turn004R ", hole_Angle)
action_append("turn004R")
else:
if Chest_ball_y > 485:
print("421L4 需要后挪一点 Back1Run ", Chest_ball_y)
action_append("Back1Run")
angle_dis_count = 0
elif Chest_ball_y < 350:
print("427L4 需要前挪一点 Forwalk00 ", Chest_ball_y)
action_append("Forwalk00")
angle_dis_count = 0
print("352L4 右转一下 turn001R ", hole_Angle)
action_append("turn001R")
else:
print("417L4 hole_Angle---角度OK")
angle_dis_count = angle_dis_count + 1
ball_dis_start = True
hole_angle_start = False
# ball_dis_start = True
# hole_angle_start = False
if angle_dis_count > 3:
angle_dis_count = 0
print("step step 5555")
step = 5
elif step == 5: # 调整 球与球洞在一条直线 球范围 230<Chest_ball_y<250
# print("55555 球与球洞都在")
# print("调整红球在左脚正前方不远处,看球洞的位置调整")
if ball_dis_start: # 390<y<450 230<x<250
if Chest_ball_x < 220:
# if 240 - Chest_ball_x > 40:
# print("443L 需要左侧移 Left02move")
# action_append("Left02move")
# else:
print("446L 需要左侧移 Left1move", Chest_ball_x)
action_append("Left1move")
angle_dis_count = 0
elif Chest_ball_x > 260:
# if Chest_ball_x - 240 > 40:
# print("451L 需要右侧移 Right02move")
# action_append("Right02move")
# else:
print("454L 需要右侧移 Right1move", Chest_ball_x)
action_append("Right1move")
angle_dis_count = 0
else:
print("340L Chest_ball_y---位置ok")
ball_dis_start = False
hole_angle_start = True
if hole_angle_start:
if hole_Angle < 0:
# angle
if hole_Angle > -87:
# y
if Chest_ball_y > 485:
print("475L 需要后挪一点 Back1Run ", Chest_ball_y)
action_append("Back1Run")
angle_dis_count = 0
elif Chest_ball_y < 390:
print("368L 需要前挪一点 Forwalk00", Chest_ball_y)
action_append("Forwalk00")
angle_dis_count = 0
if hole_Angle >= -82:
print("465L 大左转一下 turn001L ", hole_Angle)
action_append("turn001L")
else:
print("468L 左转一下 turn001L ", hole_Angle)
action_append("turn001L")
else:
print("471L hole_Angle---角度ok")
angle_dis_count = angle_dis_count + 1
ball_dis_start = True
hole_angle_start = False
if hole_Angle > 0:
# angle
if hole_Angle < 87:
# y
if Chest_ball_y > 485:
print("475L 需要后挪一点 Back1Run ", Chest_ball_y)
action_append("Back1Run")
angle_dis_count = 0
elif Chest_ball_y < 390:
print("368L 需要前挪一点 Forwalk00 ", Chest_ball_y)
action_append("Forwalk00")
angle_dis_count = 0
if hole_Angle <= 82:
print("479L 大右转一下 turn001R ", hole_Angle)
action_append("turn001R")
else:
print("482L 右转一下 turn001R ", hole_Angle)
action_append("turn001R")
else:
print("485L hole_Angle---角度OK")
angle_dis_count = angle_dis_count + 1
ball_dis_start = True
hole_angle_start = False
if angle_dis_count > 2:
angle_dis_count = 0
step = 6
elif step == 6:
# print("666")
if Chest_ball_angle > 88 and hole_Angle > 88:
ball_hole_angle_ok = True
if Chest_ball_angle < -88 and hole_Angle > 88:
ball_hole_angle_ok = True
if Chest_ball_angle < -88 and hole_Angle < -88:
ball_hole_angle_ok = True
if Chest_ball_angle > 88 and hole_Angle < -88:
ball_hole_angle_ok = True
if Chest_ball_angle > 86 and hole_Angle > 86 and ball_hole_angle_ok == False:
print("391L 右转一点点 turn001R")
action_append("turn001R")
elif Chest_ball_angle < -86 and hole_Angle < -86 and ball_hole_angle_ok == False:
print("393L 左转一点点 turn001L")
action_append("turn001L")
elif Chest_ball_y <= 470:
print("289L 向前挪动一点点 Forwalk00")
action_append("Forwalk00")
else:
print("next step")
step = 7
elif step == 7:
if Chest_ball_x > 200: # 210
print("410L 向右移动 Right1move")
action_append("Right1move")
elif Chest_ball_x < 180:
print("412L 向左移动 Left1move")
action_append("Left1move")
elif Chest_ball_y < 490:
print("289L 向前挪动一点点 Forwalk00")
action_append("Forwalk00")
elif Chest_ball_y > 530:
print("2244L 向后挪动一点点 Back0Run")
action_append("Back0Run")
else:
print("414L 踢球踢球 LfootShot")
action_append("LfootShot")
step = 8
print("完成 77777")
action_append("forwardSlow0403")
action_append("forwardSlow0403")
action_append("forwardSlow0403")
action_append("Stand")
# action_append("turn004L")
# action_append("turn004L")
# action_append("turn004L")
# action_append("turn004L")
# action_append("turn004L")
# action_append("turn004L")
# action_append("turn004L")
# action_append("turn004L")
# action_append("turn004L")
# action_append("turn004L")
action_append("turn005L")
action_append("turn005L")
action_append("turn005L")
action_append("turn005L")
cv2.destroyAllWindows()
def kick_ball():
    """State 7: golf/kick task — detect the blue hole and the white ball in the
    chest-camera frame, publish their positions/angles into module globals, and
    let the step state machine (driven elsewhere via ``step``/``act_move``)
    align the robot and kick.

    Side effects (all via ``global``): updates ``hole_Angle``, ``hole_flag``,
    ``Chest_ball_x/y``, ``Chest_ball_angle``, ``Chest_ball_flag``; consumes
    frames from ``ChestOrg_img``; calls ``act_move()`` once per loop iteration
    to flush queued actions. Loops while ``state == 7``.
    """
    global state, state_sel, step, reset, skip
    global hole_Angle
    global golf_angle_ball, golf_angle, Chest_ball_angle, Chest_golf_angle
    global ball_x, ball_y, Chest_ball_x, Chest_ball_y
    global hole_flag, Chest_ball_flag
    global ChestOrg_img
    global picnum, img_debug
    # Seed sum_contours with a tiny dummy quad so np.concatenate below always
    # has an array of matching shape to append contour points to.
    sum_contours = np.array([[[0, 0]], [[0, 1]], [[1, 1]], [[1, 0]]])
    step = 0
    state = 7
    while state == 7:
        if 0 <= step < 8:  # the kick routine's phases (0..7)
            ChestOrg = ChestOrg_img.copy()
            ChestOrg = np.rot90(ChestOrg)  # camera is mounted rotated; un-rotate
            Hole_OrgFrame = ChestOrg.copy()
            Ball_OrgFrame = ChestOrg.copy()
            img_h, img_w = Hole_OrgFrame.shape[:2]
            # Reference points; x changed from 200 to 640/2 during field tests (fftest)
            bottom_center = (int(240), int(img_h))  # bottom-center of the image
            top_center = (int(240), int(0))  # top-center of the image
            # bottom_center = (int(640/2), int(img_h))  # image bottom-center
            # top_center = (int(640/2), int(0))  # image top-center
            # --- hole detection (blue) ---
            Hole_hsv = cv2.cvtColor(Hole_OrgFrame, cv2.COLOR_BGR2HSV)
            Hole_hsv = cv2.GaussianBlur(Hole_hsv, (3, 3), 0)
            Hole_Imask = cv2.inRange(Hole_hsv, color_dist['blue_hole']['Lower'], color_dist['blue_hole']['Upper'])
            Hole_Imask = cv2.erode(Hole_Imask, None, iterations=1)
            Hole_Imask = cv2.dilate(Hole_Imask, np.ones((3, 3), np.uint8), iterations=3)
            # cv2.imshow('hole_mask', Hole_Imask)  # hole mask
            # print('Press a key to continue:')
            # cv2.waitKey(0)
            # Reset per-frame detections.
            hole_center = [0, 0]
            Chest_ball_center = [0, 0]
            # Chest camera: hole contour processing.
            hole_x = 0
            hole_y = 0
            _, cnts, hierachy = cv2.findContours(Hole_Imask, cv2.RETR_EXTERNAL,
                                                 cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 3 three-tuple return
            # Pick one hole contour to (re)initialise sum_contours so the
            # concatenation below has a uniform point-array format.
            for i in range(0, len(cnts)):
                area = cv2.contourArea(cnts[i])  # contour area
                # print("area : ",area)
                if img_debug:
                    cv2.putText(Hole_OrgFrame, "area:" + str(area), (10, Hole_OrgFrame.shape[0] - 55),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                    cv2.waitKey(1)
                if 640 * 480 * 0.0005 < area < 640 * 480 * 0.45:  # drop tiny noise and the full-frame border
                    # cv2.drawContours(Hole_OrgFrame, cnts, -1, (0, 255, 0), 3)
                    sum_contours = cnts[i]
                    break
                else:
                    # cv2.drawContours(Hole_OrgFrame, cnts, -1, (0, 0, 255), 3)
                    continue
            # Merge every plausible hole fragment into one point set.
            for c in cnts:
                area = cv2.contourArea(c)  # contour area
                if 640 * 480 * 0.0005 < area < 640 * 480 * 0.45:
                    sum_contours = np.concatenate((sum_contours, c), axis=0)  # append points
                    cv2.drawContours(Hole_OrgFrame, c, -1, (0, 255, 0), 3)
                else:
                    cv2.drawContours(Hole_OrgFrame, c, -1, (0, 0, 255), 3)
                    continue
            sum_area = cv2.contourArea(sum_contours)  # area of the merged contour
            if sum_area > 3:
                cnt_large = sum_contours
            else:
                cnt_large = None
            if cnt_large is not None:
                hole_flag = True
                (hole_x, hole_y), radius = cv2.minEnclosingCircle(cnt_large)  # minimum enclosing circle
                hole_center = (int(hole_x), int(hole_y))
                radius = int(radius)
                cv2.circle(Hole_OrgFrame, hole_center, radius, (100, 200, 30), 2)
                # ellipse = cv2.fitEllipse(cnt_large)
                # cv2.ellipse(OrgFrame,ellipse,(255,255,0),2)
                cv2.line(Hole_OrgFrame, hole_center, bottom_center, (0, 0, 100), 2)
                if (hole_center[0] - bottom_center[0]) == 0:
                    hole_Angle = 90  # vertical line: avoid division by zero
                else:
                    # hole_Angle from the slope (y1-y0)/(x1-x0), in degrees
                    hole_Angle = - math.atan(
                        (hole_center[1] - bottom_center[1]) / (hole_center[0] - bottom_center[0])) * 180.0 / math.pi
            else:
                hole_flag = False
            if img_debug:
                cv2.putText(Hole_OrgFrame, "step:" + str(step),
                            (10, Hole_OrgFrame.shape[0] - 35), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                cv2.putText(Hole_OrgFrame, "hole_angle:" + str(hole_Angle),
                            (10, Hole_OrgFrame.shape[0] - 115), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                cv2.putText(Hole_OrgFrame, "hole_x:" + str(hole_x),
                            (10, Hole_OrgFrame.shape[0] - 75), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                cv2.putText(Hole_OrgFrame, "hole_y:" + str(hole_y),
                            (220, Hole_OrgFrame.shape[0] - 75), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                cv2.putText(Hole_OrgFrame, "hole_flag:" + str(hole_flag),
                            (10, Hole_OrgFrame.shape[0] - 95), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                cv2.imshow("Hole_OrgFrame", Hole_OrgFrame)
                cv2.waitKey(10)
            # --- ball detection (white mask, despite "red ball" naming elsewhere) ---
            Chest_ball_x = 0
            Chest_ball_y = 0
            Chest_Ball_hsv = cv2.cvtColor(Ball_OrgFrame, cv2.COLOR_BGR2HSV)
            Chest_Ball_hsv = cv2.GaussianBlur(Chest_Ball_hsv, (3, 3), 0)
            Chest_Ball_Imask = cv2.inRange(Chest_Ball_hsv, color_range['white'][0],
                                           color_range['white'][1])
            Chest_Ball_Imask = cv2.erode(Chest_Ball_Imask, None, iterations=2)
            Chest_Ball_Imask = cv2.dilate(Chest_Ball_Imask, np.ones((3, 3), np.uint8), iterations=2)
            # cv2.imshow('ball_mask', Chest_Ball_Imask)  # ball mask
            # print('Press a key to continue:')
            # cv2.waitKey(0)
            _, cnts2, hierachy2 = cv2.findContours(Chest_Ball_Imask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            if cnts2 is not None:
                cnt_large3 = getAreaMaxContour2(cnts2, 10)
            else:
                print("1135L cnt_large is None")
                continue
            # Ball contour found: compute Chest_ball_angle relative to top_center.
            if cnt_large3 is not None:
                Chest_ball_flag = True
                (Chest_circle_x, Chest_circle_y), Chest_radius = cv2.minEnclosingCircle(cnt_large3)
                Chest_ball_center = (int(Chest_circle_x), int(Chest_circle_y))
                Chest_radius = int(Chest_radius)
                cv2.circle(Ball_OrgFrame, Chest_ball_center, Chest_radius, (100, 200, 20), 2)
                cv2.line(Ball_OrgFrame, Chest_ball_center, top_center, (0, 100, 0), 2)
                # ellipse = cv2.fitEllipse(cnt_large)
                # cv2.ellipse(OrgFrame,ellipse,(255,255,0),2)
                if (Chest_ball_center[0] - top_center[0]) == 0:
                    Chest_ball_angle = 90  # vertical line: avoid division by zero
                else:
                    # Chest_ball_angle from the slope (y1-y0)/(x1-x0), in degrees
                    Chest_ball_angle = - math.atan((Chest_ball_center[1] - top_center[1]) / (
                            Chest_ball_center[0] - top_center[0])) * 180.0 / math.pi
                Chest_ball_x = int(Chest_circle_x)  # published ball x
                Chest_ball_y = int(Chest_circle_y)  # published ball y
            else:
                Chest_ball_flag = False
                Chest_ball_y = 0
            if img_debug:
                cv2.putText(Ball_OrgFrame, "step:" + str(step),
                            (10, Ball_OrgFrame.shape[0] - 35), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                cv2.putText(Ball_OrgFrame, "Chest_ball_x:" + str(Chest_ball_x),
                            (10, Ball_OrgFrame.shape[0] - 75), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                cv2.putText(Ball_OrgFrame, "Chest_ball_y:" + str(Chest_ball_y),
                            (220, Ball_OrgFrame.shape[0] - 75), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                # NOTE(review): uses Hole_OrgFrame.shape here — looks like a copy-paste
                # from the hole overlay; harmless since both frames share a shape.
                cv2.putText(Ball_OrgFrame, "Chest_ball_flag:" + str(Chest_ball_flag),
                            (10, Hole_OrgFrame.shape[0] - 95), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                cv2.putText(Ball_OrgFrame, "ball_angle:" + str(Chest_ball_angle),
                            (10, Ball_OrgFrame.shape[0] - 115), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                cv2.imshow("Ball_OrgFrame", Ball_OrgFrame)
                cv2.waitKey(10)
                # if img_debug:
                # picnum += 1
                # picname = 'photo_save' + str(picnum) + '.bmp'
                # cv2.imwrite(picname,Ball_OrgFrame)  # save snapshot
        else:
            break
        act_move()
##################################################第八关:过洞###############################################
def square_hole():
    """State 8 (level 8): approach the square hole/pit and cross it by lying
    forward ("lieForward").

    Two nearly identical perception loops exist, selected by the hard-coded
    ``green`` flag: the first tracks a green bridge mask, the second a blue
    ("Yellow"-named) mask. With ``green = 0`` the green branch is dead code —
    presumably kept for a different field layout (TODO confirm). Both loops:
    locate the colored approach area and the grey ground ("black hole" frame),
    derive bottom-edge angle and center, then run a 4-step state machine
    (step 0..3) that queues motions via ``action_append`` until the robot
    lies forward across the hole. Loops while ``state == 8``.
    """
    global ChestOrg_copy, state, state_sel, step, reset, skip, debug
    r_w = chest_r_width
    r_h = chest_r_height
    state_sel = 'hole'
    step = 0
    state = 8
    green = 0  # branch selector; 0 means the blue-mask loop below runs
    if green == 1:
        while state == 8:  # perception + action loop (green variant)
            # Start image processing.
            if True:  # placeholder condition ("head sees the yellow area")
                t1 = cv2.getTickCount()
                HeadOrg_copy = HeadOrg_img.copy()
                ChestOrg_copy = np.rot90(ChestOrg_img)
                ChestOrg_copy = ChestOrg_copy.copy()
                cv2.rectangle(ChestOrg_copy, (0, 580), (480, 640), (255, 255, 255), -1)  # whiteout bottom strip to hide feet
                handling = cv2.resize(ChestOrg_copy, (r_w, r_h), interpolation=cv2.INTER_CUBIC)  # resize
                frame_gauss = cv2.GaussianBlur(handling, (3, 3), 0)  # Gaussian blur
                frame_hsv = cv2.cvtColor(frame_gauss, cv2.COLOR_BGR2HSV)  # to HSV
                # Green Green Green_
                frame = cv2.inRange(frame_hsv, color_range['green_bridge'][0],
                                    color_range['green_bridge'][1])  # mask the green area
                opened = cv2.morphologyEx(frame, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))  # open: denoise
                closed = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, np.ones((9, 9), np.uint8))  # close: bridge gaps
                (_, contours, hierarchy) = cv2.findContours(closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)  # contours
                cv2.drawContours(handling, contours, 0, (0, 255, 0), 3)  # draw contour (fftest)
                area_sum = getAreaSumContour(contours)
                percent = round(100 * area_sum / (r_w * r_h), 2)
                areaMaxContour, area_max = getAreaMaxContour1(contours)  # largest contour
                if areaMaxContour is not None:
                    Green_bottom_left = areaMaxContour[0][0]  # init with first point
                    Green_bottom_right = areaMaxContour[0][0]  # init with first point
                    Green_top_right = areaMaxContour[0][0]  # top-right corner
                    Green_top_left = areaMaxContour[0][0]  # top-left corner
                    for c in areaMaxContour:  # scan for the four extreme corners
                        # Two bottom-edge vertices.
                        if c[0][0] + 1 * (r_h - c[0][1]) < Green_bottom_left[0] + 1. * (r_h - Green_bottom_left[1]):
                            Green_bottom_left = c[0]
                        if c[0][0] + 1 * c[0][1] > Green_bottom_right[0] + 1 * Green_bottom_right[1]:
                            Green_bottom_right = c[0]
                        # Two top-edge vertices.
                        if c[0][0] + 1.5 * c[0][1] < Green_top_left[0] + 1.5 * Green_top_left[1]:
                            Green_top_left = c[0]
                        if (r_w - c[0][0]) + 1.5 * c[0][1] < (r_w - Green_top_right[0]) + 1.5 * Green_top_right[1]:
                            Green_top_right = c[0]
                    Green_angle_bottom = - math.atan((Green_bottom_right[1] - Green_bottom_left[1]) / (
                            Green_bottom_right[0] - Green_bottom_left[0])) * 180.0 / math.pi
                    Green_angle_top = - math.atan((Green_top_right[1] - Green_top_left[1]) / (
                            Green_top_right[0] - Green_top_left[0])) * 180.0 / math.pi
                    Green_bottom_center_x = int((Green_bottom_right[0] + Green_bottom_left[0]) / 2)
                    Green_bottom_center_y = int((Green_bottom_right[1] + Green_bottom_left[1]) / 2)
                    # Green_top_center_x = int((Green_top_right[0] + Green_top_left[0]) / 2)
                    # Green_top_center_y = int((Green_top_right[1] + Green_top_left[1]) / 2)
                    if img_debug:
                        cv2.circle(handling, (Green_bottom_right[0], Green_bottom_right[1]), 5, [0, 255, 255], 2)
                        cv2.circle(handling, (Green_bottom_left[0], Green_bottom_left[1]), 5, [0, 255, 255], 2)
                        cv2.circle(handling, (Green_bottom_center_x, Green_bottom_center_y), 5, [0, 0, 255], 2)
                        # cv2.circle(handling, (Green_top_right[0], Green_top_right[1]), 5, [255, 0, 255], 2)
                        # cv2.circle(handling, (Green_top_left[0], Green_top_left[1]), 5, [255, 0, 255], 2)
                        # cv2.circle(handling, (Green_top_center_x, Green_top_center_y), 5, [255, 0, 0], 2)
                else:
                    # No green area found: neutral defaults.
                    Green_angle_bottom = 0
                    # Green_angle_top = 0
                    Green_bottom_center_x = 0.5 * r_w
                    Green_bottom_center_y = 0
                # Grey Grey Grey Grey Grey_
                Grey_angle_bottom = 0
                Grey_bottom_center_x = 0
                Grey_bottom_center_y = 0
                frame = cv2.inRange(frame_hsv, color_range['grey_ground'][0],
                                    color_range['grey_ground'][1])  # mask the grey ground
                opened = cv2.morphologyEx(frame, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))  # open: denoise
                # NOTE(review): closes `frame`, not `opened` — the open result is
                # discarded here; confirm whether `opened` was intended.
                closed = cv2.morphologyEx(frame, cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8))  # close: bridge gaps
                (_, contours, hierarchy) = cv2.findContours(closed, cv2.RETR_LIST,
                                                            cv2.CHAIN_APPROX_NONE)  # contours, CHAIN_APPROX_NONE
                # areaMaxContour, area_max = getAreaMaxContour1(contours)  # largest contour
                areaMaxContour = getSumContour(contours, 100)  # merged contour
                if areaMaxContour is not None:
                    Grey_bottom_left = areaMaxContour[0][0]  # init with first point
                    Grey_bottom_right = areaMaxContour[0][0]  # init with first point
                    for c in areaMaxContour:  # scan for the bottom corners
                        # Two bottom-edge vertices.
                        if c[0][0] + 1 * (r_h - c[0][1]) < Grey_bottom_left[0] + 1. * (r_h - Grey_bottom_left[1]):
                            Grey_bottom_left = c[0]
                        if c[0][0] + 1 * c[0][1] > Grey_bottom_right[0] + 1 * Grey_bottom_right[1]:
                            Grey_bottom_right = c[0]
                    Grey_angle_bottom = - math.atan((Grey_bottom_right[1] - Grey_bottom_left[1]) / (
                            Grey_bottom_right[0] - Grey_bottom_left[0])) * 180.0 / math.pi
                    Grey_bottom_center_x = (Grey_bottom_right[0] + Grey_bottom_left[0]) / 2
                    Grey_bottom_center_y = (Grey_bottom_right[1] + Grey_bottom_left[1]) / 2
                    rect = cv2.minAreaRect(areaMaxContour)
                    # center, w_h, angle = rect  # center, width/height, rotation
                    box = np.int0(cv2.boxPoints(rect))  # corner coordinates
                    if img_debug:
                        cv2.circle(handling, (Grey_bottom_right[0], Grey_bottom_right[1]), 5, [0, 255, 255], 2)
                        cv2.circle(handling, (Grey_bottom_left[0], Grey_bottom_left[1]), 5, [0, 255, 255], 2)
                        cv2.circle(handling, (int(Grey_bottom_center_x), int(Grey_bottom_center_y)), 5, [255, 0, 0], 2)
                        cv2.drawContours(handling, [box], 0, (0, 0, 255), 2)  # draw the big rectangle
                else:  # grey/black hole not detected
                    # NOTE(review): sets Grey_center_*, but the state machine reads
                    # Grey_bottom_center_* (defaulted above) — likely vestigial names.
                    angle = 0
                    Grey_center_x = 0.5 * r_w
                    Grey_center_y = 0
                if img_debug:
                    t2 = cv2.getTickCount()
                    time_r = (t2 - t1) / cv2.getTickFrequency()
                    fps = 1.0 / time_r
                    cv2.putText(handling, "step:" + str(step), (30, 440), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0),
                                2)  # (0, 0, 255)BGR
                    cv2.putText(handling, "fps:" + str(int(fps)), (30, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0),
                                2)  # (0, 0, 255)BGR
                    cv2.putText(handling, 'area: ' + str(percent) + '%', (30, 420), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                                (0, 0, 0), 2)
                    cv2.putText(handling,
                                'Green_center(x,y): ' + str(Green_bottom_center_x) + ', ' + str(Green_bottom_center_y),
                                (30, 400), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
                    cv2.putText(handling,
                                'Grey_center(x,y): ' + str(Grey_bottom_center_x) + ', ' + str(Grey_bottom_center_y),
                                (30, 380), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
                    cv2.putText(handling, 'Green_angle_bottom: ' + str(Green_angle_bottom), (30, 350),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
                    cv2.putText(handling, 'Grey_angle_bottom: ' + str(Grey_angle_bottom), (30, 330),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
                    cv2.drawContours(handling, contours, -1, (255, 0, 255), 3)
                    cv2.imshow('handling', handling)  # show debug overlay
                    # cv2.imshow('Green_mask', closed)
                    # cv2.imshow('Grey_mask', closed)
                    # cv2.imshow('Head_Camera', HeadOrg_copy)
                    k = cv2.waitKey(10)
                    if k == 27:  # ESC quits the loop
                        cv2.destroyWindow('closed_pic')
                        cv2.destroyWindow('org_img_copy')
                        break
                    elif k == ord('s'):  # 's' saves a snapshot
                        print("save picture123")
                        cv2.imwrite("picture123.jpg", HeadOrg_copy)  # save snapshot
                if step == 0:  # stride forward; align angle off the green bottom edge, then keep striding
                    if Grey_bottom_center_y > 340:  # grey frame already close
                        print("2621L step 111")
                        step = 1
                    if Green_bottom_center_y < 320:
                        print("2621L head_angle_dis ", Green_bottom_center_y)
                        # headfftest
                        print("226L 快走前进 Forwalk05", Green_bottom_center_y)
                        action_append("Forwalk05")
                        # print("313L 前进33333")
                        # action_append( "forwardSlow0403")
                        # action_append( "forwardSlow0403")
                        # action_append( "forwardSlow0403")
                    elif Green_bottom_center_y < 440:
                        print("2630L head_angle_dis ", Green_bottom_center_y)
                        # headfftest
                        print("2585L 快走前进 fastForward03 ", Green_bottom_center_y)
                        action_append("Forwalk02")
                    else:  # Green_bottom_center_y >= 440
                        if Green_angle_bottom < -4:
                            # NOTE(review): both sub-branches queue turn001R; the
                            # split on -10 only changes the log line.
                            if Green_angle_bottom < -10:
                                print(Green_angle_bottom, " Green_angle_bottom < -10 向右转动 turn001R")
                                action_append("turn001R")
                            else:
                                print(Green_angle_bottom, " Green_angle_bottom < -2 向右转动")
                                action_append("turn001R")
                        elif Green_angle_bottom > 4:
                            if Green_angle_bottom > 10:
                                print(Green_angle_bottom, " Green_angle_bottom > 10 向左转动 turn001L")
                                action_append("turn001L")
                            else:
                                print(Green_angle_bottom, " Green_angle_bottom > 2 向左转动")
                                action_append("turn001L")
                        elif -4 <= Green_angle_bottom <= 4:
                            # Lateral centering disabled (kept for reference):
                            # if Green_bottom_center_x < 200:
                            #     print("355L 向左移动 Left02move")
                            #     action_append( "Left02move")
                            # elif Green_bottom_center_x > 280:
                            #     print("358L 向右移动 Right02move")
                            #     action_append( "Right02move")
                            # el
                            if Green_bottom_center_y < 360:
                                print("349L Green_bottom_center_y < 66% 前进")
                                action_append("forwardSlow0403")
                            else:
                                print("2665L step goto 11111")
                                step = 1
                elif step == 1:  # center laterally on the green area until the grey frame is reached
                    if Grey_bottom_center_y > 340:  # grey frame reached
                        print("ste 222")
                        step = 2
                    elif Green_bottom_center_x < 200:
                        print("369L 向左移动 Left02move")
                        action_append("Left02move")
                    elif Green_bottom_center_x > 280:
                        print("372L 向右移动 Right02move")
                        action_append("Right02move")
                    else:
                        print("363L 继续前进")
                        action_append("forwardSlow0403")
                elif step == 2:  # align heading and position off the grey bottom edge
                    # angle
                    if Grey_angle_bottom < -2.0:  # turn right
                        if Grey_angle_bottom < -6.0:  # big right turn
                            print("386L Grey_angle_bottom < -6 右转 turn001R ", Grey_angle_bottom)
                            action_append("turn001R")
                        # NOTE(review): duplicate condition — this elif repeats
                        # `< -6.0` and is unreachable; `-4.0` was likely intended.
                        elif Grey_angle_bottom < -6.0:  # big right turn
                            print("386L Grey_angle_bottom < -4 右转 turn001R ", Grey_angle_bottom)
                            action_append("turn001R")
                        else:
                            print("389L Grey_angle_bottom < -2 右转 turn000R ", Grey_angle_bottom)
                            action_append("turn000R")
                            # time.sleep(1) # timefftest
                    elif Grey_angle_bottom > 2.0:  # turn left
                        if Grey_angle_bottom > 6.0:  # big left turn
                            print("393L Grey_angle_bottom > 6 大左转 turn001L ", Grey_angle_bottom)
                            action_append("turn001L")
                        # NOTE(review): duplicate condition — unreachable elif (`> 6.0`
                        # repeated); `4.0` was likely intended.
                        elif Grey_angle_bottom > 6.0:  # big left turn
                            print("393L Grey_angle_bottom > 4 大左转 turn001L ", Grey_angle_bottom)
                            action_append("turn001L")
                        else:
                            print("2709L Grey_angle_bottom > 2 左转 turn000L ", Grey_angle_bottom)
                            action_append("turn000L")
                            # time.sleep(1) # timefftest
                    # x, deadband 215..235 (target ~225/240)
                    elif Grey_bottom_center_x > 235:  # shift right (249.6)
                        if Grey_bottom_center_x > 245:  # big right shift
                            print("389L Grey_bottom_center_x > 0.54 大右移 Right02move ", Grey_bottom_center_x)
                            action_append("Right02move")
                        else:
                            print("392L Grey_bottom_center_x > 0.54 小右移 Right1move ", Grey_bottom_center_x)
                            action_append("Right1move")
                    elif Grey_bottom_center_x < 215:  # shift left (230.4)
                        if Grey_bottom_center_x < 205:  # big left shift
                            print("2722L Grey_bottom_center_x < 0.48 大左移 Left02move ", Grey_bottom_center_x)
                            action_append("Left02move")
                        else:
                            print("399L Grey_bottom_center_x < 0.48 小左移 Left1move ", Grey_bottom_center_x)
                            action_append("Left1move")
                    # y
                    elif Grey_bottom_center_y > 480:
                        print("step 33333")
                        step = 3
                    elif Grey_bottom_center_y <= 430:
                        print("383L 继续接近黑线,请稍微往前挪动 forwardSlow0403")  # <480 Forwalk01
                        action_append("forwardSlow0403")
                    elif Grey_bottom_center_y <= 450:
                        print("383L 继续接近黑线,请稍微往前挪动 Forwalk01")  # Forwalk01
                        action_append("Forwalk01")
                    else:
                        print("386L 继续接近黑线,请稍微往前挪动 Forwalk00")
                        action_append("Forwalk00")
                elif step == 3:  # keep closing in once angle/position are set, then lie forward
                    if Grey_bottom_center_y < 500:  # small step forward
                        print("2627L Grey_bottom_center_y < 500 小步前挪 Forwalk01")
                        action_append("Forwalk01")
                    elif Grey_bottom_center_y < 560:  # small step forward
                        print("2630L Grey_bottom_center_y < 560 小步前挪 Forwalk00")
                        action_append("Forwalk00")
                    elif Grey_bottom_center_y >= 580:
                        print("417L Grey_bottom_center_y > 580 后退一点点 Back0Run")
                        action_append("Back0Run")
                    elif 500 <= Grey_bottom_center_y <= 580:
                        # In range: lie forward to cross the hole, then stand and walk on.
                        print(" 趴地过关 ")
                        action_append("Stand")
                        action_append("lieForward")
                        print("307 趴地过关 ")
                        action_append("Stand")
                        action_append("forwardSlow0403")
                        action_append("forwardSlow0403")
                        action_append("forwardSlow0403")
                        action_append("Stand")
                        cv2.destroyAllWindows()
                        break
    else:
        while state == 8:  # perception + action loop (blue-mask variant, the one that runs)
            # Start image processing.
            if True:  # placeholder condition ("head sees the yellow area")
                t1 = cv2.getTickCount()
                HeadOrg_copy = HeadOrg_img.copy()
                ChestOrg_copy = np.rot90(ChestOrg_img)
                ChestOrg_copy = ChestOrg_copy.copy()
                cv2.rectangle(ChestOrg_copy, (0, 580), (480, 640), (255, 255, 255), -1)  # whiteout bottom strip to hide feet
                handling = cv2.resize(ChestOrg_copy, (r_w, r_h), interpolation=cv2.INTER_CUBIC)  # resize
                frame_gauss = cv2.GaussianBlur(handling, (3, 3), 0)  # Gaussian blur
                frame_hsv = cv2.cvtColor(frame_gauss, cv2.COLOR_BGR2HSV)  # to HSV
                # Yellow Yellow Yellow_  (variables say Yellow but the mask is 'blue')
                frame = cv2.inRange(frame_hsv, color_range['blue'][0],
                                    color_range['blue'][1])  # mask the blue area
                opened = cv2.morphologyEx(frame, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))  # open: denoise
                closed = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, np.ones((9, 9), np.uint8))  # close: bridge gaps
                (_, contours, hierarchy) = cv2.findContours(closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)  # contours
                cv2.drawContours(handling, contours, 0, (0, 255, 0), 3)  # draw contour (fftest)
                area_sum = getAreaSumContour(contours)
                percent = round(100 * area_sum / (r_w * r_h), 2)
                areaMaxContour, area_max = getAreaMaxContour1(contours)  # largest contour
                if areaMaxContour is not None:
                    Yellow_bottom_left = areaMaxContour[0][0]  # init with first point
                    Yellow_bottom_right = areaMaxContour[0][0]  # init with first point
                    Yellow_top_right = areaMaxContour[0][0]  # top-right corner
                    Yellow_top_left = areaMaxContour[0][0]  # top-left corner
                    for c in areaMaxContour:  # scan for the four extreme corners
                        # Two bottom-edge vertices.
                        if c[0][0] + 1 * (r_h - c[0][1]) < Yellow_bottom_left[0] + 1. * (r_h - Yellow_bottom_left[1]):
                            Yellow_bottom_left = c[0]
                        if c[0][0] + 1 * c[0][1] > Yellow_bottom_right[0] + 1 * Yellow_bottom_right[1]:
                            Yellow_bottom_right = c[0]
                        # Two top-edge vertices.
                        if c[0][0] + 1.5 * c[0][1] < Yellow_top_left[0] + 1.5 * Yellow_top_left[1]:
                            Yellow_top_left = c[0]
                        if (r_w - c[0][0]) + 1.5 * c[0][1] < (r_w - Yellow_top_right[0]) + 1.5 * Yellow_top_right[1]:
                            Yellow_top_right = c[0]
                    Yellow_angle_bottom = - math.atan((Yellow_bottom_right[1] - Yellow_bottom_left[1]) / (
                            Yellow_bottom_right[0] - Yellow_bottom_left[0])) * 180.0 / math.pi
                    Yellow_angle_top = - math.atan((Yellow_top_right[1] - Yellow_top_left[1]) / (
                            Yellow_top_right[0] - Yellow_top_left[0])) * 180.0 / math.pi
                    Yellow_bottom_center_x = int((Yellow_bottom_right[0] + Yellow_bottom_left[0]) / 2)
                    Yellow_bottom_center_y = int((Yellow_bottom_right[1] + Yellow_bottom_left[1]) / 2)
                    # Yellow_top_center_x = int((Yellow_top_right[0] + Yellow_top_left[0]) / 2)
                    # Yellow_top_center_y = int((Yellow_top_right[1] + Yellow_top_left[1]) / 2)
                    if img_debug:
                        cv2.circle(handling, (Yellow_bottom_right[0], Yellow_bottom_right[1]), 5, [0, 255, 255], 2)
                        cv2.circle(handling, (Yellow_bottom_left[0], Yellow_bottom_left[1]), 5, [0, 255, 255], 2)
                        cv2.circle(handling, (Yellow_bottom_center_x, Yellow_bottom_center_y), 5, [0, 0, 255], 2)
                        # cv2.circle(handling, (Yellow_top_right[0], Yellow_top_right[1]), 5, [255, 0, 255], 2)
                        # cv2.circle(handling, (Yellow_top_left[0], Yellow_top_left[1]), 5, [255, 0, 255], 2)
                        # cv2.circle(handling, (Yellow_top_center_x, Yellow_top_center_y), 5, [255, 0, 0], 2)
                else:
                    # No blue area found: neutral defaults.
                    Yellow_angle_bottom = 0
                    # Yellow_angle_top = 0
                    Yellow_bottom_center_x = 0.5 * r_w
                    Yellow_bottom_center_y = 0
                # Grey Grey Grey Grey Grey_
                Grey_angle_bottom = 0
                Grey_bottom_center_x = 0
                Grey_bottom_center_y = 0
                frame = cv2.inRange(frame_hsv, color_range['grey_ground'][0],
                                    color_range['grey_ground'][1])  # mask the grey ground
                opened = cv2.morphologyEx(frame, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))  # open: denoise
                # NOTE(review): closes `frame`, not `opened` — the open result is
                # discarded here; confirm whether `opened` was intended.
                closed = cv2.morphologyEx(frame, cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8))  # close: bridge gaps
                (_, contours, hierarchy) = cv2.findContours(closed, cv2.RETR_LIST,
                                                            cv2.CHAIN_APPROX_NONE)  # contours, CHAIN_APPROX_NONE
                # areaMaxContour, area_max = getAreaMaxContour1(contours)  # largest contour
                areaMaxContour = getSumContour(contours, 100)  # merged contour
                if areaMaxContour is not None:
                    Grey_bottom_left = areaMaxContour[0][0]  # init with first point
                    Grey_bottom_right = areaMaxContour[0][0]  # init with first point
                    for c in areaMaxContour:  # scan for the bottom corners
                        # Two bottom-edge vertices.
                        if c[0][0] + 1 * (r_h - c[0][1]) < Grey_bottom_left[0] + 1. * (r_h - Grey_bottom_left[1]):
                            Grey_bottom_left = c[0]
                        if c[0][0] + 1 * c[0][1] > Grey_bottom_right[0] + 1 * Grey_bottom_right[1]:
                            Grey_bottom_right = c[0]
                    Grey_angle_bottom = - math.atan((Grey_bottom_right[1] - Grey_bottom_left[1]) / (
                            Grey_bottom_right[0] - Grey_bottom_left[0])) * 180.0 / math.pi
                    Grey_bottom_center_x = (Grey_bottom_right[0] + Grey_bottom_left[0]) / 2
                    Grey_bottom_center_y = (Grey_bottom_right[1] + Grey_bottom_left[1]) / 2
                    rect = cv2.minAreaRect(areaMaxContour)
                    # center, w_h, angle = rect  # center, width/height, rotation
                    box = np.int0(cv2.boxPoints(rect))  # corner coordinates
                    if img_debug:
                        cv2.circle(handling, (Grey_bottom_right[0], Grey_bottom_right[1]), 5, [0, 255, 255], 2)
                        cv2.circle(handling, (Grey_bottom_left[0], Grey_bottom_left[1]), 5, [0, 255, 255], 2)
                        cv2.circle(handling, (int(Grey_bottom_center_x), int(Grey_bottom_center_y)), 5, [255, 0, 0], 2)
                        cv2.drawContours(handling, [box], 0, (0, 0, 255), 2)  # draw the big rectangle
                else:  # grey/black hole not detected
                    # NOTE(review): sets Grey_center_*, but the state machine reads
                    # Grey_bottom_center_* (defaulted above) — likely vestigial names.
                    angle = 0
                    Grey_center_x = 0.5 * r_w
                    Grey_center_y = 0
                if img_debug:
                    t2 = cv2.getTickCount()
                    time_r = (t2 - t1) / cv2.getTickFrequency()
                    fps = 1.0 / time_r
                    cv2.putText(handling, "step:" + str(step), (30, 440), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0),
                                2)  # (0, 0, 255)BGR
                    cv2.putText(handling, "fps:" + str(int(fps)), (30, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0),
                                2)  # (0, 0, 255)BGR
                    cv2.putText(handling, 'area: ' + str(percent) + '%', (30, 420), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                                (0, 0, 0), 2)
                    cv2.putText(handling,
                                'Yellow_center(x,y): ' + str(Yellow_bottom_center_x) + ', ' + str(Yellow_bottom_center_y),
                                (30, 400), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
                    cv2.putText(handling,
                                'Grey_center(x,y): ' + str(Grey_bottom_center_x) + ', ' + str(Grey_bottom_center_y),
                                (30, 380), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
                    cv2.putText(handling, 'Yellow_angle_bottom: ' + str(Yellow_angle_bottom), (30, 350),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
                    cv2.putText(handling, 'Grey_angle_bottom: ' + str(Grey_angle_bottom), (30, 330),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
                    cv2.drawContours(handling, contours, -1, (255, 0, 255), 3)
                    cv2.imshow('handling', handling)  # show debug overlay
                    # cv2.imshow('Yellow_mask', closed)
                    # cv2.imshow('Grey_mask', closed)
                    # cv2.imshow('Head_Camera', HeadOrg_copy)
                    k = cv2.waitKey(10)
                    if k == 27:  # ESC quits the loop
                        cv2.destroyWindow('closed_pic')
                        cv2.destroyWindow('org_img_copy')
                        break
                    elif k == ord('s'):  # 's' saves a snapshot
                        print("save picture123")
                        cv2.imwrite("picture123.jpg", HeadOrg_copy)  # save snapshot
                if step == 0:  # stride forward; align angle off the blue bottom edge, then keep striding
                    if Grey_bottom_center_y > 340:  # grey frame already close
                        print("2621L step 111")
                        step = 1
                    if Yellow_bottom_center_y < 320:
                        print("2621L head_angle_dis ", Yellow_bottom_center_y)
                        # headfftest
                        print("226L 快走前进 Forwalk05", Yellow_bottom_center_y)
                        action_append("Forwalk05")
                        # print("313L 前进33333")
                        # action_append( "forwardSlow0403")
                        # action_append( "forwardSlow0403")
                        # action_append( "forwardSlow0403")
                    elif Yellow_bottom_center_y < 440:
                        print("2630L head_angle_dis ", Yellow_bottom_center_y)
                        # headfftest
                        print("2585L 快走前进 fastForward03 ", Yellow_bottom_center_y)
                        action_append("Forwalk03")
                    else:  # Yellow_bottom_center_y >= 440
                        if Yellow_angle_bottom < -4:
                            # NOTE(review): both sub-branches queue turn001R; the
                            # split on -10 only changes the log line.
                            if Yellow_angle_bottom < -10:
                                print(Yellow_angle_bottom, " Yellow_angle_bottom < -10 向右转动 turn001R")
                                action_append("turn001R")
                            else:
                                print(Yellow_angle_bottom, " Yellow_angle_bottom < -2 向右转动")
                                action_append("turn001R")
                        elif Yellow_angle_bottom > 4:
                            if Yellow_angle_bottom > 10:
                                print(Yellow_angle_bottom, " Yellow_angle_bottom > 10 向左转动 turn001L")
                                action_append("turn001L")
                            else:
                                print(Yellow_angle_bottom, " Yellow_angle_bottom > 2 向左转动")
                                action_append("turn001L")
                        elif -4 <= Yellow_angle_bottom <= 4:
                            # Lateral centering disabled (kept for reference):
                            # if Yellow_bottom_center_x < 200:
                            #     print("355L 向左移动 Left02move")
                            #     action_append( "Left02move")
                            # elif Yellow_bottom_center_x > 280:
                            #     print("358L 向右移动 Right02move")
                            #     action_append( "Right02move")
                            # el
                            if Yellow_bottom_center_y < 360:
                                print("349L Yellow_bottom_center_y < 66% 前进")
                                action_append("forwardSlow0403")
                            else:
                                print("2665L step goto 11111")
                                step = 1
                elif step == 1:  # center laterally on the blue area until the grey frame is reached
                    if Grey_bottom_center_y > 340:  # grey frame reached
                        print("ste 222")
                        step = 2
                    elif Yellow_bottom_center_x < 200:
                        print("369L 向左移动 Left02move")
                        action_append("Left02move")
                    elif Yellow_bottom_center_x > 280:
                        print("372L 向右移动 Right02move")
                        action_append("Right02move")
                    else:
                        print("363L 继续前进")
                        action_append("forwardSlow0403")
                elif step == 2:  # align heading and position off the grey bottom edge
                    # angle
                    if Grey_angle_bottom < -2.0:  # turn right
                        if Grey_angle_bottom < -6.0:  # big right turn
                            print("386L Grey_angle_bottom < -6 右转 turn001R ", Grey_angle_bottom)
                            action_append("turn001R")
                        # NOTE(review): duplicate condition — this elif repeats
                        # `< -6.0` and is unreachable; `-4.0` was likely intended.
                        elif Grey_angle_bottom < -6.0:  # big right turn
                            print("386L Grey_angle_bottom < -4 右转 turn001R ", Grey_angle_bottom)
                            action_append("turn001R")
                        else:
                            print("389L Grey_angle_bottom < -2 右转 turn000R ", Grey_angle_bottom)
                            action_append("turn000R")
                            # time.sleep(1) # timefftest
                    elif Grey_angle_bottom > 2.0:  # turn left
                        if Grey_angle_bottom > 6.0:  # big left turn
                            print("393L Grey_angle_bottom > 6 大左转 turn001L ", Grey_angle_bottom)
                            action_append("turn001L")
                        # NOTE(review): duplicate condition — unreachable elif (`> 6.0`
                        # repeated); `4.0` was likely intended.
                        elif Grey_angle_bottom > 6.0:  # big left turn
                            print("393L Grey_angle_bottom > 4 大左转 turn001L ", Grey_angle_bottom)
                            action_append("turn001L")
                        else:
                            print("2709L Grey_angle_bottom > 2 左转 turn000L ", Grey_angle_bottom)
                            action_append("turn000L")
                            # time.sleep(1) # timefftest
                    # x, deadband 215..235 (target ~225/240)
                    elif Grey_bottom_center_x > 235:  # shift right (249.6)
                        if Grey_bottom_center_x > 245:  # big right shift
                            print("389L Grey_bottom_center_x > 0.54 大右移 Right02move ", Grey_bottom_center_x)
                            action_append("Right02move")
                        else:
                            print("392L Grey_bottom_center_x > 0.54 小右移 Right1move ", Grey_bottom_center_x)
                            action_append("Right1move")
                    elif Grey_bottom_center_x < 215:  # shift left (230.4)
                        if Grey_bottom_center_x < 205:  # big left shift
                            print("2722L Grey_bottom_center_x < 0.48 大左移 Left02move ", Grey_bottom_center_x)
                            action_append("Left02move")
                        else:
                            print("399L Grey_bottom_center_x < 0.48 小左移 Left1move ", Grey_bottom_center_x)
                            action_append("Left1move")
                    # y
                    elif Grey_bottom_center_y > 480:
                        print("step 33333")
                        step = 3
                    elif Grey_bottom_center_y <= 430:
                        print("383L 继续接近黑线,请稍微往前挪动 forwardSlow0403")  # <480 Forwalk01
                        action_append("forwardSlow0403")
                    elif Grey_bottom_center_y <= 450:
                        print("383L 继续接近黑线,请稍微往前挪动 Forwalk01")  # Forwalk01
                        action_append("Forwalk01")
                    else:
                        print("386L 继续接近黑线,请稍微往前挪动 Forwalk00")
                        action_append("Forwalk00")
                elif step == 3:  # keep closing in once angle/position are set, then lie forward
                    if Grey_bottom_center_y < 500:  # small step forward
                        print("2627L Grey_bottom_center_y < 500 小步前挪 Forwalk01")
                        action_append("Forwalk01")
                    elif Grey_bottom_center_y < 560:  # small step forward
                        print("2630L Grey_bottom_center_y < 560 小步前挪 Forwalk00")
                        action_append("Forwalk00")
                    elif Grey_bottom_center_y >= 580:
                        print("417L Grey_bottom_center_y > 580 后退一点点 Back0Run")
                        action_append("Back0Run")
                    elif 500 <= Grey_bottom_center_y <= 580:
                        # In range: lie forward to cross the hole, then stand and walk on.
                        print(" 趴地过关 ")
                        action_append("Stand")
                        action_append("lieForward")
                        print("307 趴地过关 ")
                        action_append("Stand")
                        action_append("forwardSlow0403")
                        action_append("forwardSlow0403")
                        action_append("forwardSlow0403")
                        action_append("Stand")
                        cv2.destroyAllWindows()
                        break
################################################第一关:起点#############################################
def start_door():
    """First stage: wait at the start gate until the bar lifts, then walk through.

    Step 0 watches the chest camera for the yellow/black gate bar; while the
    bar's contour covers more than 1% of the frame the robot waits, otherwise
    it fast-walks forward and moves to step 1.  Step 1 announces the next
    stage and breaks out of the loop.

    Relies on module globals: ChestOrg_img (camera frame), color_range,
    chest_r_width/chest_r_height, img_debug, and the action_append() queue.
    """
    global HeadOrg_img, state, state_sel, step, reset, skip, img_debug
    state_sel = 'hole'
    state = 1
    if state == 1:  # initialization (always true right after the assignment above)
        print("/-/-/-/-/-/-/-/-/-进入start_door")
        step = 0
    else:
        return
    while state == 1:
        if step == 0:  # decide whether the gate bar is still down
            t1 = cv2.getTickCount()  # timing start for the debug FPS overlay
            org_img_copy = ChestOrg_img.copy()
            org_img_copy = np.rot90(org_img_copy)
            handling = org_img_copy.copy()
            # Add a white border so shapes touching the frame edge still
            # produce closed contours.
            border = cv2.copyMakeBorder(handling, 12, 12, 16, 16, borderType=cv2.BORDER_CONSTANT,
                                        value=(255, 255, 255))
            handling = cv2.resize(border, (chest_r_width, chest_r_height), interpolation=cv2.INTER_CUBIC)  # rescale
            frame_gauss = cv2.GaussianBlur(handling, (21, 21), 0)  # Gaussian blur
            frame_hsv = cv2.cvtColor(frame_gauss, cv2.COLOR_BGR2HSV)  # convert to HSV space
            # Threshold the yellow and black door colors and merge both masks.
            frame_door_yellow = cv2.inRange(frame_hsv, color_range['yellow_door'][0],
                                            color_range['yellow_door'][1])
            frame_door_black = cv2.inRange(frame_hsv, color_range['black_door'][0],
                                           color_range['black_door'][1])
            frame_door = cv2.add(frame_door_yellow, frame_door_black)
            open_pic = cv2.morphologyEx(frame_door, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8))  # opening: drop noise
            closed_pic = cv2.morphologyEx(open_pic, cv2.MORPH_CLOSE, np.ones((50, 50), np.uint8))  # closing: join regions
            (image, contours, hierarchy) = cv2.findContours(closed_pic, cv2.RETR_EXTERNAL,
                                                            cv2.CHAIN_APPROX_NONE)  # find contours
            areaMaxContour, area_max = getAreaMaxContour1(contours)  # pick the largest contour
            percent = round(100 * area_max / (chest_r_width * chest_r_height), 2)  # its share of the frame, in %
            if areaMaxContour is not None:
                rect = cv2.minAreaRect(areaMaxContour)  # min-area bounding box
                box = np.int0(cv2.boxPoints(rect))  # its corner coordinates
                if img_debug:
                    cv2.drawContours(handling, [box], 0, (153, 200, 0), 2)  # draw box on the debug image
            if img_debug:
                cv2.putText(handling, 'area: ' + str(percent) + '%', (30, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                            (0, 0, 255), 2)
                t2 = cv2.getTickCount()
                time_r = (t2 - t1) / cv2.getTickFrequency()
                fps = 1.0 / time_r
                cv2.putText(handling, "fps:" + str(int(fps)), (30, 150), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                cv2.imshow('handling', handling)
                cv2.imshow('frame_door_yellow', frame_door_yellow)
                cv2.imshow('frame_door_black', frame_door_black)
                cv2.imshow('closed_pic', closed_pic)
                k = cv2.waitKey(10)
                if k == 27:  # ESC: close debug windows and abort this stage
                    cv2.destroyWindow('closed_pic')
                    cv2.destroyWindow('handling')
                    break
                elif k == ord('s'):  # 's': save a snapshot of the raw frame
                    print("save picture123")
                    cv2.imwrite("picture123.jpg", org_img_copy)
            # Decide from the bar's area ratio whether to keep waiting.
            if percent > 1:  # bar detected — hold position
                print(percent, "%")
                print("有障碍 等待 contours len:", len(contours))
                time.sleep(0.1)
            else:  # bar gone — fast-walk through the gate
                print(percent)
                # print("231L 执行3步")
                # action_append("forwardSlow0403")
                # action_append("forwardSlow0403")
                # action_append("forwardSlow0403")
                print("231L 执行快走555")
                action_append("fastForward04")
                step = 1
        elif step == 1:  # hand over to the next stage
            print("判断下一关是什么~~~~~~~~~~~~~~~~")
            if state_sel is not None:
                print('state_sel: %s' % state_sel)
                print("开启下一关")
                # action_append("forwardSlow0403")
                # action_append("Stand")
                step = 0
                cv2.destroyAllWindows()
                break
            else:
                print("直行0步")
                # C("forwardSlow0403")
if __name__ == '__main__':
    # Wait for any queued robot actions to drain before starting the course.
    while len(CMDcontrol.action_list) > 0:
        print("等待启动")
        time.sleep(1)
    action_append("HeadTurnMM")  # point the head camera forward
    num = 0
    while 1:
        if ChestOrg_img is not None and chest_ret:  # chest camera delivering frames
            k = cv2.waitKey(10)
            if k == 27:  # ESC aborts the whole run
                cv2.destroyWindow('camera_test')
                break
            # Run the obstacle-course stages in order, interleaved with
            # fixed turn/step actions between stages.
            print("start door START")
            start_door()
            action_append("turn010R")
            action_append("Forwalk02")
            square_hole()
            obstacle()
            baffle()
            action_append("turn010L")
            action_append("turn010L")
            action_append("Left02move")
            action_append("Left02move")
            action_append("Forwalk05")
            action_append("Forwalk05")
            Greenbridge()
            # --------------------------------4------------------
            kick_ball()
            print("3123L 快走前进 fastForward 3 3 3")
            action_append("Forwalk05")
            floor()  # stairs stage (台阶fm4)
            action_append("Forwalk05")
            print("end door end door ")
            start_door()
            # Course finished: idle forever.
            while (1):
                print("结束")
                time.sleep(10000)
        else:
            print('image is empty chest_ret:', chest_ret)
            time.sleep(1)
    cv2.destroyAllWindows()
|
whatsapp.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys;
sys.dont_write_bytecode = True;
import binascii;
from whatsapp_defines import WAMetrics;
from whatsapp_binary_writer import whatsappWriteBinary;
import os;
import signal;
import base64;
from threading import Thread, Timer
import math;
import time;
import datetime;
import json;
import io;
from time import sleep;
from threading import Thread;
from Crypto.Cipher import AES;
from Crypto.Hash import SHA256;
from Crypto import Random;
import hashlib;
import hmac;
import traceback;
import websocket;
import curve25519;
import pyqrcode;
from utilities import *;
from whatsapp_binary_reader import whatsappReadBinary;
reload(sys);
sys.setdefaultencoding("utf-8");
def HmacSha256(key, sign):
    """Return the raw HMAC-SHA256 digest of *sign* keyed with *key*."""
    mac = hmac.new(key, sign, hashlib.sha256)
    return mac.digest()
def HKDF(key, length, appInfo=""): # implements RFC 5869, some parts from https://github.com/MirkoDziadzka/pyhkdf
    """Derive *length* bytes of key material from *key* (RFC 5869, SHA-256).

    Extract step uses a 32-byte all-zero salt; expand step concatenates
    HMAC blocks T(i) = HMAC-PRK(T(i-1) | appInfo | i) until enough material
    is produced.  Python 2 only: operates on byte strings (`str`) and uses
    chr() for the one-byte block counter.
    """
    # Extract: PRK = HMAC-SHA256(salt=0^32, IKM=key).
    key = HmacSha256("\0"*32, key);
    keyStream = "";
    keyBlock = "";
    blockIndex = 1;
    # Expand: append successive HMAC blocks until we have >= length bytes.
    while len(keyStream) < length:
        keyBlock = hmac.new(key, msg=keyBlock+appInfo+chr(blockIndex), digestmod=hashlib.sha256).digest();
        blockIndex += 1;
        keyStream += keyBlock;
    return keyStream[:length];
def AESPad(s):
    """Apply PKCS#7 padding so the result length is a multiple of AES.block_size."""
    pad = AES.block_size - len(s) % AES.block_size
    return s + chr(pad) * pad
def to_bytes(n, length, endianess='big'):
    """Serialize the non-negative integer *n* into exactly *length* bytes.

    Args:
        n: non-negative integer to serialize.
        length: number of bytes in the result (n is zero-padded to fit).
        endianess: 'big' (default) or anything else for little-endian.

    Returns:
        A byte string of *length* bytes.
    """
    h = '%x' % n
    # Left-pad the hex string to an even digit count, then zero-fill to
    # 2*length characters.  binascii.unhexlify works on both Python 2 and 3,
    # unlike the Python-2-only str.decode('hex') used previously.
    s = binascii.unhexlify(('0' * (len(h) % 2) + h).zfill(length * 2))
    return s if endianess == 'big' else s[::-1]
def AESUnpad(s):
    """Strip PKCS#7 padding: the last byte encodes the pad length."""
    pad = ord(s[len(s) - 1:])
    return s[:-pad]
def AESEncrypt(key, plaintext): # like "AESPad"/"AESUnpad" from https://stackoverflow.com/a/21928790
    """CBC-encrypt *plaintext* under *key* with a fresh random IV.

    Returns iv + ciphertext so the receiver can recover the IV.
    """
    iv = os.urandom(AES.block_size)
    cbc = AES.new(key, AES.MODE_CBC, iv)
    return iv + cbc.encrypt(AESPad(plaintext))
def WhatsAppEncrypt(encKey, macKey, plaintext):
    """Encrypt-then-MAC: HMAC-SHA256(macKey, ct) prepended to the ciphertext."""
    ciphertext = AESEncrypt(encKey, plaintext)
    return HmacSha256(macKey, ciphertext) + ciphertext # this may need padding to 64 byte boundary
def AESDecrypt(key, ciphertext): # from https://stackoverflow.com/a/20868265
    """Undo AESEncrypt: split off the IV, CBC-decrypt, strip PKCS#7 padding."""
    iv = ciphertext[:AES.block_size]
    body = ciphertext[AES.block_size:]
    return AESUnpad(AES.new(key, AES.MODE_CBC, iv).decrypt(body))
class WhatsAppWebClient:
    """Minimal client for the WhatsApp Web websocket protocol (Python 2 only —
    note the `except ValueError, e` syntax below).

    Handles the login/QR-code handshake, derives the session's AES and HMAC
    keys from the Curve25519 shared secret delivered in the "Conn" message,
    and encrypts/decrypts the binary frames exchanged over
    wss://web.whatsapp.com/ws.
    """

    # Connection state kept at class level (single-client usage assumed).
    websocketIsOpened = False;
    onOpenCallback = None;
    onMessageCallback = None;
    onCloseCallback = None;
    activeWs = None;
    messageSentCount = 0;
    websocketThread = None;
    messageQueue = {}; # maps message tags (provided by WhatsApp) to more information (description and callback)

    # Per-login credentials: client id, server ref from the QR handshake,
    # the Curve25519 key pair, and the derived encKey/macKey session keys.
    loginInfo = {
        "clientId": None,
        "serverRef": None,
        "privateKey": None,
        "publicKey": None,
        "key": {
            "encKey": None,
            "macKey": None
        }
    };

    # Tokens and secrets received from the server after a successful "Conn".
    connInfo = {
        "clientToken": None,
        "serverToken": None,
        "browserToken": None,
        "secret": None,
        "sharedSecret": None,
        "me": None
    };

    def __init__(self, onOpenCallback, onMessageCallback, onCloseCallback):
        # Each callback is a dict containing at least a "func" entry; the
        # whole dict is passed back to the function as context.
        self.onOpenCallback = onOpenCallback;
        self.onMessageCallback = onMessageCallback;
        self.onCloseCallback = onCloseCallback;
        websocket.enableTrace(True);
        self.connect();

    def onOpen(self, ws):
        """Websocket open handler: mark connected and fire the user callback."""
        try:
            self.websocketIsOpened = True;
            if self.onOpenCallback is not None and "func" in self.onOpenCallback:
                self.onOpenCallback["func"](self.onOpenCallback);
            eprint("WhatsApp backend Websocket opened.");
        except:
            eprint(traceback.format_exc());

    def onError(self, ws, error):
        """Websocket error handler: just log."""
        eprint(error);

    def onClose(self, ws):
        """Websocket close handler: mark disconnected and fire the user callback."""
        self.websocketIsOpened = False;
        if self.onCloseCallback is not None and "func" in self.onCloseCallback:
            self.onCloseCallback["func"](self.onCloseCallback);
        eprint("WhatsApp backend Websocket closed.");

    def keepAlive(self):
        """Send the "?,," ping and reschedule itself every 20 seconds."""
        if self.activeWs is not None:
            self.activeWs.send("?,,")
        Timer(20.0, self.keepAlive).start()

    def onMessage(self, ws, message):
        """Dispatch an incoming frame.

        Frames look like "<tag>,<content>".  If the tag matches a pending
        entry in messageQueue this is a reply to one of our requests
        (status / session restore / login); otherwise the content is either
        JSON (connection metadata) or an HMAC-signed, AES-CBC-encrypted
        binary message.
        """
        try:
            messageSplit = message.split(",", 1);
            messageTag = messageSplit[0];
            if len(messageSplit) == 2: messageContent = messageSplit[1]
            else: messageContent = ''
            if messageTag in self.messageQueue: # when the server responds to a client's message
                pend = self.messageQueue[messageTag];
                if pend["desc"] == "_status":
                    # NOTE(review): messageContent is a string here, so
                    # messageContent[0]/[1] are single characters — comparing
                    # them to 'Pong'/True looks wrong; also connInfo never
                    # gains a "pushname" key anywhere in this file, so the
                    # lookup below would raise KeyError.  Confirm intent.
                    if messageContent[0] == 'Pong' and messageContent[1] == True:
                        pend["callback"]({"Connected": True,"user":self.connInfo["me"],"pushname":self.connInfo["pushname"]})
                elif pend["desc"] == "_restoresession":
                    eprint("") # TODO implement Challenge Solving
                elif pend["desc"] == "_login":
                    eprint("Message after login: ", message);
                    self.loginInfo["serverRef"] = json.loads(messageContent)["ref"];
                    eprint("set server id: " + self.loginInfo["serverRef"]);
                    # Generate our Curve25519 key pair; the public key is
                    # embedded in the QR code with serverRef and clientId.
                    self.loginInfo["privateKey"] = curve25519.Private();
                    self.loginInfo["publicKey"] = self.loginInfo["privateKey"].get_public();
                    qrCodeContents = self.loginInfo["serverRef"] + "," + base64.b64encode(self.loginInfo["publicKey"].serialize()) + "," + self.loginInfo["clientId"];
                    eprint("qr code contents: " + qrCodeContents);
                    # Render the QR code as an SVG held in memory.
                    svgBuffer = io.BytesIO(); # from https://github.com/mnooner256/pyqrcode/issues/39#issuecomment-207621532
                    pyqrcode.create(qrCodeContents, error='L').svg(svgBuffer, scale=6, background="rgba(0,0,0,0.0)", module_color="#122E31", quiet_zone=0);
                    if "callback" in pend and pend["callback"] is not None and "func" in pend["callback"] and pend["callback"]["func"] is not None and "tag" in pend["callback"] and pend["callback"]["tag"] is not None:
                        pend["callback"]["func"]({ "type": "generated_qr_code", "image": "data:image/svg+xml;base64," + base64.b64encode(svgBuffer.getvalue()), "content": qrCodeContents }, pend["callback"]);
            else:
                try:
                    jsonObj = json.loads(messageContent); # try reading as json
                except ValueError, e:
                    # Not JSON: binary payload = 32-byte HMAC + AES-CBC body.
                    if messageContent != "":
                        hmacValidation = HmacSha256(self.loginInfo["key"]["macKey"], messageContent[32:]);
                        if hmacValidation != messageContent[:32]:
                            raise ValueError("Hmac mismatch");
                        decryptedMessage = AESDecrypt(self.loginInfo["key"]["encKey"], messageContent[32:]);
                        try:
                            processedData = whatsappReadBinary(decryptedMessage, True);
                            messageType = "binary";
                        except:
                            processedData = { "traceback": traceback.format_exc().splitlines() };
                            messageType = "error";
                        finally:
                            self.onMessageCallback["func"](processedData, self.onMessageCallback, { "message_type": messageType });
                else:
                    self.onMessageCallback["func"](jsonObj, self.onMessageCallback, { "message_type": "json" });
                    if isinstance(jsonObj, list) and len(jsonObj) > 0: # check if the result is an array
                        eprint(json.dumps(jsonObj));
                        if jsonObj[0] == "Conn":
                            # First "Conn" message: start the keep-alive timer,
                            # store tokens, and derive the session keys from
                            # the Curve25519 shared secret (HKDF-expanded).
                            Timer(20.0, self.keepAlive).start()
                            self.connInfo["clientToken"] = jsonObj[1]["clientToken"];
                            self.connInfo["serverToken"] = jsonObj[1]["serverToken"];
                            self.connInfo["browserToken"] = jsonObj[1]["browserToken"];
                            self.connInfo["me"] = jsonObj[1]["wid"];
                            self.connInfo["secret"] = base64.b64decode(jsonObj[1]["secret"]);
                            self.connInfo["sharedSecret"] = self.loginInfo["privateKey"].get_shared_key(curve25519.Public(self.connInfo["secret"][:32]), lambda a: a);
                            sse = self.connInfo["sharedSecretExpanded"] = HKDF(self.connInfo["sharedSecret"], 80);
                            # secret[32:64] is an HMAC over the remainder of the secret.
                            hmacValidation = HmacSha256(sse[32:64], self.connInfo["secret"][:32] + self.connInfo["secret"][64:]);
                            if hmacValidation != self.connInfo["secret"][32:64]:
                                raise ValueError("Hmac mismatch");
                            keysEncrypted = sse[64:] + self.connInfo["secret"][64:];
                            keysDecrypted = AESDecrypt(sse[:32], keysEncrypted);
                            self.loginInfo["key"]["encKey"] = keysDecrypted[:32];
                            self.loginInfo["key"]["macKey"] = keysDecrypted[32:64];
                            # eprint("private key : ", base64.b64encode(self.loginInfo["privateKey"].serialize()));
                            # eprint("secret : ", base64.b64encode(self.connInfo["secret"]));
                            # eprint("shared secret : ", base64.b64encode(self.connInfo["sharedSecret"]));
                            # eprint("shared secret expanded : ", base64.b64encode(self.connInfo["sharedSecretExpanded"]));
                            # eprint("hmac validation : ", base64.b64encode(hmacValidation));
                            # eprint("keys encrypted : ", base64.b64encode(keysEncrypted));
                            # eprint("keys decrypted : ", base64.b64encode(keysDecrypted));
                            eprint("set connection info: client, server and browser token; secret, shared secret, enc key, mac key");
                            eprint("logged in as " + jsonObj[1]["pushname"] + " (" + jsonObj[1]["wid"] + ")");
                        elif jsonObj[0] == "Stream":
                            pass;
                        elif jsonObj[0] == "Props":
                            pass;
        except:
            eprint(traceback.format_exc());

    def connect(self):
        """Open the websocket to WhatsApp Web and run it on a daemon thread."""
        # NOTE(review): header is a *set* literal containing one "Name: value"
        # string; websocket-client normally takes a list of header lines —
        # verify a set is accepted by the installed version.
        self.activeWs = websocket.WebSocketApp("wss://web.whatsapp.com/ws",
            on_message = lambda ws, message: self.onMessage(ws, message),
            on_error = lambda ws, error: self.onError(ws, error),
            on_open = lambda ws: self.onOpen(ws),
            on_close = lambda ws: self.onClose(ws),
            header = { "Origin: https://web.whatsapp.com" });
        self.websocketThread = Thread(target = self.activeWs.run_forever);
        self.websocketThread.daemon = True;
        self.websocketThread.start();

    def generateQRCode(self, callback=None):
        """Start a fresh login: request a server ref and render it as a QR code.

        The QR is delivered asynchronously via *callback* once the server
        answers the "_login" request (see onMessage).
        """
        self.loginInfo["clientId"] = base64.b64encode(os.urandom(16));
        messageTag = str(getTimestamp());
        self.messageQueue[messageTag] = { "desc": "_login", "callback": callback };
        message = messageTag + ',["admin","init",[0,3,2390],["Chromium at ' + datetime.datetime.now().isoformat() + '","Chromium"],"' + self.loginInfo["clientId"] + '",true]';
        self.activeWs.send(message);

    def restoreSession(self, callback=None):
        """Re-login using client/server tokens from a previous session."""
        messageTag = str(getTimestamp())
        # NOTE(review): the module is imported as `import datetime`, so
        # datetime.now() raises AttributeError — generateQRCode above uses
        # datetime.datetime.now().  Confirm and fix.
        message = messageTag + ',["admin","init",[0,3,2390],["Chromium at ' + datetime.now().isoformat() + '","Chromium"],"' + self.loginInfo["clientId"] + '",true]'
        self.activeWs.send(message)
        messageTag = str(getTimestamp())
        self.messageQueue[messageTag] = {"desc": "_restoresession"}
        message = messageTag + ',["admin","login","' + self.connInfo["clientToken"] + '", "' + self.connInfo[
            "serverToken"] + '", "' + self.loginInfo["clientId"] + '", "takeover"]'
        self.activeWs.send(message)

    def getLoginInfo(self, callback):
        """Hand the current loginInfo dict to *callback*."""
        callback["func"]({ "type": "login_info", "data": self.loginInfo }, callback);

    def getConnectionInfo(self, callback):
        """Hand the current connInfo dict to *callback*."""
        callback["func"]({ "type": "connection_info", "data": self.connInfo }, callback);

    def getChatHistory(self):
        """Query up to 1000 older messages for one chat (binary protocol).

        NOTE(review): the JID is hard-coded — parameterize before reuse.
        """
        messageId = "3EB0"+binascii.hexlify(Random.get_random_bytes(8)).upper()
        msgData = ["query",{ "type": "message", "kind": "before", "jid": "5511972578075@s.whatsapp.net", "count": "1000", "owner": "false", "epoch": "4" }, None];
        encryptedMessage = WhatsAppEncrypt(self.loginInfo["key"]["encKey"], self.loginInfo["key"]["macKey"],whatsappWriteBinary(msgData))
        payload = bytearray(messageId) + bytearray(",") + bytearray(to_bytes(WAMetrics.QUERY_MESSAGES, 1)) + bytearray([0x80]) + encryptedMessage
        self.activeWs.send(payload, websocket.ABNF.OPCODE_BINARY)

    def sendTextMessage(self, number, text):
        """Send a plain text message to *number* (phone number, no @-suffix)."""
        messageId = "3EB0"+binascii.hexlify(Random.get_random_bytes(8)).upper()
        messageTag = str(getTimestamp())
        messageParams = {"key": {"fromMe": True, "remoteJid": number + "@s.whatsapp.net", "id": messageId},"messageTimestamp": getTimestamp(), "status": 1, "message": {"conversation": text}}
        # NOTE(review): WAWebMessageInfo is not imported by name in this file;
        # presumably it comes from `from utilities import *` — confirm.
        msgData = ["action", {"type": "relay", "epoch": str(self.messageSentCount)},[["message", None, WAWebMessageInfo.encode(messageParams)]]]
        encryptedMessage = WhatsAppEncrypt(self.loginInfo["key"]["encKey"], self.loginInfo["key"]["macKey"],whatsappWriteBinary(msgData))
        payload = bytearray(messageId) + bytearray(",") + bytearray(to_bytes(WAMetrics.MESSAGE, 1)) + bytearray([0x80]) + encryptedMessage
        self.messageSentCount = self.messageSentCount + 1
        self.messageQueue[messageId] = {"desc": "__sending"}
        self.activeWs.send(payload, websocket.ABNF.OPCODE_BINARY)

    def status(self, callback=None):
        """Ask the server for connection status; reply handled as "_status"."""
        if self.activeWs is not None:
            messageTag = str(getTimestamp())
            self.messageQueue[messageTag] = {"desc": "_status", "callback": callback}
            message = messageTag + ',["admin", "test"]'
            self.activeWs.send(message)

    def disconnect(self):
        """Request a server-side disconnect."""
        self.activeWs.send('goodbye,,["admin","Conn","disconnect"]'); # WhatsApp server closes connection automatically when client wants to disconnect
        #time.sleep(0.5);
        #self.activeWs.close();
|
chrome_test_server_spawner.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A "Test Server Spawner" that handles killing/stopping per-test test servers.
It's used to accept requests from the device to spawn and kill instances of the
chrome test server on the host.
"""
# pylint: disable=W0702
import BaseHTTPServer
import json
import logging
import os
import select
import struct
import subprocess
import sys
import threading
import time
import urlparse
from devil.android import forwarder
from devil.android import ports
from pylib import constants
from pylib.constants import host_paths
# Path that are needed to import necessary modules when launching a testserver.
os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '') + (':%s:%s:%s:%s:%s'
    % (os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party'),
       os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party', 'tlslite'),
       os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party', 'pyftpdlib',
                    'src'),
       os.path.join(host_paths.DIR_SOURCE_ROOT, 'net', 'tools', 'testserver'),
       os.path.join(host_paths.DIR_SOURCE_ROOT, 'sync', 'tools', 'testserver')))

# Maps each supported server type to the extra command-line flag passed to
# testserver.py ('' means no flag needed).
SERVER_TYPES = {
    'http': '',
    'ftp': '-f',
    'sync': '',  # Sync uses its own script, and doesn't take a server type arg.
    'tcpecho': '--tcp-echo',
    'udpecho': '--udp-echo',
}

# The timeout (in seconds) of starting up the Python test server.
TEST_SERVER_STARTUP_TIMEOUT = 10
def _WaitUntil(predicate, max_attempts=5):
  """Blocks until the provided predicate (function) is true.

  Polls with exponential backoff: 25 ms doubling each attempt, capped at 1 s.

  Args:
    predicate: zero-argument callable polled once per attempt.
    max_attempts: upper bound on polls; xrange(1, n) yields n-1 attempts.

  Returns:
    Whether the provided predicate was satisfied once (before the timeout).
  """
  sleep_time_sec = 0.025
  for _ in xrange(1, max_attempts):  # Python 2 xrange.
    if predicate():
      return True
    time.sleep(sleep_time_sec)
    sleep_time_sec = min(1, sleep_time_sec * 2)  # Don't wait more than 1 sec.
  return False
def _CheckPortAvailable(port):
  """Returns True if |port| becomes available before the retry limit."""
  def _port_is_free():
    return ports.IsHostPortAvailable(port)
  return _WaitUntil(_port_is_free)
def _CheckPortNotAvailable(port):
  """Returns True if |port| becomes bound (not available) before the retry limit."""
  def _port_in_use():
    return not ports.IsHostPortAvailable(port)
  return _WaitUntil(_port_in_use)
def _CheckDevicePortStatus(device, port):
  """Returns whether the provided device port becomes used before the retry limit."""
  def _device_port_in_use():
    return ports.IsDevicePortUsed(device, port)
  return _WaitUntil(_device_port_in_use)
def _GetServerTypeCommandLine(server_type):
  """Returns the command-line flag for the given server type.

  Args:
    server_type: the server type to be used (e.g. 'http').

  Returns:
    A string containing the command-line argument.
  """
  try:
    flag = SERVER_TYPES[server_type]
  except KeyError:
    raise NotImplementedError('Unknown server type: %s' % server_type)
  # UDP echo would require a UDP forwarder on the device, which does not exist.
  if server_type == 'udpecho':
    raise Exception('Please do not run UDP echo tests because we do not have '
                    'a UDP forwarder tool.')
  return flag
class TestServerThread(threading.Thread):
  """A thread to run the test server in a separate process.

  Python 2 code: uses xrange, dict.has_key/iteritems and sys.maxint.
  """

  def __init__(self, ready_event, arguments, device, tool):
    """Initialize TestServerThread with the following argument.

    Args:
      ready_event: event which will be set when the test server is ready.
      arguments: dictionary of arguments to run the test server.
      device: An instance of DeviceUtils.
      tool: instance of runtime error detection tool.
    """
    threading.Thread.__init__(self)
    self.wait_event = threading.Event()  # set by run() once the server dies
    self.stop_flag = False
    self.ready_event = ready_event
    self.ready_event.clear()
    self.arguments = arguments
    self.device = device
    self.tool = tool
    self.test_server_process = None
    self.is_ready = False
    self.host_port = self.arguments['port']
    assert isinstance(self.host_port, int)
    # The forwarder device port now is dynamically allocated.
    self.forwarder_device_port = 0
    # Anonymous pipe in order to get port info from test server.
    self.pipe_in = None
    self.pipe_out = None
    self.process = None
    self.command_line = []

  def _WaitToStartAndGetPortFromTestServer(self):
    """Waits for the Python test server to start and gets the port it is using.

    The port information is passed by the Python test server with a pipe given
    by self.pipe_out. It is written as a result to |self.host_port|.

    Returns:
      Whether the port used by the test server was successfully fetched.
    """
    assert self.host_port == 0 and self.pipe_out and self.pipe_in
    (in_fds, _, _) = select.select([self.pipe_in, ], [], [],
                                   TEST_SERVER_STARTUP_TIMEOUT)
    if len(in_fds) == 0:
      logging.error('Failed to wait to the Python test server to be started.')
      return False
    # First read the data length as an unsigned 4-byte value.  This
    # is _not_ using network byte ordering since the Python test server packs
    # size as native byte order and all Chromium platforms so far are
    # configured to use little-endian.
    # TODO(jnd): Change the Python test server and local_test_server_*.cc to
    # use a unified byte order (either big-endian or little-endian).
    data_length = os.read(self.pipe_in, struct.calcsize('=L'))
    if data_length:
      (data_length,) = struct.unpack('=L', data_length)
      assert data_length
    if not data_length:
      logging.error('Failed to get length of server data.')
      return False
    port_json = os.read(self.pipe_in, data_length)
    if not port_json:
      logging.error('Failed to get server data.')
      return False
    logging.info('Got port json data: %s', port_json)
    port_json = json.loads(port_json)
    if port_json.has_key('port') and isinstance(port_json['port'], int):
      self.host_port = port_json['port']
      # Sanity check: the reported port must actually be bound now.
      return _CheckPortNotAvailable(self.host_port)
    logging.error('Failed to get port information from the server data.')
    return False

  def _GenerateCommandLineArguments(self):
    """Generates the command line to run the test server.

    Note that all options are processed by following the definitions in
    testserver.py.
    """
    if self.command_line:
      return
    args_copy = dict(self.arguments)
    # Translate the server type.
    type_cmd = _GetServerTypeCommandLine(args_copy.pop('server-type'))
    if type_cmd:
      self.command_line.append(type_cmd)
    # Use a pipe to get the port given by the instance of Python test server
    # if the test does not specify the port.
    assert self.host_port == args_copy['port']
    if self.host_port == 0:
      (self.pipe_in, self.pipe_out) = os.pipe()
      self.command_line.append('--startup-pipe=%d' % self.pipe_out)
    # Pass the remaining arguments as-is.
    for key, values in args_copy.iteritems():
      if not isinstance(values, list):
        values = [values]
      for value in values:
        if value is None:
          self.command_line.append('--%s' % key)
        else:
          self.command_line.append('--%s=%s' % (key, value))

  def _CloseUnnecessaryFDsForTestServerProcess(self):
    # This is required to avoid subtle deadlocks that could be caused by the
    # test server child process inheriting undesirable file descriptors such as
    # file lock file descriptors.  Keeps only the startup pipe's write end.
    for fd in xrange(0, 1024):
      if fd != self.pipe_out:
        try:
          os.close(fd)
        except:
          pass

  def run(self):
    """Launches the test server subprocess, maps a device port to it via the
    forwarder, signals readiness, then blocks until Stop() is called."""
    logging.info('Start running the thread!')
    self.wait_event.clear()
    self._GenerateCommandLineArguments()
    command = host_paths.DIR_SOURCE_ROOT
    if self.arguments['server-type'] == 'sync':
      command = [os.path.join(command, 'sync', 'tools', 'testserver',
                              'sync_testserver.py')] + self.command_line
    else:
      command = [os.path.join(command, 'net', 'tools', 'testserver',
                              'testserver.py')] + self.command_line
    logging.info('Running: %s', command)
    # Disable PYTHONUNBUFFERED because it has a bad interaction with the
    # testserver. Remove once this interaction is fixed.
    unbuf = os.environ.pop('PYTHONUNBUFFERED', None)
    # Pass DIR_SOURCE_ROOT as the child's working directory so that relative
    # paths in the arguments are resolved correctly.
    self.process = subprocess.Popen(
        command, preexec_fn=self._CloseUnnecessaryFDsForTestServerProcess,
        cwd=host_paths.DIR_SOURCE_ROOT)
    if unbuf:
      os.environ['PYTHONUNBUFFERED'] = unbuf
    if self.process:
      if self.pipe_out:
        self.is_ready = self._WaitToStartAndGetPortFromTestServer()
      else:
        self.is_ready = _CheckPortNotAvailable(self.host_port)
    if self.is_ready:
      forwarder.Forwarder.Map([(0, self.host_port)], self.device, self.tool)
      # Check whether the forwarder is ready on the device.
      self.is_ready = False
      device_port = forwarder.Forwarder.DevicePortForHostPort(self.host_port)
      if device_port and _CheckDevicePortStatus(self.device, device_port):
        self.is_ready = True
        self.forwarder_device_port = device_port
    # Wake up the request handler thread.
    self.ready_event.set()
    # Keep thread running until Stop() gets called.
    _WaitUntil(lambda: self.stop_flag, max_attempts=sys.maxint)
    if self.process.poll() is None:
      self.process.kill()
    forwarder.Forwarder.UnmapDevicePort(self.forwarder_device_port, self.device)
    self.process = None
    self.is_ready = False
    if self.pipe_out:
      os.close(self.pipe_in)
      os.close(self.pipe_out)
      self.pipe_in = None
      self.pipe_out = None
    logging.info('Test-server has died.')
    self.wait_event.set()

  def Stop(self):
    """Blocks until the loop has finished.

    Note that this must be called in another thread.
    """
    if not self.process:
      return
    self.stop_flag = True
    self.wait_event.wait()
class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """A handler used to process http GET/POST request.

  Python 2 code: BaseHTTPServer and headers.getheader() are Python-2-only.
  """

  def _SendResponse(self, response_code, response_reason, additional_headers,
                    contents):
    """Generates a response sent to the client from the provided parameters.

    Args:
      response_code: number of the response status.
      response_reason: string of reason description of the response.
      additional_headers: dict of additional headers. Each key is the name of
                          the header, each value is the content of the header.
      contents: string of the contents we want to send to client.
    """
    self.send_response(response_code, response_reason)
    self.send_header('Content-Type', 'text/html')
    # Specify the content-length as without it the http(s) response will not
    # be completed properly (and the browser keeps expecting data).
    self.send_header('Content-Length', len(contents))
    for header_name in additional_headers:
      self.send_header(header_name, additional_headers[header_name])
    self.end_headers()
    self.wfile.write(contents)
    self.wfile.flush()

  def _StartTestServer(self):
    """Starts the test server thread from a JSON request body and replies
    with the forwarded device port, or 500 on failure."""
    logging.info('Handling request to spawn a test server.')
    content_type = self.headers.getheader('content-type')
    if content_type != 'application/json':
      raise Exception('Bad content-type for start request.')
    content_length = self.headers.getheader('content-length')
    if not content_length:
      content_length = 0
    try:
      content_length = int(content_length)
    except:
      raise Exception('Bad content-length for start request.')
    logging.info(content_length)
    test_server_argument_json = self.rfile.read(content_length)
    logging.info(test_server_argument_json)
    # Only one test server may run at a time.
    assert not self.server.test_server_instance
    ready_event = threading.Event()
    self.server.test_server_instance = TestServerThread(
        ready_event,
        json.loads(test_server_argument_json),
        self.server.device,
        self.server.tool)
    self.server.test_server_instance.setDaemon(True)
    self.server.test_server_instance.start()
    ready_event.wait()
    if self.server.test_server_instance.is_ready:
      self._SendResponse(200, 'OK', {}, json.dumps(
          {'port': self.server.test_server_instance.forwarder_device_port,
           'message': 'started'}))
      logging.info('Test server is running on port: %d.',
                   self.server.test_server_instance.host_port)
    else:
      self.server.test_server_instance.Stop()
      self.server.test_server_instance = None
      self._SendResponse(500, 'Test Server Error.', {}, '')
      logging.info('Encounter problem during starting a test server.')

  def _KillTestServer(self):
    """Stops the test server instance."""
    # There should only ever be one test server at a time. This may do the
    # wrong thing if we try and start multiple test servers.
    if not self.server.test_server_instance:
      return
    port = self.server.test_server_instance.host_port
    logging.info('Handling request to kill a test server on port: %d.', port)
    self.server.test_server_instance.Stop()
    # Make sure the status of test server is correct before sending response.
    if _CheckPortAvailable(port):
      self._SendResponse(200, 'OK', {}, 'killed')
      logging.info('Test server on port %d is killed', port)
    else:
      self._SendResponse(500, 'Test Server Error.', {}, '')
      logging.info('Encounter problem during killing a test server.')
    self.server.test_server_instance = None

  def do_POST(self):
    """Routes POST requests: only /start is supported."""
    parsed_path = urlparse.urlparse(self.path)
    action = parsed_path.path
    logging.info('Action for POST method is: %s.', action)
    if action == '/start':
      self._StartTestServer()
    else:
      self._SendResponse(400, 'Unknown request.', {}, '')
      logging.info('Encounter unknown request: %s.', action)

  def do_GET(self):
    """Routes GET requests: /kill stops the server, /ping is a health check."""
    parsed_path = urlparse.urlparse(self.path)
    action = parsed_path.path
    params = urlparse.parse_qs(parsed_path.query, keep_blank_values=1)
    logging.info('Action for GET method is: %s.', action)
    for param in params:
      logging.info('%s=%s', param, params[param][0])
    if action == '/kill':
      self._KillTestServer()
    elif action == '/ping':
      # The ping handler is used to check whether the spawner server is ready
      # to serve the requests. We don't need to test the status of the test
      # server when handling ping request.
      self._SendResponse(200, 'OK', {}, 'ready')
      logging.info('Handled ping request and sent response.')
    else:
      self._SendResponse(400, 'Unknown request', {}, '')
      logging.info('Encounter unknown request: %s.', action)
class SpawningServer(object):
  """The class used to start/stop a http server."""

  def __init__(self, test_server_spawner_port, device, tool):
    logging.info('Creating new spawner on port: %d.', test_server_spawner_port)
    self.server = BaseHTTPServer.HTTPServer(('', test_server_spawner_port),
                                            SpawningServerRequestHandler)
    # Stash request-handler context on the HTTPServer instance so the
    # handler can reach it via self.server.
    self.server.device = device
    self.server.tool = tool
    self.server.test_server_instance = None
    self.server.build_type = constants.GetBuildType()

  def _Listen(self):
    logging.info('Starting test server spawner')
    self.server.serve_forever()

  def Start(self):
    """Starts the test server spawner."""
    # Serve on a daemon thread so the spawner never blocks process exit.
    listener_thread = threading.Thread(target=self._Listen)
    listener_thread.setDaemon(True)
    listener_thread.start()

  def Stop(self):
    """Stops the test server spawner.

    Also cleans the server state.
    """
    self.CleanupState()
    self.server.shutdown()

  def CleanupState(self):
    """Cleans up the spawning server state.

    This should be called if the test server spawner is reused,
    to avoid sharing the test server instance.
    """
    if self.server.test_server_instance:
      self.server.test_server_instance.Stop()
    self.server.test_server_instance = None
|
xmlrpc_server_example.py | from __future__ import absolute_import, division, print_function
# This is an example of how a 3rd-party program with Python embedded, such
# as Coot or PyMOL, can be interfaced with CCTBX-based software. Something
# much like this is used for the Phenix GUI extensions to those programs.
# I haven't tried this with any other software, but anything with a reasonably
# recent version of Python and support for either persistent Python threads
# or some sort of timer callback should be able to use it.
# Port the embedded XML-RPC server listens on when CCTBX_<ID>_PORT is unset
# (see external_xmlrpc_interface.setup_server).
DEFAULT_PORT = 40000
import os, sys, string, signal
import xmlrpclib
try :
    from SimpleXMLRPCServer import SimpleXMLRPCServer

    class external_xmlrpc_server(SimpleXMLRPCServer):
        """XML-RPC server that dispatches method calls to a cctbx interface object."""

        def __init__(self, addr, cctbx_interface):
            self.cctbx_interface = cctbx_interface
            SimpleXMLRPCServer.__init__(self, addr, logRequests=0)

        def _dispatch(self, method, params):
            """Look up *method* on the interface object and call it.

            Returns -1 when XML-RPC is disabled, the method is missing, or the
            call returned None (XML-RPC cannot marshal None).
            """
            if not self.cctbx_interface.enable_xmlrpc :
                return -1
            result = -1
            func = getattr(self.cctbx_interface, method, None)
            if not callable(func):
                print("%s is not a callable object!" % method)
            else :
                result = func(*params)
                if result is None :
                    result = -1
            return result

    class external_xmlrpc_interface(object):
        """Bridge between an embedding program (Coot, PyMOL, ...) and CCTBX.

        Listens for XML-RPC calls on CCTBX_<program_id>_PORT (default
        DEFAULT_PORT) and optionally connects back to a CCTBX GUI server on
        CCTBX_XMLRPC_PORT.  Unknown attribute lookups are delegated to the
        registered modules via __getattr__.
        """

        def __init__(self, program_id, auto_start=True, verbose=False):
            self.enable_xmlrpc = True
            self.xmlrpc_server = None
            self.cctbx_server = None
            self.verbose = verbose
            self.timeout = string.atoi(os.environ.get("CCTBX_XMLRPC_TIMEOUT", "250"))
            self.program_id = program_id
            self.supported_modules = []
            self.setup_modules()
            self.setup_server()
            if auto_start :
                self.start_server()

        def setup_modules(self):
            """Hook for subclasses to register modules before the server starts."""
            pass

        def add_module(self, module_object=None, module_path=None):
            """Register a module (object or importable path) whose attributes
            become callable over XML-RPC via __getattr__."""
            if module_object is not None :
                self.supported_modules.append(module_object)
            elif module_path is not None :
                module_object = __import__(module_path)
                self.supported_modules.append(module_object)

        def setup_server(self):
            """Create the local XML-RPC server and, if configured, a proxy back
            to the CCTBX GUI's own XML-RPC server."""
            port = os.environ.get("CCTBX_%s_PORT" % self.program_id, DEFAULT_PORT)
            if port is not None :
                self.port = int(port)
                self.xmlrpc_server = external_xmlrpc_server(("127.0.0.1", self.port),
                    self)
                if self.verbose :
                    print("Listening on port %s" % port)
            cctbx_port = os.environ.get("CCTBX_XMLRPC_PORT", None)
            if cctbx_port is not None :
                uri = "http://localhost:%s/RPC2" % cctbx_port
                self.cctbx_server = xmlrpclib.ServerProxy(uri=uri)
                if self.verbose :
                    print("Connecting to XML-RPC server on port %s" % cctbx_port)

        def start_server(self):
            """Serve requests forever on the current thread (blocking)."""
            if self.xmlrpc_server is not None :
                print("XML-RPC server started on port %d" % self.port)
                self.xmlrpc_server.serve_forever()

        def start_server_in_separate_thread(self):
            """Serve requests on a daemon thread, leaving the caller free."""
            import threading
            t = threading.Thread(target=self.start_server)
            t.setDaemon(1)
            t.start()

        def set_socket_timeout(self, timeout):
            """Set the listening socket timeout so handle_request() can poll."""
            if self.xmlrpc_server is not None :
                self.xmlrpc_server.socket.settimeout(timeout)

        def timeout_func(self, *args):
            """Timer callback for host programs: handle at most one request."""
            if self.xmlrpc_server is not None :
                self.xmlrpc_server.handle_request()
            return True

        def is_alive(self):
            return True

        # XXX: this should be replaced by the proper quit function for the program
        # being extended - e.g. cmd.quit() in PyMOL.
        def quit(self):
            print("quitting")
            sys.stdout.flush()
            os.kill(os.getpid(), signal.SIGKILL)

        def __getattr__(self, name):
            # Delegate unknown names to the registered modules so their
            # functions are reachable over XML-RPC.
            for module_object in self.supported_modules :
                if hasattr(module_object, name):
                    return getattr(module_object, name)
            return None
except KeyboardInterrupt :
    raise
except ImportError :
    # SimpleXMLRPCServer unavailable: define stubs that fail loudly when used.
    # BUG FIX: previously only external_cctbx_interface was stubbed here, so
    # external_xmlrpc_interface — the name actually used by test_server() and
    # coot_server() — raised NameError instead of the intended Exception.
    # external_cctbx_interface is kept for backward compatibility.
    def external_xmlrpc_server(*args, **kwds):
        raise Exception("SimpleXMLRPCServer not available on this platform.")
    def external_xmlrpc_interface(*args, **kwds):
        raise Exception("SimpleXMLRPCServer not available on this platform.")
    def external_cctbx_interface(*args, **kwds):
        raise Exception("SimpleXMLRPCServer not available on this platform.")
def test_server():
  """Start a demo XML-RPC interface exposing a single echo method.

  Blocks serving requests forever; intended for manual testing only.
  """
  class test_module(object):
    def echo_test(self):
      print("hello, world!")
      sys.stdout.flush()
      return True
  # Local name deliberately differs from the function name to avoid shadowing.
  server = external_xmlrpc_interface("TEST", auto_start=False, verbose=False)
  server.add_module(test_module())
  server.start_server()
def coot_server():
  """Attach an XML-RPC interface to a running Coot session.

  The server socket is polled from Coot's GTK main loop every 200 ms via
  gobject.timeout_add, with a very short socket timeout so each poll
  returns quickly when no request is pending.
  """
  iface = external_xmlrpc_interface("COOT", auto_start=False, verbose=True)
  iface.set_socket_timeout(0.01)
  # Imported here because these modules only exist inside a Coot session.
  import coot
  import gobject
  iface.add_module(coot)
  gobject.timeout_add(200, iface.timeout_func)
# Script entry point: by default attach the XML-RPC interface to Coot.
# Swap the calls below to run the standalone echo demo instead.
if __name__ == "__main__" :
  #test_server()
  coot_server()
#---end
|
tests.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import errno
import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
from datetime import datetime, timedelta
from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation, SuspiciousOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile,
)
from django.db.models.fields.files import FileDescriptor
from django.test import (
LiveServerTestCase, SimpleTestCase, TestCase, ignore_warnings,
override_settings,
)
from django.test.utils import requires_tz_support
from django.urls import NoReverseMatch, reverse_lazy
from django.utils import six, timezone
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.six.moves.urllib.request import urlopen
from .models import Storage, temp_storage, temp_storage_location
FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
class GetStorageClassTests(SimpleTestCase):
    """Tests for get_storage_class(), which resolves a dotted path string
    to a storage backend class and raises ImportError on bad paths."""
    def test_get_filesystem_storage(self):
        """
        get_storage_class returns the class for a storage backend name/path.
        """
        self.assertEqual(
            get_storage_class('django.core.files.storage.FileSystemStorage'),
            FileSystemStorage)
    def test_get_invalid_storage_module(self):
        """
        get_storage_class raises an error if the requested import doesn't exist.
        """
        # The "'?" in the regex absorbs quoting differences in the ImportError
        # message across Python versions.
        with six.assertRaisesRegex(self, ImportError, "No module named '?storage'?"):
            get_storage_class('storage.NonExistingStorage')
    def test_get_nonexisting_storage_class(self):
        """
        get_storage_class raises an error if the requested class doesn't exist.
        """
        with self.assertRaises(ImportError):
            get_storage_class('django.core.files.storage.NonExistingStorage')
    def test_get_nonexisting_storage_module(self):
        """
        get_storage_class raises an error if the requested module doesn't exist.
        """
        # Error message may or may not be the fully qualified path.
        with six.assertRaisesRegex(self, ImportError, "No module named '?(django.core.files.)?non_existing_storage'?"):
            get_storage_class('django.core.files.non_existing_storage.NonExistingStorage')
class FileSystemStorageTests(unittest.TestCase):
    """Deconstruction and lazy base_url behavior of FileSystemStorage."""

    def test_deconstruction(self):
        # A storage configured with only a location deconstructs to its
        # dotted path, no positional args, and that single kwarg.
        path, args, kwargs = temp_storage.deconstruct()
        self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {'location': temp_storage_location})
        # Explicit keyword arguments survive a deconstruct() round trip.
        explicit_kwargs = {
            'location': temp_storage_location,
            'base_url': 'http://myfiles.example.com/',
        }
        path, args, kwargs = FileSystemStorage(**explicit_kwargs).deconstruct()
        self.assertEqual(kwargs, explicit_kwargs)

    def test_lazy_base_url_init(self):
        """
        FileSystemStorage.__init__() shouldn't evaluate base_url.
        """
        storage = FileSystemStorage(base_url=reverse_lazy('app:url'))
        with self.assertRaises(NoReverseMatch):
            storage.url(storage.base_url)
class FileStorageTests(SimpleTestCase):
    """Core behavior of FileSystemStorage (and subclasses, via
    storage_class): file access, timestamp getters under USE_TZ on/off,
    URL generation, directory listing, race-condition robustness, and
    settings-dependent defaults."""
    storage_class = FileSystemStorage
    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.storage = self.storage_class(location=self.temp_dir, base_url='/test_media_url/')
        # Set up a second temporary directory which is ensured to have a mixed
        # case name.
        self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
    def tearDown(self):
        shutil.rmtree(self.temp_dir)
        shutil.rmtree(self.temp_dir2)
    def test_empty_location(self):
        """
        Makes sure an exception is raised if the location is empty
        """
        storage = self.storage_class(location='')
        self.assertEqual(storage.base_location, '')
        self.assertEqual(storage.location, upath(os.getcwd()))
    def test_file_access_options(self):
        """
        Standard file access options are available, and work as expected.
        """
        self.assertFalse(self.storage.exists('storage_test'))
        f = self.storage.open('storage_test', 'w')
        f.write('storage contents')
        f.close()
        self.assertTrue(self.storage.exists('storage_test'))
        f = self.storage.open('storage_test', 'r')
        self.assertEqual(f.read(), 'storage contents')
        f.close()
        self.storage.delete('storage_test')
        self.assertFalse(self.storage.exists('storage_test'))
    def _test_file_time_getter(self, getter):
        # Check for correct behavior under both USE_TZ=True and USE_TZ=False.
        # The tests are similar since they both set up a situation where the
        # system time zone, Django's TIME_ZONE, and UTC are distinct.
        self._test_file_time_getter_tz_handling_on(getter)
        self._test_file_time_getter_tz_handling_off(getter)
    @override_settings(USE_TZ=True, TIME_ZONE='Africa/Algiers')
    def _test_file_time_getter_tz_handling_on(self, getter):
        # Django's TZ (and hence the system TZ) is set to Africa/Algiers which
        # is UTC+1 and has no DST change. We can set the Django TZ to something
        # else so that UTC, Django's TIME_ZONE, and the system timezone are all
        # different.
        now_in_algiers = timezone.make_aware(datetime.now())
        with timezone.override(timezone.get_fixed_timezone(-300)):
            # At this point the system TZ is +1 and the Django TZ
            # is -5. The following will be aware in UTC.
            now = timezone.now()
            self.assertFalse(self.storage.exists('test.file.tz.on'))
            f = ContentFile('custom contents')
            f_name = self.storage.save('test.file.tz.on', f)
            self.addCleanup(self.storage.delete, f_name)
            dt = getter(f_name)
            # dt should be aware, in UTC
            self.assertTrue(timezone.is_aware(dt))
            self.assertEqual(now.tzname(), dt.tzname())
            # Check that the three timezones are indeed distinct.
            naive_now = datetime.now()
            algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
            django_offset = timezone.get_current_timezone().utcoffset(naive_now)
            utc_offset = timezone.utc.utcoffset(naive_now)
            self.assertGreater(algiers_offset, utc_offset)
            self.assertLess(django_offset, utc_offset)
            # dt and now should be the same effective time.
            self.assertLess(abs(dt - now), timedelta(seconds=2))
    @override_settings(USE_TZ=False, TIME_ZONE='Africa/Algiers')
    def _test_file_time_getter_tz_handling_off(self, getter):
        # Django's TZ (and hence the system TZ) is set to Africa/Algiers which
        # is UTC+1 and has no DST change. We can set the Django TZ to something
        # else so that UTC, Django's TIME_ZONE, and the system timezone are all
        # different.
        now_in_algiers = timezone.make_aware(datetime.now())
        with timezone.override(timezone.get_fixed_timezone(-300)):
            # At this point the system TZ is +1 and the Django TZ
            # is -5.
            self.assertFalse(self.storage.exists('test.file.tz.off'))
            f = ContentFile('custom contents')
            f_name = self.storage.save('test.file.tz.off', f)
            self.addCleanup(self.storage.delete, f_name)
            dt = getter(f_name)
            # dt should be naive, in system (+1) TZ
            self.assertTrue(timezone.is_naive(dt))
            # Check that the three timezones are indeed distinct.
            naive_now = datetime.now()
            algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
            django_offset = timezone.get_current_timezone().utcoffset(naive_now)
            utc_offset = timezone.utc.utcoffset(naive_now)
            self.assertGreater(algiers_offset, utc_offset)
            self.assertLess(django_offset, utc_offset)
            # dt and naive_now should be the same effective time.
            self.assertLess(abs(dt - naive_now), timedelta(seconds=2))
            # If we convert dt to an aware object using the Algiers
            # timezone then it should be the same effective time to
            # now_in_algiers.
            _dt = timezone.make_aware(dt, now_in_algiers.tzinfo)
            self.assertLess(abs(_dt - now_in_algiers), timedelta(seconds=2))
    def test_file_get_accessed_time(self):
        """
        File storage returns a Datetime object for the last accessed time of
        a file.
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.addCleanup(self.storage.delete, f_name)
        atime = self.storage.get_accessed_time(f_name)
        self.assertEqual(atime, datetime.fromtimestamp(os.path.getatime(self.storage.path(f_name))))
        self.assertLess(timezone.now() - self.storage.get_accessed_time(f_name), timedelta(seconds=2))
    @requires_tz_support
    def test_file_get_accessed_time_timezone(self):
        self._test_file_time_getter(self.storage.get_accessed_time)
    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_file_accessed_time(self):
        """
        File storage returns a datetime for the last accessed time of a file.
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.addCleanup(self.storage.delete, f_name)
        atime = self.storage.accessed_time(f_name)
        self.assertEqual(atime, datetime.fromtimestamp(os.path.getatime(self.storage.path(f_name))))
        self.assertLess(datetime.now() - self.storage.accessed_time(f_name), timedelta(seconds=2))
    def test_file_get_created_time(self):
        """
        File storage returns a datetime for the creation time of a file.
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.addCleanup(self.storage.delete, f_name)
        ctime = self.storage.get_created_time(f_name)
        self.assertEqual(ctime, datetime.fromtimestamp(os.path.getctime(self.storage.path(f_name))))
        self.assertLess(timezone.now() - self.storage.get_created_time(f_name), timedelta(seconds=2))
    @requires_tz_support
    def test_file_get_created_time_timezone(self):
        self._test_file_time_getter(self.storage.get_created_time)
    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_file_created_time(self):
        """
        File storage returns a datetime for the creation time of a file.
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        ctime = self.storage.created_time(f_name)
        self.addCleanup(self.storage.delete, f_name)
        self.assertEqual(ctime, datetime.fromtimestamp(os.path.getctime(self.storage.path(f_name))))
        self.assertLess(datetime.now() - self.storage.created_time(f_name), timedelta(seconds=2))
    def test_file_get_modified_time(self):
        """
        File storage returns a datetime for the last modified time of a file.
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.addCleanup(self.storage.delete, f_name)
        mtime = self.storage.get_modified_time(f_name)
        self.assertEqual(mtime, datetime.fromtimestamp(os.path.getmtime(self.storage.path(f_name))))
        self.assertLess(timezone.now() - self.storage.get_modified_time(f_name), timedelta(seconds=2))
    @requires_tz_support
    def test_file_get_modified_time_timezone(self):
        self._test_file_time_getter(self.storage.get_modified_time)
    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_file_modified_time(self):
        """
        File storage returns a datetime for the last modified time of a file.
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.addCleanup(self.storage.delete, f_name)
        mtime = self.storage.modified_time(f_name)
        self.assertEqual(mtime, datetime.fromtimestamp(os.path.getmtime(self.storage.path(f_name))))
        self.assertLess(datetime.now() - self.storage.modified_time(f_name), timedelta(seconds=2))
    def test_file_save_without_name(self):
        """
        File storage extracts the filename from the content object if no
        name is given explicitly.
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f.name = 'test.file'
        storage_f_name = self.storage.save(None, f)
        self.assertEqual(storage_f_name, f.name)
        self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
        self.storage.delete(storage_f_name)
    def test_file_save_with_path(self):
        """
        Saving a pathname should create intermediate directories as necessary.
        """
        self.assertFalse(self.storage.exists('path/to'))
        self.storage.save('path/to/test.file', ContentFile('file saved with path'))
        self.assertTrue(self.storage.exists('path/to'))
        with self.storage.open('path/to/test.file') as f:
            self.assertEqual(f.read(), b'file saved with path')
        self.assertTrue(os.path.exists(
            os.path.join(self.temp_dir, 'path', 'to', 'test.file')))
        self.storage.delete('path/to/test.file')
    def test_save_doesnt_close(self):
        with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as file:
            file.write(b'1')
            file.seek(0)
            self.assertFalse(file.closed)
            self.storage.save('path/to/test.file', file)
            self.assertFalse(file.closed)
            self.assertFalse(file.file.closed)
        file = InMemoryUploadedFile(six.StringIO('1'), '', 'test', 'text/plain', 1, 'utf8')
        with file:
            self.assertFalse(file.closed)
            self.storage.save('path/to/test.file', file)
            self.assertFalse(file.closed)
            self.assertFalse(file.file.closed)
    def test_file_path(self):
        """
        File storage returns the full path of a file
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.assertEqual(self.storage.path(f_name), os.path.join(self.temp_dir, f_name))
        self.storage.delete(f_name)
    def test_file_url(self):
        """
        File storage returns a url to access a given file from the Web.
        """
        self.assertEqual(self.storage.url('test.file'), self.storage.base_url + 'test.file')
        # should encode special chars except ~!*()'
        # like encodeURIComponent() JavaScript function do
        self.assertEqual(
            self.storage.url(r"~!*()'@#$%^&*abc`+ =.file"),
            "/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file"
        )
        self.assertEqual(self.storage.url("ab\0c"), "/test_media_url/ab%00c")
        # should translate os path separator(s) to the url path separator
        self.assertEqual(self.storage.url("""a/b\\c.file"""), "/test_media_url/a/b/c.file")
        # #25905: remove leading slashes from file names to prevent unsafe url output
        self.assertEqual(self.storage.url("/evil.com"), "/test_media_url/evil.com")
        self.assertEqual(self.storage.url(r"\evil.com"), "/test_media_url/evil.com")
        self.assertEqual(self.storage.url("///evil.com"), "/test_media_url/evil.com")
        self.assertEqual(self.storage.url(r"\\\evil.com"), "/test_media_url/evil.com")
        self.assertEqual(self.storage.url(None), "/test_media_url/")
    def test_base_url(self):
        """
        File storage returns a url even when its base_url is unset or modified.
        """
        self.storage.base_url = None
        with self.assertRaises(ValueError):
            self.storage.url('test.file')
        # #22717: missing ending slash in base_url should be auto-corrected
        storage = self.storage_class(location=self.temp_dir, base_url='/no_ending_slash')
        self.assertEqual(
            storage.url('test.file'),
            '%s%s' % (storage.base_url, 'test.file')
        )
    def test_listdir(self):
        """
        File storage returns a tuple containing directories and files.
        """
        self.assertFalse(self.storage.exists('storage_test_1'))
        self.assertFalse(self.storage.exists('storage_test_2'))
        self.assertFalse(self.storage.exists('storage_dir_1'))
        self.storage.save('storage_test_1', ContentFile('custom content'))
        self.storage.save('storage_test_2', ContentFile('custom content'))
        os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
        dirs, files = self.storage.listdir('')
        self.assertEqual(set(dirs), {'storage_dir_1'})
        self.assertEqual(set(files), {'storage_test_1', 'storage_test_2'})
        self.storage.delete('storage_test_1')
        self.storage.delete('storage_test_2')
        os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))
    def test_file_storage_prevents_directory_traversal(self):
        """
        File storage prevents directory traversal (files can only be accessed if
        they're below the storage location).
        """
        with self.assertRaises(SuspiciousOperation):
            self.storage.exists('..')
        with self.assertRaises(SuspiciousOperation):
            self.storage.exists('/etc/passwd')
    def test_file_storage_preserves_filename_case(self):
        """The storage backend should preserve case of filenames."""
        # Create a storage backend associated with the mixed case name
        # directory.
        other_temp_storage = self.storage_class(location=self.temp_dir2)
        # Ask that storage backend to store a file with a mixed case filename.
        mixed_case = 'CaSe_SeNsItIvE'
        file = other_temp_storage.open(mixed_case, 'w')
        file.write('storage contents')
        file.close()
        self.assertEqual(os.path.join(self.temp_dir2, mixed_case), other_temp_storage.path(mixed_case))
        other_temp_storage.delete(mixed_case)
    def test_makedirs_race_handling(self):
        """
        File storage should be robust against directory creation race conditions.
        """
        real_makedirs = os.makedirs
        # Monkey-patch os.makedirs, to simulate a normal call, a raced call,
        # and an error.
        def fake_makedirs(path):
            if path == os.path.join(self.temp_dir, 'normal'):
                real_makedirs(path)
            elif path == os.path.join(self.temp_dir, 'raced'):
                real_makedirs(path)
                raise OSError(errno.EEXIST, 'simulated EEXIST')
            elif path == os.path.join(self.temp_dir, 'error'):
                raise OSError(errno.EACCES, 'simulated EACCES')
            else:
                self.fail('unexpected argument %r' % path)
        try:
            os.makedirs = fake_makedirs
            self.storage.save('normal/test.file', ContentFile('saved normally'))
            with self.storage.open('normal/test.file') as f:
                self.assertEqual(f.read(), b'saved normally')
            self.storage.save('raced/test.file', ContentFile('saved with race'))
            with self.storage.open('raced/test.file') as f:
                self.assertEqual(f.read(), b'saved with race')
            # Check that OSErrors aside from EEXIST are still raised.
            with self.assertRaises(OSError):
                self.storage.save('error/test.file', ContentFile('not saved'))
        finally:
            os.makedirs = real_makedirs
    def test_remove_race_handling(self):
        """
        File storage should be robust against file removal race conditions.
        """
        real_remove = os.remove
        # Monkey-patch os.remove, to simulate a normal call, a raced call,
        # and an error.
        def fake_remove(path):
            if path == os.path.join(self.temp_dir, 'normal.file'):
                real_remove(path)
            elif path == os.path.join(self.temp_dir, 'raced.file'):
                real_remove(path)
                raise OSError(errno.ENOENT, 'simulated ENOENT')
            elif path == os.path.join(self.temp_dir, 'error.file'):
                raise OSError(errno.EACCES, 'simulated EACCES')
            else:
                self.fail('unexpected argument %r' % path)
        try:
            os.remove = fake_remove
            self.storage.save('normal.file', ContentFile('delete normally'))
            self.storage.delete('normal.file')
            self.assertFalse(self.storage.exists('normal.file'))
            self.storage.save('raced.file', ContentFile('delete with race'))
            self.storage.delete('raced.file')
            # Bug fix: this previously re-checked 'normal.file' (a copy-paste
            # slip), so the raced deletion was never actually verified.
            self.assertFalse(self.storage.exists('raced.file'))
            # Check that OSErrors aside from ENOENT are still raised.
            self.storage.save('error.file', ContentFile('delete with error'))
            with self.assertRaises(OSError):
                self.storage.delete('error.file')
        finally:
            os.remove = real_remove
    def test_file_chunks_error(self):
        """
        Test behavior when file.chunks() is raising an error
        """
        f1 = ContentFile('chunks fails')
        def failing_chunks():
            raise IOError
        f1.chunks = failing_chunks
        with self.assertRaises(IOError):
            self.storage.save('error.file', f1)
    def test_delete_no_name(self):
        """
        Calling delete with an empty name should not try to remove the base
        storage directory, but fail loudly (#20660).
        """
        with self.assertRaises(AssertionError):
            self.storage.delete('')
    @override_settings(
        MEDIA_ROOT='media_root',
        MEDIA_URL='media_url/',
        FILE_UPLOAD_PERMISSIONS=0o777,
        FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777,
    )
    def test_setting_changed(self):
        """
        Properties using settings values as defaults should be updated on
        referenced settings change while specified values should be unchanged.
        """
        storage = self.storage_class(
            location='explicit_location',
            base_url='explicit_base_url/',
            file_permissions_mode=0o666,
            directory_permissions_mode=0o666,
        )
        defaults_storage = self.storage_class()
        settings = {
            'MEDIA_ROOT': 'overriden_media_root',
            'MEDIA_URL': 'overriden_media_url/',
            'FILE_UPLOAD_PERMISSIONS': 0o333,
            'FILE_UPLOAD_DIRECTORY_PERMISSIONS': 0o333,
        }
        with self.settings(**settings):
            self.assertEqual(storage.base_location, 'explicit_location')
            self.assertIn('explicit_location', storage.location)
            self.assertEqual(storage.base_url, 'explicit_base_url/')
            self.assertEqual(storage.file_permissions_mode, 0o666)
            self.assertEqual(storage.directory_permissions_mode, 0o666)
            self.assertEqual(defaults_storage.base_location, settings['MEDIA_ROOT'])
            self.assertIn(settings['MEDIA_ROOT'], defaults_storage.location)
            self.assertEqual(defaults_storage.base_url, settings['MEDIA_URL'])
            self.assertEqual(defaults_storage.file_permissions_mode, settings['FILE_UPLOAD_PERMISSIONS'])
            self.assertEqual(
                defaults_storage.directory_permissions_mode, settings['FILE_UPLOAD_DIRECTORY_PERMISSIONS']
            )
class CustomStorage(FileSystemStorage):
    def get_available_name(self, name, max_length=None):
        """
        Append numbers to duplicate files rather than underscores, like Trac.
        """
        pieces = name.split('.')
        stem = pieces[0]
        suffixes = pieces[1:]
        candidate = name
        counter = 2
        # Keep bumping the counter until the candidate name is free.
        while self.exists(candidate):
            candidate = '.'.join([stem, str(counter)] + suffixes)
            counter += 1
        return candidate
class CustomStorageTests(FileStorageTests):
    """Runs the full FileStorageTests suite against CustomStorage, plus a
    check of its numeric duplicate-name scheme."""
    storage_class = CustomStorage

    def test_custom_get_available_name(self):
        name1 = self.storage.save('custom_storage', ContentFile('custom contents'))
        self.assertEqual(name1, 'custom_storage')
        # The second save of the same name gets a ".2" suffix, not "_XXXXXXX".
        name2 = self.storage.save('custom_storage', ContentFile('more contents'))
        self.assertEqual(name2, 'custom_storage.2')
        for saved in (name1, name2):
            self.storage.delete(saved)
class CustomStorageLegacyDatetimeHandling(FileSystemStorage):
    # Use the legacy accessed_time() et al from FileSystemStorage and the
    # shim get_accessed_time() et al from the Storage baseclass. Both of those
    # raise warnings, so the testcase class ignores them all.
    # NOTE: super(FileSystemStorage, self) deliberately skips
    # FileSystemStorage's own get_*_time() implementations in the MRO so the
    # deprecated Storage shims are the ones exercised.
    def get_accessed_time(self, name):
        return super(FileSystemStorage, self).get_accessed_time(name)
    def get_created_time(self, name):
        return super(FileSystemStorage, self).get_created_time(name)
    def get_modified_time(self, name):
        return super(FileSystemStorage, self).get_modified_time(name)
# Reuses the full FileStorageTests suite against the legacy-shim storage;
# the decorator silences the deprecation warnings those shims emit.
@ignore_warnings(category=RemovedInDjango20Warning)
class CustomStorageLegacyDatetimeHandlingTests(FileStorageTests):
    storage_class = CustomStorageLegacyDatetimeHandling
class DiscardingFalseContentStorage(FileSystemStorage):
    def _save(self, name, content):
        # Guard clause: falsy content is discarded entirely; only truthy
        # file objects are written to disk.
        if not content:
            return ''
        return super(DiscardingFalseContentStorage, self)._save(name, content)
class DiscardingFalseContentStorageTests(FileStorageTests):
    # Runs the full FileStorageTests suite against the discarding storage,
    # plus the #26495 regression test below.
    storage_class = DiscardingFalseContentStorage
    def test_custom_storage_discarding_empty_content(self):
        """
        When Storage.save() wraps a file-like object in File, it should include
        the name argument so that bool(file) evaluates to True (#26495).
        """
        output = six.StringIO('content')
        self.storage.save('tests/stringio', output)
        self.assertTrue(self.storage.exists('tests/stringio'))
        with self.storage.open('tests/stringio') as f:
            self.assertEqual(f.read(), b'content')
class FileFieldStorageTests(TestCase):
    """FileField behavior against the module-level temp_storage: saving,
    reading, duplicate-name suffixing, truncation, defaults, upload_to
    variants, and pickling."""
    def tearDown(self):
        shutil.rmtree(temp_storage_location)
    def _storage_max_filename_length(self, storage):
        """
        Query filesystem for maximum filename length (e.g. AUFS has 242).
        """
        dir_to_test = storage.location
        while not os.path.exists(dir_to_test):
            dir_to_test = os.path.dirname(dir_to_test)
        try:
            return os.pathconf(dir_to_test, 'PC_NAME_MAX')
        except Exception:
            return 255  # Should be safe on most backends
    def test_files(self):
        self.assertIsInstance(Storage.normal, FileDescriptor)
        # An object without a file has limited functionality.
        obj1 = Storage()
        self.assertEqual(obj1.normal.name, "")
        with self.assertRaises(ValueError):
            obj1.normal.size
        # Saving a file enables full functionality.
        obj1.normal.save("django_test.txt", ContentFile("content"))
        self.assertEqual(obj1.normal.name, "tests/django_test.txt")
        self.assertEqual(obj1.normal.size, 7)
        self.assertEqual(obj1.normal.read(), b"content")
        obj1.normal.close()
        # File objects can be assigned to FileField attributes, but shouldn't
        # get committed until the model it's attached to is saved.
        obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(dirs, [])
        self.assertNotIn("assignment.txt", files)
        obj1.save()
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])
        # Save another file with the same name.
        obj2 = Storage()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        obj2_name = obj2.normal.name
        six.assertRegex(self, obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
        self.assertEqual(obj2.normal.size, 12)
        obj2.normal.close()
        # Deleting an object does not delete the file it uses.
        obj2.delete()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        self.assertNotEqual(obj2_name, obj2.normal.name)
        six.assertRegex(self, obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
        obj2.normal.close()
    def test_filefield_read(self):
        # Files can be read in a little at a time, if necessary.
        obj = Storage.objects.create(
            normal=SimpleUploadedFile("assignment.txt", b"content"))
        obj.normal.open()
        self.assertEqual(obj.normal.read(3), b"con")
        self.assertEqual(obj.normal.read(), b"tent")
        self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
        obj.normal.close()
    def test_filefield_write(self):
        # Files can be written to.
        obj = Storage.objects.create(normal=SimpleUploadedFile('rewritten.txt', b'content'))
        with obj.normal as normal:
            normal.open('wb')
            normal.write(b'updated')
        obj.refresh_from_db()
        self.assertEqual(obj.normal.read(), b'updated')
        obj.normal.close()
    def test_filefield_reopen(self):
        obj = Storage.objects.create(normal=SimpleUploadedFile('reopen.txt', b'content'))
        with obj.normal as normal:
            normal.open()
        obj.normal.open()
        obj.normal.file.seek(0)
        obj.normal.close()
    def test_duplicate_filename(self):
        # Multiple files with the same name get _(7 random chars) appended to them.
        objs = [Storage() for _ in range(2)]
        for o in objs:
            o.normal.save("multiple_files.txt", ContentFile("Same Content"))
        try:
            names = [o.normal.name for o in objs]
            self.assertEqual(names[0], "tests/multiple_files.txt")
            six.assertRegex(self, names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX)
        finally:
            for o in objs:
                o.delete()
    def test_file_truncation(self):
        # Given the max_length is limited, when multiple files get uploaded
        # under the same name, then the filename get truncated in order to fit
        # in _(7 random chars). When most of the max_length is taken by
        # dirname + extension and there are not enough characters in the
        # filename to truncate, an exception should be raised.
        objs = [Storage() for _ in range(2)]
        filename = 'filename.ext'
        for o in objs:
            o.limited_length.save(filename, ContentFile('Same Content'))
        try:
            # Testing truncation.
            names = [o.limited_length.name for o in objs]
            self.assertEqual(names[0], 'tests/%s' % filename)
            six.assertRegex(self, names[1], 'tests/fi_%s.ext' % FILE_SUFFIX_REGEX)
            # Testing exception is raised when filename is too short to truncate.
            filename = 'short.longext'
            objs[0].limited_length.save(filename, ContentFile('Same Content'))
            with self.assertRaisesMessage(SuspiciousFileOperation, 'Storage can not find an available filename'):
                # Direct call instead of the former save(*(args)) unpacking,
                # which obscured an ordinary two-argument call.
                objs[1].limited_length.save(filename, ContentFile('Same Content'))
        finally:
            for o in objs:
                o.delete()
    @unittest.skipIf(
        sys.platform.startswith('win'),
        "Windows supports at most 260 characters in a path.",
    )
    def test_extended_length_storage(self):
        # Testing FileField with max_length > 255. Most systems have filename
        # length limitation of 255. Path takes extra chars.
        filename = (self._storage_max_filename_length(temp_storage) - 4) * 'a'  # 4 chars for extension.
        obj = Storage()
        obj.extended_length.save('%s.txt' % filename, ContentFile('Same Content'))
        self.assertEqual(obj.extended_length.name, 'tests/%s.txt' % filename)
        self.assertEqual(obj.extended_length.read(), b'Same Content')
        obj.extended_length.close()
    def test_filefield_default(self):
        # Default values allow an object to access a single file.
        temp_storage.save('tests/default.txt', ContentFile('default content'))
        obj = Storage.objects.create()
        self.assertEqual(obj.default.name, "tests/default.txt")
        self.assertEqual(obj.default.read(), b"default content")
        obj.default.close()
        # But it shouldn't be deleted, even if there are no more objects using
        # it.
        obj.delete()
        obj = Storage()
        self.assertEqual(obj.default.read(), b"default content")
        obj.default.close()
    def test_empty_upload_to(self):
        # upload_to can be empty, meaning it does not use subdirectory.
        obj = Storage()
        obj.empty.save('django_test.txt', ContentFile('more content'))
        self.assertEqual(obj.empty.name, "django_test.txt")
        self.assertEqual(obj.empty.read(), b"more content")
        obj.empty.close()
    def test_random_upload_to(self):
        # Verify the fix for #5655, making sure the directory is only
        # determined once.
        obj = Storage()
        obj.random.save("random_file", ContentFile("random content"))
        self.assertTrue(obj.random.name.endswith("/random_file"))
        obj.random.close()
    def test_custom_valid_name_callable_upload_to(self):
        """
        Storage.get_valid_name() should be called when upload_to is a callable.
        """
        obj = Storage()
        obj.custom_valid_name.save("random_file", ContentFile("random content"))
        # CustomValidNameStorage.get_valid_name() appends '_valid' to the name
        self.assertTrue(obj.custom_valid_name.name.endswith("/random_file_valid"))
        obj.custom_valid_name.close()
    def test_filefield_pickling(self):
        # Push an object into the cache to make sure it pickles properly
        obj = Storage()
        obj.normal.save("django_test.txt", ContentFile("more content"))
        obj.normal.close()
        cache.set("obj", obj)
        self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")
    def test_file_object(self):
        # Create sample file
        temp_storage.save('tests/example.txt', ContentFile('some content'))
        # Load it as python file object
        with open(temp_storage.path('tests/example.txt')) as file_obj:
            # Save it using storage and read its content
            temp_storage.save('tests/file_obj', file_obj)
        self.assertTrue(temp_storage.exists('tests/file_obj'))
        with temp_storage.open('tests/file_obj') as f:
            self.assertEqual(f.read(), b'some content')
    def test_stringio(self):
        # Test passing StringIO instance as content argument to save
        output = six.StringIO()
        output.write('content')
        output.seek(0)
        # Save it and read written file
        temp_storage.save('tests/stringio', output)
        self.assertTrue(temp_storage.exists('tests/stringio'))
        with temp_storage.open('tests/stringio') as f:
            self.assertEqual(f.read(), b'content')
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
    """ContentFile whose chunks() sleeps first, widening the window in which
    two concurrent saves of the same name can collide."""
    def chunks(self):
        time.sleep(1)
        # Bug fix: super() must be passed the defining class (SlowFile), not
        # its base. super(ContentFile, self) skipped ContentFile in the MRO
        # and only worked by accident because ContentFile doesn't override
        # chunks().
        return super(SlowFile, self).chunks()
class FileSaveRaceConditionTest(unittest.TestCase):
    # Saves the same filename from two threads at once (#4948); SlowFile's
    # sleeping chunks() widens the window so the collision is hit reliably.
    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)
        self.thread = threading.Thread(target=self.save_file, args=['conflict'])
    def tearDown(self):
        shutil.rmtree(self.storage_dir)
    def save_file(self, name):
        # save() may return a different name than requested when it collides.
        name = self.storage.save(name, SlowFile(b"Data"))
    def test_race_condition(self):
        self.thread.start()
        self.save_file('conflict')
        self.thread.join()
        # One save keeps the requested name; the loser gets a random suffix.
        files = sorted(os.listdir(self.storage_dir))
        self.assertEqual(files[0], 'conflict')
        six.assertRegex(self, files[1], 'conflict_%s' % FILE_SUFFIX_REGEX)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
    """File and directory permission handling in FileSystemStorage."""
    def setUp(self):
        # Force a known umask so the "default permissions" tests are
        # deterministic regardless of the environment running them.
        self.umask = 0o027
        self.old_umask = os.umask(self.umask)
        self.storage_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.storage_dir)
        os.umask(self.old_umask)

    @override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
    def test_file_upload_permissions(self):
        # An explicit FILE_UPLOAD_PERMISSIONS setting is applied verbatim.
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_file", ContentFile("data"))
        actual_mode = os.stat(self.storage.path(name))[0] & 0o777
        self.assertEqual(actual_mode, 0o654)

    @override_settings(FILE_UPLOAD_PERMISSIONS=None)
    def test_file_upload_default_permissions(self):
        # With no explicit setting, the process umask governs the mode.
        self.storage = FileSystemStorage(self.storage_dir)
        fname = self.storage.save("some_file", ContentFile("data"))
        mode = os.stat(self.storage.path(fname))[0] & 0o777
        self.assertEqual(mode, 0o666 & ~self.umask)

    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
    def test_file_upload_directory_permissions(self):
        # Intermediate directories honor FILE_UPLOAD_DIRECTORY_PERMISSIONS.
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_directory/the_file", ContentFile("data"))
        dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
        self.assertEqual(dir_mode, 0o765)

    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
    def test_file_upload_directory_default_permissions(self):
        # Unset directory permissions fall back to 0o777 minus the umask.
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_directory/the_file", ContentFile("data"))
        dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
        self.assertEqual(dir_mode, 0o777 & ~self.umask)
class FileStoragePathParsing(unittest.TestCase):
    """On name collision the *file* name must be mangled, never the directory."""
    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def test_directory_with_dot(self):
        """Regression test for #9610.
        If the directory name contains a dot and the file name doesn't, make
        sure we still mangle the file name instead of the directory name.
        """
        self.storage.save('dotted.path/test', ContentFile("1"))
        self.storage.save('dotted.path/test', ContentFile("2"))
        files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
        # The directory must not have been renamed to dodge the collision.
        self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
        self.assertEqual(files[0], 'test')
        six.assertRegex(self, files[1], 'test_%s' % FILE_SUFFIX_REGEX)

    def test_first_character_dot(self):
        """
        File names with a dot as their first character don't have an extension,
        and the underscore should get added to the end.
        """
        self.storage.save('dotted.path/.test', ContentFile("1"))
        self.storage.save('dotted.path/.test', ContentFile("2"))
        files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
        self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
        self.assertEqual(files[0], '.test')
        # The uniquifying suffix lands after '.test', not before the dot.
        six.assertRegex(self, files[1], '.test_%s' % FILE_SUFFIX_REGEX)
class ContentFileStorageTestCase(unittest.TestCase):
    """ContentFile objects save cleanly regardless of bytes/text content."""
    def setUp(self):
        self.tmp_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.tmp_dir)

    def tearDown(self):
        shutil.rmtree(self.tmp_dir)

    def test_content_saving(self):
        """
        A ContentFile initialized from either bytes or unicode text can be
        written by the filesystem storage without errors.
        """
        self.storage.save('bytes.txt', ContentFile(b"content"))
        self.storage.save('unicode.txt', ContentFile("español"))
@override_settings(ROOT_URLCONF='file_storage.urls')
class FileLikeObjectTestCase(LiveServerTestCase):
    """
    Test file-like objects (#15644).
    """
    available_apps = []

    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(location=self.temp_dir)

    def tearDown(self):
        shutil.rmtree(self.temp_dir)

    def test_urllib2_urlopen(self):
        """
        Test the File storage API with a file like object coming from urllib2.urlopen()
        """
        # Fetch the live server's root page and store it via the storage API.
        file_like_object = urlopen(self.live_server_url + '/')
        f = File(file_like_object)
        stored_filename = self.storage.save("remote_file.html", f)
        # A second fetch provides the reference content to compare against.
        remote_file = urlopen(self.live_server_url + '/')
        with self.storage.open(stored_filename) as stored_file:
            self.assertEqual(stored_file.read(), remote_file.read())
|
sim.py | def main():
    """Entry point: parse CLI args, load the preset config, then run the
    simulation and the communications server on daemon threads."""
    import argparse
    import sys
    from collections import deque
    from queue import Queue
    import time
    from ev3sim.file_helper import find_abs
    parser = argparse.ArgumentParser(description='Run the simulation, include some robots.')
    parser.add_argument('--preset', type=str, help="Path of preset file to load. (You shouldn't need to change this, by default it is presets/soccer.yaml)", default='soccer.yaml', dest='preset')
    parser.add_argument('robots', nargs='+', help='Path of robots to load. Separate each robot path by a space.')
    args = parser.parse_args(sys.argv[1:])
    import yaml
    from ev3sim.simulation.loader import runFromConfig
    preset_file = find_abs(args.preset, allowed_areas=['local', 'local/presets/', 'package', 'package/presets/'])
    with open(preset_file, 'r') as f:
        config = yaml.safe_load(f)
    # CLI robots are appended to whatever the preset already declares.
    config['robots'] = config.get('robots', []) + args.robots
    shared_data = {
        'tick': 0,  # Current tick
        'write_stack': deque(),  # All write actions are processed through this
        'data_queue': {},  # Simulation data for each bot
        'active_count': {},  # Keeps track of which code connection each bot has.
        'bot_locks': {},  # Threading Locks and Conditions for each bot to wait for connection actions
        'bot_communications_data': {},  # Buffers and information for all bot communications
        'tick_updates': {},  # Simply a dictionary where the simulation tick will push static data, so the other methods are aware of when the simulation has exited.
    }
    # Holds either True (clean exit) or a (thread-name, exception) pair
    # from whichever thread failed first.
    result_bucket = Queue(maxsize=1)
    from threading import Thread
    from ev3sim.simulation.communication import start_server_with_shared_data
    def run(shared_data, result):
        try:
            runFromConfig(config, shared_data)
        except Exception as e:
            result.put(('Simulation', e))
            return
        result.put(True)
    comm_thread = Thread(target=start_server_with_shared_data, args=(shared_data, result_bucket), daemon=True)
    sim_thread = Thread(target=run, args=(shared_data, result_bucket), daemon=True)
    comm_thread.start()
    sim_thread.start()
    try:
        # NOTE(review): this leans on undocumented Queue internals
        # (not_empty, _qsize) to poll with a timeout while staying
        # interruptible by Ctrl-C — confirm before upgrading Python.
        with result_bucket.not_empty:
            while not result_bucket._qsize():
                result_bucket.not_empty.wait(0.1)
        r = result_bucket.get()
        if r is not True:
            print(f"An error occured in the {r[0]} thread. Raising an error now...")
            time.sleep(1)
            raise r[1]
    except KeyboardInterrupt:
        pass
if __name__ == '__main__':
    # Allow running the simulator directly as a script.
    main()
test_user_secrets.py | import json
import os
import threading
import unittest
from http.server import BaseHTTPRequestHandler, HTTPServer
from test.support import EnvironmentVarGuard
from urllib.parse import urlparse
from google.auth.exceptions import DefaultCredentialsError
from google.cloud import bigquery
from kaggle_secrets import (_KAGGLE_URL_BASE_ENV_VAR_NAME,
_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME,
CredentialError, UserSecretsClient)
_TEST_JWT = 'test-secrets-key'
class UserSecretsHTTPHandler(BaseHTTPRequestHandler):
    """Base handler for the fake secrets server used in tests.

    Subclasses record the incoming request (set_request) and supply the
    JSON body to return (get_response).
    """

    def set_request(self):
        raise NotImplementedError()

    def get_response(self):
        raise NotImplementedError()

    def do_HEAD(self):
        self.send_response(200)

    def do_GET(self):
        # Let the subclass capture the request, then answer with its JSON.
        self.set_request()
        self.send_response(200)
        self.send_header("Content-type", "application/json")
        self.end_headers()
        body = json.dumps(self.get_response()).encode("utf-8")
        self.wfile.write(body)
class TestUserSecrets(unittest.TestCase):
    """Tests for UserSecretsClient against a local fake secrets server."""
    # Address of the fake server, taken from the Kaggle URL env variable.
    SERVER_ADDRESS = urlparse(os.getenv(_KAGGLE_URL_BASE_ENV_VAR_NAME))

    def _test_client(self, client_func, expected_path, secret):
        """Run client_func against a one-shot fake server and verify both
        the request path and the bearer-token Authorization header."""
        _request = {}

        class AccessTokenHandler(UserSecretsHTTPHandler):
            def set_request(self):
                # Record what the client sent so we can assert on it later.
                _request['path'] = self.path
                _request['headers'] = self.headers

            def get_response(self):
                return {"Secret": secret}

        env = EnvironmentVarGuard()
        env.set(_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME, _TEST_JWT)
        with env:
            with HTTPServer((self.SERVER_ADDRESS.hostname, self.SERVER_ADDRESS.port), AccessTokenHandler) as httpd:
                # Serve on a background thread; shut down once the client
                # call returns (or raises).
                threading.Thread(target=httpd.serve_forever).start()
                try:
                    client_func()
                finally:
                    httpd.shutdown()
        path, headers = _request['path'], _request['headers']
        self.assertEqual(
            path,
            expected_path,
            msg="Fake server did not receive the right request from the UserSecrets client.")
        self.assertTrue(
            any(
                k for k in headers
                if k == "Authorization" and headers[k] == f'Bearer {_TEST_JWT}'),
            msg="Authorization header was missing from the UserSecrets request.")

    def test_no_token_fails(self):
        # Without the token env var, constructing the client must fail.
        env = EnvironmentVarGuard()
        env.unset(_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME)
        with env:
            with self.assertRaises(CredentialError):
                client = UserSecretsClient()

    def test_get_access_token_succeeds(self):
        secret = '12345'

        def call_get_access_token():
            client = UserSecretsClient()
            secret_response = client.get_bigquery_access_token()
            self.assertEqual(secret_response, secret)

        self._test_client(call_get_access_token,
                          '/requests/GetUserSecretRequest?Target=1', secret)
|
coap_payload_size_fuzzer.py | import logging
import multiprocessing
import random
import signal
import time
import unittest
from coapthon.client.helperclient import HelperClient
from Entity.attack import Attack
from Entity.input_format import InputFormat
from protocols import CoAP as PeniotCoAP
class CoAPPayloadSizeFuzzerAttack(Attack):
    """
    CoAP Protocol - Payload Size Fuzzer Attack module

    Black-box tests a CoAP endpoint by sending payloads of random sizes
    between 0 and ``max_payload_length`` with a payload-carrying method.
    """
    client = None
    # Input Fields
    host = None
    port = None
    path = None
    method = None
    method_string = PeniotCoAP.get_coap_methods_as_string(PeniotCoAP.CoAPMethods.POST)
    fuzzing_turn = 10
    # Miscellaneous Members
    logger = None
    max_payload_length = 2 ** 16 - 1
    sent_message_count = 0  # Transmitted fuzzing packets
    stopped_flag = False

    def __init__(self):
        default_parameters = ["", "", "", "", 10, self.max_payload_length]
        inputs = [
            InputFormat("Host Name", "host", "", str, mandatory=True),
            InputFormat("Port Number", "port", "", int, mandatory=True),
            InputFormat("Endpoint", "path", "", str, mandatory=True),
            InputFormat("Method", "method_string", self.method_string, str, mandatory=True),
            InputFormat("Fuzzing Round Count", "fuzzing_turn", self.fuzzing_turn, int),
            InputFormat("Maximum Payload Size", "max_payload_length", self.max_payload_length, int, mandatory=True)
        ]
        Attack.__init__(self, "CoAP Payload Size Fuzzer Attack", inputs, default_parameters,
                        " CoAP Payload size fuzzer attack description")
        logging.basicConfig(level=logging.DEBUG, format="%(asctime)s:%(levelname)s:%(name)s:%(message)s")
        # Fix: the class attribute `logger` was never replaced with a real
        # logger object, so every self.logger.info(...) call raised
        # AttributeError on None.
        self.logger = logging.getLogger(__name__)
        # Signal handler to exit from function
        signal.signal(signal.SIGINT, self.signal_handler)

    def signal_handler(self, sig, frame):
        self.stop_attack()

    def stop_attack(self):
        """Stop fuzzing and release the CoAP client."""
        self.logger.info("Sent message count: {0}, exitting...".format(self.sent_message_count))
        self.stopped_flag = True
        if self.client is not None:
            self.client.stop()
            self.client = None
        time.sleep(2)  # Sleep two seconds so the user can see the message

    def pre_attack_init(self):
        """Resolve the CoAP method and create the client.

        Raises AssertionError when the chosen method cannot carry a payload
        or fewer than two fuzzing rounds were requested.
        """
        # Fix: resolve the method from method_string BEFORE validating it.
        # The original asserted on self.method while it was still None and
        # only resolved it afterwards, so the validation checked stale data.
        self.method = PeniotCoAP.get_coap_methods_by_name(self.method_string)
        assert PeniotCoAP.does_method_have_payload(self.method) and self.fuzzing_turn >= 2
        self.client = HelperClient(server=(self.host, self.port))

    def run(self):
        Attack.run(self)
        self.pre_attack_init()
        # Always include both extremes, plus random sizes in between.
        size_list = [0, self.max_payload_length]
        size_list.extend([random.randint(0, self.max_payload_length) for _ in range(self.fuzzing_turn - 2)])
        fuzzing = 0
        self.logger.info("Size payload fuzzing is started. Please consider it may take some time.")
        for payload_size in size_list:
            if self.stopped_flag is True:  # Attack is terminated
                break
            # Build a payload of the chosen size from one random ASCII letter.
            random_strings = "".join([chr(_) for _ in range(65, 91)]) + "".join([chr(_) for _ in range(97, 123)])
            random_character = random.choice(random_strings)
            sized_payload = random_character * payload_size
            PeniotCoAP.make_request(self.client, self.path, self.method, sized_payload)
            # Informative procedures
            self.logger.info("Turn {0} is completed".format(fuzzing + 1))
            self.sent_message_count += 1
            fuzzing += 1
            time.sleep(1)
        if self.stopped_flag is False:
            self.logger.info("Payload size attack is finished.")
        else:
            self.logger.info("Payload size attack has been terminated.")
        if self.client is not None:
            self.client.stop()
            self.client = None
class TestCoAPPayloadSizeAttack(unittest.TestCase):
    """Unit tests for CoAPPayloadSizeFuzzerAttack."""

    def setUp(self):
        self.coap_payload_size_fuzzer = CoAPPayloadSizeFuzzerAttack()

    def tearDown(self):
        pass

    def test_name(self):
        self.assertEqual("CoAP Payload Size Fuzzer Attack", self.coap_payload_size_fuzzer.get_attack_name())

    def test_inputs(self):
        inputs = self.coap_payload_size_fuzzer.get_inputs()
        self.assertIsNotNone(inputs)
        self.assertGreater(len(inputs), 0, "Non inserted inputs")
        # Fix: assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(len(inputs), 6)

    def test_non_initialized_inputs(self):
        # Before set_value() is called, attributes hold None or a default
        # of the declared input type.
        inputs = self.coap_payload_size_fuzzer.get_inputs()
        for _input in inputs:
            value = getattr(self.coap_payload_size_fuzzer, _input.get_name())
            self.assertTrue(value is None or type(value) == _input.get_type())

    def test_after_getting_inputs(self):
        example_inputs = ["a.b.c.d", 8888, "peniot-coap-test", "PuT", 13, 6583]
        for index, _input in enumerate(example_inputs):
            self.coap_payload_size_fuzzer.inputs[index].set_value(_input)
        # Previously it should not be set
        self.assertIsNone(self.coap_payload_size_fuzzer.client)
        super(CoAPPayloadSizeFuzzerAttack, self.coap_payload_size_fuzzer).run()
        inputs = self.coap_payload_size_fuzzer.get_inputs()
        for index, _input in enumerate(inputs):
            value = getattr(self.coap_payload_size_fuzzer, _input.get_name())
            self.assertEqual(example_inputs[index], value)

    def test_invalid_method(self):
        # NOTE(review): this test passes even when no exception is raised;
        # consider self.assertRaises once the expected failure is confirmed.
        example_inputs = ["127.0.0.1", 8888, "peniot-coap-test", "geT", 13, 6583]
        for index, _input in enumerate(example_inputs):
            self.coap_payload_size_fuzzer.inputs[index].set_value(_input)
        super(CoAPPayloadSizeFuzzerAttack, self.coap_payload_size_fuzzer).run()
        try:
            self.coap_payload_size_fuzzer.pre_attack_init()
        except AssertionError:
            self.assertTrue(True)

    def test_invalid_fuzzing_turn(self):
        # A round count below 2 must be rejected by pre_attack_init().
        example_inputs = ["127.0.0.1", 8888, "peniot-coap-test", "puT", 1, 6583]
        for index, _input in enumerate(example_inputs):
            self.coap_payload_size_fuzzer.inputs[index].set_value(_input)
        super(CoAPPayloadSizeFuzzerAttack, self.coap_payload_size_fuzzer).run()
        try:
            self.coap_payload_size_fuzzer.pre_attack_init()
        except AssertionError:
            self.assertTrue(True)

    def test_payload_size_fuzzing_attack(self):
        def run_attack():
            example_inputs = ["127.0.0.1", 5683, "peniot", "pOsT", 3, 6583]
            for index, _input in enumerate(example_inputs):
                self.coap_payload_size_fuzzer.inputs[index].set_value(_input)
            try:
                self.coap_payload_size_fuzzer.run()
            except Exception:
                self.assertTrue(False)
        # Fix: the original used a Python 2 print statement, a syntax error
        # under Python 3; print() works on both.
        print("* If server is not initialized this test will not execute properly.")
        p = multiprocessing.Process(target=run_attack, name="DoS Attack")
        p.start()
        time.sleep(5)
        if p.is_alive():
            p.terminate()
        p.join()
if __name__ == '__main__':
    # Run this module's unit tests when executed directly.
    unittest.main()
|
server.py | # coding: utf-8
import os
import sys
import socket
import threading
import buffer
from time import sleep
from scheduler import scheduler
class Receiver:
    """TCP server that receives scheduled-mail jobs (metadata plus three
    files) and hands them to the scheduler."""

    def __init__(self, host='0.0.0.0', port=6666):
        """Bind, listen, and serve forever; one thread per connection.

        NOTE: the accept loop never returns, so constructing a Receiver
        blocks the calling thread for the life of the process.
        """
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            s.bind((host, port))
            s.listen(5)
        except socket.error as msg:
            print(msg)
            sys.exit(1)
        while True:
            conn, addr = s.accept()
            connbuf = buffer.Buffer(conn)
            recv_thread = threading.Thread(target=self.deal_data, args=(connbuf, addr))
            recv_thread.start()

    def deal_data(self, connbuf, addr):
        """Receive one job from a client connection and schedule it.

        The wire format is a fixed sequence of UTF-8 fields followed by
        three length-prefixed files (eml, excel, annex).
        """
        print()
        print("Got a connection from ", addr)
        absolute_path = '/var/www/socialmails/schedule_server/'
        connbuf.put_utf8('Hi, Welcome to the server!')
        smtp_data = connbuf.get_utf8()
        db_data = connbuf.get_utf8()
        eml_type = connbuf.get_utf8()
        # Only the basename of each client-supplied path is kept, anchored
        # under the server's own directories.
        eml_name = absolute_path + 'eml/' + connbuf.get_utf8().split('/')[-1]
        user_group = connbuf.get_utf8()
        mail_excel = absolute_path + 'excel/' + connbuf.get_utf8().split('/')[-1]
        annex = absolute_path + 'annex/' + connbuf.get_utf8().split('/')[-1]
        url = connbuf.get_utf8()
        datetime = connbuf.get_utf8()
        # Fix: removed a redundant reassignment of absolute_path that
        # duplicated the value already set above.
        for file_name in [eml_name, mail_excel, annex]:
            file_size = int(connbuf.get_utf8())
            print('file size: ', file_size)
            with open(file_name, 'wb') as f:
                remaining = file_size
                while remaining:
                    chunk_size = 4096 if remaining >= 4096 else remaining
                    chunk = connbuf.get_bytes(chunk_size)
                    if not chunk:
                        break
                    f.write(chunk)
                    remaining -= len(chunk)
            if remaining:
                print(file_name, ' incomplete. Missing', remaining, 'bytes.')
            else:
                print(file_name, ' received successfully.')
        print('All data ({0}, {1}, {2})'.format(smtp_data, db_data, url))
        print()
        scheduler(datetime, [smtp_data, db_data, eml_type, eml_name, user_group, mail_excel, annex, url])
if __name__ == "__main__":
    # Start the blocking accept loop immediately.
    receiver = Receiver()
|
test_v2_0_0_container.py | import multiprocessing
import queue
import random
import threading
import unittest
import requests
import time
from dateutil.parser import parse
from .fixtures import APITestCase
class ContainerTestCase(APITestCase):
    """Exercises the docker-compat (v1.40) and libpod container endpoints."""

    def test_list(self):
        r = requests.get(self.uri("/containers/json"), timeout=5)
        self.assertEqual(r.status_code, 200, r.text)
        obj = r.json()
        self.assertEqual(len(obj), 1)

    def test_list_filters(self):
        # Filters are passed URL-encoded: {"status":["running"]}.
        r = requests.get(
            self.podman_url
            + "/v1.40/containers/json?filters%3D%7B%22status%22%3A%5B%22running%22%5D%7D"
        )
        self.assertEqual(r.status_code, 200, r.text)
        payload = r.json()
        containerAmnt = len(payload)
        self.assertGreater(containerAmnt, 0)

    def test_list_all(self):
        r = requests.get(self.uri("/containers/json?all=true"))
        self.assertEqual(r.status_code, 200, r.text)
        self.assertId(r.content)

    def test_inspect(self):
        """Inspect an existing container, then one created with a healthcheck."""
        r = requests.get(self.uri(self.resolve_container("/containers/{}/json")))
        self.assertEqual(r.status_code, 200, r.text)
        self.assertId(r.content)
        _ = parse(r.json()["Created"])
        r = requests.post(
            self.podman_url + "/v1.40/containers/create?name=topcontainer",
            json={
                "Cmd": ["top"],
                "Image": "alpine:latest",
                "Healthcheck": {
                    "Test": ["CMD", "pidof", "top"],
                    "Interval": 5000000000,
                    "Timeout": 2000000000,
                    "Retries": 3,
                    "StartPeriod": 5000000000,
                },
            },
        )
        self.assertEqual(r.status_code, 201, r.text)
        payload = r.json()
        container_id = payload["Id"]
        self.assertIsNotNone(container_id)
        # Healthcheck config must round-trip through the compat endpoint.
        r = requests.get(self.podman_url + f"/v1.40/containers/{container_id}/json")
        self.assertEqual(r.status_code, 200, r.text)
        self.assertId(r.content)
        out = r.json()
        self.assertIsNotNone(out["State"].get("Health"))
        self.assertListEqual(["CMD", "pidof", "top"], out["Config"]["Healthcheck"]["Test"])
        self.assertEqual(5000000000, out["Config"]["Healthcheck"]["Interval"])
        self.assertEqual(2000000000, out["Config"]["Healthcheck"]["Timeout"])
        self.assertEqual(3, out["Config"]["Healthcheck"]["Retries"])
        self.assertEqual(5000000000, out["Config"]["Healthcheck"]["StartPeriod"])
        # And through the libpod endpoint too.
        r = requests.get(self.uri(f"/containers/{container_id}/json"))
        self.assertEqual(r.status_code, 200, r.text)
        self.assertId(r.content)
        out = r.json()
        hc = out["Config"]["Healthcheck"]["Test"]
        self.assertListEqual(["CMD", "pidof", "top"], hc)
        r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/start")
        self.assertEqual(r.status_code, 204, r.text)
        r = requests.get(self.podman_url + f"/v1.40/containers/{container_id}/json")
        self.assertEqual(r.status_code, 200, r.text)
        out = r.json()
        state = out["State"]["Health"]
        self.assertIsInstance(state, dict)

    def test_stats(self):
        # 409 is acceptable when the container is not running.
        r = requests.get(self.uri(self.resolve_container("/containers/{}/stats?stream=false")))
        self.assertIn(r.status_code, (200, 409), r.text)
        if r.status_code == 200:
            self.assertId(r.content)
        r = requests.get(
            self.uri(self.resolve_container("/containers/{}/stats?stream=false&one-shot=true"))
        )
        self.assertIn(r.status_code, (200, 409), r.text)
        if r.status_code == 200:
            self.assertId(r.content)

    def test_delete(self):
        r = requests.delete(self.uri(self.resolve_container("/containers/{}?force=true")))
        self.assertEqual(r.status_code, 200, r.text)

    def test_stop(self):
        # 304 means "already in that state" and is fine for start/stop.
        r = requests.post(self.uri(self.resolve_container("/containers/{}/start")))
        self.assertIn(r.status_code, (204, 304), r.text)
        r = requests.post(self.uri(self.resolve_container("/containers/{}/stop")))
        self.assertIn(r.status_code, (204, 304), r.text)

    def test_start(self):
        r = requests.post(self.uri(self.resolve_container("/containers/{}/stop")))
        self.assertIn(r.status_code, (204, 304), r.text)
        r = requests.post(self.uri(self.resolve_container("/containers/{}/start")))
        self.assertIn(r.status_code, (204, 304), r.text)

    def test_restart(self):
        r = requests.post(self.uri(self.resolve_container("/containers/{}/start")))
        self.assertIn(r.status_code, (204, 304), r.text)
        r = requests.post(self.uri(self.resolve_container("/containers/{}/restart")), timeout=5)
        self.assertEqual(r.status_code, 204, r.text)

    def test_resize(self):
        # 409 when the container has no TTY to resize.
        r = requests.post(self.uri(self.resolve_container("/containers/{}/resize?h=43&w=80")))
        self.assertIn(r.status_code, (200, 409), r.text)
        if r.status_code == 200:
            self.assertEqual(r.text, "", r.text)

    def test_attach(self):
        self.skipTest("FIXME: Test timeouts")
        r = requests.post(self.uri(self.resolve_container("/containers/{}/attach?logs=true")), timeout=5)
        self.assertIn(r.status_code, (101, 500), r.text)

    def test_logs(self):
        r = requests.get(self.uri(self.resolve_container("/containers/{}/logs?stdout=true")))
        self.assertEqual(r.status_code, 200, r.text)
        r = requests.post(
            self.podman_url + "/v1.40/containers/create?name=topcontainer",
            json={"Cmd": ["top", "ls"], "Image": "alpine:latest"},
        )
        self.assertEqual(r.status_code, 201, r.text)
        payload = r.json()
        container_id = payload["Id"]
        self.assertIsNotNone(container_id)
        # until=0 and until=1 are both valid boundary values.
        r = requests.get(
            self.podman_url
            + f"/v1.40/containers/{payload['Id']}/logs?follow=false&stdout=true&until=0"
        )
        self.assertEqual(r.status_code, 200, r.text)
        r = requests.get(
            self.podman_url
            + f"/v1.40/containers/{payload['Id']}/logs?follow=false&stdout=true&until=1"
        )
        self.assertEqual(r.status_code, 200, r.text)

    def test_commit(self):
        r = requests.post(self.uri(self.resolve_container("/commit?container={}")))
        self.assertEqual(r.status_code, 200, r.text)
        self.assertId(r.content)
        obj = r.json()
        self.assertIsInstance(obj, dict)

    def test_prune(self):
        name = f"Container_{random.getrandbits(160):x}"
        r = requests.post(
            self.podman_url + f"/v1.40/containers/create?name={name}",
            json={
                "Cmd": ["cp", "/etc/motd", "/motd.size_test"],
                "Image": "alpine:latest",
                "NetworkDisabled": True,
            },
        )
        self.assertEqual(r.status_code, 201, r.text)
        create = r.json()
        r = requests.post(self.podman_url + f"/v1.40/containers/{create['Id']}/start")
        self.assertEqual(r.status_code, 204, r.text)
        r = requests.post(self.podman_url + f"/v1.40/containers/{create['Id']}/wait")
        self.assertEqual(r.status_code, 200, r.text)
        wait = r.json()
        self.assertEqual(wait["StatusCode"], 0, wait["Error"])
        prune = requests.post(self.podman_url + "/v1.40/containers/prune")
        self.assertEqual(prune.status_code, 200, prune.status_code)
        prune_payload = prune.json()
        self.assertGreater(prune_payload["SpaceReclaimed"], 0)
        self.assertIn(create["Id"], prune_payload["ContainersDeleted"])
        # Delete any orphaned containers.
        # Fix: the loop previously bound its variable as
        # `self.resolve_container`, which clobbered the inherited
        # resolve_container() helper on the instance; use a local name.
        r = requests.get(self.podman_url + "/v1.40/containers/json?all=true")
        self.assertEqual(r.status_code, 200, r.text)
        for ctnr in r.json():
            requests.delete(
                self.podman_url + f"/v1.40/containers/{ctnr['Id']}?force=true"
            )
        # Image prune here tied to containers freeing up
        prune = requests.post(self.podman_url + "/v1.40/images/prune")
        self.assertEqual(prune.status_code, 200, prune.text)
        prune_payload = prune.json()
        self.assertGreater(prune_payload["SpaceReclaimed"], 0)
        # FIXME need method to determine which image is going to be "pruned" to fix test
        # TODO should handler be recursive when deleting images?
        # self.assertIn(img["Id"], prune_payload["ImagesDeleted"][1]["Deleted"])
        # FIXME (@vrothberg): I commented this line out during the `libimage` migration.
        # It doesn't make sense to report anything to be deleted if the reclaimed space
        # is zero. I think the test needs some rewrite.
        # self.assertIsNotNone(prune_payload["ImagesDeleted"][1]["Deleted"])

    def test_status(self):
        """Walk a container through Created/Up/Paused/Exited and check Status."""
        r = requests.post(
            self.podman_url + "/v1.40/containers/create?name=topcontainer",
            json={"Cmd": ["top"], "Image": "alpine:latest"},
        )
        self.assertEqual(r.status_code, 201, r.text)
        payload = r.json()
        container_id = payload["Id"]
        self.assertIsNotNone(container_id)
        r = requests.get(
            self.podman_url + "/v1.40/containers/json",
            params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
        )
        self.assertEqual(r.status_code, 200, r.text)
        payload = r.json()
        self.assertEqual(payload[0]["Status"], "Created")
        r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/start")
        self.assertEqual(r.status_code, 204, r.text)
        r = requests.get(
            self.podman_url + "/v1.40/containers/json",
            params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
        )
        self.assertEqual(r.status_code, 200, r.text)
        payload = r.json()
        self.assertTrue(str(payload[0]["Status"]).startswith("Up"))
        r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/pause")
        self.assertEqual(r.status_code, 204, r.text)
        r = requests.get(
            self.podman_url + "/v1.40/containers/json",
            params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
        )
        self.assertEqual(r.status_code, 200, r.text)
        payload = r.json()
        self.assertTrue(str(payload[0]["Status"]).startswith("Up"))
        self.assertTrue(str(payload[0]["Status"]).endswith("(Paused)"))
        r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/unpause")
        self.assertEqual(r.status_code, 204, r.text)
        r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/stop")
        self.assertEqual(r.status_code, 204, r.text)
        r = requests.get(
            self.podman_url + "/v1.40/containers/json",
            params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
        )
        self.assertEqual(r.status_code, 200, r.text)
        payload = r.json()
        self.assertTrue(str(payload[0]["Status"]).startswith("Exited"))
        r = requests.delete(self.podman_url + f"/v1.40/containers/{container_id}")
        self.assertEqual(r.status_code, 204, r.text)

    def test_top_no_stream(self):
        # The request is made on a worker thread so a hang shows up as the
        # thread still being alive after the timeout.
        uri = self.uri(self.resolve_container("/containers/{}/top"))
        q = queue.Queue()

        def _impl(fifo):
            fifo.put(requests.get(uri, params={"stream": False}, timeout=2))

        top = threading.Thread(target=_impl, args=(q,))
        top.start()
        time.sleep(2)
        self.assertFalse(top.is_alive(), f"GET {uri} failed to return in 2s")
        qr = q.get(False)
        self.assertEqual(qr.status_code, 200, qr.text)
        qr.close()
        top.join()

    def test_top_stream(self):
        uri = self.uri(self.resolve_container("/containers/{}/top"))
        q = queue.Queue()
        stop_thread = False

        def _impl(fifo, stop):
            try:
                with requests.get(uri, params={"stream": True, "delay": 1}, stream=True) as r:
                    r.raise_for_status()
                    fifo.put(r)
                    for buf in r.iter_lines(chunk_size=None):
                        if stop():
                            break
                        fifo.put(buf)
            except Exception:
                pass

        top = threading.Thread(target=_impl, args=(q, (lambda: stop_thread)))
        top.start()
        time.sleep(4)
        # A streaming endpoint should still be producing after 4s.
        self.assertTrue(top.is_alive(), f"GET {uri} exited too soon")
        stop_thread = True
        for _ in range(10):
            try:
                qr = q.get_nowait()
                if qr is not None:
                    self.assertEqual(qr.status_code, 200)
                    qr.close()
                    break
            except queue.Empty:
                pass
            finally:
                time.sleep(1)
        else:
            self.fail("Server failed to respond in 10s")
        top.join()
if __name__ == "__main__":
    # Run the API tests standalone (requires a running podman service).
    unittest.main()
|
TelloController3.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import threading
import socket
import time
import sys
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class TelloController1(QWidget):
    """Minimal PyQt5 controller window for a DJI Tello drone (UDP SDK)."""

    def __init__(self):
        QWidget.__init__(self)
        self.initConnection()
        self.initUI()
        # Enter SDK command mode first.
        self._send('command')
        # Use a fairly slow speed.
        self._send('speed 50')
        # Background thread polling battery level and flight time.
        askThread = threading.Thread(target=self.askTello)
        askThread.setDaemon(True)
        askThread.start()

    def _send(self, command):
        """Best-effort UDP send of one Tello SDK command; errors ignored.

        Consolidates the identical try/sendto/except blocks that were
        duplicated across every button handler.
        """
        try:
            self.sock.sendto(command.encode(encoding="utf-8"), self.tello)
        except:
            pass

    # Connection setup
    def initConnection(self):
        host = ''
        port = 9000
        locaddr = (host, port)
        # The Tello listens for SDK commands on 192.168.10.1:8889.
        self.tello = ('192.168.10.1', 8889)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.bind(locaddr)
        # Start the response-receiver thread.
        recvThread = threading.Thread(target=self.recvSocket)
        recvThread.setDaemon(True)
        recvThread.start()

    # Build the UI
    def initUI(self):
        # Status labels (last response, battery, flight time).
        self.label = QLabel('')
        self.label.setFrameStyle(QFrame.Box | QFrame.Plain)
        self.batteryLabel = QLabel('100%')
        self.batteryLabel.setFrameStyle(QFrame.Box | QFrame.Plain)
        self.batteryLabel.setAlignment(Qt.AlignBottom | Qt.AlignRight)
        self.timeLabel = QLabel('0s')
        self.timeLabel.setFrameStyle(QFrame.Box | QFrame.Plain)
        self.timeLabel.setAlignment(Qt.AlignBottom | Qt.AlignRight)
        # Quit button
        endBtn = QPushButton("End")
        endBtn.clicked.connect(self.endBtnClicked)
        # Takeoff / landing buttons
        takeoffBtn = QPushButton("Takeoff")
        takeoffBtn.clicked.connect(self.takeoffBtnClicked)
        landBtn = QPushButton("Land")
        landBtn.clicked.connect(self.landBtnClicked)
        # Up / down / rotate buttons
        upBtn = QPushButton("↑↑")
        upBtn.clicked.connect(self.upBtnClicked)
        downBtn = QPushButton("↓↓")
        downBtn.clicked.connect(self.downBtnClicked)
        cwBtn = QPushButton("→↓")
        cwBtn.clicked.connect(self.cwBtnClicked)
        ccwBtn = QPushButton("↓←")
        ccwBtn.clicked.connect(self.ccwBtnClicked)
        # Forward / back / left / right buttons
        forwardBtn = QPushButton("↑")
        forwardBtn.clicked.connect(self.forwardBtnClicked)
        backBtn = QPushButton("↓")
        backBtn.clicked.connect(self.backBtnClicked)
        rightBtn = QPushButton("→")
        rightBtn.clicked.connect(self.rightBtnClicked)
        leftBtn = QPushButton("←")
        leftBtn.clicked.connect(self.leftBtnClicked)
        # Grid layout
        layout = QGridLayout()
        layout.addWidget(self.label, 0, 0)
        layout.addWidget(self.batteryLabel, 0, 1)
        layout.addWidget(self.timeLabel, 0, 2)
        layout.addWidget(endBtn, 0, 6)
        layout.addWidget(takeoffBtn, 0, 3)
        layout.addWidget(landBtn, 1, 3)
        layout.addWidget(upBtn, 2, 1)
        layout.addWidget(downBtn, 4, 1)
        layout.addWidget(cwBtn, 3, 2)
        layout.addWidget(ccwBtn, 3, 0)
        layout.addWidget(forwardBtn, 2, 5)
        layout.addWidget(backBtn, 4, 5)
        layout.addWidget(rightBtn, 3, 6)
        layout.addWidget(leftBtn, 3, 4)
        self.setLayout(layout)

    # Quit the application
    def endBtnClicked(self):
        sys.exit()

    # Movement handlers: each button sends one fixed SDK command.
    def takeoffBtnClicked(self):
        self._send('takeoff')

    def landBtnClicked(self):
        self._send('land')

    def upBtnClicked(self):
        self._send('up 20')

    def downBtnClicked(self):
        self._send('down 20')

    def cwBtnClicked(self):
        self._send('cw 45')

    def ccwBtnClicked(self):
        self._send('ccw 45')

    def forwardBtnClicked(self):
        self._send('forward 20')

    def backBtnClicked(self):
        self._send('back 20')

    def rightBtnClicked(self):
        self._send('right 20')

    def leftBtnClicked(self):
        self._send('left 20')

    # Receive responses from the Tello
    def recvSocket(self):
        while True:
            try:
                data, server = self.sock.recvfrom(1518)
                resp = data.decode(encoding="utf-8").strip()
                if resp.isdecimal():  # digits only -> battery level
                    self.batteryLabel.setText(resp + "%")
                elif resp[-1:] == "s":  # trailing 's' -> flight time
                    self.timeLabel.setText(resp)
                elif resp == "OK":  # OK is shown in black
                    self.label.setStyleSheet("color:black;")
                    self.label.setText(resp)
                else:  # anything else is shown in red
                    self.label.setStyleSheet("color:red;")
                    self.label.setText(resp)
            except:
                pass

    # Periodically query battery and flight time
    def askTello(self):
        while True:
            self._send('battery?')
            time.sleep(0.5)
            self._send('time?')
            time.sleep(0.5)
if __name__ == '__main__':
    # Build the Qt application, show the controller window, then hand
    # control to the event loop and exit with its status code.
    app = QApplication(sys.argv)
    controller = TelloController1()
    controller.show()
    sys.exit(app.exec_())
|
DateTime.py | import os, sys
parentPath = os.path.abspath("..")
if parentPath not in sys.path:
sys.path.insert(0, parentPath)
from widgets.Label import Label
from widgets.Image import Image
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, GObject, Gdk
import datetime
import psutil
#from weather import Weather, Unit
gi.require_version('Notify', '0.7')
from gi.repository import Notify
import time
import threading
class DateTime(Label):
    """Clock widget: a ``Label`` subclass that shows the current time.

    A daemon thread re-formats ``datetime.now()`` with ``fmt`` every
    ``delay`` seconds into ``self.newTxt``; ``Update()`` then pushes the
    text into the label only when it actually changed.  ``Update`` returns
    True, which keeps a GLib/GObject timeout firing — presumably it is
    registered as one by the caller (TODO confirm against the caller).
    """
    def __init__(self, bgColor='#ffffff', fgColor='#000000', fmt="", delay=0.5,
                 fontSize=10, font="", decoratePos="DOWN", decoreateImg=""):
        super().__init__(bgColor, fgColor, fontSize=fontSize, font=font,
                         decoratePos=decoratePos, decoreateImg=decoreateImg)
        self.fmt = fmt        # strftime format string
        self.delay = delay    # refresh period in seconds
        # Subscribe to mouse clicks and scroll-wheel events on this widget.
        self.set_events(Gdk.EventMask.SCROLL_MASK|Gdk.EventMask.BUTTON_PRESS_MASK)
        self.connect('button-press-event', self.__onClick)
        self.connect('scroll-event', self.__onScroll)
        self.txt, self.newTxt = "", ""
        # Background formatter thread; daemon so it dies with the process.
        th = threading.Thread(target=self.UpdateThread)
        th.daemon = True
        th.start()

    def UpdateThread(self):
        """Worker loop: refresh ``newTxt`` with the formatted current time."""
        while True:
            nowdt = datetime.datetime.now()
            self.newTxt = '<span font="%s">%s</span>' % (str(self.fontSize), nowdt.strftime(self.fmt))
            time.sleep(self.delay)

    def Update(self):
        """Copy ``newTxt`` into the label if it changed; always return True
        (a GLib timeout source keeps running while its callback is truthy)."""
        if self.txt != self.newTxt:
            self.txt = self.newTxt
            self.label.set_markup(self.txt)
        return True
        #return GObject.SOURCE_CONTINUE

    def __onClick(self, widget, event = None):
        # Debug handler: print which mouse button was pressed (l/m/r).
        if event.button == 1:
            print("l")
        elif event.button == 2:
            print("m")
        elif event.button == 3:
            print("r")

    def __onScroll(self, widget, event):
        # Debug handler: print scroll direction (+ up / - down).
        direction = event.direction
        if direction == Gdk.ScrollDirection.DOWN:
            print("-")
        elif direction == Gdk.ScrollDirection.UP:
            print("+")
|
dataloader.py | import os
import torch
from torch.autograd import Variable
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image, ImageDraw
from SPPE.src.utils.img import load_image, cropBox, im_to_torch
from opt import opt
from yolo.preprocess import prep_image, prep_frame, inp_to_image
from pPose_nms import pose_nms, write_json
from SPPE.src.utils.eval import getPrediction
from yolo.util import write_results, dynamic_write_results
from yolo.darknet import Darknet
from tqdm import tqdm
import cv2
import json
import numpy as np
import sys
import time
import torch.multiprocessing as mp
from multiprocessing import Process
from multiprocessing import Queue as pQueue
from threading import Thread
# import the Queue class from Python 3
if sys.version_info >= (3, 0):
from queue import Queue, LifoQueue
# otherwise, import the Queue class for Python 2.7
else:
from Queue import Queue, LifoQueue
if opt.vis_fast:
from fn import vis_frame_fast as vis_frame
else:
from fn import vis_frame
class Image_loader(data.Dataset):
    """torch Dataset over a list of image file names.

    Depending on ``format``, each item is pre-processed either for an SSD
    detector (fixed 512x512, ImageNet-normalized tensor) or for YOLO
    (letterboxed to ``opt.inp_dim`` by ``prep_image``).
    """

    def __init__(self, im_names, format='yolo'):
        super(Image_loader, self).__init__()
        self.img_dir = opt.inputpath
        self.imglist = im_names
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])
        self.format = format

    def getitem_ssd(self, index):
        """Load one image pre-processed for the SSD pipeline."""
        path = os.path.join(self.img_dir,
                            self.imglist[index].rstrip('\n').rstrip('\r'))
        pil_img = Image.open(path)
        inp = load_image(path)
        if pil_img.mode == 'L':
            # grayscale input: the detector expects three channels
            pil_img = pil_img.convert('RGB')
        side = 512
        tensor_img = self.transform(pil_img.resize((side, side)))
        return tensor_img, inp, path

    def getitem_yolo(self, index):
        """Load one image letterboxed for the YOLO detector."""
        path = os.path.join(self.img_dir,
                            self.imglist[index].rstrip('\n').rstrip('\r'))
        im, orig_img, im_dim = prep_image(path, int(opt.inp_dim))
        inp = load_image(path)
        return im, inp, orig_img, path, im_dim

    def __getitem__(self, index):
        """Dispatch on the configured detector format."""
        if self.format == 'ssd':
            return self.getitem_ssd(index)
        if self.format == 'yolo':
            return self.getitem_yolo(index)
        raise NotImplementedError

    def __len__(self):
        return len(self.imglist)
class ImageLoader:
    """Background producer: reads images from disk, pre-processes them, and
    puts whole batches on a bounded queue for the detector to consume.

    The worker runs as a ``Thread`` when ``opt.sp`` (single-process mode)
    or as a ``torch.multiprocessing.Process`` otherwise.
    """
    def __init__(self, im_names, batchSize=1, format='yolo', queueSize=50):
        self.img_dir = opt.inputpath
        self.imglist = im_names
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])
        self.format = format
        self.batchSize = batchSize
        self.datalen = len(self.imglist)
        # round the batch count up for a partial final batch
        leftover = 0
        if (self.datalen) % batchSize:
            leftover = 1
        self.num_batches = self.datalen // batchSize + leftover
        # initialize the queue used to store data
        if opt.sp:
            self.Q = Queue(maxsize=queueSize)
        else:
            self.Q = mp.Queue(maxsize=queueSize)

    def start(self):
        """Spawn the worker matching ``self.format`` and return self."""
        # start a thread to read frames from the file video stream
        if self.format == 'ssd':
            if opt.sp:
                p = Thread(target=self.getitem_ssd, args=())
            else:
                p = mp.Process(target=self.getitem_ssd, args=())
        elif self.format == 'yolo':
            if opt.sp:
                p = Thread(target=self.getitem_yolo, args=())
            else:
                p = mp.Process(target=self.getitem_yolo, args=())
        else:
            raise NotImplementedError
        p.daemon = True
        p.start()
        return self

    def getitem_ssd(self):
        """Worker loop (SSD): queue one (im, inp, im_name) tuple per image."""
        length = len(self.imglist)
        for index in range(length):
            im_name = self.imglist[index].rstrip('\n').rstrip('\r')
            im_name = os.path.join(self.img_dir, im_name)
            im = Image.open(im_name)
            inp = load_image(im_name)
            if im.mode == 'L':
                # grayscale input: the detector expects three channels
                im = im.convert('RGB')
            ow = oh = 512
            im = im.resize((ow, oh))
            im = self.transform(im)
            # back off while the consumer is behind
            while self.Q.full():
                time.sleep(2)
            self.Q.put((im, inp, im_name))

    def getitem_yolo(self):
        """Worker loop (YOLO): queue letterboxed batches of images as
        (img, orig_img, im_name, im_dim_list) tuples."""
        for i in range(self.num_batches):
            img = []
            orig_img = []
            im_name = []
            im_dim_list = []
            for k in range(i*self.batchSize, min((i + 1)*self.batchSize, self.datalen)):
                inp_dim = int(opt.inp_dim)
                im_name_k = self.imglist[k].rstrip('\n').rstrip('\r')
                im_name_k = os.path.join(self.img_dir, im_name_k)
                img_k, orig_img_k, im_dim_list_k = prep_image(im_name_k, inp_dim)
                img.append(img_k)
                orig_img.append(orig_img_k)
                im_name.append(im_name_k)
                im_dim_list.append(im_dim_list_k)
            with torch.no_grad():
                # Human Detection
                img = torch.cat(img)
                # (w, h) repeated to (w, h, w, h) for box rescaling downstream
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
                im_dim_list_ = im_dim_list
                while self.Q.full():
                    time.sleep(2)
                self.Q.put((img, orig_img, im_name, im_dim_list))

    def getitem(self):
        # blocking read of the next pre-processed batch
        return self.Q.get()

    def length(self):
        # total number of images
        return len(self.imglist)

    def len(self):
        # current queue depth
        return self.Q.qsize()
class VideoLoader:
    """Decodes a video file in a background worker and queues letterboxed
    frame batches for the detector.

    The worker is a ``Thread`` when ``opt.sp`` else a multiprocessing
    ``Process``; since a capture handle cannot cross a process boundary,
    ``update()`` re-opens ``self.path`` itself.
    """
    def __init__(self, path, batchSize=1, queueSize=50):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.path = path
        self.stream = cv2.VideoCapture(path)
        assert self.stream.isOpened(), 'Cannot capture source'
        self.stopped = False
        self.batchSize = batchSize
        self.datalen = int(self.stream.get(cv2.CAP_PROP_FRAME_COUNT))
        # round the batch count up for a partial final batch
        leftover = 0
        if (self.datalen) % batchSize:
            leftover = 1
        self.num_batches = self.datalen // batchSize + leftover
        # initialize the queue used to store frames read from
        # the video file
        if opt.sp:
            self.Q = Queue(maxsize=queueSize)
        else:
            self.Q = mp.Queue(maxsize=queueSize)

    def length(self):
        # total frame count reported by the container
        return self.datalen

    def start(self):
        # start a thread to read frames from the file video stream
        if opt.sp:
            t = Thread(target=self.update, args=())
            t.daemon = True
            t.start()
        else:
            p = mp.Process(target=self.update, args=())
            p.daemon = True
            p.start()
        return self

    def update(self):
        """Worker: decode frames batch by batch and queue
        (img, orig_img, im_name, im_dim_list) tuples; a
        (None, None, None, None) sentinel marks early end of stream."""
        # re-open the capture locally (required for the mp.Process case)
        stream = cv2.VideoCapture(self.path)
        assert stream.isOpened(), 'Cannot capture source'
        for i in range(self.num_batches):
            img = []
            orig_img = []
            im_name = []
            im_dim_list = []
            for k in range(i*self.batchSize, min((i + 1)*self.batchSize, self.datalen)):
                inp_dim = int(opt.inp_dim)
                (grabbed, frame) = stream.read()
                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file
                if not grabbed:
                    self.Q.put((None, None, None, None))
                    print('===========================> This video get '+str(k)+' frames in total.')
                    sys.stdout.flush()
                    return
                # process and add the frame to the queue
                img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
                img.append(img_k)
                orig_img.append(orig_img_k)
                im_name.append(str(k)+'.jpg')
                im_dim_list.append(im_dim_list_k)
            with torch.no_grad():
                # Human Detection
                img = torch.cat(img)
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
                im_dim_list_ = im_dim_list
                # back off while the consumer is behind
                while self.Q.full():
                    time.sleep(2)
                self.Q.put((img, orig_img, im_name, im_dim_list))

    def videoinfo(self):
        # indicate the video info
        fourcc=int(self.stream.get(cv2.CAP_PROP_FOURCC))
        fps=self.stream.get(cv2.CAP_PROP_FPS)
        frameSize=(int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        return (fourcc,fps,frameSize)

    def getitem(self):
        # return next frame in the queue
        return self.Q.get()

    def len(self):
        # current queue depth
        return self.Q.qsize()
class DetectionLoader:
    """Runs YOLOv3-SPP person detection on batches from a frame loader.

    Consumes (img, orig_img, im_name, im_dim_list) tuples from
    ``dataloder``, rescales detections back into original image
    coordinates, and queues one
    (orig_img, im_name, boxes, scores, inps, pt1, pt2) tuple per frame.
    A 7-tuple of Nones is the end-of-stream sentinel.
    """
    def __init__(self, dataloder, batchSize=1, queueSize=1024):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
        self.det_model.load_weights('models/yolo/yolov3-spp.weights')
        self.det_model.net_info['height'] = opt.inp_dim
        self.det_inp_dim = int(self.det_model.net_info['height'])
        # the network input side must be a positive multiple of 32
        assert self.det_inp_dim % 32 == 0
        assert self.det_inp_dim > 32
        self.det_model.cuda()
        self.det_model.eval()
        self.stopped = False
        self.dataloder = dataloder
        self.batchSize = batchSize
        self.datalen = self.dataloder.length()
        leftover = 0
        if (self.datalen) % batchSize:
            leftover = 1
        self.num_batches = self.datalen // batchSize + leftover
        # initialize the queue used to store frames read from
        # the video file
        if opt.sp:
            self.Q = Queue(maxsize=queueSize)
        else:
            self.Q = mp.Queue(maxsize=queueSize)

    def start(self):
        # start a thread to read frames from the file video stream
        if opt.sp:
            t = Thread(target=self.update, args=())
            t.daemon = True
            t.start()
        else:
            p = mp.Process(target=self.update, args=())
            p.daemon = True
            p.start()
        return self

    def update(self):
        """Worker: detect people in every batch and queue per-frame results."""
        # keep looping the whole dataset
        for i in range(self.num_batches):
            img, orig_img, im_name, im_dim_list = self.dataloder.getitem()
            if img is None:
                # upstream end-of-stream sentinel: propagate and stop
                self.Q.put((None, None, None, None, None, None, None))
                return
            with torch.no_grad():
                # Human Detection
                img = img.cuda()
                prediction = self.det_model(img, CUDA=True)
                # NMS process
                dets = dynamic_write_results(prediction, opt.confidence,
                                             opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
                if isinstance(dets, int) or dets.shape[0] == 0:
                    # no detections anywhere in the batch: emit empty frames
                    for k in range(len(orig_img)):
                        if self.Q.full():
                            # NOTE(review): single `if`+sleep here, unlike the
                            # `while` loop used by the other loaders; Q.put
                            # blocks when full anyway — confirm intended.
                            time.sleep(2)
                        self.Q.put((orig_img[k], im_name[k], None, None, None, None, None))
                    continue
                dets = dets.cpu()
                # keep only the image-size rows matching surviving detections
                im_dim_list = torch.index_select(im_dim_list,0, dets[:, 0].long())
                scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)
                # coordinate transfer: undo letterbox padding and scaling
                dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
                dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
                dets[:, 1:5] /= scaling_factor
                for j in range(dets.shape[0]):
                    # clamp boxes to the original image bounds
                    dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
                    dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
                boxes = dets[:, 1:5]
                scores = dets[:, 5:6]
                for k in range(len(orig_img)):
                    # select this frame's detections (column 0 is batch index)
                    boxes_k = boxes[dets[:,0]==k]
                    if isinstance(boxes_k, int) or boxes_k.shape[0] == 0:
                        if self.Q.full():
                            time.sleep(2)
                        self.Q.put((orig_img[k], im_name[k], None, None, None, None, None))
                        continue
                    # pre-allocated buffers the next stage fills via cropBox
                    inps = torch.zeros(boxes_k.size(0), 3, opt.inputResH, opt.inputResW)
                    pt1 = torch.zeros(boxes_k.size(0), 2)
                    pt2 = torch.zeros(boxes_k.size(0), 2)
                    if self.Q.full():
                        time.sleep(2)
                    self.Q.put((orig_img[k], im_name[k], boxes_k, scores[dets[:,0]==k], inps, pt1, pt2))

    def read(self):
        # return next frame in the queue
        return self.Q.get()

    def len(self):
        # return queue len
        return self.Q.qsize()
class DetectionProcessor:
    """Crops detected people out of frames so the pose network can run on
    fixed-size person patches.

    Consumes 7-tuples from ``detectionLoader`` and queues
    (inps, orig_img, im_name, boxes, scores, pt1, pt2) per frame.
    """
    def __init__(self, detectionLoader, queueSize=1024):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.detectionLoader = detectionLoader
        self.stopped = False
        self.datalen = self.detectionLoader.datalen
        # initialize the queue used to store data
        if opt.sp:
            self.Q = Queue(maxsize=queueSize)
        else:
            # NOTE: plain multiprocessing.Queue (aliased pQueue) here, unlike
            # the torch.multiprocessing queues used by the other loaders.
            self.Q = pQueue(maxsize=queueSize)

    def start(self):
        # start a thread to read frames from the file video stream
        if opt.sp:
            t = Thread(target=self.update, args=())
            t.daemon = True
            t.start()
        else:
            p = mp.Process(target=self.update, args=())
            p.daemon = True
            p.start()
        return self

    def update(self):
        """Worker: crop every detected box into a pose-net input tensor."""
        # keep looping the whole dataset
        for i in range(self.datalen):
            with torch.no_grad():
                (orig_img, im_name, boxes, scores, inps, pt1, pt2) = self.detectionLoader.read()
                if orig_img is None:
                    # end-of-stream sentinel from the detector: propagate
                    self.Q.put((None, None, None, None, None, None, None))
                    return
                if boxes is None or boxes.nelement() == 0:
                    # frame without any person detection
                    while self.Q.full():
                        time.sleep(0.2)
                    self.Q.put((None, orig_img, im_name, boxes, scores, None, None))
                    continue
                # OpenCV frames are BGR; the pose model expects RGB
                inp = im_to_torch(cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB))
                inps, pt1, pt2 = crop_from_dets(inp, boxes, inps, pt1, pt2)
                while self.Q.full():
                    time.sleep(0.2)
                self.Q.put((inps, orig_img, im_name, boxes, scores, pt1, pt2))

    def read(self):
        # return next frame in the queue
        return self.Q.get()

    def len(self):
        # return queue len
        return self.Q.qsize()
class VideoDetectionLoader:
    """Combined loader: decodes a video and runs YOLOv3-SPP person detection
    in a single background thread, queueing (inp, orig_img, boxes, scores)
    per frame."""
    def __init__(self, path, batchSize=4, queueSize=256):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
        self.det_model.load_weights('models/yolo/yolov3-spp.weights')
        self.det_model.net_info['height'] = opt.inp_dim
        self.det_inp_dim = int(self.det_model.net_info['height'])
        # the network input side must be a positive multiple of 32
        assert self.det_inp_dim % 32 == 0
        assert self.det_inp_dim > 32
        self.det_model.cuda()
        self.det_model.eval()
        self.stream = cv2.VideoCapture(path)
        assert self.stream.isOpened(), 'Cannot capture source'
        self.stopped = False
        self.batchSize = batchSize
        self.datalen = int(self.stream.get(cv2.CAP_PROP_FRAME_COUNT))
        # round the batch count up for a partial final batch
        leftover = 0
        if (self.datalen) % batchSize:
            leftover = 1
        self.num_batches = self.datalen // batchSize + leftover
        # initialize the queue used to store frames read from
        # the video file
        self.Q = Queue(maxsize=queueSize)

    def length(self):
        # total frame count reported by the container
        return self.datalen

    def len(self):
        # current queue depth
        return self.Q.qsize()

    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        """Worker: decode a batch, detect people, rescale boxes back to the
        original frame, and queue per-frame results."""
        # keep looping the whole video
        for i in range(self.num_batches):
            img = []
            inp = []
            orig_img = []
            im_name = []
            im_dim_list = []
            for k in range(i*self.batchSize, min((i + 1)*self.batchSize, self.datalen)):
                (grabbed, frame) = self.stream.read()
                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file
                if not grabbed:
                    self.stop()
                    return
                # process and add the frame to the queue
                inp_dim = int(opt.inp_dim)
                img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
                inp_k = im_to_torch(orig_img_k)
                img.append(img_k)
                inp.append(inp_k)
                orig_img.append(orig_img_k)
                im_dim_list.append(im_dim_list_k)
            with torch.no_grad():
                ht = inp[0].size(1)
                wd = inp[0].size(2)
                # Human Detection
                img = Variable(torch.cat(img)).cuda()
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
                im_dim_list = im_dim_list.cuda()
                prediction = self.det_model(img, CUDA=True)
                # NMS process
                dets = dynamic_write_results(prediction, opt.confidence,
                                             opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
                if isinstance(dets, int) or dets.shape[0] == 0:
                    # whole batch had no detections: queue empty frame results
                    for k in range(len(inp)):
                        while self.Q.full():
                            time.sleep(0.2)
                        self.Q.put((inp[k], orig_img[k], None, None))
                    continue
                im_dim_list = torch.index_select(im_dim_list,0, dets[:, 0].long())
                scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)
                # coordinate transfer: undo letterbox padding and scaling
                dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
                dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
                dets[:, 1:5] /= scaling_factor
                for j in range(dets.shape[0]):
                    # clamp boxes to the original image bounds
                    dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
                    dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
                boxes = dets[:, 1:5].cpu()
                scores = dets[:, 5:6].cpu()
                for k in range(len(inp)):
                    while self.Q.full():
                        time.sleep(0.2)
                    # column 0 of dets is the in-batch image index
                    self.Q.put((inp[k], orig_img[k], boxes[dets[:,0]==k], scores[dets[:,0]==k]))

    def videoinfo(self):
        # indicate the video info
        fourcc=int(self.stream.get(cv2.CAP_PROP_FOURCC))
        fps=self.stream.get(cv2.CAP_PROP_FPS)
        frameSize=(int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        return (fourcc,fps,frameSize)

    def read(self):
        # return next frame in the queue
        return self.Q.get()

    def more(self):
        # return True if there are still frames in the queue
        return self.Q.qsize() > 0

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
class WebcamLoader:
    """Captures webcam frames into a LIFO queue so consumers always see the
    most recent frame; when the queue fills up, all stale frames are
    discarded rather than blocking capture."""
    def __init__(self, webcam, queueSize=256):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.stream = cv2.VideoCapture(int(webcam))
        assert self.stream.isOpened(), 'Cannot capture source'
        self.stopped = False
        # initialize the queue used to store frames read from
        # the video file (LIFO: newest frame is served first)
        self.Q = LifoQueue(maxsize=queueSize)

    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        """Worker: capture, pre-process and queue frames forever."""
        # keep looping infinitely
        while True:
            # otherwise, ensure the queue has room in it
            if not self.Q.full():
                # read the next frame from the file
                (grabbed, frame) = self.stream.read()
                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file
                if not grabbed:
                    self.stop()
                    return
                # process and add the frame to the queue
                inp_dim = int(opt.inp_dim)
                img, orig_img, dim = prep_frame(frame, inp_dim)
                inp = im_to_torch(orig_img)
                im_dim_list = torch.FloatTensor([dim]).repeat(1, 2)
                self.Q.put((img, orig_img, inp, im_dim_list))
            else:
                # queue full: drop every buffered (stale) frame
                with self.Q.mutex:
                    self.Q.queue.clear()

    def videoinfo(self):
        # indicate the video info
        fourcc=int(self.stream.get(cv2.CAP_PROP_FOURCC))
        fps=self.stream.get(cv2.CAP_PROP_FPS)
        frameSize=(int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        return (fourcc,fps,frameSize)

    def read(self):
        # return next frame in the queue
        return self.Q.get()

    def len(self):
        # return queue size
        return self.Q.qsize()

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
class DataWriter:
    """Consumer thread: runs pose post-processing on queued detection
    results and renders, saves and/or displays the annotated frames.

    Per-frame results accumulate in ``final_result`` for the caller to
    collect via ``results()`` after ``stop()``.
    """
    def __init__(self, save_video=False,
                 savepath='examples/res/1.avi', fourcc=cv2.VideoWriter_fourcc(*'XVID'), fps=25, frameSize=(640,480),
                 queueSize=1024):
        if save_video:
            # initialize the file video stream along with the boolean
            # used to indicate if the thread should be stopped or not
            self.stream = cv2.VideoWriter(savepath, fourcc, fps, frameSize)
            assert self.stream.isOpened(), 'Cannot open video for writing'
        self.save_video = save_video
        self.stopped = False
        self.final_result = []
        # initialize the queue used to store frames read from
        # the video file
        self.Q = Queue(maxsize=queueSize)
        if opt.save_img:
            if not os.path.exists(opt.outputpath + '/vis'):
                os.mkdir(opt.outputpath + '/vis')

    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        """Worker: drain the queue, run pose NMS and rendering per frame."""
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty
            if not self.Q.empty():
                (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()
                orig_img = np.array(orig_img, dtype=np.uint8)
                if boxes is None:
                    # frame without detections: show/save the raw frame
                    if opt.save_img or opt.save_video or opt.vis:
                        img = orig_img
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        if opt.save_video:
                            self.stream.write(img)
                else:
                    # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                    preds_hm, preds_img, preds_scores = getPrediction(
                        hm_data, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
                    result = pose_nms(boxes, scores, preds_img, preds_scores)
                    result = {
                        'imgname': im_name,
                        'result': result
                    }
                    self.final_result.append(result)
                    if opt.save_img or opt.save_video or opt.vis:
                        img = vis_frame(orig_img, result)
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        if opt.save_video:
                            self.stream.write(img)
            else:
                # queue empty: back off briefly
                time.sleep(0.1)

    def running(self):
        # indicate that the thread is still running
        time.sleep(0.2)
        return not self.Q.empty()

    def save(self, boxes, scores, hm_data, pt1, pt2, orig_img, im_name):
        # save next frame in the queue
        self.Q.put((boxes, scores, hm_data, pt1, pt2, orig_img, im_name))

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
        time.sleep(0.2)

    def results(self):
        # return final result
        return self.final_result

    def len(self):
        # return queue len
        return self.Q.qsize()
class Mscoco(data.Dataset):
    """Skeleton/keypoint configuration for the COCO (+MPII) dataset.

    Only the joint layout and augmentation hyper-parameters are stored;
    item access is deliberately unimplemented (inference-only usage).
    """

    def __init__(self, train=True, sigma=1,
                 scale_factor=(0.2, 0.3), rot_factor=40, label_type='Gaussian'):
        self.img_folder = '../data/coco/images'  # root image folders
        self.is_train = train                    # training set or test set
        self.inputResH = opt.inputResH
        self.inputResW = opt.inputResW
        self.outputResH = opt.outputResH
        self.outputResW = opt.outputResW
        self.sigma = sigma
        self.scale_factor = scale_factor
        self.rot_factor = rot_factor
        self.label_type = label_type
        self.nJoints_coco = 17
        self.nJoints_mpii = 16
        self.nJoints = 33
        # 1-based keypoint ids used when computing accuracy
        self.accIdxs = tuple(range(1, 18))
        # left/right keypoint pairs swapped on horizontal flip
        self.flipRef = tuple((i, i + 1) for i in range(2, 18, 2))

    def __getitem__(self, index):
        pass

    def __len__(self):
        pass
def crop_from_dets(img, boxes, inps, pt1, pt2):
    '''
    Crop human from origin image according to Dectecion Results

    img:   CHW float tensor of the whole frame
    boxes: per-person xyxy boxes; inps/pt1/pt2 are pre-allocated output
           buffers that are filled in place and also returned.
    '''
    imght = img.size(1)
    imgwidth = img.size(2)
    # NOTE(review): `tmp_img = img` is an alias, not a copy — the per-channel
    # mean subtraction below mutates the caller's tensor in place.
    tmp_img = img
    tmp_img[0].add_(-0.406)
    tmp_img[1].add_(-0.457)
    tmp_img[2].add_(-0.480)
    for i, box in enumerate(boxes):
        upLeft = torch.Tensor(
            (float(box[0]), float(box[1])))
        bottomRight = torch.Tensor(
            (float(box[2]), float(box[3])))
        ht = bottomRight[1] - upLeft[1]
        width = bottomRight[0] - upLeft[0]
        # pad the box by 20% (30% for narrow boxes) before cropping
        if width > 100:
            scaleRate = 0.2
        else:
            scaleRate = 0.3
        upLeft[0] = max(0, upLeft[0] - width * scaleRate / 2)
        upLeft[1] = max(0, upLeft[1] - ht * scaleRate / 2)
        # clamp to the image while keeping at least a 5px-wide/tall box
        bottomRight[0] = max(
            min(imgwidth - 1, bottomRight[0] + width * scaleRate / 2), upLeft[0] + 5)
        bottomRight[1] = max(
            min(imght - 1, bottomRight[1] + ht * scaleRate / 2), upLeft[1] + 5)
        try:
            inps[i] = cropBox(tmp_img, upLeft, bottomRight, opt.inputResH, opt.inputResW)
        except IndexError:
            # debug aid: dump the offending crop; inps[i] is left untouched
            print(tmp_img.shape)
            print(upLeft)
            print(bottomRight)
            print('===')
        pt1[i] = upLeft
        pt2[i] = bottomRight
    return inps, pt1, pt2
|
test_insert.py | import pytest
from pymilvus import DataType, ParamError, BaseException
from utils import *
from constants import *
ADD_TIMEOUT = 60  # per-test timeout (seconds) for insert-related cases
uid = "test_insert"  # prefix for generated unique collection names
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
# Default top-10 L2 vector query reused by the search smoke tests.
default_single_query = {
    "bool": {
        "must": [
            {"vector": {field_name: {"topk": 10, "query": gen_vectors(1, default_dim), "metric_type": "L2",
                                     "params": {"nprobe": 10}}}}
        ]
    }
}
class TestInsertBase:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
    scope="function",
    params=gen_simple_index()
)
def get_simple_index(self, request, connect):
    """Parametrized over every simple index config; skips configs that the
    current (CPU) deployment cannot build."""
    # if str(connect._cmd("mode")) == "CPU":
    if request.param["index_type"] in index_cpu_not_support():
        pytest.skip("CPU not support index_type: ivf_sq8h")
    logging.getLogger().info(request.param)
    return request.param
@pytest.fixture(
    scope="function",
    params=gen_single_filter_fields()
)
def get_filter_field(self, request):
    # Yields each supported scalar (filter) field schema in turn.
    yield request.param
@pytest.fixture(
    scope="function",
    params=gen_single_vector_fields()
)
def get_vector_field(self, request):
    # Yields each supported vector field schema in turn.
    yield request.param
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_empty_entity(self, connect, collection):
    """
    target: test insert with empty entity list
    method: set empty entity list as insert method params
    expected: raises a ParamError exception
    """
    with pytest.raises(ParamError):
        connect.insert(collection, [])
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_None(self, connect, collection):
    """
    target: test insert with None
    method: set None as insert method params
    expected: raises a ParamError
    """
    with pytest.raises(Exception):
        connect.insert(collection, None)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_collection_not_existed(self, connect):
    """
    target: insert into a collection that does not exist
    method: insert entities into a randomly named collection
    expected: a BaseException is raised
    """
    missing_name = gen_unique_str(uid)
    with pytest.raises(BaseException):
        connect.insert(missing_name, default_entities)
@pytest.mark.level(2)
def test_insert_without_connect(self, dis_connect, collection):
    """
    target: insert entities through a dropped connection
    method: call insert via the disconnected client fixture
    expected: an exception is raised
    """
    with pytest.raises(Exception):
        dis_connect.insert(collection, default_entities)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_drop_collection(self, connect, collection):
    """
    target: test delete collection after insert entities
    method: insert entities and drop collection
    expected: has_collection false
    """
    ids = connect.insert(collection, default_entity)
    assert len(ids) == 1
    connect.drop_collection(collection)
    # idiomatic: `assert not x` instead of comparing to False with `==`
    assert not connect.has_collection(collection)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_flush_drop_collection(self, connect, collection):
    """
    target: test drop collection after insert entities for a while
    method: insert entities, flush, and delete collection
    expected: has_collection false
    """
    ids = connect.insert(collection, default_entity)
    assert len(ids) == 1
    connect.flush([collection])
    connect.drop_collection(collection)
    # idiomatic: `assert not x` instead of comparing to False with `==`
    assert not connect.has_collection(collection)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_create_index(self, connect, collection, get_simple_index):
    """
    target: build an index after inserting entities
    method: insert entities, flush, then create_index
    expected: no error raised; described index matches the request
    """
    inserted_ids = connect.insert(collection, default_entities)
    assert len(inserted_ids) == default_nb
    connect.flush([collection])
    connect.create_index(collection, field_name, get_simple_index)
    if get_simple_index["index_type"] != "FLAT":
        described = connect.describe_index(collection, "")
        create_target_index(get_simple_index, field_name)
        assert described == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_after_create_index(self, connect, collection, get_simple_index):
    """
    target: insert entities after an index has been built
    method: create_index first, then insert
    expected: no error raised; described index matches the request
    """
    connect.create_index(collection, field_name, get_simple_index)
    inserted_ids = connect.insert(collection, default_entities)
    assert len(inserted_ids) == default_nb
    if get_simple_index["index_type"] != "FLAT":
        described = connect.describe_index(collection, "")
        create_target_index(get_simple_index, field_name)
        assert described == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_search(self, connect, collection):
    """
    target: search a collection shortly after inserting into it
    method: insert entities, flush, load, then run the default query
    expected: search succeeds and returns default_top_k hits
    """
    connect.insert(collection, default_entities)
    connect.flush([collection])
    connect.load_collection(collection)
    results = connect.search(collection, default_single_query)
    assert len(results[0]) == default_top_k
def _test_insert_segment_row_count(self, connect, collection):
    # Disabled (leading underscore). Verifies that inserting one row more
    # than the segment limit spills into a second segment.
    nb = default_segment_row_limit + 1
    res_ids = connect.insert(collection, gen_entities(nb))
    connect.flush([collection])
    assert len(res_ids) == nb
    stats = connect.get_collection_stats(collection)
    segments = stats['partitions'][0]['segments']
    assert len(segments) == 2
    for segment in segments:
        assert segment['row_count'] in [default_segment_row_limit, 1]
@pytest.fixture(
    scope="function",
    params=[
        1,
        2000
    ],
)
def insert_count(self, request):
    # Number of entities to insert: a single row and a larger batch.
    yield request.param
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_ids(self, connect, id_collection, insert_count):
    """
    target: insert entities with caller-supplied ids
    method: insert `insert_count` entities with explicit ids and flush
    expected: returned ids echo the input; row count matches
    """
    nb = insert_count
    custom_ids = list(range(nb))
    res_ids = connect.insert(id_collection, gen_entities(nb), custom_ids)
    connect.flush([id_collection])
    assert len(res_ids) == nb
    assert res_ids == custom_ids
    stats = connect.get_collection_stats(id_collection)
    assert stats[row_count] == nb
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_the_same_ids(self, connect, id_collection, insert_count):
    """
    target: insert entities that all share a single custom id
    method: insert `insert_count` entities, every row with id 1
    expected: insert succeeds; row count matches
    """
    nb = insert_count
    duplicate_ids = [1] * nb
    res_ids = connect.insert(id_collection, gen_entities(nb), duplicate_ids)
    connect.flush([id_collection])
    assert len(res_ids) == nb
    assert res_ids == duplicate_ids
    stats = connect.get_collection_stats(id_collection)
    assert stats[row_count] == nb
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_ids_fields(self, connect, get_filter_field, get_vector_field):
    """
    target: create collections over varying field combos, insert with ids
    method: build a collection from each filter/vector field pair, insert,
            flush and count
    expected: returned ids echo the input; row count is correct
    """
    nb = 5
    collection_name = gen_unique_str("test_collection")
    fields = {
        "fields": [get_filter_field, get_vector_field],
        "auto_id": False
    }
    connect.create_collection(collection_name, fields)
    custom_ids = list(range(nb))
    entities = gen_entities_by_fields(fields["fields"], nb, default_dim)
    logging.getLogger().info(entities)
    res_ids = connect.insert(collection_name, entities, custom_ids)
    assert res_ids == custom_ids
    connect.flush([collection_name])
    stats = connect.get_collection_stats(collection_name)
    assert stats[row_count] == nb
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_not_match(self, connect, id_collection, insert_count):
    """
    target: insert into an explicit-id collection without supplying ids
    method: insert `insert_count` entities with no ids argument
    expected: an exception is raised
    """
    with pytest.raises(Exception):
        connect.insert(id_collection, gen_entities(insert_count))
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_twice_ids_no_ids(self, connect, id_collection):
    """
    target: mixed id usage across two inserts
    method: first insert with custom ids, then insert without ids
    expected: the second insert raises
    """
    connect.insert(id_collection, default_entities, list(range(default_nb)))
    with pytest.raises(Exception):
        connect.insert(id_collection, default_entities)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_not_ids(self, connect, id_collection):
    """
    target: insert without ids into an explicit-id collection
    method: call insert with no ids argument
    expected: an error is raised
    """
    with pytest.raises(Exception):
        connect.insert(id_collection, default_entities)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_ids_length_not_match_batch(self, connect, id_collection):
    '''
    target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
    method: create collection and insert vectors in it
    expected: raise an exception
    '''
    # range starts at 1, so ids is one element short of default_nb
    ids = [i for i in range(1, default_nb)]
    logging.getLogger().info(len(ids))
    with pytest.raises(Exception) as e:
        connect.insert(id_collection, default_entities, ids)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_length_not_match_single(self, connect, id_collection):
    '''
    target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
    method: create collection and insert vectors in it
    expected: raise an exception
    '''
    # default_nb - 1 ids against a single entity: lengths cannot match
    ids = [i for i in range(1, default_nb)]
    logging.getLogger().info(len(ids))
    with pytest.raises(BaseException) as e:
        connect.insert(id_collection, default_entity, ids)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_partition(self, connect, collection):
    '''
    target: test insert entities in collection created before
    method: create collection and insert entities in it, with the partition_name param
    expected: the collection row count equals to nq
    '''
    connect.create_partition(collection, default_tag)
    ids = connect.insert(collection, default_entities, partition_name=default_tag)
    assert len(ids) == default_nb
    # the target partition must still exist after the insert
    assert connect.has_partition(collection, default_tag)
    connect.flush([collection])
    stats = connect.get_collection_stats(collection)
    assert stats[row_count] == default_nb
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_partition_with_ids(self, connect, id_collection):
    '''
    target: test insert entities in collection created before, insert with ids
    method: create collection and insert entities in it, with the partition_name param
    expected: the collection row count equals to nq
    '''
    connect.create_partition(id_collection, default_tag)
    ids = [i for i in range(default_nb)]
    res_ids = connect.insert(id_collection, gen_entities(default_nb), ids=ids, partition_name=default_tag)
    assert res_ids == ids
    # NOTE(review): unlike the sibling partition tests, this one never flushes or
    # checks row count, so the docstring's "row count equals to nq" is unverified.
    logging.getLogger().info(connect.describe_collection(id_collection))
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_default_partition(self, connect, collection):
    '''
    target: test insert entities into default partition
    method: create partition and insert info collection without tag params
    expected: the collection row count equals to nb
    '''
    # explicitly targeting the built-in default partition must behave like a plain insert
    ids = connect.insert(collection, default_entities, partition_name=default_partition_name)
    assert len(ids) == default_nb
    connect.flush([collection])
    stats = connect.get_collection_stats(collection)
    assert stats[row_count] == default_nb
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_partition_not_existed(self, connect, collection):
    '''
    target: test insert entities in collection created before
    method: create collection and insert entities in it, with the not existed partition_name param
    expected: error raised
    '''
    # random tag was never created as a partition, so the insert must fail
    tag = gen_unique_str()
    with pytest.raises(Exception) as e:
        connect.insert(collection, default_entities, partition_name=tag)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_partition_repeatedly(self, connect, collection):
    '''
    target: test inserting into the same partition repeatedly
    method: create a partition and insert the same batch into it twice
    expected: the collection row count equals 2 * default_nb
    '''
    connect.create_partition(collection, default_tag)
    # the returned ids are irrelevant here (they were previously bound and
    # immediately discarded); only the final row count matters
    connect.insert(collection, default_entities, partition_name=default_tag)
    connect.insert(collection, default_entities, partition_name=default_tag)
    connect.flush([collection])
    res = connect.get_collection_stats(collection)
    assert res[row_count] == 2 * default_nb
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_dim_not_matched(self, connect, collection):
    '''
    target: test insert entities, the vector dimension is not equal to the collection dimension
    method: the entities dimension is half of the collection dimension, check the status
    expected: error raised
    '''
    # build vectors at half the collection's dimension and swap them into the
    # last (vector) field of a copied entity batch
    vectors = gen_vectors(default_nb, int(default_dim) // 2)
    insert_entities = copy.deepcopy(default_entities)
    insert_entities[-1]["values"] = vectors
    with pytest.raises(Exception) as e:
        connect.insert(collection, insert_entities)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_field_name_not_match(self, connect, collection):
    '''
    target: test insert entities, with the entity field name updated
    method: update entity field name
    expected: error raised
    '''
    # rename the "int64" field so it no longer matches the collection schema
    tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", "int64new")
    with pytest.raises(Exception):
        connect.insert(collection, tmp_entity)
@pytest.mark.level(2)
def test_insert_with_field_type_not_match(self, connect, collection):
    '''
    target: test insert entities, with the entity field type updated
    method: update entity field type
    expected: error raised
    '''
    # declare the "int64" field as FLOAT so the type conflicts with the schema
    tmp_entity = update_field_type(copy.deepcopy(default_entity), "int64", DataType.FLOAT)
    with pytest.raises(Exception):
        connect.insert(collection, tmp_entity)
@pytest.mark.level(2)
def test_insert_with_field_value_not_match(self, connect, collection):
    '''
    target: test insert entities, with the entity field value updated
    method: update entity field value
    expected: error raised
    '''
    # put a string value into the FLOAT field
    tmp_entity = update_field_value(copy.deepcopy(default_entity), DataType.FLOAT, 's')
    with pytest.raises(Exception):
        connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_field_more(self, connect, collection):
    '''
    target: test insert entities, with more fields than collection schema
    method: add entity field
    expected: error raised
    '''
    # one extra scalar field not present in the schema
    tmp_entity = add_field(copy.deepcopy(default_entity))
    with pytest.raises(Exception):
        connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_field_vector_more(self, connect, collection):
    '''
    target: test insert entities, with more fields than collection schema
    method: add entity vector field
    expected: error raised
    '''
    # one extra vector field not present in the schema
    tmp_entity = add_vector_field(default_nb, default_dim)
    with pytest.raises(Exception):
        connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_field_less(self, connect, collection):
    '''
    target: test insert entities, with less fields than collection schema
    method: remove entity field
    expected: error raised
    '''
    # drop one scalar field that the schema requires
    tmp_entity = remove_field(copy.deepcopy(default_entity))
    with pytest.raises(Exception):
        connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_field_vector_less(self, connect, collection):
    '''
    target: test insert entities, with less fields than collection schema
    method: remove entity vector field
    expected: error raised
    '''
    # drop the vector field that the schema requires
    tmp_entity = remove_vector_field(copy.deepcopy(default_entity))
    with pytest.raises(Exception):
        connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_no_field_vector_value(self, connect, collection):
    '''
    target: test insert entities, with no vector field value
    method: remove entity values of vector field
    expected: error raised
    '''
    tmp_entity = copy.deepcopy(default_entity)
    # the last entry is the vector field; strip its "values" key
    del tmp_entity[-1]["values"]
    with pytest.raises(Exception):
        connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_no_field_vector_type(self, connect, collection):
    '''
    target: test insert entities, with no vector field type
    method: remove entity vector field
    expected: error raised
    '''
    tmp_entity = copy.deepcopy(default_entity)
    # the last entry is the vector field; strip its "type" key
    del tmp_entity[-1]["type"]
    with pytest.raises(Exception):
        connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_no_field_vector_name(self, connect, collection):
    '''
    target: test insert entities, with no vector field name
    method: remove entity vector field
    expected: error raised
    '''
    tmp_entity = copy.deepcopy(default_entity)
    # the last entry is the vector field; strip its "name" key
    del tmp_entity[-1]["name"]
    with pytest.raises(Exception):
        connect.insert(collection, tmp_entity)
# todo fix timeout
@pytest.mark.level(2)
@pytest.mark.timeout(30)
def test_collection_insert_rows_count_multi_threading(self, args, collection):
    '''
    target: test collection rows_count is correct or not with multi threading
    method: create collection and insert entities in it(idmap),
        assert the value returned by count_entities method is equal to length of entities
    expected: the count is equal to the length of entities
    '''
    if args["handler"] == "HTTP":
        pytest.skip("Skip test in http mode")
    thread_num = 8
    threads = []
    # dedicated client shared by all worker threads (connection deferred)
    milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"], try_connect=False)

    def insert(thread_i):
        # each thread inserts one full default_entities batch and flushes
        logging.getLogger().info("In thread-%d" % thread_i)
        res_ids = milvus.insert(collection, default_entities)
        milvus.flush([collection])

    for i in range(thread_num):
        x = threading.Thread(target=insert, args=(i,))
        threads.append(x)
        x.start()
    # wait for every worker before reading the final row count
    for th in threads:
        th.join()
    stats = milvus.get_collection_stats(collection)
    assert stats[row_count] == thread_num * default_nb
# TODO: unable to set config
@pytest.mark.level(2)
def _test_insert_disable_auto_flush(self, connect, collection):
    '''
    target: test insert entities, with disable autoflush
    method: disable autoflush and insert, get entity
    expected: the count is equal to 0
    '''
    # disabled test (leading underscore): auto-flush config cannot be set yet
    delete_nums = 500
    disable_flush(connect)
    ids = connect.insert(collection, default_entities)
    res = connect.get_entity_by_id(collection, ids[:delete_nums])
    assert len(res) == delete_nums
    # NOTE(review): with auto-flush disabled the fetched entities are expected
    # to be placeholders (None) because nothing was persisted yet — confirm.
    assert res[0] is None
class TestInsertBinary:
    """Insert tests against binary-vector collections (JACCARD metric)."""

    @pytest.fixture(
        scope="function",
        params=gen_binary_index()
    )
    def get_binary_index(self, request):
        # binary indexes require a binary metric; force JACCARD for every param set
        request.param["metric_type"] = "JACCARD"
        return request.param

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_insert_binary_entities(self, connect, binary_collection):
        '''
        target: test insert entities in binary collection
        method: create collection and insert binary entities in it
        expected: the collection row count equals to nb
        '''
        ids = connect.insert(binary_collection, default_binary_entities)
        assert len(ids) == default_nb
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats[row_count] == default_nb

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_insert_binary_partition(self, connect, binary_collection):
        '''
        target: test insert entities and create partition tag
        method: create collection and insert binary entities in it, with the partition_name param
        expected: the collection row count equals to nb
        '''
        connect.create_partition(binary_collection, default_tag)
        ids = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
        assert len(ids) == default_nb
        assert connect.has_partition(binary_collection, default_tag)
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats[row_count] == default_nb

    def test_insert_binary_multi_times(self, connect, binary_collection):
        '''
        target: test insert entities multi times and final flush
        method: create collection and insert binary entity multi and final flush
        expected: the collection row count equals to nb
        '''
        # insert one entity at a time, flushing only once at the end
        for i in range(default_nb):
            ids = connect.insert(binary_collection, default_binary_entity)
            assert len(ids) == 1
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats[row_count] == default_nb

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
        '''
        target: test insert binary entities after build index
        method: build index and insert entities
        expected: no error raised
        '''
        connect.create_index(binary_collection, binary_field_name, get_binary_index)
        ids = connect.insert(binary_collection, default_binary_entities)
        assert len(ids) == default_nb
        connect.flush([binary_collection])
        # the index described by the server must match what was requested
        index = connect.describe_index(binary_collection, "")
        create_target_index(get_binary_index, binary_field_name)
        assert index == get_binary_index

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
        '''
        target: test build index insert after vector
        method: insert vector and build index
        expected: no error raised
        '''
        ids = connect.insert(binary_collection, default_binary_entities)
        assert len(ids) == default_nb
        connect.flush([binary_collection])
        connect.create_index(binary_collection, binary_field_name, get_binary_index)
        index = connect.describe_index(binary_collection, "")
        create_target_index(get_binary_index, binary_field_name)
        assert index == get_binary_index

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_insert_binary_search(self, connect, binary_collection):
        '''
        target: test search vector after insert vector after a while
        method: insert vector, sleep, and search collection
        expected: no error raised
        '''
        ids = connect.insert(binary_collection, default_binary_entities)
        connect.flush([binary_collection])
        query, vecs = gen_query_vectors(binary_field_name, default_binary_entities, default_top_k, 1,
                                        metric_type="JACCARD")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, query)
        logging.getLogger().debug(res)
        assert len(res[0]) == default_top_k
class TestInsertAsync:
    """Asynchronous insert tests (``_async=True``); skipped under the HTTP handler."""

    @pytest.fixture(scope="function", autouse=True)
    def skip_http_check(self, args):
        # async futures are not supported by the HTTP handler
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")

    @pytest.fixture(
        scope="function",
        params=[
            1,
            1000
        ],
    )
    def insert_count(self, request):
        yield request.param

    def check_status(self, result):
        # callback for calls that are expected to fail or time out
        logging.getLogger().info("In callback check status")
        assert not result

    def check_result(self, result):
        # callback for calls that are expected to succeed
        logging.getLogger().info("In callback check results")
        assert result

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_insert_async(self, connect, collection, insert_count):
        '''
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        '''
        nb = insert_count
        future = connect.insert(collection, gen_entities(nb), _async=True)
        ids = future.result()
        connect.flush([collection])
        assert len(ids) == nb

    @pytest.mark.level(2)
    def test_insert_async_false(self, connect, collection, insert_count):
        '''
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        '''
        nb = insert_count
        # _async=False returns the ids directly rather than a future
        ids = connect.insert(collection, gen_entities(nb), _async=False)
        # ids = future.result()
        connect.flush([collection])
        assert len(ids) == nb

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_insert_async_callback(self, connect, collection, insert_count):
        '''
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        '''
        nb = insert_count
        future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_result)
        future.done()
        ids = future.result()
        assert len(ids) == nb

    @pytest.mark.level(2)
    def test_insert_async_long(self, connect, collection):
        '''
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        '''
        # large batch to exercise the async path under load
        nb = 50000
        future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_result)
        ids = future.result()
        assert len(ids) == nb
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        logging.getLogger().info(stats)
        assert stats[row_count] == nb

    @pytest.mark.level(2)
    def test_insert_async_callback_timeout(self, connect, collection):
        '''
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        '''
        # 1-second timeout on a 100k-entity insert is expected to expire
        nb = 100000
        future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_status, timeout=1)
        with pytest.raises(Exception) as e:
            result = future.result()

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_insert_async_invalid_params(self, connect):
        '''
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        '''
        # collection was never created; the error surfaces when the future resolves
        collection_new = gen_unique_str()
        future = connect.insert(collection_new, default_entities, _async=True)
        future.done()
        with pytest.raises(Exception) as e:
            ids = future.result()

    # 1339
    def test_insert_async_invalid_params_raise_exception(self, connect, collection):
        '''
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        '''
        # an empty entity list is invalid input for an async insert
        entities = []
        future = connect.insert(collection, entities, _async=True)
        future.done()
        with pytest.raises(Exception) as e:
            future.result()
class TestInsertMultiCollections:
    """
    ******************************************************************
      The following cases are used to test `insert` function when
      multiple collections (or interleaved index/search operations)
      are involved.
    ******************************************************************
    """

    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        logging.getLogger().info(request.param)
        # if str(connect._cmd("mode")) == "CPU":
        #     if request.param["index_type"] in index_cpu_not_support():
        #         pytest.skip("sq8h not support in CPU mode")
        return request.param

    def test_insert_entity_multi_collections(self, connect):
        '''
        target: test insert entities
        method: create 10 collections and insert entities into them in turn
        expected: row count
        '''
        collection_num = 10
        collection_list = []
        for i in range(collection_num):
            collection_name = gen_unique_str(uid)
            collection_list.append(collection_name)
            connect.create_collection(collection_name, default_fields)
            ids = connect.insert(collection_name, default_entities)
            connect.flush([collection_name])
            assert len(ids) == default_nb
            stats = connect.get_collection_stats(collection_name)
            assert stats[row_count] == default_nb
        # clean up every collection created by this test
        for i in range(collection_num):
            connect.drop_collection(collection_list[i])

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_drop_collection_insert_entity_another(self, connect, collection):
        '''
        target: test insert vector to collection_1 after collection_2 deleted
        method: delete collection_2 and insert vector to collection_1
        expected: row count equals the length of entities inserted
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        # dropping an unrelated collection must not affect inserts elsewhere
        connect.drop_collection(collection)
        ids = connect.insert(collection_name, default_entity)
        connect.flush([collection_name])
        assert len(ids) == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_create_index_insert_entity_another(self, connect, collection, get_simple_index):
        '''
        target: test insert vector to collection_2 after build index for collection_1
        method: build index and insert vector
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.create_index(collection, field_name, get_simple_index)
        ids = connect.insert(collection_name, default_entity)
        assert len(ids) == 1
        # FLAT is the implicit default and is not reported as an explicit index
        if get_simple_index["index_type"] != "FLAT":
            index = connect.describe_index(collection, "")
            create_target_index(get_simple_index, field_name)
            assert index == get_simple_index
        connect.drop_collection(collection_name)

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_insert_entity_create_index_another(self, connect, collection, get_simple_index):
        '''
        target: test insert vector to collection_2 after build index for collection_1
        method: build index and insert vector
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        ids = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.create_index(collection_name, field_name, get_simple_index)
        if get_simple_index["index_type"] != "FLAT":
            index = connect.describe_index(collection_name, "")
            create_target_index(get_simple_index, field_name)
            assert index == get_simple_index
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_entity_sleep_create_index_another(self, connect, collection, get_simple_index):
        '''
        target: test insert vector to collection_2 after build index for collection_1 for a while
        method: build index and insert vector
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        ids = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.create_index(collection_name, field_name, get_simple_index)
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_search_entity_insert_entity_another(self, connect, collection):
        '''
        target: test insert entity to collection_1 after search collection_2
        method: search collection and insert entity
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.load_collection(collection)
        # the searched collection is empty, so the result set must be empty
        res = connect.search(collection, default_single_query)
        assert len(res[0]) == 0
        connect.insert(collection_name, default_entity)
        connect.flush([collection_name])
        stats = connect.get_collection_stats(collection_name)
        assert stats[row_count] == 1

    @pytest.mark.skip("xige-16-search-without-insert")
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_insert_entity_search_entity_another(self, connect, collection):
        '''
        target: test insert entity to collection_1 after search collection_2
        method: search collection and insert entity
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        ids = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.load_collection(collection_name)
        # NOTE(review): `res` is never asserted on; the search only checks no error is raised
        res = connect.search(collection_name, default_single_query)
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_entity_sleep_search_entity_another(self, connect, collection):
        '''
        target: test insert entity to collection_1 after search collection_2 a while
        method: search collection, sleep, and insert entity
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        ids = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.load_collection(collection_name)
        res = connect.search(collection_name, default_single_query)
        assert len(res[0]) == 0

    @pytest.mark.timeout(ADD_TIMEOUT)
    def _test_insert_entity_during_release_collection(self, connect, collection):
        '''
        target: test insert entity during release
        method: release collection async, then do insert operation
        expected: insert ok
        '''
        for i in range(10):
            connect.insert(collection, default_entities)
        connect.flush([collection])
        connect.load_collection(collection)

        def release():
            connect.release_collection(collection)

        # NOTE(review): `release()` takes no parameters but the Thread is created
        # with args=(collection,); this would raise TypeError in the worker thread
        # if the test were enabled — fix before removing the leading underscore.
        t = threading.Thread(target=release, args=(collection,))
        t.start()
        ids = connect.insert(collection, default_entities)
        assert len(ids) == default_nb
class TestInsertInvalid(object):
    """
    Test inserting entities with invalid parameters: collection names,
    partition names, field names/types/values and entity ids.
    """

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_collection_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_tag_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_type(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_int_value(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_ints()
    )
    def get_entity_id(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_vectors()
    )
    def get_field_vectors_value(self, request):
        yield request.param

    def test_insert_ids_invalid(self, connect, id_collection, get_entity_id):
        '''
        target: test insert, with using customize ids, which are not int64
        method: create collection and insert entities in it
        expected: raise an exception
        '''
        entity_id = get_entity_id
        ids = [entity_id for _ in range(default_nb)]
        with pytest.raises(Exception):
            connect.insert(id_collection, default_entities, ids)

    def test_insert_with_invalid_collection_name(self, connect, get_collection_name):
        collection_name = get_collection_name
        with pytest.raises(Exception):
            connect.insert(collection_name, default_entity)

    def test_insert_with_invalid_partition_name(self, connect, collection, get_tag_name):
        tag_name = get_tag_name
        connect.create_partition(collection, default_tag)
        # None is treated as "no partition specified" and must NOT raise;
        # every other invalid tag must raise
        if tag_name is not None:
            with pytest.raises(Exception):
                connect.insert(collection, default_entity, partition_name=tag_name)
        else:
            connect.insert(collection, default_entity, partition_name=tag_name)

    def test_insert_with_invalid_field_name(self, connect, collection, get_field_name):
        tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", get_field_name)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)

    def test_insert_with_invalid_field_type(self, connect, collection, get_field_type):
        field_type = get_field_type
        tmp_entity = update_field_type(copy.deepcopy(default_entity), 'float', field_type)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)

    def test_insert_with_invalid_field_value(self, connect, collection, get_field_int_value):
        field_value = get_field_int_value
        tmp_entity = update_field_type(copy.deepcopy(default_entity), 'int64', field_value)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)

    def test_insert_with_invalid_field_entity_value(self, connect, collection, get_field_vectors_value):
        tmp_entity = copy.deepcopy(default_entity)
        # corrupt a single component of the first vector
        src_vector = tmp_entity[-1]["values"]
        src_vector[0][1] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
class TestInsertInvalidBinary(object):
    """
    Test inserting into binary-vector collections with invalid parameters:
    field names/types/values, vector values and entity ids.
    """

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_collection_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_tag_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_type(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_int_value(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_ints()
    )
    def get_entity_id(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_vectors()
    )
    def get_field_vectors_value(self, request):
        yield request.param

    @pytest.mark.level(2)
    def test_insert_with_invalid_field_name(self, connect, binary_collection, get_field_name):
        tmp_entity = update_field_name(copy.deepcopy(default_binary_entity), "int64", get_field_name)
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.level(2)
    def test_insert_with_invalid_field_value(self, connect, binary_collection, get_field_int_value):
        tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', get_field_int_value)
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.level(2)
    def test_insert_with_invalid_field_entity_value(self, connect, binary_collection, get_field_vectors_value):
        tmp_entity = copy.deepcopy(default_binary_entity)
        # replace the first binary vector wholesale with an invalid value
        src_vectors = tmp_entity[-1]["values"]
        src_vectors[0] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.level(2)
    def test_insert_ids_invalid(self, connect, binary_id_collection, get_entity_id):
        '''
        target: test insert, with using customize ids, which are not int64
        method: create collection and insert entities in it
        expected: raise an exception
        '''
        entity_id = get_entity_id
        ids = [entity_id for _ in range(default_nb)]
        with pytest.raises(Exception):
            connect.insert(binary_id_collection, default_binary_entities, ids)

    @pytest.mark.level(2)
    def test_insert_with_invalid_field_type(self, connect, binary_collection, get_field_type):
        field_type = get_field_type
        tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', field_type)
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.level(2)
    def test_insert_with_invalid_field_entities_value(self, connect, binary_collection, get_field_vectors_value):
        tmp_entities = copy.deepcopy(default_binary_entities)
        # corrupt the second vector of the batch
        src_vector = tmp_entities[-1]["values"]
        src_vector[1] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entities)
|
trainer_utils.py | # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for the Trainer and TFTrainer class. Should be independent from PyTorch and TensorFlow.
"""
import copy
import functools
import gc
import inspect
import os
import random
import re
import threading
import time
from typing import Any, Dict, NamedTuple, Optional, Tuple, Union
import numpy as np
from .file_utils import (
ExplicitEnum,
is_psutil_available,
is_sagemaker_dp_enabled,
is_tf_available,
is_torch_available,
is_torch_cuda_available,
is_torch_tpu_available,
)
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
def set_seed(seed: int):
    """
    Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf`` (if
    installed).

    Args:
        seed (:obj:`int`): The seed to set.
    """
    random.seed(seed)
    np.random.seed(seed)
    # frameworks are seeded only when actually installed
    if is_torch_available():
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        # ^^ safe to call this function even if cuda is not available
    if is_tf_available():
        tf.random.set_seed(seed)
class EvalPrediction(NamedTuple):
    """
    Evaluation output (always contains labels), to be used to compute metrics.

    Parameters:
        predictions (:obj:`np.ndarray`): Predictions of the model.
        label_ids (:obj:`np.ndarray`): Targets to be matched.
    """

    # tuple form covers multi-output models (e.g. logits plus auxiliary heads)
    predictions: Union[np.ndarray, Tuple[np.ndarray]]
    label_ids: np.ndarray
class EvalLoopOutput(NamedTuple):
    """Raw output of an evaluation loop: predictions plus optional labels, metrics and sample count."""

    predictions: Union[np.ndarray, Tuple[np.ndarray]]
    label_ids: Optional[np.ndarray]
    metrics: Optional[Dict[str, float]]
    num_samples: Optional[int]
class PredictionOutput(NamedTuple):
    """Output of a predict call: predictions plus optional labels and metrics."""

    predictions: Union[np.ndarray, Tuple[np.ndarray]]
    label_ids: Optional[np.ndarray]
    metrics: Optional[Dict[str, float]]
class TrainOutput(NamedTuple):
    """Summary of a training run: final step, average loss and collected metrics."""

    global_step: int
    training_loss: float
    metrics: Dict[str, float]
PREFIX_CHECKPOINT_DIR = "checkpoint"
_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")


def get_last_checkpoint(folder):
    """Return the path of the highest-numbered ``checkpoint-<N>`` subdirectory of ``folder``, or None."""

    def _step_number(name):
        # Extract the integer suffix; only called on names that already matched.
        return int(_re_checkpoint.search(name).groups()[0])

    candidates = []
    for entry in os.listdir(folder):
        # Keep only directories whose name is exactly "checkpoint-<digits>".
        if _re_checkpoint.search(entry) is not None and os.path.isdir(os.path.join(folder, entry)):
            candidates.append(entry)
    if not candidates:
        return
    latest = max(candidates, key=_step_number)
    return os.path.join(folder, latest)
class IntervalStrategy(ExplicitEnum):
    """How often a periodic action (logging/evaluation/saving) runs: never, every N steps, or every epoch."""

    NO = "no"
    STEPS = "steps"
    EPOCH = "epoch"
class EvaluationStrategy(ExplicitEnum):
    """Same values as :class:`IntervalStrategy`; presumably kept for backward compatibility — confirm before removing."""

    NO = "no"
    STEPS = "steps"
    EPOCH = "epoch"
class BestRun(NamedTuple):
    """
    The best run found by an hyperparameter search (see :class:`~transformers.Trainer.hyperparameter_search`).

    Parameters:
        run_id (:obj:`str`):
            The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending
            with run-{run_id}).
        objective (:obj:`float`):
            The objective that was obtained for this run.
        hyperparameters (:obj:`Dict[str, Any]`):
            The hyperparameters picked to get this run.
    """

    run_id: str
    objective: float
    hyperparameters: Dict[str, Any]
def default_compute_objective(metrics: Dict[str, float]) -> float:
    """
    The default objective to maximize/minimize when doing an hyperparameter search. It is the evaluation loss if no
    metrics are provided to the :class:`~transformers.Trainer`, the sum of all metrics otherwise.

    Args:
        metrics (:obj:`Dict[str, float]`): The metrics returned by the evaluate method.

    Return:
        :obj:`float`: The objective to minimize or maximize
    """
    # Work on a deep copy so the caller's metrics dict is never mutated.
    metrics = copy.deepcopy(metrics)
    loss = metrics.pop("eval_loss", None)
    metrics.pop("epoch", None)
    # Speed metrics are throughput measurements, not model-quality signals — drop them.
    for key in [k for k in metrics if k.endswith("_runtime") or k.endswith("_per_second")]:
        metrics.pop(key, None)
    return sum(metrics.values()) if metrics else loss
def default_hp_space_optuna(trial) -> Dict[str, float]:
    """Default Optuna search space: learning rate, epoch count, seed and per-device batch size."""
    from .integrations import is_optuna_available

    # NOTE(review): ``assert`` is stripped under ``python -O``; presumably acceptable since this only
    # guards a developer-facing misconfiguration — confirm before relying on it in production.
    assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`"
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
        "seed": trial.suggest_int("seed", 1, 40),
        "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]),
    }
def default_hp_space_ray(trial) -> Dict[str, float]:
    """Default Ray Tune search space mirroring :func:`default_hp_space_optuna`."""
    from .integrations import is_ray_tune_available

    assert is_ray_tune_available(), "This function needs ray installed: `pip " "install ray[tune]`"
    from ray import tune

    # ``trial`` is unused: Ray Tune consumes a static dict of samplers rather than a trial object.
    return {
        "learning_rate": tune.loguniform(1e-6, 1e-4),
        "num_train_epochs": tune.choice(list(range(1, 6))),
        "seed": tune.uniform(1, 40),
        "per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]),
    }
class HPSearchBackend(ExplicitEnum):
    """Supported hyperparameter-search backends."""

    OPTUNA = "optuna"
    RAY = "ray"
# Maps each hyperparameter-search backend to its default search-space factory.
default_hp_space = {
    HPSearchBackend.OPTUNA: default_hp_space_optuna,
    HPSearchBackend.RAY: default_hp_space_ray,
}
def is_main_process(local_rank):
    """
    Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on
    `local_rank`.
    """
    if not is_torch_tpu_available():
        # -1 means distributed training is disabled; 0 is the first process of the node.
        return local_rank in (-1, 0)
    import torch_xla.core.xla_model as xm

    return xm.get_ordinal() == 0
def total_processes_number(local_rank):
    """
    Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs.
    """
    # Check environments from most to least specific, returning as soon as one matches.
    if is_torch_tpu_available():
        import torch_xla.core.xla_model as xm

        return xm.xrt_world_size()
    if is_sagemaker_dp_enabled():
        import smdistributed.dataparallel.torch.distributed as dist

        return dist.get_world_size()
    if local_rank != -1 and is_torch_available():
        import torch

        return torch.distributed.get_world_size()
    # No parallelism detected: single-process run.
    return 1
def speed_metrics(split, start_time, num_samples=None, num_steps=None):
    """
    Measure and return speed performance metrics.

    This function requires a time snapshot `start_time` taken before the operation to be measured and should be run
    immediately after that operation has completed.

    Args:
        split (:obj:`str`): Name used to prefix each metric (like train, eval, test...).
        start_time (:obj:`float`): Operation start time, as returned by :func:`time.time`.
        num_samples (:obj:`int`, `optional`): Number of samples processed; enables the samples-per-second metric.
        num_steps (:obj:`int`, `optional`): Number of steps performed; enables the steps-per-second metric.
    """
    elapsed = time.time() - start_time
    metrics = {f"{split}_runtime": round(elapsed, 4)}
    if num_samples is not None:
        metrics[f"{split}_samples_per_second"] = round(num_samples / elapsed, 3)
    if num_steps is not None:
        metrics[f"{split}_steps_per_second"] = round(num_steps / elapsed, 3)
    return metrics
class SchedulerType(ExplicitEnum):
    """Named learning-rate scheduler types selectable by configuration."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
class TrainerMemoryTracker:
    """
    A helper class that tracks cpu and gpu memory.

    This class will silently skip unless ``psutil`` is available. Install with ``pip install psutil``.

    When a stage completes, it can pass metrics dict to update with the memory metrics gathered during this stage.

    Example ::

        self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
        self._memory_tracker.start()
        # code ...
        metrics = {"train_runtime": 10.5}
        self._memory_tracker.stop_and_update_metrics(metrics)

    At the moment GPU tracking is only for ``pytorch``, but can be extended to support ``tensorflow``.

    To understand this class' intricacies please read the documentation of :meth:`~transformers.Trainer.log_metrics`.
    """

    # map trainer methods to metrics prefix
    stages = {
        "__init__": "init",
        "train": "train",
        "evaluate": "eval",
        "predict": "test",
    }

    def __init__(self, skip_memory_metrics=False):
        self.skip_memory_metrics = skip_memory_metrics
        if not is_psutil_available():
            # soft dependency on psutil
            self.skip_memory_metrics = True
        if self.skip_memory_metrics:
            # Tracking disabled: leave the object mostly uninitialized; every public
            # method checks this flag first and returns early.
            return
        import psutil  # noqa

        if is_torch_cuda_available():
            import torch

            self.torch = torch
            self.gpu = {}
        else:
            # ``self.torch is None`` is the flag used everywhere below to skip GPU tracking.
            self.torch = None
        self.process = psutil.Process()
        self.cur_stage = None
        self.cpu = {}
        self.init_reported = False

    def derive_stage(self):
        """derives the stage/caller name automatically"""
        # Two frames up: derive_stage() <- start()/stop_and_update_metrics() <- the trainer
        # method (__init__/train/evaluate/predict) whose name keys into ``self.stages``.
        caller = inspect.currentframe().f_back.f_back.f_code.co_name
        if caller in self.stages:
            return self.stages[caller]
        else:
            raise ValueError(
                f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}"
            )

    def cpu_mem_used(self):
        """get resident set size memory for the current process"""
        return self.process.memory_info().rss

    def peak_monitor_func(self):
        # Runs on a daemon thread; busy-polls RSS so that short-lived peaks are not missed.
        # NOTE(review): this loop keeps one CPU core busy for the duration of a tracked stage.
        self.cpu_mem_used_peak = -1
        while True:
            self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            # time.sleep(0.001) # 1msec
            if not self.peak_monitoring:
                break

    def start(self):
        """start tracking for the caller's stage"""
        if self.skip_memory_metrics:
            return
        stage = self.derive_stage()
        # deal with nested calls of eval during train - simply ignore those
        if self.cur_stage is not None and self.cur_stage != stage:
            return
        self.cur_stage = stage
        # Collect garbage first so the baseline excludes memory held by already-dead objects.
        gc.collect()
        if self.torch is not None:
            self.torch.cuda.reset_peak_memory_stats()
            self.torch.cuda.empty_cache()
        # gpu
        if self.torch is not None:
            self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated()
        # cpu
        self.cpu_mem_used_at_start = self.cpu_mem_used()
        self.peak_monitoring = True
        peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
        peak_monitor_thread.daemon = True
        peak_monitor_thread.start()

    def stop(self, stage):
        """stop tracking for the passed stage"""
        # deal with nested calls of eval during train - simply ignore those
        if self.cur_stage is not None and self.cur_stage != stage:
            return
        # this sends a signal to peak_monitor_func to complete its loop
        self.peak_monitoring = False
        # first ensure all objects get collected and their memory is freed
        gc.collect()
        if self.torch is not None:
            self.torch.cuda.empty_cache()
        # concepts:
        # - alloc_delta: the difference of allocated memory between the end and the start
        # - peaked_delta: the difference between the peak memory and the current memory
        # in order to know how much memory the measured code consumed one needs to sum these two
        # gpu
        if self.torch is not None:
            self.gpu_mem_used_now = self.torch.cuda.memory_allocated()
            self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated()
            self.gpu[self.cur_stage] = dict(
                alloc=(self.gpu_mem_used_now - self.gpu_mem_used_at_start),
                peaked=max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now),
            )
        # cpu
        # NOTE(review): ``cpu_mem_used_peak`` is written by the monitor thread; if that thread never
        # got scheduled it could still be -1 here — confirm whether a join/handshake is needed.
        self.cpu_mem_used_now = self.cpu_mem_used()
        self.cpu[self.cur_stage] = dict(
            alloc=(self.cpu_mem_used_now - self.cpu_mem_used_at_start),
            peaked=max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now),
        )
        # reset - cycle finished
        self.cur_stage = None

    def update_metrics(self, stage, metrics):
        """updates the metrics"""
        if self.skip_memory_metrics:
            return
        # deal with nested calls of eval during train - simply ignore those
        if self.cur_stage is not None and self.cur_stage != stage:
            return
        # since we don't have a way to return init metrics, we push them into the first of train/val/predict
        stages = [stage]
        if not self.init_reported:
            stages.insert(0, "init")
            self.init_reported = True
        # note: the loop variable deliberately shadows the ``stage`` parameter from here on
        for stage in stages:
            for t in ["alloc", "peaked"]:
                if stage in self.cpu and t in self.cpu[stage]:
                    metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t]
                if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
                    metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t]

    def stop_and_update_metrics(self, metrics=None):
        """combine stop and metrics update in one call for simpler code"""
        if self.skip_memory_metrics:
            return
        stage = self.derive_stage()
        self.stop(stage)
        # init doesn't have metrics to update so we just save that data for later stages to retrieve
        if metrics is not None:
            self.update_metrics(stage, metrics)
def denumpify_detensorize(metrics):
    """
    Recursively convert numpy scalars and one-element torch tensors in ``metrics`` to plain Python values.
    """
    if isinstance(metrics, (list, tuple)):
        # Rebuild the same container type with converted elements.
        return type(metrics)(denumpify_detensorize(m) for m in metrics)
    if isinstance(metrics, dict):
        return type(metrics)({k: denumpify_detensorize(v) for k, v in metrics.items()})
    if isinstance(metrics, np.generic):
        # numpy scalar -> plain Python scalar
        return metrics.item()
    if is_torch_available() and isinstance(metrics, torch.Tensor) and metrics.numel() == 1:
        # one-element tensor -> plain Python scalar
        return metrics.item()
    return metrics
def number_of_arguments(func):
    """
    Return the number of arguments of the passed function, even if it's a partial function.
    """
    if not isinstance(func, functools.partial):
        return len(inspect.signature(func).parameters)
    # For partials, count the wrapped function's parameters minus those already bound.
    bound = len(func.args) + len(func.keywords)
    return len(inspect.signature(func.func).parameters) - bound
class ShardedDDPOption(ExplicitEnum):
    """Flags selecting a sharded data-parallel training mode and its options."""

    SIMPLE = "simple"
    ZERO_DP_2 = "zero_dp_2"
    ZERO_DP_3 = "zero_dp_3"
    OFFLOAD = "offload"
    AUTO_WRAP = "auto_wrap"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.